xref: /illumos-gate/usr/src/uts/common/fs/zfs/rrwlock.c (revision e914ace2)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads who have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers who have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller who has already obtained a read lock to be able to
 * then grab a write lock without first dropping all read locks, and
 * vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed.  Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'.  Nodes on the
 * tsd list can each refer to a different rrwlock_t, which allows a thread
 * to hold read locks on multiple distinct rrwlock_ts at the same time.
 *
 * Since using tsd incurs some overhead, the rrwlock_t only keeps
 * tsd data when writers are waiting.  If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * is needed.  Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * reader doesn't know whether its acquisition is re-entrant.  But since it
 * may be, we allow the read to proceed (otherwise it could deadlock).
 * Since readers no longer bump the anonymous count once writers are
 * waiting, the anonymous readers will eventually flush themselves out.
 * At that point, readers can tell whether their acquisition is re-entrant
 * (they have a rrw_node_t entry for the lock) or not.  If it is, we must
 * let them proceed.  If it is not, then the reader blocks behind the
 * waiting writers.  Hence, we do not starve writers.
 */
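
/*
 * A minimal usage sketch (illustrative only; FTAG is the standard ZFS
 * tag macro, and 'rrl' here is a local example, not a lock defined in
 * this file):
 *
 *	rrwlock_t rrl;
 *
 *	rrw_init(&rrl, B_FALSE);
 *	rrw_enter(&rrl, RW_READER, FTAG);	first read hold
 *	rrw_enter(&rrl, RW_READER, FTAG);	re-entrant read; succeeds
 *						even with writers waiting
 *	rrw_exit(&rrl, FTAG);
 *	rrw_exit(&rrl, FTAG);			last hold dropped; writers
 *						may now proceed
 *	rrw_destroy(&rrl);
 */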

/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	zfs_refcount_create(&rrl->rr_anon_rcount);
	zfs_refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	zfs_refcount_destroy(&rrl->rr_anon_rcount);
	zfs_refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	/*
	 * Fast path: no writer holds or wants the lock and no tracking
	 * is required, so just bump the anonymous reader count directly.
	 */
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests.  If we
 * want to take a lock re-entrantly, but from different threads (that have
 * a relationship to each other), the normal detection mechanism to overrule
 * the pending writer does not work, so we have to give an explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}
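
/*
 * A hypothetical sketch of when the priority variant is needed: a parent
 * thread that already holds the read lock blocks on a helper thread, and
 * the helper also needs the read lock.  The helper has no rrw_node_t of
 * its own, so with a writer waiting it would block in rrw_enter_read()
 * and deadlock against its parent; it must use rrw_enter_read_prio():
 *
 *	parent:	rrw_enter_read(&rrl, FTAG);
 *		... dispatch work to the helper, wait for completion ...
 *	helper:	rrw_enter_read_prio(&rrl, FTAG);
 *		... do work ...
 *		rrw_exit(&rrl, FTAG);
 */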

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
#if !defined(DEBUG) && defined(_KERNEL)
	/*
	 * Fast path: no writer and no linked (tracked) readers, so this
	 * must be an anonymous read hold; drop the count directly.
	 */
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = zfs_refcount_remove(
			    &rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
		    zfs_refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader.  Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}
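
/*
 * For example (illustrative only), a lock created with
 * rrw_init(&rrl, B_TRUE) tracks every reader, so an assertion such as
 *
 *	ASSERT(rrw_held(&rrl, RW_READER));
 *
 * verifies that the current thread holds a read lock; without track_all
 * it could pass merely because some other thread holds one anonymously.
 */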

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}

/*
 * A reader-mostly lock implementation, layered on the reader/writer locks
 * above, optimized for highly parallel read acquisitions at the cost of
 * pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader locks only one of them for read, chosen by a simple hash
 * function.  That proportionally reduces lock congestion.  A writer, in
 * turn, has to sequentially acquire the write lock on all of the locks.
 * That makes write acquisition proportionally slower, but in the places
 * where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
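
/*
 * A minimal usage sketch (illustrative only; FTAG is the standard ZFS
 * tag macro):
 *
 *	rrmlock_t rrm;
 *
 *	rrm_init(&rrm, B_FALSE);
 *	rrm_enter(&rrm, RW_READER, FTAG);	locks 1 of RRM_NUM_LOCKS
 *	rrm_exit(&rrm, FTAG);			same thread must unlock
 *	rrm_enter(&rrm, RW_WRITER, FTAG);	locks all RRM_NUM_LOCKS
 *	rrm_exit(&rrm, FTAG);
 *	rrm_destroy(&rrm);
 */
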
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock.  Note that the lock
 * must be released by the same thread that acquired it.  We do this
 * mapping by taking the thread pointer mod a prime number.  We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
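
/*
 * For example, assuming RRM_NUM_LOCKS is the small prime 17 (its value
 * in rrwlock.h at the time of writing), a curthread pointer of
 * 0xffffff0123456780 hashes to 0x23456780 % 17 == 591751040 % 17 == 12,
 * so that thread always uses locks[12] for its read acquisitions.
 */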

void
rrm_enter_read(rrmlock_t *rrl, void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}