/* xref: /illumos-gate/usr/src/lib/libc/port/threads/rwlock.c (revision 4a38094c) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/sdt.h>

#define	TRY_FLAG		0x10
#define	READ_LOCK		0
#define	WRITE_LOCK		1
#define	READ_LOCK_TRY		(READ_LOCK | TRY_FLAG)
#define	WRITE_LOCK_TRY		(WRITE_LOCK | TRY_FLAG)

#define	NLOCKS	4	/* initial number of readlock_t structs allocated */

#define	ASSERT_CONSISTENT_STATE(readers)		\
	ASSERT(!((readers) & URW_WRITE_LOCKED) ||	\
		((readers) & ~URW_HAS_WAITERS) == URW_WRITE_LOCKED)

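/*
 * A worked example of the consistency check above (illustrative only):
 * a state word of (URW_WRITE_LOCKED | URW_HAS_WAITERS) passes the
 * assertion, while (URW_WRITE_LOCKED | 1) -- a writer plus a nonzero
 * reader count -- fails it, because a writer and readers can never
 * legitimately hold the lock at the same time.
 */
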
/*
 * Find/allocate an entry for rwlp in our array of rwlocks held for reading.
 * We must be deferring signals for this to be safe.
 * Otherwise, if we are returning an entry with ul_rdlockcnt == 0,
 * it could be reassigned behind our back in a signal handler.
 */
static readlock_t *
rwl_entry(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	readlock_t *remembered = NULL;
	readlock_t *readlockp;
	uint_t nlocks;

	/* we must be deferring signals */
	ASSERT((self->ul_critical + self->ul_sigdefer) != 0);

	if ((nlocks = self->ul_rdlockcnt) != 0)
		readlockp = self->ul_readlock.array;
	else {
		nlocks = 1;
		readlockp = &self->ul_readlock.single;
	}

	for (; nlocks; nlocks--, readlockp++) {
		if (readlockp->rd_rwlock == rwlp)
			return (readlockp);
		if (readlockp->rd_count == 0 && remembered == NULL)
			remembered = readlockp;
	}
	if (remembered != NULL) {
		remembered->rd_rwlock = rwlp;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting the single
	 * readlock_t entry into an array of readlock_t entries if necessary.
	 */
	if ((nlocks = self->ul_rdlockcnt) == 0) {
		/*
		 * Initial allocation of the readlock_t array.
		 * Convert the single entry into an array.
		 */
		self->ul_rdlockcnt = nlocks = NLOCKS;
		readlockp = lmalloc(nlocks * sizeof (readlock_t));
		/*
		 * The single readlock_t becomes the first entry in the array.
		 */
		*readlockp = self->ul_readlock.single;
		self->ul_readlock.single.rd_count = 0;
		self->ul_readlock.array = readlockp;
		/*
		 * Return the next available entry in the array.
		 */
		(++readlockp)->rd_rwlock = rwlp;
		return (readlockp);
	}
	/*
	 * Reallocate the array, double the size each time.
	 */
	readlockp = lmalloc(nlocks * 2 * sizeof (readlock_t));
	(void) memcpy(readlockp, self->ul_readlock.array,
	    nlocks * sizeof (readlock_t));
	lfree(self->ul_readlock.array, nlocks * sizeof (readlock_t));
	self->ul_readlock.array = readlockp;
	self->ul_rdlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	(readlockp += nlocks)->rd_rwlock = rwlp;
	return (readlockp);
}

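/*
 * Illustrative growth sequence for the code above (not executed here):
 * the first rwlock a thread read-locks uses the embedded 'single' entry;
 * the second distinct rwlock converts it into an array of NLOCKS (4)
 * entries, with the single entry copied into slot 0; once all four slots
 * are in use, the array doubles to 8 entries, then 16, and so on.
 */
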
/*
 * Free the array of rwlocks held for reading.
 */
void
rwl_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_rdlockcnt) != 0)
		lfree(ulwp->ul_readlock.array, nlocks * sizeof (readlock_t));
	ulwp->ul_rdlockcnt = 0;
	ulwp->ul_readlock.single.rd_rwlock = NULL;
	ulwp->ul_readlock.single.rd_count = 0;
}

/*
 * Check if a reader version of the lock is held by the current thread.
 */
#pragma weak _rw_read_held = rw_read_held
int
rw_read_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	readlock_t *readlockp;
	uint_t nlocks;
	int rval = 0;

	no_preempt(self);

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (!(readers & URW_WRITE_LOCKED) &&
	    (readers & URW_READERS_MASK) != 0) {
		/*
		 * The lock is held for reading by some thread.
		 * Search our array of rwlocks held for reading for a match.
		 */
		if ((nlocks = self->ul_rdlockcnt) != 0)
			readlockp = self->ul_readlock.array;
		else {
			nlocks = 1;
			readlockp = &self->ul_readlock.single;
		}
		for (; nlocks; nlocks--, readlockp++) {
			if (readlockp->rd_rwlock == rwlp) {
				if (readlockp->rd_count)
					rval = 1;
				break;
			}
		}
	}

	preempt(self);
	return (rval);
}

/*
 * Check if a writer version of the lock is held by the current thread.
 */
#pragma weak _rw_write_held = rw_write_held
int
rw_write_held(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	int rval;

	no_preempt(self);

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	rval = ((readers & URW_WRITE_LOCKED) &&
	    rwlp->rwlock_owner == (uintptr_t)self &&
	    (rwlp->rwlock_type == USYNC_THREAD ||
	    rwlp->rwlock_ownerpid == self->ul_uberdata->pid));

	preempt(self);
	return (rval);
}

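/*
 * A typical (hypothetical) use of these two predicates is in caller
 * assertions, e.g.:
 *
 *	ASSERT(rw_read_held(&tbl_lock));
 *
 * where 'tbl_lock' stands in for some rwlock the calling thread is
 * about to rely on.  Note that both predicates answer only for the
 * calling thread, not for lock ownership in general.
 */
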
#pragma weak _rwlock_init = rwlock_init
int
rwlock_init(rwlock_t *rwlp, int type, void *arg __unused)
{
	ulwp_t *self = curthread;

	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	/*
	 * Once reinitialized, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	(void) memset(rwlp, 0, sizeof (*rwlp));
	rwlp->rwlock_type = (uint16_t)type;
	rwlp->rwlock_magic = RWL_MAGIC;
	rwlp->mutex.mutex_type = (uint8_t)type;
	rwlp->mutex.mutex_flag = LOCK_INITED;
	rwlp->mutex.mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their rwlocks
	 * (and don't check the return code from rwlock_init),
	 * we put it here, after initializing the rwlock regardless.
	 */
	if (((uintptr_t)rwlp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}

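/*
 * A minimal usage sketch (hypothetical caller code, not part of libc):
 *
 *	static rwlock_t lock;
 *
 *	if (rwlock_init(&lock, USYNC_THREAD, NULL) != 0)
 *		abort();
 *
 * USYNC_PROCESS would be passed instead for a lock placed in memory
 * shared between processes; the final argument is unused.
 */
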
#pragma weak pthread_rwlock_destroy = rwlock_destroy
#pragma weak _rwlock_destroy = rwlock_destroy
int
rwlock_destroy(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;

	/*
	 * Once destroyed, we can no longer be holding a read or write lock.
	 * We can do nothing about other threads that are holding read locks.
	 */
	sigoff(self);
	rwl_entry(rwlp)->rd_count = 0;
	sigon(self);
	rwlp->rwlock_magic = 0;
	tdb_sync_obj_deregister(rwlp);
	return (0);
}

/*
 * The following four functions:
 *	read_lock_try()
 *	read_unlock_try()
 *	write_lock_try()
 *	write_unlock_try()
 * lie at the heart of the fast-path code for rwlocks,
 * both process-private and process-shared.
 *
 * They are called once without recourse to any other locking primitives.
 * If they succeed, we are done and the fast-path code was successful.
 * If they fail, we have to deal with lock queues, either to enqueue
 * ourself and sleep or to dequeue and wake up someone else (slow paths).
 *
 * Unless 'ignore_waiters_flag' is true (a condition that applies only
 * when read_lock_try() or write_lock_try() is called from code that
 * is already in the slow path and has already acquired the queue lock),
 * these functions will always fail if the waiters flag, URW_HAS_WAITERS,
 * is set in the 'rwstate' word.  Thus, setting the waiters flag on the
 * rwlock and acquiring the queue lock guarantees exclusive access to
 * the rwlock (and is the only way to guarantee exclusive access).
 */

/*
 * Attempt to acquire a readers lock.  Return true on success.
 */
static int
read_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag ?
	    URW_WRITE_LOCKED : (URW_HAS_WAITERS | URW_WRITE_LOCKED);
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers + 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to release a reader lock.  Return true on success.
 */
static int
read_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, readers - 1) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to acquire a writer lock.  Return true on success.
 */
static int
write_lock_try(rwlock_t *rwlp, int ignore_waiters_flag)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t mask = ignore_waiters_flag ?
	    (URW_WRITE_LOCKED | URW_READERS_MASK) :
	    (URW_HAS_WAITERS | URW_WRITE_LOCKED | URW_READERS_MASK);
	ulwp_t *self = curthread;
	uint32_t readers;

	no_preempt(self);
	while (((readers = *rwstate) & mask) == 0) {
		if (atomic_cas_32(rwstate, readers, readers | URW_WRITE_LOCKED)
		    == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

/*
 * Attempt to release a writer lock.  Return true on success.
 */
static int
write_unlock_try(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;

	no_preempt(self);
	while (((readers = *rwstate) & URW_HAS_WAITERS) == 0) {
		if (atomic_cas_32(rwstate, readers, 0) == readers) {
			preempt(self);
			return (1);
		}
	}
	preempt(self);
	return (0);
}

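/*
 * An illustrative trace of the fast-path transitions performed on the
 * 'rwstate' word by the four functions above (reader count in the low
 * bits):
 *
 *	read_lock_try():	2 readers -> 3 readers
 *	read_unlock_try():	3 readers -> 2 readers
 *	write_lock_try():	0 -> URW_WRITE_LOCKED
 *	write_unlock_try():	URW_WRITE_LOCKED -> 0
 *
 * Each transition is a single atomic_cas_32(); once URW_HAS_WAITERS is
 * set, every attempt falls through to the slow path unless the caller
 * is already in the slow path holding the queue lock.
 */
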
/*
 * Release a process-private rwlock and wake up any thread(s) sleeping on it.
 * This is called when a thread releases a lock that appears to have waiters.
 */
static void
rw_queue_release(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	queue_head_t *qp;
	uint32_t readers;
	uint32_t writer;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int nlwpid = 0;
	int more;
	int maxlwps = MAXLWPS;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	qp = queue_lock(rwlp, MX);

	/*
	 * Here is where we actually drop the lock,
	 * but we retain the URW_HAS_WAITERS flag, if it is already set.
	 */
	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED)	/* drop the writer lock */
		atomic_and_32(rwstate, ~URW_WRITE_LOCKED);
	else				/* drop the readers lock */
		atomic_dec_32(rwstate);
	if (!(readers & URW_HAS_WAITERS)) {	/* no waiters */
		queue_unlock(qp);
		return;
	}

	/*
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Therefore, the rest of this code is safe because we are
	 * holding the queue lock and the URW_HAS_WAITERS flag is set.
	 */

	readers = *rwstate;		/* must fetch the value again */
	ASSERT_CONSISTENT_STATE(readers);
	ASSERT(readers & URW_HAS_WAITERS);
	readers &= URW_READERS_MASK;	/* count of current readers */
	writer = 0;			/* no current writer */

	/*
	 * Examine the queue of waiters in priority order and prepare
	 * to wake up as many readers as we encounter before encountering
	 * a writer.  If the highest priority thread on the queue is a
	 * writer, stop there and wake it up.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	while ((ulwpp = queue_slot(qp, &prev, &more)) != NULL) {
		ulwp = *ulwpp;
		ASSERT(ulwp->ul_wchan == rwlp);
		if (ulwp->ul_writer) {
			if (writer != 0 || readers != 0)
				break;
			/* one writer to wake */
			writer++;
		} else {
			if (writer != 0)
				break;
			/* at least one reader to wake */
			readers++;
			if (nlwpid == maxlwps)
				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		}
		queue_unlink(qp, ulwpp, prev);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (writer) {
			/*
			 * Hand off the lock to the writer we will be waking.
			 */
			ASSERT((*rwstate & ~URW_HAS_WAITERS) == 0);
			atomic_or_32(rwstate, URW_WRITE_LOCKED);
			rwlp->rwlock_owner = (uintptr_t)ulwp;
		}
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	/*
	 * This modification of rwstate must be done last.
	 * The presence of the URW_HAS_WAITERS flag causes all rwlock
	 * code to go through the slow path, acquiring queue_lock(qp).
	 * Otherwise the read_lock_try() and write_lock_try() fast paths
	 * are effective.
	 */
	if (ulwpp == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		ulwp_t *self = curthread;
		no_preempt(self);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(self);
	}
	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}

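/*
 * A worked example of the wakeup policy above (illustrative): with no
 * current readers or writer and a priority-ordered queue of R1 R2 W1 R3,
 * readers R1 and R2 are unparked together and W1 stays queued; with a
 * queue of W1 R1 R2, only W1 is unparked, and the lock is handed off to
 * it directly (URW_WRITE_LOCKED and rwlock_owner are set on its behalf
 * before it runs).
 */
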
/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-shared (USYNC_PROCESS) rwlocks.
 *
 * Note: if the lock appears to be contended we call __lwp_rwlock_rdlock()
 * or __lwp_rwlock_wrlock() holding the mutex. These return with the mutex
 * released, and if they need to sleep will release the mutex first. In the
 * event of a spurious wakeup, these will return EAGAIN (because it is much
 * easier for us to re-acquire the mutex here).
 */
int
shared_rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	mutex_t *mp = &rwlp->mutex;
	int try_flag;
	int error;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	do {
		if (try_flag && (*rwstate & URW_WRITE_LOCKED)) {
			error = EBUSY;
			break;
		}
		if ((error = mutex_lock(mp)) != 0)
			break;
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		} else {
			if (write_lock_try(rwlp, 0)) {
				(void) mutex_unlock(mp);
				break;
			}
		}
		atomic_or_32(rwstate, URW_HAS_WAITERS);

#ifdef THREAD_DEBUG
		uint32_t readers;
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
#endif
		/*
		 * The calls to __lwp_rwlock_*() below will release the mutex,
		 * so we need a dtrace probe here.  The owner field of the
		 * mutex is cleared in the kernel when the mutex is released,
		 * so we should not clear it here.
		 */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		/*
		 * The waiters bit may be inaccurate.
		 * Only the kernel knows for sure.
		 */
		if (rd_wr == READ_LOCK) {
			if (try_flag)
				error = __lwp_rwlock_tryrdlock(rwlp);
			else
				error = __lwp_rwlock_rdlock(rwlp, tsp);
		} else {
			if (try_flag)
				error = __lwp_rwlock_trywrlock(rwlp);
			else
				error = __lwp_rwlock_wrlock(rwlp, tsp);
		}
	} while (error == EAGAIN || error == EINTR);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}

/*
 * Common code for rdlock, timedrdlock, wrlock, timedwrlock, tryrdlock,
 * and trywrlock for process-private (USYNC_THREAD) rwlocks.
 */
int
rwlock_lock(rwlock_t *rwlp, timespec_t *tsp, int rd_wr)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	queue_head_t *qp;
	ulwp_t *ulwp;
	int try_flag;
	int ignore_waiters_flag;
	int error = 0;

	try_flag = (rd_wr & TRY_FLAG);
	rd_wr &= ~TRY_FLAG;
	ASSERT(rd_wr == READ_LOCK || rd_wr == WRITE_LOCK);

	if (!try_flag) {
		DTRACE_PROBE2(plockstat, rw__block, rwlp, rd_wr);
	}

	qp = queue_lock(rwlp, MX);
	/* initial attempt to acquire the lock fails if there are waiters */
	ignore_waiters_flag = 0;
	while (error == 0) {
		if (rd_wr == READ_LOCK) {
			if (read_lock_try(rwlp, ignore_waiters_flag))
				break;
		} else {
			if (write_lock_try(rwlp, ignore_waiters_flag))
				break;
		}
		/* subsequent attempts do not fail due to waiters */
		ignore_waiters_flag = 1;
		atomic_or_32(rwstate, URW_HAS_WAITERS);
		readers = *rwstate;
		ASSERT_CONSISTENT_STATE(readers);
		if ((readers & URW_WRITE_LOCKED) ||
		    (rd_wr == WRITE_LOCK &&
		    (readers & URW_READERS_MASK) != 0))
			/* EMPTY */;	/* somebody holds the lock */
		else if ((ulwp = queue_waiter(qp)) == NULL) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
			continue;	/* no queued waiters, start over */
		} else {
			/*
			 * Do a priority check on the queued waiter (the
			 * highest priority thread on the queue) to see
			 * if we should defer to it or just grab the lock.
			 */
			int our_pri = real_priority(self);
			int his_pri = real_priority(ulwp);

			if (rd_wr == WRITE_LOCK) {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours.
				 */
				if (his_pri <= our_pri) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			} else {
				/*
				 * We defer to a queued thread that has
				 * a higher priority than ours or that
				 * is a writer whose priority equals ours.
				 */
				if (his_pri < our_pri ||
				    (his_pri == our_pri && !ulwp->ul_writer)) {
					/*
					 * Don't defer, just grab the lock.
					 */
					continue;
				}
			}
		}
		/*
		 * We are about to block.
		 * If we're doing a trylock, return EBUSY instead.
		 */
		if (try_flag) {
			error = EBUSY;
			break;
		}
		/*
		 * Enqueue writers ahead of readers.
		 */
		self->ul_writer = rd_wr;	/* *must* be 0 or 1 */
		enqueue(qp, self, 0);
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if ((error = __lwp_park(tsp, 0)) == EINTR)
			error = 0;
		set_parking_flag(self, 0);
		qp = queue_lock(rwlp, MX);
		if (self->ul_sleepq && dequeue_self(qp) == 0) {
			atomic_and_32(rwstate, ~URW_HAS_WAITERS);
			ignore_waiters_flag = 0;
		}
		self->ul_writer = 0;
		if (rd_wr == WRITE_LOCK &&
		    (*rwstate & URW_WRITE_LOCKED) &&
		    rwlp->rwlock_owner == (uintptr_t)self) {
			/*
			 * We acquired the lock by hand-off
			 * from the previous owner.
			 */
			error = 0;	/* timedlock did not fail */
			break;
		}
	}

	/*
	 * Make one final check to see if there are any threads left
	 * on the rwlock queue.  Clear the URW_HAS_WAITERS flag if not.
	 */
	if (qp->qh_root == NULL || qp->qh_root->qr_head == NULL)
		atomic_and_32(rwstate, ~URW_HAS_WAITERS);

	queue_unlock(qp);

	if (!try_flag) {
		DTRACE_PROBE3(plockstat, rw__blocked, rwlp, rd_wr, error == 0);
	}

	return (error);
}

int
rw_rdlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	readlock_t *readlockp;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_rdlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, READ_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, READ_LOCK);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK, error);
	}

	return (error);
}

#pragma weak pthread_rwlock_rdlock = rw_rdlock
#pragma weak _rw_rdlock = rw_rdlock
int
rw_rdlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_rdlock_impl(rwlp, NULL));
}

void
lrw_rdlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_rdlock_impl(rwlp, NULL);
}

int
pthread_rwlock_reltimedrdlock_np(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_timedrdlock(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_rdlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

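/*
 * A sketch of a caller using the absolute-timeout interface
 * (hypothetical application code; per POSIX the deadline is measured
 * against CLOCK_REALTIME):
 *
 *	struct timespec abst;
 *	(void) clock_gettime(CLOCK_REALTIME, &abst);
 *	abst.tv_sec += 5;
 *	if (pthread_rwlock_timedrdlock(&rwl, &abst) == ETIMEDOUT)
 *		handle_timeout();
 *
 * The _np (non-portable) variants take a relative duration instead of
 * an absolute deadline.
 */
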
int
rw_wrlock_impl(rwlock_t *rwlp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	/*
	 * If we hold a readers lock on this rwlock, bail out.
	 */
	if (rw_read_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the readers lock");
		error = EDEADLK;
		goto out;
	}

	/*
	 * If we hold the writer lock, bail out.
	 */
	if (rw_write_held(rwlp)) {
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_wrlock",
			    "calling thread owns the writer lock");
		error = EDEADLK;
		goto out;
	}

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, tsp, WRITE_LOCK);
	else						/* user-level */
		error = rwlock_lock(rwlp, tsp, WRITE_LOCK);

out:
	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp) {
			tdb_incr(rwsp->rw_wrlock);
			rwsp->rw_wrlock_begin_hold = gethrtime();
		}
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK, error);
	}
	return (error);
}

#pragma weak pthread_rwlock_wrlock = rw_wrlock
#pragma weak _rw_wrlock = rw_wrlock
int
rw_wrlock(rwlock_t *rwlp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (rw_wrlock_impl(rwlp, NULL));
}

void
lrw_wrlock(rwlock_t *rwlp)
{
	enter_critical(curthread);
	(void) rw_wrlock_impl(rwlp, NULL);
}

int
pthread_rwlock_reltimedwrlock_np(pthread_rwlock_t *_RESTRICT_KYWD rwlp,
    const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal = *reltime;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlp, const timespec_t *abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = rw_wrlock_impl((rwlock_t *)rwlp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

#pragma weak pthread_rwlock_tryrdlock = rw_tryrdlock
int
rw_tryrdlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	readlock_t *readlockp;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_rdlock_try);

	/*
	 * If we already hold a readers lock on this rwlock,
	 * just increment our reference count and return.
	 */
	sigoff(self);
	readlockp = rwl_entry(rwlp);
	if (readlockp->rd_count != 0) {
		if (readlockp->rd_count == READ_LOCK_MAX) {
			sigon(self);
			error = EAGAIN;
			goto out;
		}
		sigon(self);
		error = 0;
		goto out;
	}
	sigon(self);

	if (read_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, READ_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, READ_LOCK_TRY);

out:
	if (error == 0) {
		sigoff(self);
		rwl_entry(rwlp)->rd_count++;
		sigon(self);
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, READ_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_rdlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, READ_LOCK,
			    error);
		}
	}

	return (error);
}

#pragma weak pthread_rwlock_trywrlock = rw_trywrlock
int
rw_trywrlock(rwlock_t *rwlp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp = RWLOCK_STATS(rwlp, udp);
	int error;

	ASSERT(!self->ul_critical || self->ul_bindflags);

	if (rwsp)
		tdb_incr(rwsp->rw_wrlock_try);

	if (write_lock_try(rwlp, 0))
		error = 0;
	else if (rwlp->rwlock_type == USYNC_PROCESS)	/* kernel-level */
		error = shared_rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);
	else						/* user-level */
		error = rwlock_lock(rwlp, NULL, WRITE_LOCK_TRY);

	if (error == 0) {
		rwlp->rwlock_owner = (uintptr_t)self;
		if (rwlp->rwlock_type == USYNC_PROCESS)
			rwlp->rwlock_ownerpid = udp->pid;
		if (rwsp)
			rwsp->rw_wrlock_begin_hold = gethrtime();
		DTRACE_PROBE2(plockstat, rw__acquire, rwlp, WRITE_LOCK);
	} else {
		if (rwsp)
			tdb_incr(rwsp->rw_wrlock_try_fail);
		if (error != EBUSY) {
			DTRACE_PROBE3(plockstat, rw__error, rwlp, WRITE_LOCK,
			    error);
		}
	}
	return (error);
}

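/*
 * Trylock callers should treat EBUSY as an expected outcome rather than
 * a failure; a hypothetical sketch:
 *
 *	if ((error = pthread_rwlock_trywrlock(&rwl)) == 0) {
 *		update_shared_state();
 *		(void) pthread_rwlock_unlock(&rwl);
 *	} else if (error == EBUSY) {
 *		do_something_else();
 *	}
 *
 * This is why the rw__error DTrace probes above deliberately skip
 * EBUSY for the try variants.
 */
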
#pragma weak pthread_rwlock_unlock = rw_unlock
#pragma weak _rw_unlock = rw_unlock
int
rw_unlock(rwlock_t *rwlp)
{
	volatile uint32_t *rwstate = (volatile uint32_t *)&rwlp->rwlock_readers;
	uint32_t readers;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_rwlock_stats_t *rwsp;
	int rd_wr;

	readers = *rwstate;
	ASSERT_CONSISTENT_STATE(readers);
	if (readers & URW_WRITE_LOCKED) {
		rd_wr = WRITE_LOCK;
		readers = 0;
	} else {
		rd_wr = READ_LOCK;
		readers &= URW_READERS_MASK;
	}

	if (rd_wr == WRITE_LOCK) {
		/*
		 * Since the writer lock is held, we'd better be
		 * holding it, else we cannot legitimately be here.
		 */
		if (!rw_write_held(rwlp)) {
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "writer lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		if ((rwsp = RWLOCK_STATS(rwlp, udp)) != NULL) {
			if (rwsp->rw_wrlock_begin_hold)
				rwsp->rw_wrlock_hold_time +=
				    gethrtime() - rwsp->rw_wrlock_begin_hold;
			rwsp->rw_wrlock_begin_hold = 0;
		}
		rwlp->rwlock_owner = 0;
		rwlp->rwlock_ownerpid = 0;
	} else if (readers > 0) {
		/*
		 * A readers lock is held; if we don't hold one, bail out.
		 */
		readlock_t *readlockp;

		sigoff(self);
		readlockp = rwl_entry(rwlp);
		if (readlockp->rd_count == 0) {
			sigon(self);
			if (self->ul_error_detection)
				rwlock_error(rwlp, "rwlock_unlock",
				    "readers lock held, "
				    "but not by the calling thread");
			return (EPERM);
		}
		/*
		 * If we hold more than one readers lock on this rwlock,
		 * just decrement our reference count and return.
		 */
		if (--readlockp->rd_count != 0) {
			sigon(self);
			goto out;
		}
		sigon(self);
	} else {
		/*
		 * This is a usage error.
		 * No thread should release an unowned lock.
		 */
		if (self->ul_error_detection)
			rwlock_error(rwlp, "rwlock_unlock", "lock not owned");
		return (EPERM);
	}

	if (rd_wr == WRITE_LOCK && write_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rd_wr == READ_LOCK && read_unlock_try(rwlp)) {
		/* EMPTY */;
	} else if (rwlp->rwlock_type == USYNC_PROCESS) {
		(void) mutex_lock(&rwlp->mutex);
		(void) __lwp_rwlock_unlock(rwlp);
		(void) mutex_unlock(&rwlp->mutex);
	} else {
		rw_queue_release(rwlp);
	}

out:
	DTRACE_PROBE2(plockstat, rw__release, rwlp, rd_wr);
	return (0);
}

void
lrw_unlock(rwlock_t *rwlp)
{
	(void) rw_unlock(rwlp);
	exit_critical(curthread);
}