/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "lint.h"
#include "thr_uberdata.h"
#include <sys/rtpriocntl.h>
#include <sys/sdt.h>
#include <atomic.h>

#if defined(THREAD_DEBUG)
#define	INCR32(x)	(((x) != UINT32_MAX)? (x)++ : 0)
#define	INCR(x)		((x)++)
#define	DECR(x)		((x)--)
#define	MAXINCR(m, x)	((m < ++x)? (m = x) : 0)
#else
#define	INCR32(x)
#define	INCR(x)
#define	DECR(x)
#define	MAXINCR(m, x)
#endif

/*
 * This mutex is initialized to be held by lwp#1.
 * It is used to block a thread that has returned from a mutex_lock()
 * of a LOCK_PRIO_INHERIT mutex with an unrecoverable error.
 */
mutex_t	stall_mutex = DEFAULTMUTEX;

static int shared_mutex_held(mutex_t *);
static int mutex_queuelock_adaptive(mutex_t *);
static void mutex_wakeup_all(mutex_t *);

/*
 * Lock statistics support functions.
 */
void
record_begin_hold(tdb_mutex_stats_t *msp)
{
	tdb_incr(msp->mutex_lock);
	msp->mutex_begin_hold = gethrtime();
}

hrtime_t
record_hold_time(tdb_mutex_stats_t *msp)
{
	hrtime_t now = gethrtime();

	if (msp->mutex_begin_hold)
		msp->mutex_hold_time += now - msp->mutex_begin_hold;
	msp->mutex_begin_hold = 0;
	return (now);
}

/*
 * Called once at library initialization.
 */
void
mutex_setup(void)
{
	if (set_lock_byte(&stall_mutex.mutex_lockw))
		thr_panic("mutex_setup() cannot acquire stall_mutex");
	stall_mutex.mutex_owner = (uintptr_t)curthread;
}

/*
 * The default spin count of 1000 is experimentally determined.
 * On sun4u machines with any number of processors it could be raised
 * to 10,000 but that (experimentally) makes almost no difference.
 * The environment variable:
 *	_THREAD_ADAPTIVE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_adaptive_spin = 1000;
uint_t	thread_max_spinners = 100;
int	thread_queue_verify = 0;
static	int	ncpus;

/*
 * Distinguish spinning for queue locks from spinning for regular locks.
 * We try harder to acquire queue locks by spinning.
 * The environment variable:
 *	_THREAD_QUEUE_SPIN=count
 * can be used to override and set the count in the range [0 .. 1,000,000].
 */
int	thread_queue_spin = 10000;
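
/*
 * Illustrative use of the tunables above (a hypothetical shell session,
 * not part of this file; the values shown are arbitrary):
 *
 *	$ _THREAD_ADAPTIVE_SPIN=100000 _THREAD_QUEUE_SPIN=50000 ./myapp
 *
 * Each variable overrides the corresponding default for that process,
 * subject to the [0 .. 1,000,000] range documented above.
 */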

#define	ALL_ATTRIBUTES				\
	(LOCK_RECURSIVE | LOCK_ERRORCHECK |	\
	LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT |	\
	LOCK_ROBUST)

/*
 * 'type' can be one of USYNC_THREAD, USYNC_PROCESS, or USYNC_PROCESS_ROBUST,
 * augmented by zero or more of the flags:
 *	LOCK_RECURSIVE
 *	LOCK_ERRORCHECK
 *	LOCK_PRIO_INHERIT
 *	LOCK_PRIO_PROTECT
 *	LOCK_ROBUST
 */
#pragma weak _mutex_init = mutex_init
/* ARGSUSED2 */
int
mutex_init(mutex_t *mp, int type, void *arg)
{
	int basetype = (type & ~ALL_ATTRIBUTES);
	const pcclass_t *pccp;
	int error = 0;
	int ceil;

	if (basetype == USYNC_PROCESS_ROBUST) {
		/*
		 * USYNC_PROCESS_ROBUST is a deprecated historical type.
		 * We change it into (USYNC_PROCESS | LOCK_ROBUST) but
		 * retain the USYNC_PROCESS_ROBUST flag so we can return
		 * ELOCKUNMAPPED when necessary (only USYNC_PROCESS_ROBUST
		 * mutexes will ever draw ELOCKUNMAPPED).
		 */
		type |= (USYNC_PROCESS | LOCK_ROBUST);
		basetype = USYNC_PROCESS;
	}

	if (type & LOCK_PRIO_PROTECT)
		pccp = get_info_by_policy(SCHED_FIFO);
	if ((basetype != USYNC_THREAD && basetype != USYNC_PROCESS) ||
	    (type & (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT))
	    == (LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT) ||
	    ((type & LOCK_PRIO_PROTECT) &&
	    ((ceil = *(int *)arg) < pccp->pcc_primin ||
	    ceil > pccp->pcc_primax))) {
		error = EINVAL;
	} else if (type & LOCK_ROBUST) {
		/*
		 * Callers of mutex_init() with the LOCK_ROBUST attribute
		 * are required to pass an initially all-zero mutex.
		 * Multiple calls to mutex_init() are allowed; all but
		 * the first return EBUSY.  A call to mutex_init() is
		 * allowed to make an inconsistent robust lock consistent
		 * (for historical usage, even though the proper interface
		 * for this is mutex_consistent()).  Note that we use
		 * atomic_or_16() to set the LOCK_INITED flag so as
		 * not to disturb surrounding bits (LOCK_OWNERDEAD, etc).
		 */
		if (!(mp->mutex_flag & LOCK_INITED)) {
			mp->mutex_type = (uint8_t)type;
			atomic_or_16(&mp->mutex_flag, LOCK_INITED);
			mp->mutex_magic = MUTEX_MAGIC;
		} else if (type != mp->mutex_type ||
		    ((type & LOCK_PRIO_PROTECT) && mp->mutex_ceiling != ceil)) {
			error = EINVAL;
		} else if (mutex_consistent(mp) != 0) {
			error = EBUSY;
		}
		/* register a process robust mutex with the kernel */
		if (basetype == USYNC_PROCESS)
			register_lock(mp);
	} else {
		(void) memset(mp, 0, sizeof (*mp));
		mp->mutex_type = (uint8_t)type;
		mp->mutex_flag = LOCK_INITED;
		mp->mutex_magic = MUTEX_MAGIC;
	}

	if (error == 0 && (type & LOCK_PRIO_PROTECT)) {
		mp->mutex_ceiling = ceil;
	}

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their mutexes
	 * (and don't check the return code from mutex_init),
	 * we put it here, after initializing the mutex regardless.
	 */
	if (error == 0 &&
	    ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		error = EINVAL;

	return (error);
}
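
/*
 * Illustrative caller of mutex_init() (a minimal sketch, not code from
 * libc): initializing a process-shared robust mutex in a region of
 * initially all-zero shared memory, as the LOCK_ROBUST rules above
 * require:
 *
 *	mutex_t *mp = (mutex_t *)shared_region;	(assumed 8-byte aligned)
 *	int error = mutex_init(mp, USYNC_PROCESS | LOCK_ROBUST, NULL);
 *
 * Per the comment above, a second mutex_init() call on the same mutex
 * with the same type returns EBUSY rather than reinitializing it.
 */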

/*
 * Delete mp from list of ceiling mutexes owned by curthread.
 * Return 1 if the head of the chain was updated.
 */
int
_ceil_mylist_del(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t **mcpp;
	mxchain_t *mcp;

	for (mcpp = &self->ul_mxchain;
	    (mcp = *mcpp) != NULL;
	    mcpp = &mcp->mxchain_next) {
		if (mcp->mxchain_mx == mp) {
			*mcpp = mcp->mxchain_next;
			lfree(mcp, sizeof (*mcp));
			return (mcpp == &self->ul_mxchain);
		}
	}
	return (0);
}

/*
 * Add mp to the list of ceiling mutexes owned by curthread.
 * Return ENOMEM if no memory could be allocated.
 */
int
_ceil_mylist_add(mutex_t *mp)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp;

	if ((mcp = lmalloc(sizeof (*mcp))) == NULL)
		return (ENOMEM);
	mcp->mxchain_mx = mp;
	mcp->mxchain_next = self->ul_mxchain;
	self->ul_mxchain = mcp;
	return (0);
}

/*
 * Helper function for _ceil_prio_inherit() and _ceil_prio_waive(), below.
 */
static void
set_rt_priority(ulwp_t *self, int prio)
{
	pcparms_t pcparm;

	pcparm.pc_cid = self->ul_rtclassid;
	((rtparms_t *)pcparm.pc_clparms)->rt_tqnsecs = RT_NOCHANGE;
	((rtparms_t *)pcparm.pc_clparms)->rt_pri = prio;
	(void) priocntl(P_LWPID, self->ul_lwpid, PC_SETPARMS, &pcparm);
}

/*
 * Inherit priority from ceiling.
 * This changes the effective priority, not the assigned priority.
 */
void
_ceil_prio_inherit(int prio)
{
	ulwp_t *self = curthread;

	self->ul_epri = prio;
	set_rt_priority(self, prio);
}

/*
 * Waive inherited ceiling priority.  Inherit from head of owned ceiling locks
 * if holding at least one ceiling lock.  If no ceiling locks are held at this
 * point, disinherit completely, reverting back to assigned priority.
 */
void
_ceil_prio_waive(void)
{
	ulwp_t *self = curthread;
	mxchain_t *mcp = self->ul_mxchain;
	int prio;

	if (mcp == NULL) {
		prio = self->ul_pri;
		self->ul_epri = 0;
	} else {
		prio = mcp->mxchain_mx->mutex_ceiling;
		self->ul_epri = prio;
	}
	set_rt_priority(self, prio);
}
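
/*
 * Worked example of the ceiling protocol implemented by the routines
 * above (the priorities are illustrative): a thread with assigned RT
 * priority 5 acquires a ceiling-10 mutex and runs _ceil_prio_inherit(10),
 * then acquires a ceiling-20 mutex and runs _ceil_prio_inherit(20).
 * When it releases the ceiling-20 mutex, _ceil_prio_waive() re-inherits
 * 10 from the mutex now at the head of ul_mxchain; when it releases the
 * last ceiling mutex, the chain is empty and it reverts to the assigned
 * priority 5.
 */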

/*
 * Clear the lock byte.  Retain the waiters byte and the spinners byte.
 * Return the old value of the lock word.
 */
static uint32_t
clear_lockbyte(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		new = old & ~LOCKMASK;
	} while (atomic_cas_32(lockword, old, new) != old);

	return (old);
}

/*
 * Same as clear_lockbyte(), but operates on mutex_lockword64.
 * The mutex_ownerpid field is cleared along with the lock byte.
 */
static uint64_t
clear_lockbyte64(volatile uint64_t *lockword64)
{
	uint64_t old;
	uint64_t new;

	do {
		old = *lockword64;
		new = old & ~LOCKMASK64;
	} while (atomic_cas_64(lockword64, old, new) != old);

	return (old);
}

/*
 * Similar to set_lock_byte(), which only tries to set the lock byte.
 * Here, we attempt to set the lock byte AND the mutex_ownerpid, keeping
 * the remaining bytes constant.  This atomic operation is required for
 * the correctness of process-shared robust locks; otherwise there would
 * be a window of vulnerability in which the lock byte had been set but
 * the mutex_ownerpid had not yet been set.  If the process were to die
 * in this window of vulnerability (due to some other thread calling
 * exit() or the process receiving a fatal signal), the mutex would be
 * left locked but without a process-ID to determine which process was
 * holding the lock.  The kernel would then be unable to mark the robust
 * mutex as LOCK_OWNERDEAD when the process died.  For all other cases of
 * process-shared locks, this operation is just a convenience, for the
 * sake of common code.
 *
 * This operation requires process-shared robust locks to be properly
 * aligned on an 8-byte boundary, at least on sparc machines, lest the
 * operation incur an alignment fault.  This is automatic when locks
 * are declared properly using the mutex_t or pthread_mutex_t data types
 * and the application does not allocate dynamic memory on less than an
 * 8-byte boundary.  See the 'horrible hack' comments below for cases
 * dealing with such broken applications.
 */
static int
set_lock_byte64(volatile uint64_t *lockword64, pid_t ownerpid)
{
	uint64_t old;
	uint64_t new;

	old = *lockword64 & ~LOCKMASK64;
	new = old | ((uint64_t)(uint_t)ownerpid << PIDSHIFT) | LOCKBYTE64;
	if (atomic_cas_64(lockword64, old, new) == old)
		return (LOCKCLEAR);

	return (LOCKSET);
}
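
/*
 * Conceptually, set_lock_byte64() performs this single-CAS transition
 * (the exact bit positions are platform-dependent and come from
 * LOCKMASK64, LOCKBYTE64 and PIDSHIFT in thr_uberdata.h):
 *
 *	{ ownerpid = 0, lock byte clear, waiters, spinners }
 *	    --> { ownerpid = pid, lock byte set, waiters, spinners }
 *
 * so the waiters and spinners fields pass through undisturbed and the
 * pid and lock byte can never be observed out of sync.
 */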

/*
 * Increment the spinners count in the mutex lock word.
 * Return 0 on success.  Return -1 if the count would overflow.
 */
static int
spinners_incr(volatile uint32_t *lockword, uint8_t max_spinners)
{
	uint32_t old;
	uint32_t new;

	do {
		old = *lockword;
		if (((old & SPINNERMASK) >> SPINNERSHIFT) >= max_spinners)
			return (-1);
		new = old + (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (0);
}

/*
 * Decrement the spinners count in the mutex lock word.
 * Return the new value of the lock word.
 */
static uint32_t
spinners_decr(volatile uint32_t *lockword)
{
	uint32_t old;
	uint32_t new;

	do {
		new = old = *lockword;
		if (new & SPINNERMASK)
			new -= (1 << SPINNERSHIFT);
	} while (atomic_cas_32(lockword, old, new) != old);

	return (new);
}

/*
 * Non-preemptive spin locks.  Used by queue_lock().
 * No lock statistics are gathered for these locks.
 * No DTrace probes are provided for these locks.
 */
void
spin_lock_set(mutex_t *mp)
{
	ulwp_t *self = curthread;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Spin for a while, attempting to acquire the lock.
	 */
	INCR32(self->ul_spin_lock_spin);
	if (mutex_queuelock_adaptive(mp) == 0 ||
	    set_lock_byte(&mp->mutex_lockw) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		return;
	}
	/*
	 * Try harder if we were previously at a no-preemption level.
	 */
	if (self->ul_preempt > 1) {
		INCR32(self->ul_spin_lock_spin2);
		if (mutex_queuelock_adaptive(mp) == 0 ||
		    set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			return;
		}
	}
	/*
	 * Give up and block in the kernel for the mutex.
	 */
	INCR32(self->ul_spin_lock_sleep);
	(void) ___lwp_mutex_timedlock(mp, NULL, self);
}

void
spin_lock_clear(mutex_t *mp)
{
	ulwp_t *self = curthread;

	mp->mutex_owner = 0;
	if (atomic_swap_32(&mp->mutex_lockword, 0) & WAITERMASK) {
		(void) ___lwp_mutex_wakeup(mp, 0);
		INCR32(self->ul_spin_lock_wakeup);
	}
	preempt(self);
}

/*
 * Allocate the sleep queue hash table.
 */
void
queue_alloc(void)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_head_t *qp;
	void *data;
	int i;

	/*
	 * No locks are needed; we call here only when single-threaded.
	 */
	ASSERT(self == udp->ulwp_one);
	ASSERT(!udp->uberflags.uf_mt);
	if ((data = mmap(NULL, 2 * QHASHSIZE * sizeof (queue_head_t),
	    PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
	    == MAP_FAILED)
		thr_panic("cannot allocate thread queue_head table");
	udp->queue_head = qp = (queue_head_t *)data;
	for (i = 0; i < 2 * QHASHSIZE; qp++, i++) {
		qp->qh_type = (i < QHASHSIZE)? MX : CV;
		qp->qh_lock.mutex_flag = LOCK_INITED;
		qp->qh_lock.mutex_magic = MUTEX_MAGIC;
		qp->qh_hlist = &qp->qh_def_root;
#if defined(THREAD_DEBUG)
		qp->qh_hlen = 1;
		qp->qh_hmax = 1;
#endif
	}
}

#if defined(THREAD_DEBUG)

/*
 * Debugging: verify correctness of a sleep queue.
 */
void
QVERIFY(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	queue_root_t *qrp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	uint_t index;
	uint32_t cnt;
	char qtype;
	void *wchan;

	ASSERT(qp >= udp->queue_head && (qp - udp->queue_head) < 2 * QHASHSIZE);
	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		cnt++;
		ASSERT((qrp->qr_head != NULL && qrp->qr_tail != NULL) ||
		    (qrp->qr_head == NULL && qrp->qr_tail == NULL));
	}
	ASSERT(qp->qh_hlen == cnt && qp->qh_hmax >= cnt);
	qtype = ((qp - udp->queue_head) < QHASHSIZE)? MX : CV;
	ASSERT(qp->qh_type == qtype);
	if (!thread_queue_verify)
		return;
	/* real expensive stuff, only for _THREAD_QUEUE_VERIFY */
	for (cnt = 0, qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next) {
		for (prev = NULL, ulwp = qrp->qr_head; ulwp != NULL;
		    prev = ulwp, ulwp = ulwp->ul_link) {
			cnt++;
			if (ulwp->ul_writer)
				ASSERT(prev == NULL || prev->ul_writer);
			ASSERT(ulwp->ul_qtype == qtype);
			ASSERT(ulwp->ul_wchan != NULL);
			ASSERT(ulwp->ul_sleepq == qp);
			wchan = ulwp->ul_wchan;
			ASSERT(qrp->qr_wchan == wchan);
			index = QUEUE_HASH(wchan, qtype);
			ASSERT(&udp->queue_head[index] == qp);
		}
		ASSERT(qrp->qr_tail == prev);
	}
	ASSERT(qp->qh_qlen == cnt);
}

#else	/* THREAD_DEBUG */

#define	QVERIFY(qp)

#endif	/* THREAD_DEBUG */

/*
 * Acquire a queue head.
 */
queue_head_t *
queue_lock(void *wchan, int qtype)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	queue_root_t *qrp;

	ASSERT(qtype == MX || qtype == CV);

	/*
	 * It is possible that we could be called while still single-threaded.
	 * If so, we call queue_alloc() to allocate the queue_head[] array.
	 */
	if ((qp = udp->queue_head) == NULL) {
		queue_alloc();
		qp = udp->queue_head;
	}
	qp += QUEUE_HASH(wchan, qtype);
	spin_lock_set(&qp->qh_lock);
	for (qrp = qp->qh_hlist; qrp != NULL; qrp = qrp->qr_next)
		if (qrp->qr_wchan == wchan)
			break;
	if (qrp == NULL && qp->qh_def_root.qr_head == NULL) {
		/* the default queue root is available; use it */
		qrp = &qp->qh_def_root;
		qrp->qr_wchan = wchan;
		ASSERT(qrp->qr_next == NULL);
		ASSERT(qrp->qr_tail == NULL &&
		    qrp->qr_rtcount == 0 && qrp->qr_qlen == 0);
	}
	qp->qh_wchan = wchan;	/* valid until queue_unlock() is called */
	qp->qh_root = qrp;	/* valid until queue_unlock() is called */
	INCR32(qp->qh_lockcount);
	QVERIFY(qp);
	return (qp);
}

/*
 * Release a queue head.
 */
void
queue_unlock(queue_head_t *qp)
{
	QVERIFY(qp);
	spin_lock_clear(&qp->qh_lock);
}

/*
 * For rwlock queueing, we must queue writers ahead of readers of the
 * same priority.  We do this by making writers appear to have a half
 * point higher priority for purposes of priority comparisons below.
 */
#define	CMP_PRIO(ulwp)	((real_priority(ulwp) << 1) + (ulwp)->ul_writer)

void
enqueue(queue_head_t *qp, ulwp_t *ulwp, int force_fifo)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *next;
	int pri = CMP_PRIO(ulwp);

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(ulwp->ul_sleepq != qp);

	if ((qrp = qp->qh_root) == NULL) {
		/* use the thread's queue root for the linkage */
		qrp = &ulwp->ul_queue_root;
		qrp->qr_next = qp->qh_hlist;
		qrp->qr_prev = NULL;
		qrp->qr_head = NULL;
		qrp->qr_tail = NULL;
		qrp->qr_wchan = qp->qh_wchan;
		qrp->qr_rtcount = 0;
		qrp->qr_qlen = 0;
		qrp->qr_qmax = 0;
		qp->qh_hlist->qr_prev = qrp;
		qp->qh_hlist = qrp;
		qp->qh_root = qrp;
		MAXINCR(qp->qh_hmax, qp->qh_hlen);
	}

	/*
	 * LIFO queue ordering is unfair and can lead to starvation,
	 * but it gives better performance for heavily contended locks.
	 * We use thread_queue_fifo (range is 0..8) to determine
	 * the frequency of FIFO vs LIFO queuing:
	 *	0 : every 256th time	(almost always LIFO)
	 *	1 : every 128th time
	 *	2 : every 64th time
	 *	3 : every 32nd time
	 *	4 : every 16th time	(the default value, mostly LIFO)
	 *	5 : every 8th time
	 *	6 : every 4th time
	 *	7 : every 2nd time
	 *	8 : every time		(never LIFO, always FIFO)
	 * Note that there is always some degree of FIFO ordering.
	 * This breaks live lock conditions that occur in applications
	 * that are written assuming (incorrectly) that threads acquire
	 * locks fairly, that is, in roughly round-robin order.
	 * In any event, the queue is maintained in kernel priority order.
	 *
	 * If force_fifo is non-zero, fifo queueing is forced.
	 * SUSV3 requires this for semaphores.
	 */
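	/*
	 * Worked example of the test below: with the default ul_queue_fifo
	 * of 4, (++qh_qcnt << 4) & 0xff is zero exactly when qh_qcnt is a
	 * multiple of 16, giving FIFO placement on every 16th enqueue.
	 * At 8 the shift discards all eight low-order bits, so the test
	 * is always true (pure FIFO); at 0 it is true only when qh_qcnt
	 * is a multiple of 256 (almost pure LIFO).
	 */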
	if (qrp->qr_head == NULL) {
		/*
		 * The queue is empty.  LIFO/FIFO doesn't matter.
		 */
		ASSERT(qrp->qr_tail == NULL);
		ulwpp = &qrp->qr_head;
	} else if (force_fifo |
	    (((++qp->qh_qcnt << curthread->ul_queue_fifo) & 0xff) == 0)) {
		/*
		 * Enqueue after the last thread whose priority is greater
		 * than or equal to the priority of the thread being queued.
		 * Attempt first to go directly onto the tail of the queue.
		 */
		if (pri <= CMP_PRIO(qrp->qr_tail))
			ulwpp = &qrp->qr_tail->ul_link;
		else {
			for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
			    ulwpp = &next->ul_link)
				if (pri > CMP_PRIO(next))
					break;
		}
	} else {
		/*
		 * Enqueue before the first thread whose priority is less
		 * than or equal to the priority of the thread being queued.
		 * Hopefully we can go directly onto the head of the queue.
		 */
		for (ulwpp = &qrp->qr_head; (next = *ulwpp) != NULL;
		    ulwpp = &next->ul_link)
			if (pri >= CMP_PRIO(next))
				break;
	}
	if ((ulwp->ul_link = *ulwpp) == NULL)
		qrp->qr_tail = ulwp;
	*ulwpp = ulwp;

	ulwp->ul_sleepq = qp;
	ulwp->ul_wchan = qp->qh_wchan;
	ulwp->ul_qtype = qp->qh_type;
	if ((ulwp->ul_schedctl != NULL &&
	    ulwp->ul_schedctl->sc_cid == ulwp->ul_rtclassid) |
	    ulwp->ul_pilocks) {
		ulwp->ul_rtqueued = 1;
		qrp->qr_rtcount++;
	}
	MAXINCR(qrp->qr_qmax, qrp->qr_qlen);
	MAXINCR(qp->qh_qmax, qp->qh_qlen);
}

/*
 * Helper function for queue_slot() and queue_slot_rt().
 * Try to find a non-suspended thread on the queue.
 */
static ulwp_t **
queue_slot_runnable(ulwp_t **ulwpp, ulwp_t **prevp, int rt)
{
	ulwp_t *ulwp;
	ulwp_t **foundpp = NULL;
	int priority = -1;
	ulwp_t *prev;
	int tpri;

	for (prev = NULL;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		if (ulwp->ul_stop)	/* skip suspended threads */
			continue;
		tpri = rt? CMP_PRIO(ulwp) : 0;
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
			if (!rt)
				break;
		}
	}
	return (foundpp);
}

/*
 * For real-time, we search the entire queue because the dispatch
 * (kernel) priorities may have changed since enqueueing.
 */
static ulwp_t **
queue_slot_rt(ulwp_t **ulwpp_org, ulwp_t **prevp)
{
	ulwp_t **ulwpp = ulwpp_org;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t **foundpp = ulwpp;
	int priority = CMP_PRIO(ulwp);
	ulwp_t *prev;
	int tpri;

	for (prev = ulwp, ulwpp = &ulwp->ul_link;
	    (ulwp = *ulwpp) != NULL;
	    prev = ulwp, ulwpp = &ulwp->ul_link) {
		tpri = CMP_PRIO(ulwp);
		if (tpri > priority) {
			foundpp = ulwpp;
			*prevp = prev;
			priority = tpri;
		}
	}
	ulwp = *foundpp;

	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(ulwpp_org, prevp, 1)) != NULL) {
		foundpp = ulwpp;
		ulwp = *foundpp;
	}
	ulwp->ul_rt = 1;
	return (foundpp);
}

ulwp_t **
queue_slot(queue_head_t *qp, ulwp_t **prevp, int *more)
{
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	int rt;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));

	if ((qrp = qp->qh_root) == NULL || (ulwp = qrp->qr_head) == NULL) {
		*more = 0;
		return (NULL);		/* no lwps on the queue */
	}
	rt = (qrp->qr_rtcount != 0);
	*prevp = NULL;
	if (ulwp->ul_link == NULL) {	/* only one lwp on the queue */
		*more = 0;
		ulwp->ul_rt = rt;
		return (&qrp->qr_head);
	}
	*more = 1;

	if (rt)		/* real-time queue */
		return (queue_slot_rt(&qrp->qr_head, prevp));
	/*
	 * Try not to return a suspended thread.
	 * This mimics the old libthread's behavior.
	 */
	if (ulwp->ul_stop &&
	    (ulwpp = queue_slot_runnable(&qrp->qr_head, prevp, 0)) != NULL) {
		ulwp = *ulwpp;
		ulwp->ul_rt = 0;
		return (ulwpp);
	}
	/*
	 * The common case; just pick the first thread on the queue.
	 */
	ulwp->ul_rt = 0;
	return (&qrp->qr_head);
}

/*
 * Common code for unlinking an lwp from a user-level sleep queue.
 */
void
queue_unlink(queue_head_t *qp, ulwp_t **ulwpp, ulwp_t *prev)
{
	queue_root_t *qrp = qp->qh_root;
	queue_root_t *nqrp;
	ulwp_t *ulwp = *ulwpp;
	ulwp_t *next;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, curthread));
	ASSERT(qp->qh_wchan != NULL && ulwp->ul_wchan == qp->qh_wchan);

	DECR(qp->qh_qlen);
	DECR(qrp->qr_qlen);
	if (ulwp->ul_rtqueued) {
		ulwp->ul_rtqueued = 0;
		qrp->qr_rtcount--;
	}
	next = ulwp->ul_link;
	*ulwpp = next;
	ulwp->ul_link = NULL;
	if (qrp->qr_tail == ulwp)
		qrp->qr_tail = prev;
	if (qrp == &ulwp->ul_queue_root) {
		/*
		 * We can't continue to use the unlinked thread's
		 * queue root for the linkage.
		 */
		queue_root_t *qr_next = qrp->qr_next;
		queue_root_t *qr_prev = qrp->qr_prev;

		if (qrp->qr_tail) {
			/* switch to using the last thread's queue root */
			ASSERT(qrp->qr_qlen != 0);
			nqrp = &qrp->qr_tail->ul_queue_root;
			*nqrp = *qrp;
			if (qr_next)
				qr_next->qr_prev = nqrp;
			if (qr_prev)
				qr_prev->qr_next = nqrp;
			else
				qp->qh_hlist = nqrp;
			qp->qh_root = nqrp;
		} else {
			/* empty queue root; just delete from the hash list */
			ASSERT(qrp->qr_qlen == 0);
			if (qr_next)
				qr_next->qr_prev = qr_prev;
			if (qr_prev)
				qr_prev->qr_next = qr_next;
			else
				qp->qh_hlist = qr_next;
			qp->qh_root = NULL;
			DECR(qp->qh_hlen);
		}
	}
}

ulwp_t *
dequeue(queue_head_t *qp, int *more)
{
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;

	if ((ulwpp = queue_slot(qp, &prev, more)) == NULL)
		return (NULL);
	ulwp = *ulwpp;
	queue_unlink(qp, ulwpp, prev);
	ulwp->ul_sleepq = NULL;
	ulwp->ul_wchan = NULL;
	return (ulwp);
}
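
/*
 * Typical wakeup pattern built from queue_lock(), dequeue() and
 * queue_unlock() (a simplified sketch of what the mutex wakeup code
 * later in this file does; the real code also manages preemption and
 * the waiters bit):
 *
 *	int more;
 *	queue_head_t *qp = queue_lock(mp, MX);
 *	ulwp_t *ulwp = dequeue(qp, &more);
 *	lwpid_t lwpid = (ulwp != NULL)? ulwp->ul_lwpid : 0;
 *	queue_unlock(qp);
 *	if (lwpid != 0)
 *		(void) __lwp_unpark(lwpid);
 */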

/*
 * Return a pointer to the highest priority thread sleeping on wchan.
 */
ulwp_t *
queue_waiter(queue_head_t *qp)
{
	ulwp_t **ulwpp;
	ulwp_t *prev;
	int more;

	if ((ulwpp = queue_slot(qp, &prev, &more)) == NULL)
		return (NULL);
	return (*ulwpp);
}

int
dequeue_self(queue_head_t *qp)
{
	ulwp_t *self = curthread;
	queue_root_t *qrp;
	ulwp_t **ulwpp;
	ulwp_t *ulwp;
	ulwp_t *prev;
	int found = 0;

	ASSERT(MUTEX_OWNED(&qp->qh_lock, self));

	/* find self on the sleep queue */
	if ((qrp = qp->qh_root) != NULL) {
		for (prev = NULL, ulwpp = &qrp->qr_head;
		    (ulwp = *ulwpp) != NULL;
		    prev = ulwp, ulwpp = &ulwp->ul_link) {
			if (ulwp == self) {
				queue_unlink(qp, ulwpp, prev);
				self->ul_cvmutex = NULL;
				self->ul_sleepq = NULL;
				self->ul_wchan = NULL;
				found = 1;
				break;
			}
		}
	}

	if (!found)
		thr_panic("dequeue_self(): curthread not found on queue");

	return ((qrp = qp->qh_root) != NULL && qrp->qr_head != NULL);
}

/*
 * Called from call_user_handler() and _thrp_suspend() to take
 * ourself off of our sleep queue so we can grab locks.
 */
void
unsleep_self(void)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;

	/*
	 * Calling enter_critical()/exit_critical() here would lead
	 * to recursion.  Just manipulate self->ul_critical directly.
	 */
	self->ul_critical++;
	while (self->ul_sleepq != NULL) {
		qp = queue_lock(self->ul_wchan, self->ul_qtype);
		/*
		 * We may have been moved from a CV queue to a
		 * mutex queue while we were attempting queue_lock().
		 * If so, just loop around and try again.
		 * dequeue_self() clears self->ul_sleepq.
		 */
		if (qp == self->ul_sleepq)
			(void) dequeue_self(qp);
		queue_unlock(qp);
	}
	self->ul_writer = 0;
	self->ul_critical--;
}

/*
 * Common code for calling the ___lwp_mutex_timedlock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
static int
mutex_lock_kernel(mutex_t *mp, timespec_t *tsp, tdb_mutex_stats_t *msp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep;
	int acquired;
	int error;

	self->ul_sp = stkptr();
	self->ul_wchan = mp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_timedlock(mp, tsp, self)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			acquired = 1;
			break;
		}
	}

	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;
	self->ul_wchan = NULL;
	self->ul_sp = 0;

	if (acquired) {
		ASSERT(mp->mutex_owner == (uintptr_t)self);
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

/*
 * Common code for calling the ___lwp_mutex_trylock() system call.
 * Returns with mutex_owner and mutex_ownerpid set correctly.
 */
int
mutex_trylock_kernel(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	int error;
	int acquired;

	for (;;) {
		/*
		 * A return value of EOWNERDEAD or ELOCKUNMAPPED
		 * means we successfully acquired the lock.
		 */
		if ((error = ___lwp_mutex_trylock(mp, self)) != 0 &&
		    error != EOWNERDEAD && error != ELOCKUNMAPPED) {
			acquired = 0;
			break;
		}

		if (mtype & USYNC_PROCESS) {
			/*
			 * Defend against forkall().  We may be the child,
			 * in which case we don't actually own the mutex.
			 */
			enter_critical(self);
			if (mp->mutex_ownerpid == udp->pid) {
				exit_critical(self);
				acquired = 1;
				break;
			}
			exit_critical(self);
		} else {
			acquired = 1;
			break;
		}
	}

	if (acquired) {
		ASSERT(mp->mutex_owner == (uintptr_t)self);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else if (error != EBUSY) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	}

	return (error);
}

volatile sc_shared_t *
setup_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *scp;
	sc_shared_t *tmp;

	if ((scp = self->ul_schedctl) == NULL && /* no shared state yet */
	    !self->ul_vfork &&			/* not a child of vfork() */
	    !self->ul_schedctl_called) {	/* haven't been called before */
		enter_critical(self);
		self->ul_schedctl_called = &self->ul_uberdata->uberflags;
		if ((tmp = __schedctl()) != (sc_shared_t *)(-1))
			self->ul_schedctl = scp = tmp;
		exit_critical(self);
	}
	/*
	 * Unless the call to setup_schedctl() is surrounded
	 * by enter_critical()/exit_critical(), the address
	 * we are returning could be invalid due to a forkall()
	 * having occurred in another thread.
	 */
	return (scp);
}

/*
 * Interfaces from libsched, incorporated into libc.
 * libsched.so.1 is now a filter library onto libc.
 */
#pragma weak schedctl_lookup = schedctl_init
schedctl_t *
schedctl_init(void)
{
	volatile sc_shared_t *scp = setup_schedctl();
	return ((scp == NULL)? NULL : (schedctl_t *)&scp->sc_preemptctl);
}

void
schedctl_exit(void)
{
}
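
/*
 * Illustrative use of the public interface above (a sketch; see the
 * schedctl_init(3C) manual page for the full contract):
 *
 *	schedctl_t *scp = schedctl_init();
 *	if (scp != NULL)
 *		schedctl_start(scp);	(hint: avoid preempting me now)
 *	... short critical section ...
 *	if (scp != NULL)
 *		schedctl_stop(scp);	(preemption hint withdrawn)
 */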

/*
 * Contract private interface for java.
 * Set up the schedctl data if it doesn't exist yet.
 * Return a pointer to the pointer to the schedctl data.
 */
volatile sc_shared_t *volatile *
_thr_schedctl(void)
{
	ulwp_t *self = curthread;
	volatile sc_shared_t *volatile *ptr;

	if (self->ul_vfork)
		return (NULL);
	if (*(ptr = &self->ul_schedctl) == NULL)
		(void) setup_schedctl();
	return (ptr);
}

/*
 * Block signals and attempt to block preemption.
 * no_preempt()/preempt() must be used in pairs but can be nested.
 */
void
no_preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	if (self->ul_preempt++ == 0) {
		enter_critical(self);
		if ((scp = self->ul_schedctl) != NULL ||
		    (scp = setup_schedctl()) != NULL) {
			/*
			 * Save the pre-existing preempt value.
			 */
			self->ul_savpreempt = scp->sc_preemptctl.sc_nopreempt;
			scp->sc_preemptctl.sc_nopreempt = 1;
		}
	}
}

/*
 * Undo the effects of no_preempt().
 */
void
preempt(ulwp_t *self)
{
	volatile sc_shared_t *scp;

	ASSERT(self->ul_preempt > 0);
	if (--self->ul_preempt == 0) {
		if ((scp = self->ul_schedctl) != NULL) {
			/*
			 * Restore the pre-existing preempt value.
			 */
			scp->sc_preemptctl.sc_nopreempt = self->ul_savpreempt;
			if (scp->sc_preemptctl.sc_yield &&
			    scp->sc_preemptctl.sc_nopreempt == 0) {
				yield();
				if (scp->sc_preemptctl.sc_yield) {
					/*
					 * Shouldn't happen.  This is either
					 * a race condition or the thread
					 * just entered the real-time class.
					 */
					yield();
					scp->sc_preemptctl.sc_yield = 0;
				}
			}
		}
		exit_critical(self);
	}
}
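
/*
 * Illustrative pairing of the functions above (a sketch, not a caller
 * in this file):
 *
 *	ulwp_t *self = curthread;
 *	no_preempt(self);	-- outermost call sets sc_nopreempt
 *	no_preempt(self);	-- nested calls just increment ul_preempt
 *	... work that must not be preempted ...
 *	preempt(self);
 *	preempt(self);		-- outermost call may yield the cpu
 *
 * Each no_preempt() must be matched by exactly one preempt(), as the
 * comment above the functions requires.
 */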

/*
 * If a call to preempt() would cause the current thread to yield or to
 * take deferred actions in exit_critical(), then unpark the specified
 * lwp so it can run while we delay.  Return the original lwpid if the
 * unpark was not performed, else return zero.  The tests are a repeat
 * of some of the tests in preempt(), above.  This is a statistical
 * optimization solely for cond_sleep_queue(), below.
 */
static lwpid_t
preempt_unpark(ulwp_t *self, lwpid_t lwpid)
{
	volatile sc_shared_t *scp = self->ul_schedctl;

	ASSERT(self->ul_preempt == 1 && self->ul_critical > 0);
	if ((scp != NULL && scp->sc_preemptctl.sc_yield) ||
	    (self->ul_curplease && self->ul_critical == 1)) {
		(void) __lwp_unpark(lwpid);
		lwpid = 0;
	}
	return (lwpid);
}

/*
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread.
 */
static int
mutex_trylock_adaptive(mutex_t *mp, int tryhard)
{
	ulwp_t *self = curthread;
	int error = EBUSY;
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	volatile uint64_t *ownerp = (volatile uint64_t *)&mp->mutex_owner;
	uint32_t new_lockword;
	int count = 0;
	int max_count;
	uint8_t max_spinners;

	ASSERT(!(mp->mutex_type & USYNC_PROCESS));

	if (MUTEX_OWNED(mp, self))
		return (EBUSY);

	enter_critical(self);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		error = ENOTRECOVERABLE;
		goto done;
	}

	/*
	 * Make one attempt to acquire the lock before
	 * incurring the overhead of the spin loop.
	 */
	if (set_lock_byte(lockp) == 0) {
		*ownerp = (uintptr_t)self;
		error = 0;
		goto done;
	}
	if (!tryhard)
		goto done;
	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	if ((max_spinners = self->ul_max_spinners) >= ncpus)
		max_spinners = ncpus - 1;
	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
	if (max_count == 0)
		goto done;

	/*
	 * This spin loop is unfair to lwps that have already dropped into
	 * the kernel to sleep.  They will starve on a highly-contended mutex.
	 * This is just too bad.  The adaptive spin algorithm is intended
	 * to allow programs with highly-contended locks (that is, broken
	 * programs) to execute with reasonable speed despite their contention.
	 * Being fair would reduce the speed of such programs and well-written
	 * programs will not suffer in any case.
	 */
	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1)
		goto done;
	DTRACE_PROBE1(plockstat, mutex__spin, mp);
	for (count = 1; ; count++) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
			break;
		}
		if (count == max_count)
			break;
		SMT_PAUSE();
		/*
		 * Stop spinning if the mutex owner is not running on
		 * a processor; it will not drop the lock any time soon
		 * and we would just be wasting time to keep spinning.
		 *
		 * Note that we are looking at another thread (ulwp_t)
		 * without ensuring that the other thread does not exit.
		 * The scheme relies on ulwp_t structures never being
		 * deallocated by the library (the library employs a free
		 * list of ulwp_t structs that are reused when new threads
		 * are created) and on schedctl shared memory never being
		 * deallocated once created via __schedctl().
		 *
		 * Thus, the worst that can happen when the spinning thread
		 * looks at the owner's schedctl data is that it is looking
		 * at some other thread's schedctl data.  This almost never
		 * happens and is benign when it does.
		 */
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}
	new_lockword = spinners_decr(&mp->mutex_lockword);
	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
		/*
		 * We haven't yet acquired the lock, the lock
		 * is free, and there are no other spinners.
		 * Make one final attempt to acquire the lock.
		 *
		 * This isn't strictly necessary since mutex_lock_queue()
		 * (the next action this thread will take if it doesn't
		 * acquire the lock here) makes one attempt to acquire
		 * the lock before putting the thread to sleep.
		 *
		 * If the next action for this thread (on failure here)
		 * were not to call mutex_lock_queue(), this would be
		 * necessary for correctness, to avoid ending up with an
		 * unheld mutex with waiters but no one to wake them up.
		 */
		if (set_lock_byte(lockp) == 0) {
			*ownerp = (uintptr_t)self;
			error = 0;
		}
		count++;
	}

done:
	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex.
		 * Just clear the lock; everyone has already been waked up.
		 */
		*ownerp = 0;
		(void) clear_lockbyte(&mp->mutex_lockword);
		error = ENOTRECOVERABLE;
	}

	exit_critical(self);

	if (error) {
		if (count) {
			DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count);
		}
		if (error != EBUSY) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
		}
	} else {
		if (count) {
			DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
		}
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
		if (mp->mutex_flag & LOCK_OWNERDEAD) {
			ASSERT(mp->mutex_type & LOCK_ROBUST);
			error = EOWNERDEAD;
		}
	}

	return (error);
}
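
/*
 * Caller-side sketch (this mirrors the fast path of mutex_lock_impl(),
 * below, and is shown here only for orientation): the adaptive trylock
 * is the optimistic first step, and EBUSY sends the caller to the
 * sleep-queue path:
 *
 *	if (mutex_trylock_adaptive(mp, 1) != 0)
 *		return (mutex_lock_queue(self, NULL, mp, tsp));
 *	return (0);
 */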

/*
 * Same as mutex_trylock_adaptive(), except specifically for queue locks.
 * The owner field is not set here; the caller (spin_lock_set()) sets it.
 */
static int
mutex_queuelock_adaptive(mutex_t *mp)
{
	ulwp_t *ulwp;
	volatile sc_shared_t *scp;
	volatile uint8_t *lockp;
	volatile uint64_t *ownerp;
	int count = curthread->ul_queue_spin;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	if (count == 0)
		return (EBUSY);

	lockp = (volatile uint8_t *)&mp->mutex_lockw;
	ownerp = (volatile uint64_t *)&mp->mutex_owner;
	while (--count >= 0) {
		if (*lockp == 0 && set_lock_byte(lockp) == 0)
			return (0);
		SMT_PAUSE();
		if ((ulwp = (ulwp_t *)(uintptr_t)*ownerp) != NULL &&
		    ((scp = ulwp->ul_schedctl) == NULL ||
		    scp->sc_state != SC_ONPROC))
			break;
	}

	return (EBUSY);
}
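
/*
 * For reference, a process-shared mutex of the kind handled by the
 * next function is normally created by the application, roughly as in
 * this sketch (standard POSIX interfaces; error handling omitted, and
 * the placement of the mutex in shared memory is up to the caller):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t *mp;
 *
 *	mp = mmap(NULL, sizeof (*mp), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_ANON, -1, 0);
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	(void) pthread_mutex_init(mp, &attr);
 *
 * PTHREAD_PROCESS_SHARED is what sets USYNC_PROCESS in mutex_type.
 */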

/*
 * Like mutex_trylock_adaptive(), but for process-shared mutexes.
 * Spin for a while (if 'tryhard' is true), trying to grab the lock.
 * If this fails, return EBUSY and let the caller deal with it.
 * If this succeeds, return 0 with mutex_owner set to curthread
 * and mutex_ownerpid set to the current pid.
 */
static int
mutex_trylock_process(mutex_t *mp, int tryhard)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int error = EBUSY;
	volatile uint64_t *lockp = (volatile uint64_t *)&mp->mutex_lockword64;
	uint32_t new_lockword;
	int count = 0;
	int max_count;
	uint8_t max_spinners;

#if defined(__sparc) && !defined(_LP64)
	/* horrible hack, necessary only on 32-bit sparc */
	int fix_alignment_problem =
	    (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST));
#endif

	ASSERT(mp->mutex_type & USYNC_PROCESS);

	if (shared_mutex_held(mp))
		return (EBUSY);

	enter_critical(self);

	/* short-cut, not definitive (see below) */
	if (mp->mutex_flag & LOCK_NOTRECOVERABLE) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		error = ENOTRECOVERABLE;
		goto done;
	}

	/*
	 * Make one attempt to acquire the lock before
	 * incurring the overhead of the spin loop.
	 */
#if defined(__sparc) && !defined(_LP64)
	/* horrible hack, necessary only on 32-bit sparc */
	if (fix_alignment_problem) {
		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_ownerpid = udp->pid;
			mp->mutex_owner = (uintptr_t)self;
			error = 0;
			goto done;
		}
	} else
#endif
	if (set_lock_byte64(lockp, udp->pid) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		/* mp->mutex_ownerpid was set by set_lock_byte64() */
		error = 0;
		goto done;
	}
	if (!tryhard)
		goto done;
	if (ncpus == 0)
		ncpus = (int)_sysconf(_SC_NPROCESSORS_ONLN);
	if ((max_spinners = self->ul_max_spinners) >= ncpus)
		max_spinners = ncpus - 1;
	max_count = (max_spinners != 0)? self->ul_adaptive_spin : 0;
	if (max_count == 0)
		goto done;

	/*
	 * This is a process-shared mutex.
	 * We cannot know if the owner is running on a processor.
	 * We just spin and hope that it is on a processor.
	 */
	if (spinners_incr(&mp->mutex_lockword, max_spinners) == -1)
		goto done;
	DTRACE_PROBE1(plockstat, mutex__spin, mp);
	for (count = 1; ; count++) {
#if defined(__sparc) && !defined(_LP64)
		/* horrible hack, necessary only on 32-bit sparc */
		if (fix_alignment_problem) {
			if ((*lockp & LOCKMASK64) == 0 &&
			    set_lock_byte(&mp->mutex_lockw) == 0) {
				mp->mutex_ownerpid = udp->pid;
				mp->mutex_owner = (uintptr_t)self;
				error = 0;
				break;
			}
		} else
#endif
		if ((*lockp & LOCKMASK64) == 0 &&
		    set_lock_byte64(lockp, udp->pid) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			/* mp->mutex_ownerpid was set by set_lock_byte64() */
			error = 0;
			break;
		}
		if (count == max_count)
			break;
		SMT_PAUSE();
	}
	new_lockword = spinners_decr(&mp->mutex_lockword);
	if (error && (new_lockword & (LOCKMASK | SPINNERMASK)) == 0) {
		/*
		 * We haven't yet acquired the lock, the lock
		 * is free, and there are no other spinners.
		 * Make one final attempt to acquire the lock.
		 *
		 * This isn't strictly necessary since mutex_lock_kernel()
		 * (the next action this thread will take if it doesn't
		 * acquire the lock here) makes one attempt to acquire
		 * the lock before putting the thread to sleep.
		 *
		 * If the next action for this thread (on failure here)
		 * were not to call mutex_lock_kernel(), this would be
		 * necessary for correctness, to avoid ending up with an
		 * unheld mutex with waiters but no one to wake them up.
		 */
#if defined(__sparc) && !defined(_LP64)
		/* horrible hack, necessary only on 32-bit sparc */
		if (fix_alignment_problem) {
			if (set_lock_byte(&mp->mutex_lockw) == 0) {
				mp->mutex_ownerpid = udp->pid;
				mp->mutex_owner = (uintptr_t)self;
				error = 0;
			}
		} else
#endif
		if (set_lock_byte64(lockp, udp->pid) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			/* mp->mutex_ownerpid was set by set_lock_byte64() */
			error = 0;
		}
		count++;
	}

done:
	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex.
		 * Just clear the lock; everyone has already been waked up.
		 */
		mp->mutex_owner = 0;
		/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
		(void) clear_lockbyte64(&mp->mutex_lockword64);
		error = ENOTRECOVERABLE;
	}

	exit_critical(self);

	if (error) {
		if (count) {
			DTRACE_PROBE3(plockstat, mutex__spun, mp, 0, count);
		}
		if (error != EBUSY) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
		}
	} else {
		if (count) {
			DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
		}
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
		if (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED)) {
			ASSERT(mp->mutex_type & LOCK_ROBUST);
			if (mp->mutex_flag & LOCK_OWNERDEAD)
				error = EOWNERDEAD;
			else if (mp->mutex_type & USYNC_PROCESS_ROBUST)
				error = ELOCKUNMAPPED;
			else
				error = EOWNERDEAD;
		}
	}

	return (error);
}
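
/*
 * For reference, the EOWNERDEAD/ENOTRECOVERABLE protocol implemented
 * above is consumed by applications roughly as in this sketch (POSIX
 * robust-mutex interfaces; pthread_mutex_consistent() is named
 * pthread_mutex_consistent_np() on some older releases, and
 * repair_shared_state() is a hypothetical recovery step):
 *
 *	switch (pthread_mutex_lock(mp)) {
 *	case 0:
 *		break;			(normal acquisition)
 *	case EOWNERDEAD:
 *		repair_shared_state();
 *		(void) pthread_mutex_consistent(mp);
 *		break;			(lock is held, now consistent)
 *	case ENOTRECOVERABLE:
 *		abort();		(shared state cannot be trusted)
 *	}
 */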

/*
 * Mutex wakeup code for releasing a USYNC_THREAD mutex.
 * Returns the lwpid of the thread that was dequeued, if any.
 * The caller of mutex_wakeup() must call __lwp_unpark(lwpid)
 * to wake up the specified lwp.
 */
static lwpid_t
mutex_wakeup(mutex_t *mp)
{
	lwpid_t lwpid = 0;
	int more;
	queue_head_t *qp;
	ulwp_t *ulwp;

	/*
	 * Dequeue a waiter from the sleep queue.  Don't touch the mutex
	 * waiters bit if no one was found on the queue because the mutex
	 * might have been deallocated or reallocated for another purpose.
	 */
	qp = queue_lock(mp, MX);
	if ((ulwp = dequeue(qp, &more)) != NULL) {
		lwpid = ulwp->ul_lwpid;
		mp->mutex_waiters = more;
	}
	queue_unlock(qp);
	return (lwpid);
}

/*
 * Mutex wakeup code for releasing all waiters on a USYNC_THREAD mutex.
 */
static void
mutex_wakeup_all(mutex_t *mp)
{
	queue_head_t *qp;
	queue_root_t *qrp;
	int nlwpid = 0;
	int maxlwps = MAXLWPS;
	ulwp_t *ulwp;
	lwpid_t buffer[MAXLWPS];
	lwpid_t *lwpid = buffer;

	/*
	 * Walk the list of waiters and prepare to wake up all of them.
	 * The waiters flag has already been cleared from the mutex.
	 *
	 * We keep track of lwpids that are to be unparked in lwpid[].
	 * __lwp_unpark_all() is called to unpark all of them after
	 * they have been removed from the sleep queue and the sleep
	 * queue lock has been dropped.  If we run out of space in our
	 * on-stack buffer, we need to allocate more but we can't call
	 * lmalloc() because we are holding a queue lock when the overflow
	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
	 * either because the application may have allocated a small
	 * stack and we don't want to overrun the stack.  So we call
	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
	 * system call directly since that path acquires no locks.
	 */
	qp = queue_lock(mp, MX);
	for (;;) {
		if ((qrp = qp->qh_root) == NULL ||
		    (ulwp = qrp->qr_head) == NULL)
			break;
		ASSERT(ulwp->ul_wchan == mp);
		queue_unlink(qp, &qrp->qr_head, NULL);
		ulwp->ul_sleepq = NULL;
		ulwp->ul_wchan = NULL;
		if (nlwpid == maxlwps)
			lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
		lwpid[nlwpid++] = ulwp->ul_lwpid;
	}

	if (nlwpid == 0) {
		queue_unlock(qp);
	} else {
		mp->mutex_waiters = 0;
		no_preempt(curthread);
		queue_unlock(qp);
		if (nlwpid == 1)
			(void) __lwp_unpark(lwpid[0]);
		else
			(void) __lwp_unpark_all(lwpid, nlwpid);
		preempt(curthread);
	}

	if (lwpid != buffer)
		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
}
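
/*
 * Sketch of the overflow strategy described above (illustrative only;
 * the real code is alloc_lwpids()): ids are collected into the on-stack
 * array and, only on overflow, copied into an mmap()ed array, because
 * raw mmap() is an allocation path that takes no user-level locks:
 *
 *	newbuf = mmap(NULL, newsize, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *	(void) memcpy(newbuf, buffer, nlwpid * sizeof (lwpid_t));
 *	...continue collecting, unpark everyone...
 *	(void) munmap((caddr_t)newbuf, newsize);
 */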

/*
 * Release a process-private mutex.
 * As an optimization, if there are waiters but there are also spinners
 * attempting to acquire the mutex, then don't bother waking up a waiter;
 * one of the spinners will acquire the mutex soon and it would be a waste
 * of resources to wake up some thread just to have it spin for a while
 * and then possibly go back to sleep.  See mutex_trylock_adaptive().
 */
static lwpid_t
mutex_unlock_queue(mutex_t *mp, int release_all)
{
	ulwp_t *self = curthread;
	lwpid_t lwpid = 0;
	uint32_t old_lockword;

	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	sigoff(self);
	mp->mutex_owner = 0;
	old_lockword = clear_lockbyte(&mp->mutex_lockword);
	if ((old_lockword & WAITERMASK) &&
	    (release_all || (old_lockword & SPINNERMASK) == 0)) {
		no_preempt(self);	/* ensure a prompt wakeup */
		if (release_all)
			mutex_wakeup_all(mp);
		else
			lwpid = mutex_wakeup(mp);
		if (lwpid == 0)
			preempt(self);
	}
	sigon(self);
	return (lwpid);
}
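
/*
 * Caller-side sketch (this mirrors the fast_unlock path of
 * mutex_unlock(), below): a nonzero return is an lwpid that the caller
 * must unpark, after which the deferred preemption is allowed:
 *
 *	if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
 *		(void) __lwp_unpark(lwpid);
 *		preempt(self);
 *	}
 */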

/*
 * Like mutex_unlock_queue(), but for process-shared mutexes.
 */
static void
mutex_unlock_process(mutex_t *mp, int release_all)
{
	ulwp_t *self = curthread;
	uint64_t old_lockword64;

	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	sigoff(self);
	mp->mutex_owner = 0;
#if defined(__sparc) && !defined(_LP64)
	/* horrible hack, necessary only on 32-bit sparc */
	if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned && !(mp->mutex_type & LOCK_ROBUST)) {
		uint32_t old_lockword;
		mp->mutex_ownerpid = 0;
		old_lockword = clear_lockbyte(&mp->mutex_lockword);
		if ((old_lockword & WAITERMASK) &&
		    (release_all || (old_lockword & SPINNERMASK) == 0)) {
			no_preempt(self);	/* ensure a prompt wakeup */
			(void) ___lwp_mutex_wakeup(mp, release_all);
			preempt(self);
		}
		sigon(self);
		return;
	}
#endif
	/* mp->mutex_ownerpid is cleared by clear_lockbyte64() */
	old_lockword64 = clear_lockbyte64(&mp->mutex_lockword64);
	if ((old_lockword64 & WAITERMASK64) &&
	    (release_all || (old_lockword64 & SPINNERMASK64) == 0)) {
		no_preempt(self);	/* ensure a prompt wakeup */
		(void) ___lwp_mutex_wakeup(mp, release_all);
		preempt(self);
	}
	sigon(self);
}

void
stall(void)
{
	for (;;)
		(void) mutex_lock_kernel(&stall_mutex, NULL, NULL);
}

/*
 * Acquire a USYNC_THREAD mutex via user-level sleep queues.
 * We failed set_lock_byte(&mp->mutex_lockw) before coming here.
 * If successful, returns with mutex_owner set correctly.
 */
int
mutex_lock_queue(ulwp_t *self, tdb_mutex_stats_t *msp, mutex_t *mp,
	timespec_t *tsp)
{
	uberdata_t *udp = curthread->ul_uberdata;
	queue_head_t *qp;
	hrtime_t begin_sleep;
	int error = 0;

	self->ul_sp = stkptr();
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_wchan = mp;
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = mp;
		tdb_event(TD_SLEEP, udp);
	}
	if (msp) {
		tdb_incr(msp->mutex_sleep);
		begin_sleep = gethrtime();
	}

	DTRACE_PROBE1(plockstat, mutex__block, mp);

	/*
	 * Put ourself on the sleep queue, and while we are
	 * unable to grab the lock, go park in the kernel.
	 * Take ourself off the sleep queue after we acquire the lock.
	 * The waiter bit can be set/cleared only while holding the queue lock.
	 */
	qp = queue_lock(mp, MX);
	enqueue(qp, self, 0);
	mp->mutex_waiters = 1;
	for (;;) {
		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			mp->mutex_waiters = dequeue_self(qp);
			break;
		}
		set_parking_flag(self, 1);
		queue_unlock(qp);
		/*
		 * __lwp_park() will return the residual time in tsp
		 * if we are unparked before the timeout expires.
		 */
		error = __lwp_park(tsp, 0);
		set_parking_flag(self, 0);
		/*
		 * We could have taken a signal or suspended ourself.
		 * If we did, then we removed ourself from the queue.
		 * Someone else may have removed us from the queue
		 * as a consequence of mutex_unlock().  We may have
		 * gotten a timeout from __lwp_park().  Or we may still
		 * be on the queue and this is just a spurious wakeup.
		 */
		qp = queue_lock(mp, MX);
		if (self->ul_sleepq == NULL) {
			if (error) {
				mp->mutex_waiters = queue_waiter(qp)? 1 : 0;
				if (error != EINTR)
					break;
				error = 0;
			}
			if (set_lock_byte(&mp->mutex_lockw) == 0) {
				mp->mutex_owner = (uintptr_t)self;
				break;
			}
			enqueue(qp, self, 0);
			mp->mutex_waiters = 1;
		}
		ASSERT(self->ul_sleepq == qp &&
		    self->ul_qtype == MX &&
		    self->ul_wchan == mp);
		if (error) {
			if (error != EINTR) {
				mp->mutex_waiters = dequeue_self(qp);
				break;
			}
			error = 0;
		}
	}
	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
	    self->ul_wchan == NULL);
	self->ul_sp = 0;

	ASSERT(error == 0 || error == EINVAL || error == ETIME);

	if (error == 0 && (mp->mutex_flag & LOCK_NOTRECOVERABLE)) {
		ASSERT(mp->mutex_type & LOCK_ROBUST);
		/*
		 * We shouldn't own the mutex.
		 * Just clear the lock; everyone has already been waked up.
		 */
		mp->mutex_owner = 0;
		(void) clear_lockbyte(&mp->mutex_lockword);
		error = ENOTRECOVERABLE;
	}

	queue_unlock(qp);

	if (msp)
		msp->mutex_sleep_time += gethrtime() - begin_sleep;

	if (error) {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 0);
		DTRACE_PROBE2(plockstat, mutex__error, mp, error);
	} else {
		DTRACE_PROBE2(plockstat, mutex__blocked, mp, 1);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
		if (mp->mutex_flag & LOCK_OWNERDEAD) {
			ASSERT(mp->mutex_type & LOCK_ROBUST);
			error = EOWNERDEAD;
		}
	}

	return (error);
}

static int
mutex_recursion(mutex_t *mp, int mtype, int try)
{
	ASSERT(mutex_held(mp));
	ASSERT(mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK));
	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);

	if (mtype & LOCK_RECURSIVE) {
		if (mp->mutex_rcount == RECURSION_MAX) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, EAGAIN);
			return (EAGAIN);
		}
		mp->mutex_rcount++;
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 1, 0);
		return (0);
	}
	if (try == MUTEX_LOCK) {
		DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
		return (EDEADLK);
	}
	return (EBUSY);
}
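
/*
 * For reference, the semantics implemented above are what an
 * application sees with a recursive mutex (sketch; POSIX interfaces,
 * error handling omitted):
 *
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	(void) pthread_mutex_init(&m, &attr);
 *
 *	(void) pthread_mutex_lock(&m);		(owner; rcount 0)
 *	(void) pthread_mutex_lock(&m);		(mutex_rcount becomes 1)
 *	(void) pthread_mutex_unlock(&m);	(rcount back to 0, still held)
 *	(void) pthread_mutex_unlock(&m);	(really released)
 *
 * After RECURSION_MAX nested acquisitions, further lock attempts fail
 * with EAGAIN; an error-checking (non-recursive) mutex returns EDEADLK
 * for a blocking relock and EBUSY for a trylock, as coded above.
 */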

/*
 * Register this USYNC_PROCESS|LOCK_ROBUST mutex with the kernel so
 * it can apply LOCK_OWNERDEAD|LOCK_UNMAPPED if it becomes necessary.
 * We use tdb_hash_lock here and in the synch object tracking code in
 * the tdb_agent.c file.  There is no conflict between these two usages.
 */
void
register_lock(mutex_t *mp)
{
	uberdata_t *udp = curthread->ul_uberdata;
	uint_t hash = LOCK_HASH(mp);
	robust_t *rlp;
	robust_t *invalid;
	robust_t **rlpp;
	robust_t **table;

	if ((table = udp->robustlocks) == NULL) {
		lmutex_lock(&udp->tdb_hash_lock);
		if ((table = udp->robustlocks) == NULL) {
			table = lmalloc(LOCKHASHSZ * sizeof (robust_t *));
			membar_producer();
			udp->robustlocks = table;
		}
		lmutex_unlock(&udp->tdb_hash_lock);
	}
	membar_consumer();

	/*
	 * First search the registered table with no locks held.
	 * This is safe because the table never shrinks
	 * and we can only get a false negative.
	 */
	for (rlp = table[hash]; rlp != NULL; rlp = rlp->robust_next) {
		if (rlp->robust_lock == mp)	/* already registered */
			return;
	}

	/*
	 * The lock was not found.
	 * Repeat the operation with tdb_hash_lock held.
	 */
	lmutex_lock(&udp->tdb_hash_lock);

	invalid = NULL;
	for (rlpp = &table[hash];
	    (rlp = *rlpp) != NULL;
	    rlpp = &rlp->robust_next) {
		if (rlp->robust_lock == mp) {	/* already registered */
			lmutex_unlock(&udp->tdb_hash_lock);
			return;
		}
		/* remember the first invalid entry, if any */
		if (rlp->robust_lock == INVALID_ADDR && invalid == NULL)
			invalid = rlp;
	}

	/*
	 * The lock has never been registered.
	 * Add it to the table and register it now.
	 */
	if ((rlp = invalid) != NULL) {
		/*
		 * Reuse the invalid entry we found above.
		 * The linkages are still correct.
		 */
		rlp->robust_lock = mp;
		membar_producer();
	} else {
		/*
		 * Allocate a new entry and add it to
		 * the hash table and to the global list.
		 */
		rlp = lmalloc(sizeof (*rlp));
		rlp->robust_lock = mp;
		rlp->robust_next = NULL;
		rlp->robust_list = udp->robustlist;
		udp->robustlist = rlp;
		membar_producer();
		*rlpp = rlp;
	}

	lmutex_unlock(&udp->tdb_hash_lock);

	(void) ___lwp_mutex_register(mp, &rlp->robust_lock);
}
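
/*
 * The table initialization above is a double-checked-locking idiom:
 * a writer publishes only after fully constructing the object, with a
 * membar_producer() between construction and publication, and the
 * lock-free reader pairs that with membar_consumer() before it
 * dereferences what it read.  A minimal sketch of the same idiom
 * (shared_ptr and init_lock are hypothetical names, not from this file):
 *
 *	if ((p = shared_ptr) == NULL) {
 *		lmutex_lock(&init_lock);
 *		if ((p = shared_ptr) == NULL) {
 *			p = lmalloc(sizeof (*p));
 *			membar_producer();	(construct before publish)
 *			shared_ptr = p;
 *		}
 *		lmutex_unlock(&init_lock);
 *	}
 *	membar_consumer();			(publish before dereference)
 */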

/*
 * This is called in the child of fork()/forkall() to start over
 * with a clean slate.  (Each process must register its own locks.)
 * No locks are needed because all other threads are suspended or gone.
 */
void
unregister_locks(void)
{
	uberdata_t *udp = curthread->ul_uberdata;
	robust_t **table;
	robust_t *rlp;
	robust_t *next;

	/*
	 * Do this first, before calling lfree().
	 */
	table = udp->robustlocks;
	udp->robustlocks = NULL;
	rlp = udp->robustlist;
	udp->robustlist = NULL;

	/*
	 * Do this by traversing the global list, not the hash table.
	 */
	while (rlp != NULL) {
		next = rlp->robust_list;
		lfree(rlp, sizeof (*rlp));
		rlp = next;
	}
	if (table != NULL)
		lfree(table, LOCKHASHSZ * sizeof (robust_t *));
}

/*
 * Returns with mutex_owner set correctly.
 */
int
mutex_lock_internal(mutex_t *mp, timespec_t *tsp, int try)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
	int error = 0;
	int noceil = try & MUTEX_NOCEIL;
	uint8_t ceil;
	int myprio;

	try &= ~MUTEX_NOCEIL;
	ASSERT(try == MUTEX_TRY || try == MUTEX_LOCK);

	if (!self->ul_schedctl_called)
		(void) setup_schedctl();

	if (msp && try == MUTEX_TRY)
		tdb_incr(msp->mutex_try);

	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && mutex_held(mp))
		return (mutex_recursion(mp, mtype, try));

	if (self->ul_error_detection && try == MUTEX_LOCK &&
	    tsp == NULL && mutex_held(mp))
		lock_error(mp, "mutex_lock", NULL, NULL);

	if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
		update_sched(self);
		if (self->ul_cid != self->ul_rtclassid) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, EPERM);
			return (EPERM);
		}
		ceil = mp->mutex_ceiling;
		myprio = self->ul_epri? self->ul_epri : self->ul_pri;
		if (myprio > ceil) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, EINVAL);
			return (EINVAL);
		}
		if ((error = _ceil_mylist_add(mp)) != 0) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, error);
			return (error);
		}
		if (myprio < ceil)
			_ceil_prio_inherit(ceil);
	}

	if ((mtype & (USYNC_PROCESS | LOCK_ROBUST))
	    == (USYNC_PROCESS | LOCK_ROBUST))
		register_lock(mp);

	if (mtype & LOCK_PRIO_INHERIT) {
		/* go straight to the kernel */
		if (try == MUTEX_TRY)
			error = mutex_trylock_kernel(mp);
		else	/* MUTEX_LOCK */
			error = mutex_lock_kernel(mp, tsp, msp);
		/*
		 * The kernel never sets or clears the lock byte
		 * for LOCK_PRIO_INHERIT mutexes.
		 * Set it here for consistency.
		 */
		switch (error) {
		case 0:
			self->ul_pilocks++;
			mp->mutex_lockw = LOCKSET;
			break;
		case EOWNERDEAD:
		case ELOCKUNMAPPED:
			self->ul_pilocks++;
			mp->mutex_lockw = LOCKSET;
			/* FALLTHROUGH */
		case ENOTRECOVERABLE:
			ASSERT(mtype & LOCK_ROBUST);
			break;
		case EDEADLK:
			if (try == MUTEX_TRY) {
				error = EBUSY;
			} else if (tsp != NULL) {	/* simulate a timeout */
				/*
				 * Note: mutex_timedlock() never returns EINTR.
				 */
				timespec_t ts = *tsp;
				timespec_t rts;

				while (__nanosleep(&ts, &rts) == EINTR)
					ts = rts;
				error = ETIME;
			} else {	/* simulate a deadlock */
				stall();
			}
			break;
		}
	} else if (mtype & USYNC_PROCESS) {
		error = mutex_trylock_process(mp, try == MUTEX_LOCK);
		if (error == EBUSY && try == MUTEX_LOCK)
			error = mutex_lock_kernel(mp, tsp, msp);
	} else {	/* USYNC_THREAD */
		error = mutex_trylock_adaptive(mp, try == MUTEX_LOCK);
		if (error == EBUSY && try == MUTEX_LOCK)
			error = mutex_lock_queue(self, msp, mp, tsp);
	}

	switch (error) {
	case 0:
	case EOWNERDEAD:
	case ELOCKUNMAPPED:
		if (mtype & LOCK_ROBUST)
			remember_lock(mp);
		if (msp)
			record_begin_hold(msp);
		break;
	default:
		if ((mtype & LOCK_PRIO_PROTECT) && noceil == 0) {
			(void) _ceil_mylist_del(mp);
			if (myprio < ceil)
				_ceil_prio_waive();
		}
		if (try == MUTEX_TRY) {
			if (msp)
				tdb_incr(msp->mutex_try_fail);
			if (__td_event_report(self, TD_LOCK_TRY, udp)) {
				self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
				tdb_event(TD_LOCK_TRY, udp);
			}
		}
		break;
	}

	return (error);
}
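
/*
 * For reference, the LOCK_PRIO_PROTECT checks above implement the POSIX
 * priority-ceiling protocol, which an application selects roughly like
 * this (sketch; error handling omitted):
 *
 *	(void) pthread_mutexattr_init(&attr);
 *	(void) pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
 *	(void) pthread_mutexattr_setprioceiling(&attr, ceiling);
 *	(void) pthread_mutex_init(&m, &attr);
 *
 * As coded above, a locking thread whose priority exceeds the ceiling
 * gets EINVAL, and a thread outside the realtime class gets EPERM.
 */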

int
fast_process_lock(mutex_t *mp, timespec_t *tsp, int mtype, int try)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	/*
	 * We know that USYNC_PROCESS is set in mtype and that
	 * zero, one, or both of the flags LOCK_RECURSIVE and
	 * LOCK_ERRORCHECK are set, and that no other flags are set.
	 */
	ASSERT((mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0);
	enter_critical(self);
#if defined(__sparc) && !defined(_LP64)
	/* horrible hack, necessary only on 32-bit sparc */
	if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_misaligned) {
		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_ownerpid = udp->pid;
			mp->mutex_owner = (uintptr_t)self;
			exit_critical(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
	} else
#endif
	if (set_lock_byte64(&mp->mutex_lockword64, udp->pid) == 0) {
		mp->mutex_owner = (uintptr_t)self;
		/* mp->mutex_ownerpid was set by set_lock_byte64() */
		exit_critical(self);
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
		return (0);
	}
	exit_critical(self);

	if ((mtype & (LOCK_RECURSIVE|LOCK_ERRORCHECK)) && shared_mutex_held(mp))
		return (mutex_recursion(mp, mtype, try));

	if (try == MUTEX_LOCK) {
		if (mutex_trylock_process(mp, 1) == 0)
			return (0);
		return (mutex_lock_kernel(mp, tsp, NULL));
	}

	if (__td_event_report(self, TD_LOCK_TRY, udp)) {
		self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
		tdb_event(TD_LOCK_TRY, udp);
	}
	return (EBUSY);
}
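
/*
 * A note on the "horrible hack" above: process-shared mutexes live in
 * memory whose layout the application controls, so nothing guarantees
 * the 8-byte alignment that set_lock_byte64() needs on 32-bit sparc.
 * A sketch of how misalignment arises and can be avoided (shm_base is
 * a hypothetical shared mapping, not a name from this file):
 *
 *	mp_bad = (mutex_t *)(shm_base + 4);	(misaligned)
 *	mp_good = (mutex_t *)P2ROUNDUP((uintptr_t)(shm_base + 4),
 *	    _LONG_LONG_ALIGNMENT);
 *
 * Applications that keep their mutexes naturally aligned never take
 * the misaligned paths.
 */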

static int
mutex_lock_impl(mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	int mtype = mp->mutex_type;
	uberflags_t *gflags;

	if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    self->ul_error_detection && self->ul_misaligned == 0)
		lock_error(mp, "mutex_lock", NULL, "mutex is misaligned");

	/*
	 * Optimize the case of USYNC_THREAD, including
	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
	 * no error detection, no lock statistics,
	 * and the process has only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
	    self->ul_uberdata->uberflags.uf_all) == 0) {
		/*
		 * Only one thread exists so we don't need an atomic operation.
		 * We do, however, need to protect against signals.
		 */
		if (mp->mutex_lockw == 0) {
			sigoff(self);
			mp->mutex_lockw = LOCKSET;
			mp->mutex_owner = (uintptr_t)self;
			sigon(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
		if (mtype && MUTEX_OWNER(mp) == self)
			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
		/*
		 * We have reached a deadlock, probably because the
		 * process is executing non-async-signal-safe code in
		 * a signal handler and is attempting to acquire a lock
		 * that it already owns.  This is not surprising, given
		 * bad programming practices over the years that have
		 * resulted in applications calling printf() and such
		 * in their signal handlers.  Unless the user has told
		 * us that the signal handlers are safe by setting:
		 *	export _THREAD_ASYNC_SAFE=1
		 * we return EDEADLK rather than actually deadlocking.
		 */
		if (tsp == NULL &&
		    MUTEX_OWNER(mp) == self && !self->ul_async_safe) {
			DTRACE_PROBE2(plockstat, mutex__error, mp, EDEADLK);
			return (EDEADLK);
		}
	}

	/*
	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
	 * no error detection, and no lock statistics.
	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL &&
	    (gflags->uf_trs_ted |
	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
		if (mtype & USYNC_PROCESS)
			return (fast_process_lock(mp, tsp, mtype, MUTEX_LOCK));
		sigoff(self);
		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			sigon(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
		sigon(self);
		if (mtype && MUTEX_OWNER(mp) == self)
			return (mutex_recursion(mp, mtype, MUTEX_LOCK));
		if (mutex_trylock_adaptive(mp, 1) != 0)
			return (mutex_lock_queue(self, NULL, mp, tsp));
		return (0);
	}

	/* else do it the long way */
	return (mutex_lock_internal(mp, tsp, MUTEX_LOCK));
}

#pragma weak pthread_mutex_lock = mutex_lock
#pragma weak _mutex_lock = mutex_lock
int
mutex_lock(mutex_t *mp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (mutex_lock_impl(mp, NULL));
}

int
pthread_mutex_timedlock(pthread_mutex_t *_RESTRICT_KYWD mp,
	const struct timespec *_RESTRICT_KYWD abstime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	error = mutex_lock_impl((mutex_t *)mp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}

int
pthread_mutex_reltimedlock_np(pthread_mutex_t *_RESTRICT_KYWD mp,
	const struct timespec *_RESTRICT_KYWD reltime)
{
	timespec_t tslocal;
	int error;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	tslocal = *reltime;
	error = mutex_lock_impl((mutex_t *)mp, &tslocal);
	if (error == ETIME)
		error = ETIMEDOUT;
	return (error);
}
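
/*
 * For reference, the two timed-lock entry points above differ only in
 * how the caller expresses the timeout (sketch; error handling omitted):
 *
 *	struct timespec abst, rel = { 1, 0 };
 *
 *	(void) clock_gettime(CLOCK_REALTIME, &abst);
 *	abst.tv_sec += 1;
 *	error = pthread_mutex_timedlock(&m, &abst);	(absolute deadline)
 *	error = pthread_mutex_reltimedlock_np(&m, &rel);	(relative interval)
 *
 * Both return ETIMEDOUT (mapped from the kernel's ETIME, as above) if
 * the mutex cannot be acquired in time.
 */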

#pragma weak pthread_mutex_trylock = mutex_trylock
int
mutex_trylock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	uberflags_t *gflags;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	/*
	 * Optimize the case of USYNC_THREAD, including
	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
	 * no error detection, no lock statistics,
	 * and the process has only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
	    udp->uberflags.uf_all) == 0) {
		/*
		 * Only one thread exists so we don't need an atomic operation.
		 * We do, however, need to protect against signals.
		 */
		if (mp->mutex_lockw == 0) {
			sigoff(self);
			mp->mutex_lockw = LOCKSET;
			mp->mutex_owner = (uintptr_t)self;
			sigon(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
		if (mtype && MUTEX_OWNER(mp) == self)
			return (mutex_recursion(mp, mtype, MUTEX_TRY));
		return (EBUSY);
	}

	/*
	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
	 * no error detection, and no lock statistics.
	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL &&
	    (gflags->uf_trs_ted |
	    (mtype & ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK))) == 0) {
		if (mtype & USYNC_PROCESS)
			return (fast_process_lock(mp, NULL, mtype, MUTEX_TRY));
		sigoff(self);
		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			sigon(self);
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
			return (0);
		}
		sigon(self);
		if (mtype && MUTEX_OWNER(mp) == self)
			return (mutex_recursion(mp, mtype, MUTEX_TRY));
		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
			tdb_event(TD_LOCK_TRY, udp);
		}
		return (EBUSY);
	}

	/* else do it the long way */
	return (mutex_lock_internal(mp, NULL, MUTEX_TRY));
}
int
mutex_unlock_internal(mutex_t *mp, int retain_robust_flags)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	int mtype = mp->mutex_type;
	tdb_mutex_stats_t *msp;
	int error = 0;
	int release_all;
	lwpid_t lwpid;

	if ((mtype & (LOCK_ERRORCHECK | LOCK_ROBUST)) && !mutex_held(mp))
		return (EPERM);

	if (self->ul_error_detection && !mutex_held(mp))
		lock_error(mp, "mutex_unlock", NULL, NULL);

	if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
		mp->mutex_rcount--;
		DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
		return (0);
	}

	if ((msp = MUTEX_STATS(mp, udp)) != NULL)
		(void) record_hold_time(msp);

	if (!retain_robust_flags && !(mtype & LOCK_PRIO_INHERIT) &&
	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
		ASSERT(mtype & LOCK_ROBUST);
		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
	}
	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);

	if (mtype & LOCK_PRIO_INHERIT) {
		no_preempt(self);
		mp->mutex_owner = 0;
		/* mp->mutex_ownerpid is cleared by ___lwp_mutex_unlock() */
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		mp->mutex_lockw = LOCKCLEAR;
		self->ul_pilocks--;
		error = ___lwp_mutex_unlock(mp);
		preempt(self);
	} else if (mtype & USYNC_PROCESS) {
		mutex_unlock_process(mp, release_all);
	} else {	/* USYNC_THREAD */
		if ((lwpid = mutex_unlock_queue(mp, release_all)) != 0) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}

	if (mtype & LOCK_ROBUST)
		forget_lock(mp);

	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
		_ceil_prio_waive();

	return (error);
}
#pragma weak pthread_mutex_unlock = mutex_unlock
#pragma weak _mutex_unlock = mutex_unlock
int
mutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	int mtype = mp->mutex_type;
	uberflags_t *gflags;
	lwpid_t lwpid;
	short el;

	/*
	 * Optimize the case of USYNC_THREAD, including
	 * the LOCK_RECURSIVE and LOCK_ERRORCHECK cases,
	 * no error detection, no lock statistics,
	 * and the process has only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) |
	    self->ul_uberdata->uberflags.uf_all) == 0) {
		if (mtype) {
			/*
			 * At this point we know that one or both of the
			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
			 */
			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
				return (EPERM);
			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
				mp->mutex_rcount--;
				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
				return (0);
			}
		}
		/*
		 * Only one thread exists so we don't need an atomic operation.
		 * Also, there can be no waiters.
		 */
		sigoff(self);
		mp->mutex_owner = 0;
		mp->mutex_lockword = 0;
		sigon(self);
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
		return (0);
	}

	/*
	 * Optimize the common cases of USYNC_THREAD or USYNC_PROCESS,
	 * no error detection, and no lock statistics.
	 * Include LOCK_RECURSIVE and LOCK_ERRORCHECK cases.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL) {
		if (((el = gflags->uf_trs_ted) | mtype) == 0) {
fast_unlock:
			if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
				(void) __lwp_unpark(lwpid);
				preempt(self);
			}
			return (0);
		}
		if (el)		/* error detection or lock statistics */
			goto slow_unlock;
		if ((mtype & ~(LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
			/*
			 * At this point we know that one or both of the
			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set.
			 */
			if ((mtype & LOCK_ERRORCHECK) && !MUTEX_OWNED(mp, self))
				return (EPERM);
			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
				mp->mutex_rcount--;
				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
				return (0);
			}
			goto fast_unlock;
		}
		if ((mtype &
		    ~(USYNC_PROCESS|LOCK_RECURSIVE|LOCK_ERRORCHECK)) == 0) {
			/*
			 * At this point we know that zero, one, or both of the
			 * flags LOCK_RECURSIVE or LOCK_ERRORCHECK is set and
			 * that the USYNC_PROCESS flag is set.
			 */
			if ((mtype & LOCK_ERRORCHECK) && !shared_mutex_held(mp))
				return (EPERM);
			if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0) {
				mp->mutex_rcount--;
				DTRACE_PROBE2(plockstat, mutex__release, mp, 1);
				return (0);
			}
			mutex_unlock_process(mp, 0);
			return (0);
		}
	}

	/* else do it the long way */
slow_unlock:
	return (mutex_unlock_internal(mp, 0));
}
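/*
 * Illustrative sketch (not part of libc): the LOCK_RECURSIVE and
 * LOCK_ERRORCHECK paths in mutex_unlock() above correspond to the
 * POSIX mutex types.  With PTHREAD_MUTEX_RECURSIVE, each unlock
 * just decrements mutex_rcount until the outermost unlock releases
 * the lock; with PTHREAD_MUTEX_ERRORCHECK, unlocking a mutex you do
 * not own returns EPERM instead of corrupting the lock word.
 */
#if 0	/* example only, not compiled into libc */
#include <pthread.h>

static void
example_recursive_unlock(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t m;

	(void) pthread_mutexattr_init(&attr);
	(void) pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	(void) pthread_mutex_init(&m, &attr);
	(void) pthread_mutexattr_destroy(&attr);

	(void) pthread_mutex_lock(&m);
	(void) pthread_mutex_lock(&m);	/* recursion: rcount goes to 1 */
	(void) pthread_mutex_unlock(&m);	/* rcount back to 0, still owned */
	(void) pthread_mutex_unlock(&m);	/* outermost unlock releases it */
	(void) pthread_mutex_destroy(&m);
}
#endif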
/*
 * Internally to the library, almost all mutex lock/unlock actions
 * go through these lmutex_ functions, to protect critical regions.
 * We replicate a bit of code from mutex_lock() and mutex_unlock()
 * to make these functions faster since we know that the mutex type
 * of all internal locks is USYNC_THREAD.  We also know that internal
 * locking can never fail, so we panic if it does.
 */
void
lmutex_lock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	enter_critical(self);
	/*
	 * Optimize the case of no lock statistics and only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (udp->uberflags.uf_all == 0) {
		/*
		 * Only one thread exists; the mutex must be free.
		 */
		ASSERT(mp->mutex_lockw == 0);
		mp->mutex_lockw = LOCKSET;
		mp->mutex_owner = (uintptr_t)self;
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	} else {
		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);

		if (!self->ul_schedctl_called)
			(void) setup_schedctl();

		if (set_lock_byte(&mp->mutex_lockw) == 0) {
			mp->mutex_owner = (uintptr_t)self;
			DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
		} else if (mutex_trylock_adaptive(mp, 1) != 0) {
			(void) mutex_lock_queue(self, msp, mp, NULL);
		}

		if (msp)
			record_begin_hold(msp);
	}
}
void
lmutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	ASSERT(mp->mutex_type == USYNC_THREAD);

	/*
	 * Optimize the case of no lock statistics and only a single thread.
	 * (Most likely a traditional single-threaded application.)
	 */
	if (udp->uberflags.uf_all == 0) {
		/*
		 * Only one thread exists so there can be no waiters.
		 */
		mp->mutex_owner = 0;
		mp->mutex_lockword = 0;
		DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	} else {
		tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
		lwpid_t lwpid;

		if (msp)
			(void) record_hold_time(msp);
		if ((lwpid = mutex_unlock_queue(mp, 0)) != 0) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}
	exit_critical(self);
}

/*
 * For specialized code in libc, like the asynchronous i/o code,
 * the following sig_*() locking primitives are used in order
 * to make the code asynchronous signal safe.  Signals are
 * deferred while locks acquired by these functions are held.
 */
void
sig_mutex_lock(mutex_t *mp)
{
	ulwp_t *self = curthread;

	sigoff(self);
	(void) mutex_lock(mp);
}

void
sig_mutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;

	(void) mutex_unlock(mp);
	sigon(self);
}

int
sig_mutex_trylock(mutex_t *mp)
{
	ulwp_t *self = curthread;
	int error;

	sigoff(self);
	if ((error = mutex_trylock(mp)) != 0)
		sigon(self);
	return (error);
}

/*
 * sig_cond_wait() is a cancellation point.
 */
int
sig_cond_wait(cond_t *cv, mutex_t *mp)
{
	int error;

	ASSERT(curthread->ul_sigdefer != 0);
	pthread_testcancel();
	error = __cond_wait(cv, mp);
	if (error == EINTR && curthread->ul_cursig) {
		sig_mutex_unlock(mp);
		/* take the deferred signal here */
		sig_mutex_lock(mp);
	}
	pthread_testcancel();
	return (error);
}

/*
 * sig_cond_reltimedwait() is a cancellation point.
 */
int
sig_cond_reltimedwait(cond_t *cv, mutex_t *mp, const timespec_t *ts)
{
	int error;

	ASSERT(curthread->ul_sigdefer != 0);
	pthread_testcancel();
	error = __cond_reltimedwait(cv, mp, ts);
	if (error == EINTR && curthread->ul_cursig) {
		sig_mutex_unlock(mp);
		/* take the deferred signal here */
		sig_mutex_lock(mp);
	}
	pthread_testcancel();
	return (error);
}
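/*
 * Illustrative sketch (not part of libc): how libc-internal code such
 * as the aio implementation pairs these sig_*() primitives.  Signals
 * stay deferred from sig_mutex_lock() to sig_mutex_unlock(), so a
 * signal handler can never observe the protected state mid-update,
 * and the ul_sigdefer ASSERT in sig_cond_wait() is satisfied.
 * The lock, condvar, and predicate names are hypothetical.
 */
#if 0	/* example only, not compiled into libc */
static mutex_t example_aio_lock = DEFAULTMUTEX;
static cond_t example_aio_cv = DEFAULTCV;
static int example_aio_ready;

static void
example_wait_for_ready(void)
{
	sig_mutex_lock(&example_aio_lock);	/* defers signals */
	while (!example_aio_ready)
		(void) sig_cond_wait(&example_aio_cv, &example_aio_lock);
	sig_mutex_unlock(&example_aio_lock);	/* takes deferred signals */
}
#endif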
/*
 * For specialized code in libc, like the stdio code,
 * the following cancel_safe_*() locking primitives are used in
 * order to make the code cancellation-safe.  Cancellation is
 * deferred while locks acquired by these functions are held.
 */
void
cancel_safe_mutex_lock(mutex_t *mp)
{
	(void) mutex_lock(mp);
	curthread->ul_libc_locks++;
}

int
cancel_safe_mutex_trylock(mutex_t *mp)
{
	int error;

	if ((error = mutex_trylock(mp)) == 0)
		curthread->ul_libc_locks++;
	return (error);
}

void
cancel_safe_mutex_unlock(mutex_t *mp)
{
	ulwp_t *self = curthread;

	ASSERT(self->ul_libc_locks != 0);

	(void) mutex_unlock(mp);

	/*
	 * Decrement the count of locks held by cancel_safe_mutex_lock().
	 * If we are then in a position to terminate cleanly and
	 * if there is a pending cancellation and cancellation
	 * is not disabled and we received EINTR from a recent
	 * system call then perform the cancellation action now.
	 */
	if (--self->ul_libc_locks == 0 &&
	    !(self->ul_vfork | self->ul_nocancel |
	    self->ul_critical | self->ul_sigdefer) &&
	    cancel_active())
		pthread_exit(PTHREAD_CANCELED);
}

static int
shared_mutex_held(mutex_t *mparg)
{
	/*
	 * The 'volatile' is necessary to make sure the compiler doesn't
	 * reorder the tests of the various components of the mutex.
	 * They must be tested in this order:
	 *	mutex_lockw
	 *	mutex_owner
	 *	mutex_ownerpid
	 * This relies on the fact that everywhere mutex_lockw is cleared,
	 * mutex_owner and mutex_ownerpid are cleared before mutex_lockw
	 * is cleared, and that everywhere mutex_lockw is set, mutex_owner
	 * and mutex_ownerpid are set after mutex_lockw is set, and that
	 * mutex_lockw is set or cleared with a memory barrier.
	 */
	volatile mutex_t *mp = (volatile mutex_t *)mparg;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;

	return (MUTEX_OWNED(mp, self) && mp->mutex_ownerpid == udp->pid);
}

#pragma weak _mutex_held = mutex_held
int
mutex_held(mutex_t *mparg)
{
	volatile mutex_t *mp = (volatile mutex_t *)mparg;

	if (mparg->mutex_type & USYNC_PROCESS)
		return (shared_mutex_held(mparg));
	return (MUTEX_OWNED(mp, curthread));
}

#pragma weak pthread_mutex_destroy = mutex_destroy
#pragma weak _mutex_destroy = mutex_destroy
int
mutex_destroy(mutex_t *mp)
{
	if (mp->mutex_type & USYNC_PROCESS)
		forget_lock(mp);
	(void) memset(mp, 0, sizeof (*mp));
	tdb_sync_obj_deregister(mp);
	return (0);
}
#pragma weak pthread_mutex_consistent_np = mutex_consistent
#pragma weak pthread_mutex_consistent = mutex_consistent
int
mutex_consistent(mutex_t *mp)
{
	/*
	 * Do this only for an inconsistent, initialized robust lock
	 * that we hold.  For all other cases, return EINVAL.
	 */
	if (mutex_held(mp) &&
	    (mp->mutex_type & LOCK_ROBUST) &&
	    (mp->mutex_flag & LOCK_INITED) &&
	    (mp->mutex_flag & (LOCK_OWNERDEAD | LOCK_UNMAPPED))) {
		mp->mutex_flag &= ~(LOCK_OWNERDEAD | LOCK_UNMAPPED);
		mp->mutex_rcount = 0;
		return (0);
	}
	return (EINVAL);
}
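/*
 * Illustrative sketch (not part of libc): the owner-death protocol
 * that mutex_consistent() above completes.  When a robust mutex's
 * holder dies, the next pthread_mutex_lock() returns EOWNERDEAD with
 * the lock held; the caller repairs the protected state and calls
 * pthread_mutex_consistent() before unlocking.  Skipping that call
 * leaves the mutex permanently ENOTRECOVERABLE.  example_repair_state
 * is a hypothetical callback.
 */
#if 0	/* example only, not compiled into libc */
#include <pthread.h>
#include <errno.h>

static int
example_robust_lock(pthread_mutex_t *mp, void (*example_repair_state)(void))
{
	int error = pthread_mutex_lock(mp);

	if (error == EOWNERDEAD) {
		example_repair_state();	/* fix what the dead owner left */
		(void) pthread_mutex_consistent(mp);
		error = 0;
	}
	/* ENOTRECOVERABLE and other errors are returned to the caller */
	return (error);
}
#endif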
/*
 * Spin locks are separate from ordinary mutexes,
 * but we use the same data structure for them.
 */

int
pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
	mutex_t *mp = (mutex_t *)lock;

	(void) memset(mp, 0, sizeof (*mp));
	if (pshared == PTHREAD_PROCESS_SHARED)
		mp->mutex_type = USYNC_PROCESS;
	else
		mp->mutex_type = USYNC_THREAD;
	mp->mutex_flag = LOCK_INITED;
	mp->mutex_magic = MUTEX_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their mutexes
	 * (and don't check the return code from pthread_spin_init),
	 * we put it here, after initializing the mutex regardless.
	 */
	if (((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}

int
pthread_spin_destroy(pthread_spinlock_t *lock)
{
	(void) memset(lock, 0, sizeof (*lock));
	return (0);
}

int
pthread_spin_trylock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;
	int error = 0;

	no_preempt(self);
	if (set_lock_byte(&mp->mutex_lockw) != 0)
		error = EBUSY;
	else {
		mp->mutex_owner = (uintptr_t)self;
		if (mp->mutex_type == USYNC_PROCESS)
			mp->mutex_ownerpid = self->ul_uberdata->pid;
		DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, 0);
	}
	preempt(self);
	return (error);
}

int
pthread_spin_lock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;
	volatile uint8_t *lockp = (volatile uint8_t *)&mp->mutex_lockw;
	int count = 0;

	ASSERT(!self->ul_critical || self->ul_bindflags);

	DTRACE_PROBE1(plockstat, mutex__spin, mp);

	/*
	 * We don't care whether the owner is running on a processor.
	 * We just spin because that's what this interface requires.
	 */
	for (;;) {
		if (*lockp == 0) {	/* lock byte appears to be clear */
			no_preempt(self);
			if (set_lock_byte(lockp) == 0)
				break;
			preempt(self);
		}
		if (count < INT_MAX)
			count++;
		SMT_PAUSE();
	}
	mp->mutex_owner = (uintptr_t)self;
	if (mp->mutex_type == USYNC_PROCESS)
		mp->mutex_ownerpid = self->ul_uberdata->pid;
	preempt(self);
	if (count) {
		DTRACE_PROBE3(plockstat, mutex__spun, mp, 1, count);
	}
	DTRACE_PROBE3(plockstat, mutex__acquire, mp, 0, count);
	return (0);
}

int
pthread_spin_unlock(pthread_spinlock_t *lock)
{
	mutex_t *mp = (mutex_t *)lock;
	ulwp_t *self = curthread;

	no_preempt(self);
	mp->mutex_owner = 0;
	mp->mutex_ownerpid = 0;
	DTRACE_PROBE2(plockstat, mutex__release, mp, 0);
	(void) atomic_swap_32(&mp->mutex_lockword, 0);
	preempt(self);
	return (0);
}
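/*
 * Illustrative sketch (not part of libc): the spin lock interfaces
 * above busy-wait with SMT_PAUSE() instead of sleeping, so they are
 * appropriate only for very short critical sections.  example_spin,
 * example_counter, and the helper functions are hypothetical.
 */
#if 0	/* example only, not compiled into libc */
#include <pthread.h>

static pthread_spinlock_t example_spin;
static long example_counter;

static void
example_setup(void)
{
	(void) pthread_spin_init(&example_spin, PTHREAD_PROCESS_PRIVATE);
}

static void
example_increment(void)
{
	(void) pthread_spin_lock(&example_spin);
	example_counter++;		/* short critical section */
	(void) pthread_spin_unlock(&example_spin);
}
#endif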
#define	INITIAL_LOCKS	8	/* initial size of ul_heldlocks.array */

/*
 * Find/allocate an entry for 'lock' in our array of held locks.
 */
static mutex_t **
find_lock_entry(mutex_t *lock)
{
	ulwp_t *self = curthread;
	mutex_t **remembered = NULL;
	mutex_t **lockptr;
	uint_t nlocks;

	if ((nlocks = self->ul_heldlockcnt) != 0)
		lockptr = self->ul_heldlocks.array;
	else {
		nlocks = 1;
		lockptr = &self->ul_heldlocks.single;
	}

	for (; nlocks; nlocks--, lockptr++) {
		if (*lockptr == lock)
			return (lockptr);
		if (*lockptr == NULL && remembered == NULL)
			remembered = lockptr;
	}
	if (remembered != NULL) {
		*remembered = lock;
		return (remembered);
	}

	/*
	 * No entry available.  Allocate more space, converting
	 * the single entry into an array of entries if necessary.
	 */
	if ((nlocks = self->ul_heldlockcnt) == 0) {
		/*
		 * Initial allocation of the array.
		 * Convert the single entry into an array.
		 */
		self->ul_heldlockcnt = nlocks = INITIAL_LOCKS;
		lockptr = lmalloc(nlocks * sizeof (mutex_t *));
		/*
		 * The single entry becomes the first entry in the array.
		 */
		*lockptr = self->ul_heldlocks.single;
		self->ul_heldlocks.array = lockptr;
		/*
		 * Return the next available entry in the array.
		 */
		*++lockptr = lock;
		return (lockptr);
	}
	/*
	 * Reallocate the array, doubling the size each time.
	 */
	lockptr = lmalloc(nlocks * 2 * sizeof (mutex_t *));
	(void) memcpy(lockptr, self->ul_heldlocks.array,
	    nlocks * sizeof (mutex_t *));
	lfree(self->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
	self->ul_heldlocks.array = lockptr;
	self->ul_heldlockcnt *= 2;
	/*
	 * Return the next available entry in the newly allocated array.
	 */
	*(lockptr += nlocks) = lock;
	return (lockptr);
}

/*
 * Insert 'lock' into our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
remember_lock(mutex_t *lock)
{
	(void) find_lock_entry(lock);
}

/*
 * Remove 'lock' from our list of held locks.
 * Currently only used for LOCK_ROBUST mutexes.
 */
void
forget_lock(mutex_t *lock)
{
	*find_lock_entry(lock) = NULL;
}

/*
 * Free the array of held locks.
 */
void
heldlock_free(ulwp_t *ulwp)
{
	uint_t nlocks;

	if ((nlocks = ulwp->ul_heldlockcnt) != 0)
		lfree(ulwp->ul_heldlocks.array, nlocks * sizeof (mutex_t *));
	ulwp->ul_heldlockcnt = 0;
	ulwp->ul_heldlocks.array = NULL;
}
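/*
 * Illustrative sketch (not part of libc): the growth pattern used by
 * find_lock_entry() above.  Starting from INITIAL_LOCKS entries and
 * doubling on each overflow gives amortized O(1) insertion while the
 * common case (a thread holding few robust locks) stays in the single
 * entry or the initial 8-entry array.  example_grow is a hypothetical
 * stand-alone rendering of the same reallocation step, using malloc()
 * and free() rather than the internal lmalloc()/lfree().
 */
#if 0	/* example only, not compiled into libc */
#include <stdlib.h>
#include <string.h>

static mutex_t **
example_grow(mutex_t **array, uint_t *countp)
{
	uint_t nlocks = *countp;
	mutex_t **newarray = malloc(nlocks * 2 * sizeof (mutex_t *));

	if (newarray == NULL)
		return (NULL);
	(void) memcpy(newarray, array, nlocks * sizeof (mutex_t *));
	(void) memset(newarray + nlocks, 0, nlocks * sizeof (mutex_t *));
	free(array);
	*countp = nlocks * 2;
	return (newarray);
}
#endif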
/*
 * Mark all held LOCK_ROBUST mutexes LOCK_OWNERDEAD.
 * Called from _thrp_exit() to deal with abandoned locks.
 */
void
heldlock_exit(void)
{
	ulwp_t *self = curthread;
	mutex_t **lockptr;
	uint_t nlocks;
	mutex_t *mp;

	if ((nlocks = self->ul_heldlockcnt) != 0)
		lockptr = self->ul_heldlocks.array;
	else {
		nlocks = 1;
		lockptr = &self->ul_heldlocks.single;
	}

	for (; nlocks; nlocks--, lockptr++) {
		/*
		 * The kernel takes care of transitioning held
		 * LOCK_PRIO_INHERIT mutexes to LOCK_OWNERDEAD.
		 * We avoid that case here.
		 */
		if ((mp = *lockptr) != NULL &&
		    mutex_held(mp) &&
		    (mp->mutex_type & (LOCK_ROBUST | LOCK_PRIO_INHERIT)) ==
		    LOCK_ROBUST) {
			mp->mutex_rcount = 0;
			if (!(mp->mutex_flag & LOCK_UNMAPPED))
				mp->mutex_flag |= LOCK_OWNERDEAD;
			(void) mutex_unlock_internal(mp, 1);
		}
	}

	heldlock_free(self);
}

#pragma weak _cond_init = cond_init
/* ARGSUSED2 */
int
cond_init(cond_t *cvp, int type, void *arg)
{
	if (type != USYNC_THREAD && type != USYNC_PROCESS)
		return (EINVAL);
	(void) memset(cvp, 0, sizeof (*cvp));
	cvp->cond_type = (uint16_t)type;
	cvp->cond_magic = COND_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their condvars
	 * (and don't check the return code from cond_init),
	 * we put it here, after initializing the condvar regardless.
	 */
	if (((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}
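/*
 * Illustrative sketch (not part of libc): cond_init() above is the
 * engine under pthread_cond_init().  A process-shared condition
 * variable (USYNC_PROCESS) is requested through the pthread attribute
 * interface; the condvar would normally live in memory mapped by all
 * participating processes, which is assumed here.
 */
#if 0	/* example only, not compiled into libc */
#include <pthread.h>

static int
example_init_shared_cv(pthread_cond_t *cvp)	/* cvp in shared memory */
{
	pthread_condattr_t attr;
	int error;

	(void) pthread_condattr_init(&attr);
	(void) pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	error = pthread_cond_init(cvp, &attr);	/* reaches cond_init() */
	(void) pthread_condattr_destroy(&attr);
	return (error);
}
#endif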
/*
 * cond_sleep_queue(): utility function for cond_wait_queue().
 *
 * Go to sleep on a condvar sleep queue, expect to be woken up
 * by someone calling cond_signal() or cond_broadcast() or due
 * to receiving a UNIX signal or being cancelled, or just simply
 * due to a spurious wakeup (like someone calling forkall()).
 *
 * The associated mutex is *not* reacquired before returning.
 * That must be done by the caller of cond_sleep_queue().
 */
static int
cond_sleep_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	queue_head_t *qp;
	queue_head_t *mqp;
	lwpid_t lwpid;
	int signalled;
	int error;
	int cv_wake;
	int release_all;

	/*
	 * Put ourself on the CV sleep queue, unlock the mutex, then
	 * park ourself and unpark a candidate lwp to grab the mutex.
	 * We must go onto the CV sleep queue before dropping the
	 * mutex in order to guarantee atomicity of the operation.
	 */
	self->ul_sp = stkptr();
	qp = queue_lock(cvp, CV);
	enqueue(qp, self, 0);
	cvp->cond_waiters_user = 1;
	self->ul_cvmutex = mp;
	self->ul_cv_wake = cv_wake = (tsp != NULL);
	self->ul_signalled = 0;
	if (mp->mutex_flag & LOCK_OWNERDEAD) {
		mp->mutex_flag &= ~LOCK_OWNERDEAD;
		mp->mutex_flag |= LOCK_NOTRECOVERABLE;
	}
	release_all = ((mp->mutex_flag & LOCK_NOTRECOVERABLE) != 0);
	lwpid = mutex_unlock_queue(mp, release_all);
	for (;;) {
		set_parking_flag(self, 1);
		queue_unlock(qp);
		if (lwpid != 0) {
			lwpid = preempt_unpark(self, lwpid);
			preempt(self);
		}
		/*
		 * We may have a deferred signal present,
		 * in which case we should return EINTR.
		 * Also, we may have received a SIGCANCEL; if so
		 * and we are cancelable we should return EINTR.
		 * We force an immediate EINTR return from
		 * __lwp_park() by turning our parking flag off.
		 */
		if (self->ul_cursig != 0 ||
		    (self->ul_cancelable && self->ul_cancel_pending))
			set_parking_flag(self, 0);
		/*
		 * __lwp_park() will return the residual time in tsp
		 * if we are unparked before the timeout expires.
		 */
		error = __lwp_park(tsp, lwpid);
		set_parking_flag(self, 0);
		lwpid = 0;	/* unpark the other lwp only once */
		/*
		 * We were woken up by cond_signal(), cond_broadcast(),
		 * by an interrupt or timeout (EINTR or ETIME),
		 * or we may just have gotten a spurious wakeup.
		 */
		qp = queue_lock(cvp, CV);
		if (!cv_wake)
			mqp = queue_lock(mp, MX);
		if (self->ul_sleepq == NULL)
			break;
		/*
		 * We are on either the condvar sleep queue or the
		 * mutex sleep queue.  Break out of the sleep if we
		 * were interrupted or we timed out (EINTR or ETIME).
		 * Else this is a spurious wakeup; continue the loop.
		 */
		if (!cv_wake && self->ul_sleepq == mqp) {	/* mutex queue */
			if (error) {
				mp->mutex_waiters = dequeue_self(mqp);
				break;
			}
			tsp = NULL;	/* no more timeout */
		} else if (self->ul_sleepq == qp) {	/* condvar queue */
			if (error) {
				cvp->cond_waiters_user = dequeue_self(qp);
				break;
			}
			/*
			 * Else a spurious wakeup on the condvar queue.
			 * __lwp_park() has already adjusted the timeout.
			 */
		} else {
			thr_panic("cond_sleep_queue(): thread not on queue");
		}
		if (!cv_wake)
			queue_unlock(mqp);
	}

	self->ul_sp = 0;
	self->ul_cv_wake = 0;
	ASSERT(self->ul_cvmutex == NULL);
	ASSERT(self->ul_sleepq == NULL && self->ul_link == NULL &&
	    self->ul_wchan == NULL);

	signalled = self->ul_signalled;
	self->ul_signalled = 0;
	queue_unlock(qp);
	if (!cv_wake)
		queue_unlock(mqp);

	/*
	 * If we were concurrently cond_signal()d and any of:
	 * received a UNIX signal, were cancelled, or got a timeout,
	 * then perform another cond_signal() to avoid consuming it.
	 */
	if (error && signalled)
		(void) cond_signal(cvp);

	return (error);
}
static void
cond_wait_check_alignment(cond_t *cvp, mutex_t *mp)
{
	if ((uintptr_t)mp & (_LONG_LONG_ALIGNMENT - 1))
		lock_error(mp, "cond_wait", cvp, "mutex is misaligned");
	if ((uintptr_t)cvp & (_LONG_LONG_ALIGNMENT - 1))
		lock_error(mp, "cond_wait", cvp, "condvar is misaligned");
}

int
cond_wait_queue(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	int error;
	int merror;

	if (self->ul_error_detection && self->ul_misaligned == 0)
		cond_wait_check_alignment(cvp, mp);

	/*
	 * The old thread library was programmed to defer signals
	 * while in cond_wait() so that the associated mutex would
	 * be guaranteed to be held when the application signal
	 * handler was invoked.
	 *
	 * We do not behave this way by default; the state of the
	 * associated mutex in the signal handler is undefined.
	 *
	 * To accommodate applications that depend on the old
	 * behavior, the _THREAD_COND_WAIT_DEFER environment
	 * variable can be set to 1 and we will behave in the
	 * old way with respect to cond_wait().
	 */
	if (self->ul_cond_wait_defer)
		sigoff(self);

	error = cond_sleep_queue(cvp, mp, tsp);

	/*
	 * Reacquire the mutex.
	 */
	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
		error = merror;

	/*
	 * Take any deferred signal now, after we have reacquired the mutex.
	 */
	if (self->ul_cond_wait_defer)
		sigon(self);

	return (error);
}
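/*
 * Illustrative sketch (not part of libc): cond_sleep_queue() above is
 * explicit that waiters can be woken spuriously (forkall(), races with
 * signals and timeouts).  That is why every user of the condvar wait
 * interfaces must re-test its predicate in a loop.  The names
 * example_q_lock, example_q_cv, and example_q_len are hypothetical
 * application state.
 */
#if 0	/* example only, not compiled into libc */
#include <pthread.h>

static pthread_mutex_t example_q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_q_cv = PTHREAD_COND_INITIALIZER;
static int example_q_len;

static void
example_wait_nonempty(void)
{
	(void) pthread_mutex_lock(&example_q_lock);
	while (example_q_len == 0)	/* re-test after every wakeup */
		(void) pthread_cond_wait(&example_q_cv, &example_q_lock);
	/* example_q_len > 0 here, and example_q_lock is held */
	(void) pthread_mutex_unlock(&example_q_lock);
}
#endif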
/*
 * cond_sleep_kernel(): utility function for cond_wait_kernel().
 * See the comment ahead of cond_sleep_queue(), above.
 */
static int
cond_sleep_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	int mtype = mp->mutex_type;
	ulwp_t *self = curthread;
	int error;

	if ((mtype & LOCK_PRIO_PROTECT) && _ceil_mylist_del(mp))
		_ceil_prio_waive();

	self->ul_sp = stkptr();
	self->ul_wchan = cvp;
	sigoff(self);
	mp->mutex_owner = 0;
	/* mp->mutex_ownerpid is cleared by ___lwp_cond_wait() */
	if (mtype & LOCK_PRIO_INHERIT) {
		mp->mutex_lockw = LOCKCLEAR;
		self->ul_pilocks--;
	}
	/*
	 * ___lwp_cond_wait() returns immediately with EINTR if
	 * set_parking_flag(self,0) is called on this lwp before it
	 * goes to sleep in the kernel.  sigacthandler() calls this
	 * when a deferred signal is noted.  This assures that we don't
	 * get stuck in ___lwp_cond_wait() with all signals blocked
	 * due to taking a deferred signal before going to sleep.
	 */
	set_parking_flag(self, 1);
	if (self->ul_cursig != 0 ||
	    (self->ul_cancelable && self->ul_cancel_pending))
		set_parking_flag(self, 0);
	error = ___lwp_cond_wait(cvp, mp, tsp, 1);
	set_parking_flag(self, 0);
	sigon(self);
	self->ul_sp = 0;
	self->ul_wchan = NULL;
	return (error);
}

int
cond_wait_kernel(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	ulwp_t *self = curthread;
	int error;
	int merror;

	if (self->ul_error_detection && self->ul_misaligned == 0)
		cond_wait_check_alignment(cvp, mp);

	/*
	 * See the large comment in cond_wait_queue(), above.
	 */
	if (self->ul_cond_wait_defer)
		sigoff(self);

	error = cond_sleep_kernel(cvp, mp, tsp);

	/*
	 * Override the return code from ___lwp_cond_wait()
	 * with any non-zero return code from mutex_lock().
	 * This addresses robust lock failures in particular;
	 * the caller must see the EOWNERDEAD or ENOTRECOVERABLE
	 * errors in order to take corrective action.
	 */
	if ((merror = mutex_lock_impl(mp, NULL)) != 0)
		error = merror;

	/*
	 * Take any deferred signal now, after we have reacquired the mutex.
	 */
	if (self->ul_cond_wait_defer)
		sigon(self);

	return (error);
}
/*
 * Common code for cond_wait() and cond_timedwait()
 */
int
cond_wait_common(cond_t *cvp, mutex_t *mp, timespec_t *tsp)
{
	int mtype = mp->mutex_type;
	hrtime_t begin_sleep = 0;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
	tdb_mutex_stats_t *msp = MUTEX_STATS(mp, udp);
	uint8_t rcount;
	int error = 0;

	/*
	 * The SUSV3 Posix spec for pthread_cond_timedwait() states:
	 *	Except in the case of [ETIMEDOUT], all these error checks
	 *	shall act as if they were performed immediately at the
	 *	beginning of processing for the function and shall cause
	 *	an error return, in effect, prior to modifying the state
	 *	of the mutex specified by mutex or the condition variable
	 *	specified by cond.
	 * Therefore, we must return EINVAL now if the timeout is invalid.
	 */
	if (tsp != NULL &&
	    (tsp->tv_sec < 0 || (ulong_t)tsp->tv_nsec >= NANOSEC))
		return (EINVAL);

	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_sp = stkptr();
		self->ul_wchan = cvp;
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = cvp;
		tdb_event(TD_SLEEP, udp);
		self->ul_sp = 0;
	}
	if (csp) {
		if (tsp)
			tdb_incr(csp->cond_timedwait);
		else
			tdb_incr(csp->cond_wait);
	}
	if (msp)
		begin_sleep = record_hold_time(msp);
	else if (csp)
		begin_sleep = gethrtime();

	if (self->ul_error_detection) {
		if (!mutex_held(mp))
			lock_error(mp, "cond_wait", cvp, NULL);
		if ((mtype & LOCK_RECURSIVE) && mp->mutex_rcount != 0)
			lock_error(mp, "recursive mutex in cond_wait",
			    cvp, NULL);
		if (cvp->cond_type & USYNC_PROCESS) {
			if (!(mtype & USYNC_PROCESS))
				lock_error(mp, "cond_wait", cvp,
				    "condvar process-shared, "
				    "mutex process-private");
		} else {
			if (mtype & USYNC_PROCESS)
				lock_error(mp, "cond_wait", cvp,
				    "condvar process-private, "
				    "mutex process-shared");
		}
	}

	/*
	 * We deal with recursive mutexes by completely
	 * dropping the lock and restoring the recursion
	 * count after waking up.  This is arguably wrong,
	 * but it obeys the principle of least astonishment.
	 */
	rcount = mp->mutex_rcount;
	mp->mutex_rcount = 0;
	if ((mtype &
	    (USYNC_PROCESS | LOCK_PRIO_INHERIT | LOCK_PRIO_PROTECT)) |
	    (cvp->cond_type & USYNC_PROCESS))
		error = cond_wait_kernel(cvp, mp, tsp);
	else
		error = cond_wait_queue(cvp, mp, tsp);
	mp->mutex_rcount = rcount;

	if (csp) {
		hrtime_t lapse = gethrtime() - begin_sleep;
		if (tsp == NULL)
			csp->cond_wait_sleep_time += lapse;
		else {
			csp->cond_timedwait_sleep_time += lapse;
			if (error == ETIME)
				tdb_incr(csp->cond_timedwait_timeout);
		}
	}
	return (error);
}

/*
 * cond_wait() is a cancellation point but __cond_wait() is not.
 * Internally, libc calls the non-cancellation version.
 * Other libraries need to use pthread_setcancelstate(), as appropriate,
 * since __cond_wait() is not exported from libc.
 */
int
__cond_wait(cond_t *cvp, mutex_t *mp)
{
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	uberflags_t *gflags;

	if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
	    !mutex_held(mp))
		return (EPERM);

	/*
	 * Optimize the common case of USYNC_THREAD plus
	 * no error detection, no lock statistics, and no event tracing.
	 */
	if ((gflags = self->ul_schedctl_called) != NULL &&
	    (cvp->cond_type | mp->mutex_type | gflags->uf_trs_ted |
	    self->ul_td_events_enable |
	    udp->tdb.tdb_ev_global_mask.event_bits[0]) == 0)
		return (cond_wait_queue(cvp, mp, NULL));

	/*
	 * Else do it the long way.
	 */
	return (cond_wait_common(cvp, mp, NULL));
}
#pragma weak _cond_wait = cond_wait
int
cond_wait(cond_t *cvp, mutex_t *mp)
{
	int error;

	_cancelon();
	error = __cond_wait(cvp, mp);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}

/*
 * pthread_cond_wait() is a cancellation point.
 */
int
pthread_cond_wait(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp)
{
	int error;

	error = cond_wait((cond_t *)cvp, (mutex_t *)mp);
	return ((error == EINTR)? 0 : error);
}

/*
 * cond_timedwait() is a cancellation point but __cond_timedwait() is not.
 */
int
__cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	clockid_t clock_id = cvp->cond_clockid;
	timespec_t reltime;
	int error;

	if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
	    !mutex_held(mp))
		return (EPERM);

	if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_HIGHRES)
		clock_id = CLOCK_REALTIME;
	abstime_to_reltime(clock_id, abstime, &reltime);
	error = cond_wait_common(cvp, mp, &reltime);
	if (error == ETIME && clock_id == CLOCK_HIGHRES) {
		/*
		 * Don't return ETIME if we didn't really get a timeout.
		 * This can happen if we return because someone resets
		 * the system clock.  Just return zero in this case,
		 * giving a spurious wakeup but not a timeout.
		 */
		if ((hrtime_t)(uint32_t)abstime->tv_sec * NANOSEC +
		    abstime->tv_nsec > gethrtime())
			error = 0;
	}
	return (error);
}
int
cond_timedwait(cond_t *cvp, mutex_t *mp, const timespec_t *abstime)
{
	int error;

	_cancelon();
	error = __cond_timedwait(cvp, mp, abstime);
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}

/*
 * pthread_cond_timedwait() is a cancellation point.
 */
int
pthread_cond_timedwait(pthread_cond_t *_RESTRICT_KYWD cvp,
    pthread_mutex_t *_RESTRICT_KYWD mp,
    const struct timespec *_RESTRICT_KYWD abstime)
{
	int error;

	error = cond_timedwait((cond_t *)cvp, (mutex_t *)mp, abstime);
	if (error == ETIME)
		error = ETIMEDOUT;
	else if (error == EINTR)
		error = 0;
	return (error);
}
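/*
 * Illustrative sketch (not part of libc): pthread_cond_timedwait()
 * above takes an absolute CLOCK_REALTIME deadline (converted to a
 * relative wait by abstime_to_reltime()) and reports expiry as
 * ETIMEDOUT.  The predicate loop handles spurious wakeups; the
 * variable names are hypothetical.
 */
#if 0	/* example only, not compiled into libc */
#include <pthread.h>
#include <time.h>
#include <errno.h>

static pthread_mutex_t example_m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_cv = PTHREAD_COND_INITIALIZER;
static int example_done;

static int
example_wait_up_to(int seconds)
{
	struct timespec deadline;
	int error = 0;
	int done;

	(void) clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += seconds;

	(void) pthread_mutex_lock(&example_m);
	while (!example_done && error != ETIMEDOUT)
		error = pthread_cond_timedwait(&example_cv, &example_m,
		    &deadline);
	done = example_done;		/* sample predicate under the lock */
	(void) pthread_mutex_unlock(&example_m);
	return (done ? 0 : ETIMEDOUT);
}
#endif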
36057c478bd9Sstevel@tonic-gate /*
36067257d1b4Sraf  * cond_reltimedwait() is a cancellation point but __cond_reltimedwait() is not.
36077c478bd9Sstevel@tonic-gate  */
36087c478bd9Sstevel@tonic-gate int
3609a574db85Sraf __cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
36107c478bd9Sstevel@tonic-gate {
36117c478bd9Sstevel@tonic-gate 	timespec_t tslocal = *reltime;
36127c478bd9Sstevel@tonic-gate
361380d89c86SRoger A. Faulkner 	if ((mp->mutex_type & (LOCK_ERRORCHECK | LOCK_ROBUST)) &&
361480d89c86SRoger A. Faulkner 	    !mutex_held(mp))
361580d89c86SRoger A. Faulkner 		return (EPERM);
361680d89c86SRoger A. Faulkner
36177c478bd9Sstevel@tonic-gate 	return (cond_wait_common(cvp, mp, &tslocal));
36187c478bd9Sstevel@tonic-gate }
36197c478bd9Sstevel@tonic-gate
36207c478bd9Sstevel@tonic-gate int
36217257d1b4Sraf cond_reltimedwait(cond_t *cvp, mutex_t *mp, const timespec_t *reltime)
36227c478bd9Sstevel@tonic-gate {
36237c478bd9Sstevel@tonic-gate 	int error;
36247c478bd9Sstevel@tonic-gate
36257c478bd9Sstevel@tonic-gate 	_cancelon();
3626a574db85Sraf 	error = __cond_reltimedwait(cvp, mp, reltime);
36277c478bd9Sstevel@tonic-gate 	if (error == EINTR)
36287c478bd9Sstevel@tonic-gate 		_canceloff();
36297c478bd9Sstevel@tonic-gate 	else
36307c478bd9Sstevel@tonic-gate 		_canceloff_nocancel();
36317c478bd9Sstevel@tonic-gate 	return (error);
36327c478bd9Sstevel@tonic-gate }
36337c478bd9Sstevel@tonic-gate
36347c478bd9Sstevel@tonic-gate int
36357257d1b4Sraf pthread_cond_reltimedwait_np(pthread_cond_t *_RESTRICT_KYWD cvp,
36367257d1b4Sraf     pthread_mutex_t *_RESTRICT_KYWD mp,
36377257d1b4Sraf     const struct timespec *_RESTRICT_KYWD reltime)
36387c478bd9Sstevel@tonic-gate {
36397c478bd9Sstevel@tonic-gate 	int error;
36407c478bd9Sstevel@tonic-gate
36417257d1b4Sraf 	error = cond_reltimedwait((cond_t *)cvp, (mutex_t *)mp, reltime);
36427c478bd9Sstevel@tonic-gate 	if (error == ETIME)
36437c478bd9Sstevel@tonic-gate 		error = ETIMEDOUT;
36447c478bd9Sstevel@tonic-gate 	else if (error == EINTR)
36457c478bd9Sstevel@tonic-gate 		error = 0;
36467c478bd9Sstevel@tonic-gate 	return (error);
36477c478bd9Sstevel@tonic-gate }
36487c478bd9Sstevel@tonic-gate
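[Editor's note] pthread_cond_reltimedwait_np() is the non-portable (hence _np) Solaris variant that takes an interval rather than a deadline. That suits "wait at most this long from now" callers, but after a spurious wakeup the caller must recompute the remaining interval if the total wait is to stay bounded. A sketch of one bounded attempt, with illustrative names only:

	#include <errno.h>
	#include <pthread.h>
	#include <time.h>

	/*
	 * Make one attempt to observe *flag, waiting at most 100ms.
	 * Returns 0 if *flag was set, ETIMEDOUT if not, or another error.
	 * A caller that loops on this waits 100ms per attempt, not total.
	 */
	int
	wait_flag_once(pthread_mutex_t *mp, pthread_cond_t *cvp, int *flag)
	{
		struct timespec interval = { 0, 100 * 1000 * 1000 };
		int error = 0;

		(void) pthread_mutex_lock(mp);
		if (*flag == 0)
			error = pthread_cond_reltimedwait_np(cvp, mp,
			    &interval);
		(void) pthread_mutex_unlock(mp);
		return (*flag != 0 ? 0 : error);
	}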
36497257d1b4Sraf #pragma weak pthread_cond_signal = cond_signal
36507257d1b4Sraf #pragma weak _cond_signal = cond_signal
36517c478bd9Sstevel@tonic-gate int
36527257d1b4Sraf cond_signal(cond_t *cvp)
36537c478bd9Sstevel@tonic-gate {
36547c478bd9Sstevel@tonic-gate 	ulwp_t *self = curthread;
36557c478bd9Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
36567c478bd9Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
36577c478bd9Sstevel@tonic-gate 	int error = 0;
3658d4204c85Sraf 	int more;
3659d4204c85Sraf 	lwpid_t lwpid;
36607c478bd9Sstevel@tonic-gate 	queue_head_t *qp;
36617c478bd9Sstevel@tonic-gate 	mutex_t *mp;
36627c478bd9Sstevel@tonic-gate 	queue_head_t *mqp;
36637c478bd9Sstevel@tonic-gate 	ulwp_t **ulwpp;
36647c478bd9Sstevel@tonic-gate 	ulwp_t *ulwp;
3665d4204c85Sraf 	ulwp_t *prev;
36667c478bd9Sstevel@tonic-gate
36677c478bd9Sstevel@tonic-gate 	if (csp)
36687c478bd9Sstevel@tonic-gate 		tdb_incr(csp->cond_signal);
36697c478bd9Sstevel@tonic-gate
36707c478bd9Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
36717257d1b4Sraf 		error = _lwp_cond_signal(cvp);
36727c478bd9Sstevel@tonic-gate
36737c478bd9Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
36747c478bd9Sstevel@tonic-gate 		return (error);
36757c478bd9Sstevel@tonic-gate
36767c478bd9Sstevel@tonic-gate 	/*
36777c478bd9Sstevel@tonic-gate 	 * Move someone from the condvar sleep queue to the mutex sleep
36787c478bd9Sstevel@tonic-gate 	 * queue for the mutex that he will acquire on being woken up.
36797c478bd9Sstevel@tonic-gate 	 * We can do this only if we own the mutex he will acquire.
36807c478bd9Sstevel@tonic-gate 	 * If we do not own the mutex, or if his ul_cv_wake flag
36817c478bd9Sstevel@tonic-gate 	 * is set, just dequeue and unpark him.
36827c478bd9Sstevel@tonic-gate 	 */
36837c478bd9Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
3684d4204c85Sraf 	ulwpp = queue_slot(qp, &prev, &more);
3685d4204c85Sraf 	cvp->cond_waiters_user = more;
3686d4204c85Sraf 	if (ulwpp == NULL) {	/* no one on the sleep queue */
36877c478bd9Sstevel@tonic-gate 		queue_unlock(qp);
36887c478bd9Sstevel@tonic-gate 		return (error);
36897c478bd9Sstevel@tonic-gate 	}
3690d4204c85Sraf 	ulwp = *ulwpp;
36917c478bd9Sstevel@tonic-gate
36927c478bd9Sstevel@tonic-gate 	/*
36937c478bd9Sstevel@tonic-gate 	 * Inform the thread that he was the recipient of a cond_signal().
36947c478bd9Sstevel@tonic-gate 	 * This lets him deal with cond_signal() and, concurrently,
36957c478bd9Sstevel@tonic-gate 	 * one or more of a cancellation, a UNIX signal, or a timeout.
36967c478bd9Sstevel@tonic-gate 	 * These latter conditions must not consume a cond_signal().
36977c478bd9Sstevel@tonic-gate 	 */
36987c478bd9Sstevel@tonic-gate 	ulwp->ul_signalled = 1;
36997c478bd9Sstevel@tonic-gate
37007c478bd9Sstevel@tonic-gate 	/*
37017c478bd9Sstevel@tonic-gate 	 * Dequeue the waiter but leave his ul_sleepq non-NULL
37027c478bd9Sstevel@tonic-gate 	 * while we move him to the mutex queue so that he can
37037c478bd9Sstevel@tonic-gate 	 * deal properly with spurious wakeups.
37047c478bd9Sstevel@tonic-gate 	 */
3705d4204c85Sraf 	queue_unlink(qp, ulwpp, prev);
37067c478bd9Sstevel@tonic-gate
37077c478bd9Sstevel@tonic-gate 	mp = ulwp->ul_cvmutex;		/* the mutex he will acquire */
37087c478bd9Sstevel@tonic-gate 	ulwp->ul_cvmutex = NULL;
37097c478bd9Sstevel@tonic-gate 	ASSERT(mp != NULL);
37107c478bd9Sstevel@tonic-gate
37117c478bd9Sstevel@tonic-gate 	if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3712d4204c85Sraf 		/* just wake him up */
3713d4204c85Sraf 		lwpid = ulwp->ul_lwpid;
37147c478bd9Sstevel@tonic-gate 		no_preempt(self);
37157c478bd9Sstevel@tonic-gate 		ulwp->ul_sleepq = NULL;
37167c478bd9Sstevel@tonic-gate 		ulwp->ul_wchan = NULL;
37177c478bd9Sstevel@tonic-gate 		queue_unlock(qp);
37187c478bd9Sstevel@tonic-gate 		(void) __lwp_unpark(lwpid);
37197c478bd9Sstevel@tonic-gate 		preempt(self);
37207c478bd9Sstevel@tonic-gate 	} else {
3721d4204c85Sraf 		/* move him to the mutex queue */
37227c478bd9Sstevel@tonic-gate 		mqp = queue_lock(mp, MX);
3723d4204c85Sraf 		enqueue(mqp, ulwp, 0);
37247c478bd9Sstevel@tonic-gate 		mp->mutex_waiters = 1;
37257c478bd9Sstevel@tonic-gate 		queue_unlock(mqp);
37267c478bd9Sstevel@tonic-gate 		queue_unlock(qp);
37277c478bd9Sstevel@tonic-gate 	}
37287c478bd9Sstevel@tonic-gate
37297c478bd9Sstevel@tonic-gate 	return (error);
37307c478bd9Sstevel@tonic-gate }
37317c478bd9Sstevel@tonic-gate
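[Editor's note] The else-branch above is the optimization sometimes called wait morphing: when the signalling thread still owns the mutex the waiter must reacquire, the waiter is transferred directly to the mutex sleep queue instead of being unparked only to block again immediately. Which path is taken depends on how the application calls cond_signal(); a sketch of the two caller patterns (illustrative code, not from this file):

	#include <pthread.h>

	static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static int ready;

	void
	post_with_transfer(void)
	{
		(void) pthread_mutex_lock(&m);
		ready = 1;
		/*
		 * The signaller owns 'm', so the waiter can be moved
		 * straight to the mutex sleep queue: no wasted wakeup.
		 */
		(void) pthread_cond_signal(&cv);
		(void) pthread_mutex_unlock(&m);
	}

	void
	post_without_transfer(void)
	{
		(void) pthread_mutex_lock(&m);
		ready = 1;
		(void) pthread_mutex_unlock(&m);
		/*
		 * The signaller no longer owns 'm', so the waiter is
		 * simply dequeued and unparked, then races for 'm'.
		 */
		(void) pthread_cond_signal(&cv);
	}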
373241efec22Sraf /*
3733883492d5Sraf  * Utility function called by mutex_wakeup_all(), cond_broadcast(),
3734883492d5Sraf  * and rw_queue_release() to (re)allocate a big buffer to hold the
3735883492d5Sraf  * lwpids of all the threads to be set running after they are removed
3736883492d5Sraf  * from their sleep queues.  Since we are holding a queue lock, we
3737883492d5Sraf  * cannot call any function that might acquire a lock.  mmap(), munmap(),
3738883492d5Sraf  * and lwp_unpark_all() are simple system calls and are safe in this regard.
373941efec22Sraf  */
374041efec22Sraf lwpid_t *
374141efec22Sraf alloc_lwpids(lwpid_t *lwpid, int *nlwpid_ptr, int *maxlwps_ptr)
374241efec22Sraf {
374341efec22Sraf 	/*
374441efec22Sraf 	 * Allocate NEWLWPS ids on the first overflow.
374541efec22Sraf 	 * Double the allocation each time after that.
374641efec22Sraf 	 */
374741efec22Sraf 	int nlwpid = *nlwpid_ptr;
374841efec22Sraf 	int maxlwps = *maxlwps_ptr;
374941efec22Sraf 	int first_allocation;
375041efec22Sraf 	int newlwps;
375141efec22Sraf 	void *vaddr;
375241efec22Sraf
375341efec22Sraf 	ASSERT(nlwpid == maxlwps);
375441efec22Sraf
375541efec22Sraf 	first_allocation = (maxlwps == MAXLWPS);
375641efec22Sraf 	newlwps = first_allocation? NEWLWPS : 2 * maxlwps;
37578cd45542Sraf 	vaddr = mmap(NULL, newlwps * sizeof (lwpid_t),
375841efec22Sraf 	    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
375941efec22Sraf
376041efec22Sraf 	if (vaddr == MAP_FAILED) {
376141efec22Sraf 		/*
376241efec22Sraf 		 * Let's hope this never happens.
376341efec22Sraf 		 * If it does, then we have a terrible
376441efec22Sraf 		 * thundering herd on our hands.
376541efec22Sraf 		 */
376641efec22Sraf 		(void) __lwp_unpark_all(lwpid, nlwpid);
376741efec22Sraf 		*nlwpid_ptr = 0;
376841efec22Sraf 	} else {
37698cd45542Sraf 		(void) memcpy(vaddr, lwpid, maxlwps * sizeof (lwpid_t));
377041efec22Sraf 		if (!first_allocation)
37718cd45542Sraf 			(void) munmap((caddr_t)lwpid,
377241efec22Sraf 			    maxlwps * sizeof (lwpid_t));
377341efec22Sraf 		lwpid = vaddr;
377441efec22Sraf 		*maxlwps_ptr = newlwps;
377541efec22Sraf 	}
377641efec22Sraf
377741efec22Sraf 	return (lwpid);
377841efec22Sraf }
37797c478bd9Sstevel@tonic-gate
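[Editor's note] alloc_lwpids() grows the buffer geometrically (a large first allocation, then doubling), so the amortized copying cost stays linear in the number of lwpids collected, and it calls mmap()/munmap() directly because nothing on this path may take a lock. A simplified, self-contained sketch of the same pattern; the constants and names here are illustrative stand-ins, not the real MAXLWPS/NEWLWPS values:

	#include <string.h>
	#include <sys/mman.h>

	#define	START_IDS	128	/* stand-in for MAXLWPS */
	#define	FIRST_GROW	2048	/* stand-in for NEWLWPS */

	/*
	 * Grow an id buffer without taking any locks: mmap() a larger
	 * region, copy, then munmap() the old region (unless it was the
	 * caller's on-stack buffer).  Returns the old buffer on failure.
	 */
	static unsigned *
	grow_ids(unsigned *ids, int *max_p, int on_stack)
	{
		int newmax = on_stack ? FIRST_GROW : 2 * *max_p;
		void *vaddr = mmap(NULL, newmax * sizeof (unsigned),
		    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);

		if (vaddr == MAP_FAILED)
			return (ids);
		(void) memcpy(vaddr, ids, *max_p * sizeof (unsigned));
		if (!on_stack)
			(void) munmap((void *)ids,
			    *max_p * sizeof (unsigned));
		*max_p = newmax;
		return (vaddr);
	}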
37807257d1b4Sraf #pragma weak pthread_cond_broadcast = cond_broadcast
37817257d1b4Sraf #pragma weak _cond_broadcast = cond_broadcast
37827c478bd9Sstevel@tonic-gate int
37837257d1b4Sraf cond_broadcast(cond_t *cvp)
37847c478bd9Sstevel@tonic-gate {
37857c478bd9Sstevel@tonic-gate 	ulwp_t *self = curthread;
37867c478bd9Sstevel@tonic-gate 	uberdata_t *udp = self->ul_uberdata;
37877c478bd9Sstevel@tonic-gate 	tdb_cond_stats_t *csp = COND_STATS(cvp, udp);
37887c478bd9Sstevel@tonic-gate 	int error = 0;
37897c478bd9Sstevel@tonic-gate 	queue_head_t *qp;
3790d4204c85Sraf 	queue_root_t *qrp;
37917c478bd9Sstevel@tonic-gate 	mutex_t *mp;
37927c478bd9Sstevel@tonic-gate 	mutex_t *mp_cache = NULL;
379341efec22Sraf 	queue_head_t *mqp = NULL;
37947c478bd9Sstevel@tonic-gate 	ulwp_t *ulwp;
37957c478bd9Sstevel@tonic-gate 	int nlwpid = 0;
37967c478bd9Sstevel@tonic-gate 	int maxlwps = MAXLWPS;
379741efec22Sraf 	lwpid_t buffer[MAXLWPS];
379841efec22Sraf 	lwpid_t *lwpid = buffer;
37997c478bd9Sstevel@tonic-gate
38007c478bd9Sstevel@tonic-gate 	if (csp)
38017c478bd9Sstevel@tonic-gate 		tdb_incr(csp->cond_broadcast);
38027c478bd9Sstevel@tonic-gate
38037c478bd9Sstevel@tonic-gate 	if (cvp->cond_waiters_kernel)	/* someone sleeping in the kernel? */
38047257d1b4Sraf 		error = _lwp_cond_broadcast(cvp);
38057c478bd9Sstevel@tonic-gate
38067c478bd9Sstevel@tonic-gate 	if (!cvp->cond_waiters_user)	/* no one sleeping at user-level */
38077c478bd9Sstevel@tonic-gate 		return (error);
38087c478bd9Sstevel@tonic-gate
38097c478bd9Sstevel@tonic-gate 	/*
38107c478bd9Sstevel@tonic-gate 	 * Move everyone from the condvar sleep queue to the mutex sleep
38117c478bd9Sstevel@tonic-gate 	 * queue for the mutex that they will acquire on being woken up.
38127c478bd9Sstevel@tonic-gate 	 * We can do this only if we own the mutex they will acquire.
38137c478bd9Sstevel@tonic-gate 	 * If we do not own the mutex, or if their ul_cv_wake flag
38147c478bd9Sstevel@tonic-gate 	 * is set, just dequeue and unpark them.
38157c478bd9Sstevel@tonic-gate 	 *
38167c478bd9Sstevel@tonic-gate 	 * We keep track of lwpids that are to be unparked in lwpid[].
38177c478bd9Sstevel@tonic-gate 	 * __lwp_unpark_all() is called to unpark all of them after
38187c478bd9Sstevel@tonic-gate 	 * they have been removed from the sleep queue and the sleep
38197c478bd9Sstevel@tonic-gate 	 * queue lock has been dropped.  If we run out of space in our
38207c478bd9Sstevel@tonic-gate 	 * on-stack buffer, we need to allocate more but we can't call
38217c478bd9Sstevel@tonic-gate 	 * lmalloc() because we are holding a queue lock when the overflow
38227c478bd9Sstevel@tonic-gate 	 * occurs and lmalloc() acquires a lock.  We can't use alloca()
382341efec22Sraf 	 * either because the application may have allocated a small
382441efec22Sraf 	 * stack and we don't want to overrun the stack.  So we call
382541efec22Sraf 	 * alloc_lwpids() to allocate a bigger buffer using the mmap()
38267c478bd9Sstevel@tonic-gate 	 * system call directly since that path acquires no locks.
38277c478bd9Sstevel@tonic-gate 	 */
38287c478bd9Sstevel@tonic-gate 	qp = queue_lock(cvp, CV);
38297c478bd9Sstevel@tonic-gate 	cvp->cond_waiters_user = 0;
3830d4204c85Sraf 	for (;;) {
3831d4204c85Sraf 		if ((qrp = qp->qh_root) == NULL ||
3832d4204c85Sraf 		    (ulwp = qrp->qr_head) == NULL)
3833d4204c85Sraf 			break;
3834d4204c85Sraf 		ASSERT(ulwp->ul_wchan == cvp);
3835d4204c85Sraf 		queue_unlink(qp, &qrp->qr_head, NULL);
38367c478bd9Sstevel@tonic-gate 		mp = ulwp->ul_cvmutex;		/* his mutex */
38377c478bd9Sstevel@tonic-gate 		ulwp->ul_cvmutex = NULL;
38387c478bd9Sstevel@tonic-gate 		ASSERT(mp != NULL);
38397c478bd9Sstevel@tonic-gate 		if (ulwp->ul_cv_wake || !MUTEX_OWNED(mp, self)) {
3840d4204c85Sraf 			/* just wake him up */
38417c478bd9Sstevel@tonic-gate 			ulwp->ul_sleepq = NULL;
38427c478bd9Sstevel@tonic-gate 			ulwp->ul_wchan = NULL;
384341efec22Sraf 			if (nlwpid == maxlwps)
384441efec22Sraf 				lwpid = alloc_lwpids(lwpid, &nlwpid, &maxlwps);
38457c478bd9Sstevel@tonic-gate 			lwpid[nlwpid++] = ulwp->ul_lwpid;
38467c478bd9Sstevel@tonic-gate 		} else {
3847d4204c85Sraf 			/* move him to the mutex queue */
38487c478bd9Sstevel@tonic-gate 			if (mp != mp_cache) {
38497c478bd9Sstevel@tonic-gate 				mp_cache = mp;
385041efec22Sraf 				if (mqp != NULL)
385141efec22Sraf 					queue_unlock(mqp);
385241efec22Sraf 				mqp = queue_lock(mp, MX);
38537c478bd9Sstevel@tonic-gate 			}
3854d4204c85Sraf 			enqueue(mqp, ulwp, 0);
38557c478bd9Sstevel@tonic-gate 			mp->mutex_waiters = 1;
38567c478bd9Sstevel@tonic-gate 		}
38577c478bd9Sstevel@tonic-gate 	}
385841efec22Sraf 	if (mqp != NULL)
385941efec22Sraf 		queue_unlock(mqp);
386041efec22Sraf 	if (nlwpid == 0) {
386141efec22Sraf 		queue_unlock(qp);
386241efec22Sraf 	} else {
386341efec22Sraf 		no_preempt(self);
386441efec22Sraf 		queue_unlock(qp);
38657c478bd9Sstevel@tonic-gate 		if (nlwpid == 1)
38667c478bd9Sstevel@tonic-gate 			(void) __lwp_unpark(lwpid[0]);
38677c478bd9Sstevel@tonic-gate 		else
38687c478bd9Sstevel@tonic-gate 			(void) __lwp_unpark_all(lwpid, nlwpid);
386941efec22Sraf 		preempt(self);
38707c478bd9Sstevel@tonic-gate 	}
38717c478bd9Sstevel@tonic-gate 	if (lwpid != buffer)
38728cd45542Sraf 		(void) munmap((caddr_t)lwpid, maxlwps * sizeof (lwpid_t));
38737c478bd9Sstevel@tonic-gate 	return (error);
38747c478bd9Sstevel@tonic-gate }
38757c478bd9Sstevel@tonic-gate
38767257d1b4Sraf #pragma weak pthread_cond_destroy = cond_destroy
38777c478bd9Sstevel@tonic-gate int
38787257d1b4Sraf cond_destroy(cond_t *cvp)
38797c478bd9Sstevel@tonic-gate {
38807c478bd9Sstevel@tonic-gate 	cvp->cond_magic = 0;
38817c478bd9Sstevel@tonic-gate 	tdb_sync_obj_deregister(cvp);
38827c478bd9Sstevel@tonic-gate 	return (0);
38837c478bd9Sstevel@tonic-gate }
38847c478bd9Sstevel@tonic-gate
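[Editor's note] cond_broadcast() batches its wakeups: lwpids are collected while the queue lock is held and unparked only after it is dropped, inside a no_preempt() window, so a freshly awakened thread cannot preempt the broadcaster and then block again on the still-held queue lock. The caller-side pattern broadcast exists for is a state change that every waiter must observe (illustrative sketch):

	#include <pthread.h>

	static pthread_mutex_t gate_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t gate_cv = PTHREAD_COND_INITIALIZER;
	static int gate_open;

	void
	open_gate(void)
	{
		(void) pthread_mutex_lock(&gate_lock);
		gate_open = 1;
		/*
		 * Every waiter must re-check gate_open, so broadcast;
		 * a single cond_signal() would wake only one of them.
		 */
		(void) pthread_cond_broadcast(&gate_cv);
		(void) pthread_mutex_unlock(&gate_lock);
	}

	void
	wait_for_gate(void)
	{
		(void) pthread_mutex_lock(&gate_lock);
		while (!gate_open)
			(void) pthread_cond_wait(&gate_cv, &gate_lock);
		(void) pthread_mutex_unlock(&gate_lock);
	}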
38857c478bd9Sstevel@tonic-gate #if defined(THREAD_DEBUG)
38867c478bd9Sstevel@tonic-gate void
38877c478bd9Sstevel@tonic-gate assert_no_libc_locks_held(void)
38887c478bd9Sstevel@tonic-gate {
38897c478bd9Sstevel@tonic-gate 	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
38907c478bd9Sstevel@tonic-gate }
38917c478bd9Sstevel@tonic-gate
38927c478bd9Sstevel@tonic-gate /* protected by link_lock */
38937c478bd9Sstevel@tonic-gate uint64_t spin_lock_spin;
38947c478bd9Sstevel@tonic-gate uint64_t spin_lock_spin2;
38957c478bd9Sstevel@tonic-gate uint64_t spin_lock_sleep;
38967c478bd9Sstevel@tonic-gate uint64_t spin_lock_wakeup;
38977c478bd9Sstevel@tonic-gate
38987c478bd9Sstevel@tonic-gate /*
38997c478bd9Sstevel@tonic-gate  * Record spin lock statistics.
39007c478bd9Sstevel@tonic-gate  * Called by a thread exiting itself in thrp_exit().
39017c478bd9Sstevel@tonic-gate  * Also called via atexit() from the thread calling
39027c478bd9Sstevel@tonic-gate  * exit() to do all the other threads as well.
39037c478bd9Sstevel@tonic-gate  */
39047c478bd9Sstevel@tonic-gate void
39057c478bd9Sstevel@tonic-gate record_spin_locks(ulwp_t *ulwp)
39067c478bd9Sstevel@tonic-gate {
39077c478bd9Sstevel@tonic-gate 	spin_lock_spin += ulwp->ul_spin_lock_spin;
39087c478bd9Sstevel@tonic-gate 	spin_lock_spin2 += ulwp->ul_spin_lock_spin2;
39097c478bd9Sstevel@tonic-gate 	spin_lock_sleep += ulwp->ul_spin_lock_sleep;
39107c478bd9Sstevel@tonic-gate 	spin_lock_wakeup += ulwp->ul_spin_lock_wakeup;
39117c478bd9Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin = 0;
39127c478bd9Sstevel@tonic-gate 	ulwp->ul_spin_lock_spin2 = 0;
39137c478bd9Sstevel@tonic-gate 	ulwp->ul_spin_lock_sleep = 0;
39147c478bd9Sstevel@tonic-gate 	ulwp->ul_spin_lock_wakeup = 0;
39157c478bd9Sstevel@tonic-gate }
39167c478bd9Sstevel@tonic-gate
39177c478bd9Sstevel@tonic-gate /*
39187c478bd9Sstevel@tonic-gate  * atexit function: dump the queue statistics to stderr.
39197c478bd9Sstevel@tonic-gate  */
39207c478bd9Sstevel@tonic-gate #include <stdio.h>
39217c478bd9Sstevel@tonic-gate void
39227c478bd9Sstevel@tonic-gate dump_queue_statistics(void)
39237c478bd9Sstevel@tonic-gate {
39247c478bd9Sstevel@tonic-gate 	uberdata_t *udp = curthread->ul_uberdata;
39257c478bd9Sstevel@tonic-gate 	queue_head_t *qp;
39267c478bd9Sstevel@tonic-gate 	int qn;
39277c478bd9Sstevel@tonic-gate 	uint64_t spin_lock_total = 0;
39287c478bd9Sstevel@tonic-gate
39297c478bd9Sstevel@tonic-gate 	if (udp->queue_head == NULL || thread_queue_dump == 0)
39307c478bd9Sstevel@tonic-gate 		return;
39317c478bd9Sstevel@tonic-gate
39327c478bd9Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d mutex queues:\n", QHASHSIZE) < 0 ||
3933d4204c85Sraf 	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
39347c478bd9Sstevel@tonic-gate 		return;
39357c478bd9Sstevel@tonic-gate 	for (qn = 0, qp = udp->queue_head; qn < QHASHSIZE; qn++, qp++) {
39367c478bd9Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
39377c478bd9Sstevel@tonic-gate 			continue;
39387c478bd9Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
3939d4204c85Sraf 		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
3940d4204c85Sraf 		    (u_longlong_t)qp->qh_lockcount,
3941d4204c85Sraf 		    qp->qh_qmax, qp->qh_hmax) < 0)
39425d1dd9a9Sraf 			return;
39437c478bd9Sstevel@tonic-gate 	}
39447c478bd9Sstevel@tonic-gate
39457c478bd9Sstevel@tonic-gate 	if (fprintf(stderr, "\n%5d condvar queues:\n", QHASHSIZE) < 0 ||
3946d4204c85Sraf 	    fprintf(stderr, "queue#   lockcount    max qlen    max hlen\n") < 0)
39477c478bd9Sstevel@tonic-gate 		return;
39487c478bd9Sstevel@tonic-gate 	for (qn = 0; qn < QHASHSIZE; qn++, qp++) {
39497c478bd9Sstevel@tonic-gate 		if (qp->qh_lockcount == 0)
39507c478bd9Sstevel@tonic-gate 			continue;
39517c478bd9Sstevel@tonic-gate 		spin_lock_total += qp->qh_lockcount;
3952d4204c85Sraf 		if (fprintf(stderr, "%5d %12llu%12u%12u\n", qn,
3953d4204c85Sraf 		    (u_longlong_t)qp->qh_lockcount,
3954d4204c85Sraf 		    qp->qh_qmax, qp->qh_hmax) < 0)
39555d1dd9a9Sraf 			return;
39567c478bd9Sstevel@tonic-gate 	}
39577c478bd9Sstevel@tonic-gate
39587c478bd9Sstevel@tonic-gate 	(void) fprintf(stderr, "\n  spin_lock_total  = %10llu\n",
39595d1dd9a9Sraf 	    (u_longlong_t)spin_lock_total);
39607c478bd9Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin   = %10llu\n",
39615d1dd9a9Sraf 	    (u_longlong_t)spin_lock_spin);
39627c478bd9Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_spin2  = %10llu\n",
39635d1dd9a9Sraf 	    (u_longlong_t)spin_lock_spin2);
39647c478bd9Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_sleep  = %10llu\n",
39655d1dd9a9Sraf 	    (u_longlong_t)spin_lock_sleep);
39667c478bd9Sstevel@tonic-gate 	(void) fprintf(stderr, "  spin_lock_wakeup = %10llu\n",
39675d1dd9a9Sraf 	    (u_longlong_t)spin_lock_wakeup);
39687c478bd9Sstevel@tonic-gate }
3969d4204c85Sraf #endif
3970
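[Editor's note] The THREAD_DEBUG block above uses a pattern worth calling out: statistics live in per-thread (ulwp_t) counters with no synchronization on the hot path, are folded into file-scope totals only when a thread exits, and are dumped by a function registered via atexit(). A freestanding sketch of the same idea, with illustrative names (and __thread, a common compiler extension, standing in for the ulwp_t fields):

	#include <stdio.h>
	#include <stdlib.h>

	static __thread unsigned long my_lock_spins;	/* hot path: no locks */
	static unsigned long total_lock_spins;		/* folded totals */

	static void
	dump_spin_stats(void)
	{
		(void) fprintf(stderr, "lock spins = %lu\n",
		    total_lock_spins);
	}

	void
	stats_init(void)
	{
		(void) atexit(dump_spin_stats);	/* runs when exit() is called */
	}

	void
	record_thread_stats(void)	/* call at thread exit, under a lock */
	{
		total_lock_spins += my_lock_spins;
		my_lock_spins = 0;
	}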