17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 57c478bd9Sstevel@tonic-gate * Common Development and Distribution License, Version 1.0 only 67c478bd9Sstevel@tonic-gate * (the "License"). You may not use this file except in compliance 77c478bd9Sstevel@tonic-gate * with the License. 87c478bd9Sstevel@tonic-gate * 97c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 107c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 117c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 127c478bd9Sstevel@tonic-gate * and limitations under the License. 137c478bd9Sstevel@tonic-gate * 147c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 157c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 167c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 177c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 187c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 197c478bd9Sstevel@tonic-gate * 207c478bd9Sstevel@tonic-gate * CDDL HEADER END 217c478bd9Sstevel@tonic-gate */ 227c478bd9Sstevel@tonic-gate /* 237c478bd9Sstevel@tonic-gate * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 247c478bd9Sstevel@tonic-gate * Use is subject to license terms. 
257c478bd9Sstevel@tonic-gate */ 267c478bd9Sstevel@tonic-gate 277c478bd9Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 287c478bd9Sstevel@tonic-gate 297c478bd9Sstevel@tonic-gate /* 307c478bd9Sstevel@tonic-gate * VM - page locking primitives 317c478bd9Sstevel@tonic-gate */ 327c478bd9Sstevel@tonic-gate #include <sys/param.h> 337c478bd9Sstevel@tonic-gate #include <sys/t_lock.h> 347c478bd9Sstevel@tonic-gate #include <sys/vtrace.h> 357c478bd9Sstevel@tonic-gate #include <sys/debug.h> 367c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 377c478bd9Sstevel@tonic-gate #include <sys/vnode.h> 387c478bd9Sstevel@tonic-gate #include <sys/bitmap.h> 397c478bd9Sstevel@tonic-gate #include <sys/lockstat.h> 407c478bd9Sstevel@tonic-gate #include <sys/condvar_impl.h> 417c478bd9Sstevel@tonic-gate #include <vm/page.h> 427c478bd9Sstevel@tonic-gate #include <vm/seg_enum.h> 437c478bd9Sstevel@tonic-gate #include <vm/vm_dep.h> 447c478bd9Sstevel@tonic-gate 457c478bd9Sstevel@tonic-gate /* 467c478bd9Sstevel@tonic-gate * This global mutex is for logical page locking. 477c478bd9Sstevel@tonic-gate * The following fields in the page structure are protected 487c478bd9Sstevel@tonic-gate * by this lock: 497c478bd9Sstevel@tonic-gate * 507c478bd9Sstevel@tonic-gate * p_lckcnt 517c478bd9Sstevel@tonic-gate * p_cowcnt 527c478bd9Sstevel@tonic-gate */ 537c478bd9Sstevel@tonic-gate kmutex_t page_llock; 547c478bd9Sstevel@tonic-gate 557c478bd9Sstevel@tonic-gate /* 567c478bd9Sstevel@tonic-gate * This is a global lock for the logical page free list. The 577c478bd9Sstevel@tonic-gate * logical free list, in this implementation, is maintained as two 587c478bd9Sstevel@tonic-gate * separate physical lists - the cache list and the free list. 
597c478bd9Sstevel@tonic-gate */ 607c478bd9Sstevel@tonic-gate kmutex_t page_freelock; 617c478bd9Sstevel@tonic-gate 627c478bd9Sstevel@tonic-gate /* 637c478bd9Sstevel@tonic-gate * The hash table, page_hash[], the p_selock fields, and the 647c478bd9Sstevel@tonic-gate * list of pages associated with vnodes are protected by arrays of mutexes. 657c478bd9Sstevel@tonic-gate * 667c478bd9Sstevel@tonic-gate * Unless the hashes are changed radically, the table sizes must be 677c478bd9Sstevel@tonic-gate * a power of two. Also, we typically need more mutexes for the 687c478bd9Sstevel@tonic-gate * vnodes since these locks are occasionally held for long periods. 697c478bd9Sstevel@tonic-gate * And since there seem to be two special vnodes (kvp and swapvp), 707c478bd9Sstevel@tonic-gate * we make room for private mutexes for them. 717c478bd9Sstevel@tonic-gate * 727c478bd9Sstevel@tonic-gate * The pse_mutex[] array holds the mutexes to protect the p_selock 737c478bd9Sstevel@tonic-gate * fields of all page_t structures. 747c478bd9Sstevel@tonic-gate * 757c478bd9Sstevel@tonic-gate * PAGE_SE_MUTEX(pp) returns the address of the appropriate mutex 767c478bd9Sstevel@tonic-gate * when given a pointer to a page_t. 777c478bd9Sstevel@tonic-gate * 787c478bd9Sstevel@tonic-gate * PSE_TABLE_SIZE must be a power of two. One could argue that we 797c478bd9Sstevel@tonic-gate * should go to the trouble of setting it up at run time and base it 807c478bd9Sstevel@tonic-gate * on memory size rather than the number of compile time CPUs. 817c478bd9Sstevel@tonic-gate * 827c478bd9Sstevel@tonic-gate * XX64 We should be using physmem size to calculate PSE_TABLE_SIZE, 837c478bd9Sstevel@tonic-gate * PSE_SHIFT, PIO_SHIFT. 847c478bd9Sstevel@tonic-gate * 857c478bd9Sstevel@tonic-gate * These might break in 64 bit world. 
867c478bd9Sstevel@tonic-gate */ 877c478bd9Sstevel@tonic-gate #define PSE_SHIFT 7 /* log2(PSE_TABLE_SIZE) */ 887c478bd9Sstevel@tonic-gate 897c478bd9Sstevel@tonic-gate #define PSE_TABLE_SIZE 128 /* number of mutexes to have */ 907c478bd9Sstevel@tonic-gate 917c478bd9Sstevel@tonic-gate #define PIO_SHIFT PSE_SHIFT /* next power of 2 bigger than page_t */ 927c478bd9Sstevel@tonic-gate #define PIO_TABLE_SIZE PSE_TABLE_SIZE /* number of io mutexes to have */ 937c478bd9Sstevel@tonic-gate 947c478bd9Sstevel@tonic-gate pad_mutex_t ph_mutex[PH_TABLE_SIZE]; 957c478bd9Sstevel@tonic-gate pad_mutex_t pse_mutex[PSE_TABLE_SIZE]; 967c478bd9Sstevel@tonic-gate kmutex_t pio_mutex[PIO_TABLE_SIZE]; 977c478bd9Sstevel@tonic-gate 987c478bd9Sstevel@tonic-gate #define PAGE_SE_MUTEX(pp) \ 997c478bd9Sstevel@tonic-gate &pse_mutex[((((uintptr_t)(pp) >> PSE_SHIFT) ^ \ 1007c478bd9Sstevel@tonic-gate ((uintptr_t)(pp) >> (PSE_SHIFT << 1))) & \ 1017c478bd9Sstevel@tonic-gate (PSE_TABLE_SIZE - 1))].pad_mutex 1027c478bd9Sstevel@tonic-gate 1037c478bd9Sstevel@tonic-gate #define PAGE_IO_MUTEX(pp) \ 1047c478bd9Sstevel@tonic-gate &pio_mutex[(((uintptr_t)pp) >> PIO_SHIFT) & (PIO_TABLE_SIZE - 1)] 1057c478bd9Sstevel@tonic-gate 1067c478bd9Sstevel@tonic-gate #define PSZC_MTX_TABLE_SIZE 128 1077c478bd9Sstevel@tonic-gate #define PSZC_MTX_TABLE_SHIFT 7 1087c478bd9Sstevel@tonic-gate 1097c478bd9Sstevel@tonic-gate static pad_mutex_t pszc_mutex[PSZC_MTX_TABLE_SIZE]; 1107c478bd9Sstevel@tonic-gate 1117c478bd9Sstevel@tonic-gate #define PAGE_SZC_MUTEX(_pp) \ 1127c478bd9Sstevel@tonic-gate &pszc_mutex[((((uintptr_t)(_pp) >> PSZC_MTX_TABLE_SHIFT) ^ \ 1137c478bd9Sstevel@tonic-gate ((uintptr_t)(_pp) >> (PSZC_MTX_TABLE_SHIFT << 1)) ^ \ 1147c478bd9Sstevel@tonic-gate ((uintptr_t)(_pp) >> (3 * PSZC_MTX_TABLE_SHIFT))) & \ 1157c478bd9Sstevel@tonic-gate (PSZC_MTX_TABLE_SIZE - 1))].pad_mutex 1167c478bd9Sstevel@tonic-gate 1177c478bd9Sstevel@tonic-gate /* 1187c478bd9Sstevel@tonic-gate * The vph_mutex[] array holds the mutexes to protect the 
vnode chains, 1197c478bd9Sstevel@tonic-gate * (i.e., the list of pages anchored by v_pages and connected via p_vpprev 1207c478bd9Sstevel@tonic-gate * and p_vpnext). 1217c478bd9Sstevel@tonic-gate * 1227c478bd9Sstevel@tonic-gate * The page_vnode_mutex(vp) function returns the address of the appropriate 1237c478bd9Sstevel@tonic-gate * mutex from this array given a pointer to a vnode. It is complicated 1247c478bd9Sstevel@tonic-gate * by the fact that the kernel's vnode and the swapfs vnode are referenced 1257c478bd9Sstevel@tonic-gate * frequently enough to warrent their own mutexes. 1267c478bd9Sstevel@tonic-gate * 1277c478bd9Sstevel@tonic-gate * The VP_HASH_FUNC returns the index into the vph_mutex array given 1287c478bd9Sstevel@tonic-gate * an address of a vnode. 1297c478bd9Sstevel@tonic-gate */ 1307c478bd9Sstevel@tonic-gate 1317c478bd9Sstevel@tonic-gate /* 1327c478bd9Sstevel@tonic-gate * XX64 VPH_TABLE_SIZE and VP_HASH_FUNC might break in 64 bit world. 1337c478bd9Sstevel@tonic-gate * Need to review again. 1347c478bd9Sstevel@tonic-gate */ 1357c478bd9Sstevel@tonic-gate #define VPH_TABLE_SIZE (2 << VP_SHIFT) 1367c478bd9Sstevel@tonic-gate 1377c478bd9Sstevel@tonic-gate #define VP_HASH_FUNC(vp) \ 1387c478bd9Sstevel@tonic-gate ((((uintptr_t)(vp) >> 6) + \ 1397c478bd9Sstevel@tonic-gate ((uintptr_t)(vp) >> 8) + \ 1407c478bd9Sstevel@tonic-gate ((uintptr_t)(vp) >> 10) + \ 1417c478bd9Sstevel@tonic-gate ((uintptr_t)(vp) >> 12)) \ 1427c478bd9Sstevel@tonic-gate & (VPH_TABLE_SIZE - 1)) 1437c478bd9Sstevel@tonic-gate 1447c478bd9Sstevel@tonic-gate extern struct vnode kvp; 1457c478bd9Sstevel@tonic-gate 1467c478bd9Sstevel@tonic-gate kmutex_t vph_mutex[VPH_TABLE_SIZE + 2]; 1477c478bd9Sstevel@tonic-gate 1487c478bd9Sstevel@tonic-gate /* 1497c478bd9Sstevel@tonic-gate * Initialize the locks used by the Virtual Memory Management system. 
 */
void
page_lock_init()
{
	/*
	 * Intentionally empty: the lock arrays above (ph_mutex, pse_mutex,
	 * pio_mutex, pszc_mutex, vph_mutex) are statically allocated at
	 * file scope, so no run-time setup is required here.  The function
	 * is kept as a hook for the VM startup sequence.
	 */
}

/*
 * At present we only use page ownership to aid debugging, so it's
 * OK if the owner field isn't exact. In the 32-bit world two thread ids
 * can map to the same owner because we just 'or' in 0x80000000 and
 * then clear the second highest bit, so that (for example) 0x2faced00
 * and 0xafaced00 both map to 0xafaced00.
 * In the 64-bit world, p_selock may not be large enough to hold a full
 * thread pointer. If we ever need precise ownership (e.g. if we implement
 * priority inheritance for page locks) then p_selock should become a
 * uintptr_t and SE_WRITER should be -((uintptr_t)curthread >> 2).
 */
/* Writer value: negative (INT_MIN set) with SE_EWANTED cleared. */
#define	SE_WRITER	(((selock_t)(ulong_t)curthread | INT_MIN) & ~SE_EWANTED)
/* Each shared holder adds SE_READER to p_selock; p_selock > 0 => readers. */
#define	SE_READER	1

/*
 * A page that is deleted must be marked as such using the
 * page_lock_delete() function. The page must be exclusively locked.
 * The SE_DELETED marker is put in p_selock when this function is called.
 * SE_DELETED must be distinct from any SE_WRITER value.
1757c478bd9Sstevel@tonic-gate */ 1767c478bd9Sstevel@tonic-gate #define SE_DELETED (1 | INT_MIN) 1777c478bd9Sstevel@tonic-gate 1787c478bd9Sstevel@tonic-gate #ifdef VM_STATS 1797c478bd9Sstevel@tonic-gate uint_t vph_kvp_count; 1807c478bd9Sstevel@tonic-gate uint_t vph_swapfsvp_count; 1817c478bd9Sstevel@tonic-gate uint_t vph_other; 1827c478bd9Sstevel@tonic-gate #endif /* VM_STATS */ 1837c478bd9Sstevel@tonic-gate 1847c478bd9Sstevel@tonic-gate #ifdef VM_STATS 1857c478bd9Sstevel@tonic-gate uint_t page_lock_count; 1867c478bd9Sstevel@tonic-gate uint_t page_lock_miss; 1877c478bd9Sstevel@tonic-gate uint_t page_lock_miss_lock; 1887c478bd9Sstevel@tonic-gate uint_t page_lock_reclaim; 1897c478bd9Sstevel@tonic-gate uint_t page_lock_bad_reclaim; 1907c478bd9Sstevel@tonic-gate uint_t page_lock_same_page; 1917c478bd9Sstevel@tonic-gate uint_t page_lock_upgrade; 192*db874c57Selowe uint_t page_lock_retired; 1937c478bd9Sstevel@tonic-gate uint_t page_lock_upgrade_failed; 1947c478bd9Sstevel@tonic-gate uint_t page_lock_deleted; 1957c478bd9Sstevel@tonic-gate 1967c478bd9Sstevel@tonic-gate uint_t page_trylock_locked; 197*db874c57Selowe uint_t page_trylock_failed; 1987c478bd9Sstevel@tonic-gate uint_t page_trylock_missed; 1997c478bd9Sstevel@tonic-gate 2007c478bd9Sstevel@tonic-gate uint_t page_try_reclaim_upgrade; 2017c478bd9Sstevel@tonic-gate #endif /* VM_STATS */ 2027c478bd9Sstevel@tonic-gate 2037c478bd9Sstevel@tonic-gate /* 2047c478bd9Sstevel@tonic-gate * Acquire the "shared/exclusive" lock on a page. 2057c478bd9Sstevel@tonic-gate * 2067c478bd9Sstevel@tonic-gate * Returns 1 on success and locks the page appropriately. 2077c478bd9Sstevel@tonic-gate * 0 on failure and does not lock the page. 2087c478bd9Sstevel@tonic-gate * 2097c478bd9Sstevel@tonic-gate * If `lock' is non-NULL, it will be dropped and reacquired in the 2107c478bd9Sstevel@tonic-gate * failure case. 
This routine can block, and if it does
 * it will always return a failure since the page identity [vp, off]
 * or state may have changed.
 */

/*
 * page_lock: acquire pp's shared/exclusive lock with default semantics.
 * Thin wrapper around page_lock_es() with es == 0, i.e. no SE_EXCL_WANTED
 * reader-starvation protection and no SE_RETIRED override.
 */
int
page_lock(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim)
{
	return (page_lock_es(pp, se, lock, reclaim, 0));
}

/*
 * With the addition of reader-writer lock semantics to page_lock_es,
 * callers wanting an exclusive (writer) lock may prevent shared-lock
 * (reader) starvation by setting the es parameter to SE_EXCL_WANTED.
 * In this case, when an exclusive lock cannot be acquired, p_selock's
 * SE_EWANTED bit is set. Shared-lock (reader) requests are also denied
 * if the page is slated for retirement.
 *
 * The se and es parameters determine if the lock should be granted
 * based on the following decision table:
 *
 * Lock wanted	es flags	p_selock/SE_EWANTED	Action
 * -----------	--------------	-------------------	---------
 * SE_EXCL	any [1][2]	unlocked/any		grant lock, clear SE_EWANTED
 * SE_EXCL	SE_EWANTED	any lock/any		deny, set SE_EWANTED
 * SE_EXCL	none		any lock/any		deny
 * SE_SHARED	n/a [2][3]	shared/0		grant
 * SE_SHARED	n/a [2][3]	unlocked/0		grant
 * SE_SHARED	n/a		shared/1		deny
 * SE_SHARED	n/a		unlocked/1		deny
 * SE_SHARED	n/a		excl/any		deny
 *
 * Notes:
 * [1] The code grants an exclusive lock to the caller and clears the bit
 * SE_EWANTED whenever p_selock is unlocked, regardless of the SE_EWANTED
 * bit's value. This was deemed acceptable as we are not concerned about
 * exclusive-lock starvation. If this ever becomes an issue, a priority or
 * fifo mechanism should also be implemented. Meantime, the thread that
 * set SE_EWANTED should be prepared to catch this condition and reset it.
 *
 * [2] Retired pages may not be locked at any time, regardless of the
 * disposition of se, unless the es parameter has SE_RETIRED flag set.
 *
 * [3] If the page is slated for retirement the lock is denied.
 *
 * Notes on values of "es":
 *
 * es & 1: page_lookup_create will attempt page relocation
 * es & SE_EXCL_WANTED: caller wants SE_EWANTED set (eg.
delete
 * memory thread); this prevents reader-starvation of waiting
 * writer thread(s) by giving priority to writers over readers.
 * es & SE_RETIRED: caller wants to lock pages even if they are
 * retired. Default is to deny the lock if the page is retired.
 *
 * And yes, we know, the semantics of this function are too complicated.
 * It's on the list to be cleaned up.
 */
int
page_lock_es(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim, int es)
{
	int		retval;
	kmutex_t	*pse = PAGE_SE_MUTEX(pp);
	int		upgraded;
	int		reclaim_it;

	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);

	VM_STAT_ADD(page_lock_count);

	upgraded = 0;
	reclaim_it = 0;

	mutex_enter(pse);

	/* SE_EXCL_WANTED is only meaningful for exclusive requests. */
	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));

	/* Retired pages are never lockable unless the caller passed SE_RETIRED. */
	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
		mutex_exit(pse);
		VM_STAT_ADD(page_lock_retired);
		return (0);
	}

	/*
	 * es == 1 is the page_lookup_create relocation case (see the notes
	 * on "es" above): opportunistically upgrade an uncontended shared
	 * request to exclusive.
	 */
	if (se == SE_SHARED && es == 1 && pp->p_selock == 0) {
		se = SE_EXCL;
	}

	if ((reclaim == P_RECLAIM) && (PP_ISFREE(pp))) {

		reclaim_it = 1;
		if (se == SE_SHARED) {
			/*
			 * This is an interesting situation.
			 *
			 * Remember that p_free can only change if
			 * p_selock < 0.
			 * p_free does not depend on our holding `pse'.
			 * And, since we hold `pse', p_selock can not change.
			 * So, if p_free changes on us, the page is already
			 * exclusively held, and we would fail to get p_selock
			 * regardless.
			 *
			 * We want to avoid getting the share
			 * lock on a free page that needs to be reclaimed.
			 * It is possible that some other thread has the share
			 * lock and has left the free page on the cache list.
			 * pvn_vplist_dirty() does this for brief periods.
			 * If the se_share is currently SE_EXCL, we will fail
			 * to acquire p_selock anyway. Blocking is the
			 * right thing to do.
			 * If we need to reclaim this page, we must get
			 * exclusive access to it, force the upgrade now.
			 * Again, we will fail to acquire p_selock if the
			 * page is not free and block.
			 */
			upgraded = 1;
			se = SE_EXCL;
			VM_STAT_ADD(page_lock_upgrade);
		}
	}

	if (se == SE_EXCL) {
		if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
			/*
			 * if the caller wants a writer lock (but did not
			 * specify exclusive access), and there is a pending
			 * writer that wants exclusive access, return failure
			 */
			retval = 0;
		} else if ((pp->p_selock & ~SE_EWANTED) == 0) {
			/* no reader/writer lock held */
			THREAD_KPRI_REQUEST();
			/* this clears our setting of the SE_EWANTED bit */
			pp->p_selock = SE_WRITER;
			retval = 1;
		} else {
			/* page is locked */
			if (es & SE_EXCL_WANTED) {
				/* set the SE_EWANTED bit */
				pp->p_selock |= SE_EWANTED;
			}
			retval = 0;
		}
	} else {
		retval = 0;
		if (pp->p_selock >= 0) {
			/*
			 * Readers are not allowed when excl wanted or
			 * a retire is pending. Since kvp pages can take
			 * a long time to be retired, we make an exception
			 * for them to avoid hanging threads unnecessarily.
			 */
			if ((pp->p_selock & SE_EWANTED) == 0) {
				if (!PP_PR_REQ(pp) || pp->p_vnode == &kvp) {
					pp->p_selock += SE_READER;
					retval = 1;
				}
			}
		}
	}

	if (retval == 0) {
		/* Deleted pages can never be locked again; fail immediately. */
		if ((pp->p_selock & ~SE_EWANTED) == SE_DELETED) {
			VM_STAT_ADD(page_lock_deleted);
			mutex_exit(pse);
			return (retval);
		}

#ifdef VM_STATS
		VM_STAT_ADD(page_lock_miss);
		if (upgraded) {
			VM_STAT_ADD(page_lock_upgrade_failed);
		}
#endif
		if (lock) {
			VM_STAT_ADD(page_lock_miss_lock);
			mutex_exit(lock);
		}

		/*
		 * Now, wait for the page to be unlocked and
		 * release the lock protecting p_cv and p_selock.
		 */
		cv_wait(&pp->p_cv, pse);
		mutex_exit(pse);

		/*
		 * The page identity may have changed while we were
		 * blocked. If we are willing to depend on "pp"
		 * still pointing to a valid page structure (i.e.,
		 * assuming page structures are not dynamically allocated
		 * or freed), we could try to lock the page if its
		 * identity hasn't changed.
		 *
		 * This needs to be measured, since we come back from
		 * cv_wait holding pse (the expensive part of this
		 * operation) we might as well try the cheap part.
		 * Though we would also have to confirm that dropping
		 * `lock' did not cause any grief to the callers.
		 */
		if (lock) {
			mutex_enter(lock);
		}
	} else {
		/*
		 * We have the page lock.
		 * If we needed to reclaim the page, and the page
		 * needed reclaiming (ie, it was free), then we
		 * have the page exclusively locked. We may need
		 * to downgrade the page.
		 */
		ASSERT((upgraded) ?
		    ((PP_ISFREE(pp)) && PAGE_EXCL(pp)) : 1);
		mutex_exit(pse);

		/*
		 * We now hold this page's lock, either shared or
		 * exclusive. This will prevent its identity from changing.
		 * The page, however, may or may not be free. If the caller
		 * requested, and it is free, go reclaim it from the
		 * free list. If the page can't be reclaimed, return failure
		 * so that the caller can start all over again.
		 *
		 * NOTE:page_reclaim() releases the page lock (p_selock)
		 * if it can't be reclaimed.
		 */
		if (reclaim_it) {
			if (!page_reclaim(pp, lock)) {
				VM_STAT_ADD(page_lock_bad_reclaim);
				retval = 0;
			} else {
				VM_STAT_ADD(page_lock_reclaim);
				if (upgraded) {
					/* Caller asked for SE_SHARED; give it back. */
					page_downgrade(pp);
				}
			}
		}
	}
	return (retval);
}

/*
 * Clear the SE_EWANTED bit from p_selock. This function allows
 * callers of page_lock_es and page_try_reclaim_lock to clear
 * their setting of this bit if they decide they no longer wish
 * to gain exclusive access to the page. Currently only
 * delete_memory_thread uses this when the delete memory
 * operation is cancelled.
 */
void
page_lock_clr_exclwanted(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	pp->p_selock &= ~SE_EWANTED;
	/*
	 * Wake any threads blocked in page_lock_es()'s cv_wait so they
	 * can retry now that the exclusive-waiter bit is gone.
	 */
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}

/*
 * Read the comments inside of page_lock_es() carefully.
 *
 * SE_EXCL callers specifying es == SE_EXCL_WANTED will cause the
 * SE_EWANTED bit of p_selock to be set when the lock cannot be obtained.
 * This is used by threads subject to reader-starvation (eg. memory delete).
 *
 * When a thread using SE_EXCL_WANTED does not obtain the SE_EXCL lock,
 * it is expected that it will retry at a later time. Threads that will
 * not retry the lock *must* call page_lock_clr_exclwanted to clear the
 * SE_EWANTED bit. (When a thread using SE_EXCL_WANTED obtains the lock,
 * the bit is cleared.)
 */
/*
 * Non-blocking acquire of pp's shared/exclusive lock, with reclaim-style
 * upgrade of shared requests on free pages. Returns 1 on success, 0 on
 * failure; never waits. See the es-flag discussion above page_lock_es().
 */
int
page_try_reclaim_lock(page_t *pp, se_t se, int es)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	/* Snapshot p_selock; it cannot change while we hold pse. */
	old = pp->p_selock;

	/* SE_EXCL_WANTED is only meaningful for exclusive requests. */
	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));

	/* Retired pages are never lockable unless the caller passed SE_RETIRED. */
	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	/*
	 * es == 1 is the page_lookup_create relocation case: upgrade an
	 * uncontended shared request to exclusive.
	 */
	if (se == SE_SHARED && es == 1 && old == 0) {
		se = SE_EXCL;
	}

	if (se == SE_SHARED) {
		if (!PP_ISFREE(pp)) {
			if (old >= 0) {
				/*
				 * Readers are not allowed when excl wanted
				 * or a retire is pending. Since kvp pages can
				 * take a long time to be retired, we make an
				 * exception for them to avoid hanging threads
				 * unnecessarily.
				 */
				if ((old & SE_EWANTED) == 0) {
					if (!PP_PR_REQ(pp) ||
					    pp->p_vnode == &kvp) {
						pp->p_selock = old + SE_READER;
						mutex_exit(pse);
						return (1);
					}
				}
			}
			mutex_exit(pse);
			return (0);
		}
		/*
		 * The page is free, so we really want SE_EXCL (below)
		 */
		VM_STAT_ADD(page_try_reclaim_upgrade);
	}

	/*
	 * The caller wants a writer lock. We try for it only if
	 * SE_EWANTED is not set, or if the caller specified
	 * SE_EXCL_WANTED.
	 */
	if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
		if ((old & ~SE_EWANTED) == 0) {
			/* no reader/writer lock held */
			THREAD_KPRI_REQUEST();
			/* this clears out our setting of the SE_EWANTED bit */
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	}
	if (es & SE_EXCL_WANTED) {
		/* page is locked, set the SE_EWANTED bit */
		pp->p_selock |= SE_EWANTED;
	}
	mutex_exit(pse);
	return (0);
}

/*
 * Acquire a page's "shared/exclusive" lock, but never block.
 * Returns 1 on success, 0 on failure.
 */
int
page_trylock(page_t *pp, se_t se)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
	    (se == SE_SHARED && PP_PR_REQ(pp) && pp->p_vnode != &kvp)) {
		/*
		 * Fail if a thread wants exclusive access and page is
		 * retired, if the page is slated for retirement, or a
		 * share lock is requested.
		 */
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_EXCL) {
		/* Exclusive: only grantable when no lock and no SE_EWANTED. */
		if (pp->p_selock == 0) {
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	} else {
		/* Shared: grantable unless a writer holds it (p_selock < 0). */
		if (pp->p_selock >= 0) {
			pp->p_selock += SE_READER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}

/*
 * Variant of page_unlock() specifically for the page freelist
 * code. The mere existence of this code is a vile hack that
 * has resulted due to the backwards locking order of the page
 * freelist manager; please don't call it.
 */
void
page_unlock_noretire(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* last reader: drop to unlocked, preserving SE_EWANTED */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock_noretire: page %p is deleted", pp);
	} else if (old < 0) {
		/*
		 * negative p_selock is the exclusive (SE_WRITER) state;
		 * release pairs with the THREAD_KPRI_REQUEST() done when
		 * the writer lock was taken.
		 */
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* more than one reader: just decrement the count */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock_noretire: page %p is not locked", pp);
	}

	mutex_exit(pse);
}

/*
 * Release the page's "shared/exclusive" lock and wake up anyone
 * who might be waiting for it.
 */
void
page_unlock(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	selock_t old;

	mutex_enter(pse);

	old = pp->p_selock;
	if ((old & ~SE_EWANTED) == SE_READER) {
		/* last reader: drop to unlocked, preserving SE_EWANTED */
		pp->p_selock = old & ~SE_READER;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
		panic("page_unlock: page %p is deleted", pp);
	} else if (old < 0) {
		/* exclusive holder: release writer state and kernel prio */
		THREAD_KPRI_RELEASE();
		pp->p_selock &= SE_EWANTED;
		if (CV_HAS_WAITERS(&pp->p_cv))
			cv_broadcast(&pp->p_cv);
	} else if ((old & ~SE_EWANTED) > SE_READER) {
		/* more than one reader: just decrement the count */
		pp->p_selock = old - SE_READER;
	} else {
		panic("page_unlock: page %p is not locked", pp);
	}

	if (pp->p_selock == 0 && PP_PR_REQ(pp)) {
		/*
		 * Try to retire the page. If it retires, great.
		 * If not, oh well, we'll get it in the next unlock
		 * request, and repeat the cycle.  Regardless,
		 * page_tryretire() will drop the page lock.
		 */
		if ((pp->p_toxic & PR_BUSY) == 0) {
			/*
			 * Re-acquire exclusively, mark the retire in
			 * progress, and hand off to page_tryretire()
			 * (called without pse held).
			 */
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			page_settoxic(pp, PR_BUSY);
			mutex_exit(pse);
			page_tryretire(pp);
		} else {
			/*
			 * NOTE(review): PR_BUSY already set — the code
			 * briefly takes SE_WRITER, clears PR_BUSY, and
			 * leaves the page unlocked, presumably to break
			 * a retire/unlock cycle; confirm against
			 * page_tryretire().
			 */
			pp->p_selock = SE_WRITER;
			page_clrtoxic(pp, PR_BUSY);
			pp->p_selock = 0;
			mutex_exit(pse);
		}
	} else {
		mutex_exit(pse);
	}
}

/*
 * Try to upgrade the lock on the page from a "shared" to an
 * "exclusive" lock.  Since this upgrade operation is done while
 * holding the mutex protecting this page, no one else can acquire this page's
 * lock and change the page. Thus, it is safe to drop the "shared"
 * lock and attempt to acquire the "exclusive" lock.
 *
 * Returns 1 on success, 0 on failure.
6967c478bd9Sstevel@tonic-gate */ 6977c478bd9Sstevel@tonic-gate int 6987c478bd9Sstevel@tonic-gate page_tryupgrade(page_t *pp) 6997c478bd9Sstevel@tonic-gate { 7007c478bd9Sstevel@tonic-gate kmutex_t *pse = PAGE_SE_MUTEX(pp); 7017c478bd9Sstevel@tonic-gate 7027c478bd9Sstevel@tonic-gate mutex_enter(pse); 7037c478bd9Sstevel@tonic-gate if (!(pp->p_selock & SE_EWANTED)) { 7047c478bd9Sstevel@tonic-gate /* no threads want exclusive access, try upgrade */ 7057c478bd9Sstevel@tonic-gate if (pp->p_selock == SE_READER) { 7067c478bd9Sstevel@tonic-gate THREAD_KPRI_REQUEST(); 7077c478bd9Sstevel@tonic-gate /* convert to exclusive lock */ 7087c478bd9Sstevel@tonic-gate pp->p_selock = SE_WRITER; 7097c478bd9Sstevel@tonic-gate mutex_exit(pse); 7107c478bd9Sstevel@tonic-gate return (1); 7117c478bd9Sstevel@tonic-gate } 7127c478bd9Sstevel@tonic-gate } 7137c478bd9Sstevel@tonic-gate mutex_exit(pse); 7147c478bd9Sstevel@tonic-gate return (0); 7157c478bd9Sstevel@tonic-gate } 7167c478bd9Sstevel@tonic-gate 7177c478bd9Sstevel@tonic-gate /* 7187c478bd9Sstevel@tonic-gate * Downgrade the "exclusive" lock on the page to a "shared" lock 7197c478bd9Sstevel@tonic-gate * while holding the mutex protecting this page's p_selock field. 
 */
void
page_downgrade(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	int excl_waiting;

	ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
	ASSERT(PAGE_EXCL(pp));

	mutex_enter(pse);
	/* carry any pending exclusive-waiter indication into reader state */
	excl_waiting =  pp->p_selock & SE_EWANTED;
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_READER | excl_waiting;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}

/*
 * Mark the page's lock as deleted.  The page must be exclusively
 * locked, name-less (no vnode/offset identity), and not free.
 */
void
page_lock_delete(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);
	ASSERT(pp->p_offset == (u_offset_t)-1);
	ASSERT(!PP_ISFREE(pp));

	mutex_enter(pse);
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_DELETED;
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}

/*
 * Implement the io lock for pages
 */
void
page_iolock_init(page_t *pp)
{
	pp->p_iolock_state = 0;
	cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL);
}

/*
 * Acquire the i/o lock on a page.  Blocks (cv_wait) until the
 * PAGE_IO_INUSE bit can be claimed.
 */
void
page_io_lock(page_t *pp)
{
	kmutex_t *pio;

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);
	while (pp->p_iolock_state & PAGE_IO_INUSE) {
		cv_wait(&(pp->p_io_cv), pio);
	}
	pp->p_iolock_state |= PAGE_IO_INUSE;
	mutex_exit(pio);
}

/*
 * Release the i/o lock on a page.
 */
void
page_io_unlock(page_t *pp)
{
	kmutex_t *pio;

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);
	/*
	 * cv_signal wakes a single waiter; signalling before clearing
	 * PAGE_IO_INUSE is equivalent to the reverse order here since
	 * both happen while pio is held.
	 */
	cv_signal(&pp->p_io_cv);
	pp->p_iolock_state &= ~PAGE_IO_INUSE;
	mutex_exit(pio);
}

/*
 * Try to acquire the i/o lock on a page without blocking.
 * Returns 1 on success, 0 on failure.
8027c478bd9Sstevel@tonic-gate */ 8037c478bd9Sstevel@tonic-gate int 8047c478bd9Sstevel@tonic-gate page_io_trylock(page_t *pp) 8057c478bd9Sstevel@tonic-gate { 8067c478bd9Sstevel@tonic-gate kmutex_t *pio; 8077c478bd9Sstevel@tonic-gate 8087c478bd9Sstevel@tonic-gate if (pp->p_iolock_state & PAGE_IO_INUSE) 8097c478bd9Sstevel@tonic-gate return (0); 8107c478bd9Sstevel@tonic-gate 8117c478bd9Sstevel@tonic-gate pio = PAGE_IO_MUTEX(pp); 8127c478bd9Sstevel@tonic-gate mutex_enter(pio); 8137c478bd9Sstevel@tonic-gate 8147c478bd9Sstevel@tonic-gate if (pp->p_iolock_state & PAGE_IO_INUSE) { 8157c478bd9Sstevel@tonic-gate mutex_exit(pio); 8167c478bd9Sstevel@tonic-gate return (0); 8177c478bd9Sstevel@tonic-gate } 8187c478bd9Sstevel@tonic-gate pp->p_iolock_state |= PAGE_IO_INUSE; 8197c478bd9Sstevel@tonic-gate mutex_exit(pio); 8207c478bd9Sstevel@tonic-gate 8217c478bd9Sstevel@tonic-gate return (1); 8227c478bd9Sstevel@tonic-gate } 8237c478bd9Sstevel@tonic-gate 8247c478bd9Sstevel@tonic-gate /* 8257c478bd9Sstevel@tonic-gate * Assert that the i/o lock on a page is held. 8267c478bd9Sstevel@tonic-gate * Returns 1 on success, 0 on failure. 8277c478bd9Sstevel@tonic-gate */ 8287c478bd9Sstevel@tonic-gate int 8297c478bd9Sstevel@tonic-gate page_iolock_assert(page_t *pp) 8307c478bd9Sstevel@tonic-gate { 8317c478bd9Sstevel@tonic-gate return (pp->p_iolock_state & PAGE_IO_INUSE); 8327c478bd9Sstevel@tonic-gate } 8337c478bd9Sstevel@tonic-gate 8347c478bd9Sstevel@tonic-gate /* 8357c478bd9Sstevel@tonic-gate * Wrapper exported to kernel routines that are built 8367c478bd9Sstevel@tonic-gate * platform-independent (the macro is platform-dependent; 8377c478bd9Sstevel@tonic-gate * the size of vph_mutex[] is based on NCPU). 
8387c478bd9Sstevel@tonic-gate * 8397c478bd9Sstevel@tonic-gate * Note that you can do stress testing on this by setting the 8407c478bd9Sstevel@tonic-gate * variable page_vnode_mutex_stress to something other than 8417c478bd9Sstevel@tonic-gate * zero in a DEBUG kernel in a debugger after loading the kernel. 8427c478bd9Sstevel@tonic-gate * Setting it after the kernel is running may not work correctly. 8437c478bd9Sstevel@tonic-gate */ 8447c478bd9Sstevel@tonic-gate #ifdef DEBUG 8457c478bd9Sstevel@tonic-gate static int page_vnode_mutex_stress = 0; 8467c478bd9Sstevel@tonic-gate #endif 8477c478bd9Sstevel@tonic-gate 8487c478bd9Sstevel@tonic-gate kmutex_t * 8497c478bd9Sstevel@tonic-gate page_vnode_mutex(vnode_t *vp) 8507c478bd9Sstevel@tonic-gate { 8517c478bd9Sstevel@tonic-gate if (vp == &kvp) 8527c478bd9Sstevel@tonic-gate return (&vph_mutex[VPH_TABLE_SIZE + 0]); 8537c478bd9Sstevel@tonic-gate #ifdef DEBUG 8547c478bd9Sstevel@tonic-gate if (page_vnode_mutex_stress != 0) 8557c478bd9Sstevel@tonic-gate return (&vph_mutex[0]); 8567c478bd9Sstevel@tonic-gate #endif 8577c478bd9Sstevel@tonic-gate 8587c478bd9Sstevel@tonic-gate return (&vph_mutex[VP_HASH_FUNC(vp)]); 8597c478bd9Sstevel@tonic-gate } 8607c478bd9Sstevel@tonic-gate 8617c478bd9Sstevel@tonic-gate kmutex_t * 8627c478bd9Sstevel@tonic-gate page_se_mutex(page_t *pp) 8637c478bd9Sstevel@tonic-gate { 8647c478bd9Sstevel@tonic-gate return (PAGE_SE_MUTEX(pp)); 8657c478bd9Sstevel@tonic-gate } 8667c478bd9Sstevel@tonic-gate 8677c478bd9Sstevel@tonic-gate #ifdef VM_STATS 8687c478bd9Sstevel@tonic-gate uint_t pszclck_stat[4]; 8697c478bd9Sstevel@tonic-gate #endif 8707c478bd9Sstevel@tonic-gate /* 8717c478bd9Sstevel@tonic-gate * Find, take and return a mutex held by hat_page_demote(). 8727c478bd9Sstevel@tonic-gate * Called by page_demote_vp_pages() before hat_page_demote() call and by 8737c478bd9Sstevel@tonic-gate * routines that want to block hat_page_demote() but can't do it 8747c478bd9Sstevel@tonic-gate * via locking all constituent pages. 
 *
 * Return NULL if p_szc is 0.
 *
 * It should only be used for pages that can be demoted by hat_page_demote()
 * i.e. non swapfs file system pages. The logic here is lifted from
 * sfmmu_mlspl_enter() except there's no need to worry about p_szc increase
 * since the page is locked and not free.
 *
 * Hash of the root page is used to find the lock.
 * To find the root in the presence of hat_page_demote() changing the location
 * of the root this routine relies on the fact that hat_page_demote() changes
 * the root last.
 *
 * If NULL is returned pp's p_szc is guaranteed to be 0. If non-NULL is
 * returned pp's p_szc may be any value.
 */
kmutex_t *
page_szc_lock(page_t *pp)
{
	kmutex_t	*mtx;
	page_t		*rootpp;
	uint_t		szc;
	uint_t		rszc;
	uint_t		pszc = pp->p_szc;

	ASSERT(pp != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
	ASSERT(pp->p_vnode != &kvp);

again:
	if (pszc == 0) {
		/* small page: no szc lock to take */
		VM_STAT_ADD(pszclck_stat[0]);
		return (NULL);
	}

	/* The lock lives in the root page */

	rootpp = PP_GROUPLEADER(pp, pszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);

	/*
	 * since p_szc can only decrease if pp == rootpp
	 * rootpp will be always the same i.e we have the right root
	 * regardless of rootpp->p_szc.
	 * If location of pp's root didn't change after we took
	 * the lock we have the right root. return mutex hashed off it.
	 */
	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc) {
		VM_STAT_ADD(pszclck_stat[1]);
		return (mtx);
	}

	/*
	 * root location changed because page got demoted.
	 * locate the new root.
	 */
	if (rszc < pszc) {
		/* re-read pp's (now smaller) size code and retry */
		szc = pp->p_szc;
		ASSERT(szc < pszc);
		mutex_exit(mtx);
		pszc = szc;
		VM_STAT_ADD(pszclck_stat[2]);
		goto again;
	}

	VM_STAT_ADD(pszclck_stat[3]);
	/*
	 * current hat_page_demote not done yet.
	 * wait for it to finish.
	 */
	mutex_exit(mtx);
	rootpp = PP_GROUPLEADER(rootpp, rszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	/*
	 * enter/exit with no work in between: blocks until the
	 * demoter (which holds this mutex) has finished, then retry.
	 */
	mutex_enter(mtx);
	mutex_exit(mtx);
	ASSERT(rootpp->p_szc < rszc);
	goto again;
}

int
page_szc_lock_assert(page_t *pp)
{
	page_t *rootpp = PP_PAGEROOT(pp);
	kmutex_t *mtx = PAGE_SZC_MUTEX(rootpp);

	return (MUTEX_HELD(mtx));
}