xref: /illumos-gate/usr/src/uts/common/vm/page_lock.c (revision ac52b00e)
17c478bd9Sstevel@tonic-gate /*
27c478bd9Sstevel@tonic-gate  * CDDL HEADER START
37c478bd9Sstevel@tonic-gate  *
47c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
58bc68872Selowe  * Common Development and Distribution License (the "License").
68bc68872Selowe  * You may not use this file except in compliance with the License.
77c478bd9Sstevel@tonic-gate  *
87c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
97c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
107c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
117c478bd9Sstevel@tonic-gate  * and limitations under the License.
127c478bd9Sstevel@tonic-gate  *
137c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
147c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
157c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
167c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
177c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
187c478bd9Sstevel@tonic-gate  *
197c478bd9Sstevel@tonic-gate  * CDDL HEADER END
207c478bd9Sstevel@tonic-gate  */
217c478bd9Sstevel@tonic-gate /*
22ae115bc7Smrj  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
237c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
247c478bd9Sstevel@tonic-gate  */
257c478bd9Sstevel@tonic-gate 
267c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
277c478bd9Sstevel@tonic-gate 
287c478bd9Sstevel@tonic-gate /*
297c478bd9Sstevel@tonic-gate  * VM - page locking primitives
307c478bd9Sstevel@tonic-gate  */
317c478bd9Sstevel@tonic-gate #include <sys/param.h>
327c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
337c478bd9Sstevel@tonic-gate #include <sys/vtrace.h>
347c478bd9Sstevel@tonic-gate #include <sys/debug.h>
357c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
367c478bd9Sstevel@tonic-gate #include <sys/vnode.h>
377c478bd9Sstevel@tonic-gate #include <sys/bitmap.h>
387c478bd9Sstevel@tonic-gate #include <sys/lockstat.h>
397c478bd9Sstevel@tonic-gate #include <sys/condvar_impl.h>
407c478bd9Sstevel@tonic-gate #include <vm/page.h>
417c478bd9Sstevel@tonic-gate #include <vm/seg_enum.h>
427c478bd9Sstevel@tonic-gate #include <vm/vm_dep.h>
437c478bd9Sstevel@tonic-gate 
447c478bd9Sstevel@tonic-gate /*
457c478bd9Sstevel@tonic-gate  * This global mutex is for logical page locking.
467c478bd9Sstevel@tonic-gate  * The following fields in the page structure are protected
477c478bd9Sstevel@tonic-gate  * by this lock:
487c478bd9Sstevel@tonic-gate  *
497c478bd9Sstevel@tonic-gate  *	p_lckcnt
507c478bd9Sstevel@tonic-gate  *	p_cowcnt
517c478bd9Sstevel@tonic-gate  */
527c478bd9Sstevel@tonic-gate kmutex_t page_llock;
537c478bd9Sstevel@tonic-gate 
547c478bd9Sstevel@tonic-gate /*
557c478bd9Sstevel@tonic-gate  * This is a global lock for the logical page free list.  The
567c478bd9Sstevel@tonic-gate  * logical free list, in this implementation, is maintained as two
577c478bd9Sstevel@tonic-gate  * separate physical lists - the cache list and the free list.
587c478bd9Sstevel@tonic-gate  */
597c478bd9Sstevel@tonic-gate kmutex_t  page_freelock;
607c478bd9Sstevel@tonic-gate 
617c478bd9Sstevel@tonic-gate /*
627c478bd9Sstevel@tonic-gate  * The hash table, page_hash[], the p_selock fields, and the
637c478bd9Sstevel@tonic-gate  * list of pages associated with vnodes are protected by arrays of mutexes.
647c478bd9Sstevel@tonic-gate  *
657c478bd9Sstevel@tonic-gate  * Unless the hashes are changed radically, the table sizes must be
667c478bd9Sstevel@tonic-gate  * a power of two.  Also, we typically need more mutexes for the
677c478bd9Sstevel@tonic-gate  * vnodes since these locks are occasionally held for long periods.
687c478bd9Sstevel@tonic-gate  * And since there seem to be two special vnodes (kvp and swapvp),
697c478bd9Sstevel@tonic-gate  * we make room for private mutexes for them.
707c478bd9Sstevel@tonic-gate  *
717c478bd9Sstevel@tonic-gate  * The pse_mutex[] array holds the mutexes to protect the p_selock
727c478bd9Sstevel@tonic-gate  * fields of all page_t structures.
737c478bd9Sstevel@tonic-gate  *
747c478bd9Sstevel@tonic-gate  * PAGE_SE_MUTEX(pp) returns the address of the appropriate mutex
757c478bd9Sstevel@tonic-gate  * when given a pointer to a page_t.
767c478bd9Sstevel@tonic-gate  *
777c478bd9Sstevel@tonic-gate  * PSE_TABLE_SIZE must be a power of two.  One could argue that we
787c478bd9Sstevel@tonic-gate  * should go to the trouble of setting it up at run time and base it
797c478bd9Sstevel@tonic-gate  * on memory size rather than the number of compile time CPUs.
807c478bd9Sstevel@tonic-gate  *
817c478bd9Sstevel@tonic-gate  * XX64	We should be using physmem size to calculate PSE_TABLE_SIZE,
827c478bd9Sstevel@tonic-gate  *	PSE_SHIFT, PIO_SHIFT.
837c478bd9Sstevel@tonic-gate  *
847c478bd9Sstevel@tonic-gate  *	These might break in 64 bit world.
857c478bd9Sstevel@tonic-gate  */
867c478bd9Sstevel@tonic-gate #define	PSE_SHIFT	7		/* log2(PSE_TABLE_SIZE) */
877c478bd9Sstevel@tonic-gate 
887c478bd9Sstevel@tonic-gate #define	PSE_TABLE_SIZE	128		/* number of mutexes to have */
897c478bd9Sstevel@tonic-gate 
907c478bd9Sstevel@tonic-gate #define	PIO_SHIFT	PSE_SHIFT	/* next power of 2 bigger than page_t */
917c478bd9Sstevel@tonic-gate #define	PIO_TABLE_SIZE	PSE_TABLE_SIZE	/* number of io mutexes to have */
927c478bd9Sstevel@tonic-gate 
/*
 * pse_mutex[] protects the p_selock field of every page_t (see the block
 * comment above); pio_mutex[] holds the per-page i/o mutexes selected by
 * PAGE_IO_MUTEX().  ph_mutex[] covers the page hash table chains.
 */
937c478bd9Sstevel@tonic-gate pad_mutex_t	ph_mutex[PH_TABLE_SIZE];
947c478bd9Sstevel@tonic-gate pad_mutex_t	pse_mutex[PSE_TABLE_SIZE];
957c478bd9Sstevel@tonic-gate kmutex_t	pio_mutex[PIO_TABLE_SIZE];
967c478bd9Sstevel@tonic-gate 
/*
 * Hash a page_t address into pse_mutex[]: xor two differently shifted
 * views of the pointer, then mask to the (power-of-two) table size.
 */
977c478bd9Sstevel@tonic-gate #define	PAGE_SE_MUTEX(pp) \
987c478bd9Sstevel@tonic-gate 	    &pse_mutex[((((uintptr_t)(pp) >> PSE_SHIFT) ^ \
997c478bd9Sstevel@tonic-gate 		((uintptr_t)(pp) >> (PSE_SHIFT << 1))) & \
1007c478bd9Sstevel@tonic-gate 		(PSE_TABLE_SIZE - 1))].pad_mutex
1017c478bd9Sstevel@tonic-gate 
1027c478bd9Sstevel@tonic-gate #define	PAGE_IO_MUTEX(pp) \
1037c478bd9Sstevel@tonic-gate 	    &pio_mutex[(((uintptr_t)pp) >> PIO_SHIFT) & (PIO_TABLE_SIZE - 1)]
1047c478bd9Sstevel@tonic-gate 
1057c478bd9Sstevel@tonic-gate #define	PSZC_MTX_TABLE_SIZE	128
1067c478bd9Sstevel@tonic-gate #define	PSZC_MTX_TABLE_SHIFT	7
1077c478bd9Sstevel@tonic-gate 
/*
 * NOTE(review): pszc_mutex[] presumably serializes updates to a page's
 * p_szc (page size code) field — confirm against callers of
 * PAGE_SZC_MUTEX() elsewhere in the VM code.
 */
1087c478bd9Sstevel@tonic-gate static pad_mutex_t	pszc_mutex[PSZC_MTX_TABLE_SIZE];
1097c478bd9Sstevel@tonic-gate 
1107c478bd9Sstevel@tonic-gate #define	PAGE_SZC_MUTEX(_pp) \
1117c478bd9Sstevel@tonic-gate 	    &pszc_mutex[((((uintptr_t)(_pp) >> PSZC_MTX_TABLE_SHIFT) ^ \
1127c478bd9Sstevel@tonic-gate 		((uintptr_t)(_pp) >> (PSZC_MTX_TABLE_SHIFT << 1)) ^ \
1137c478bd9Sstevel@tonic-gate 		((uintptr_t)(_pp) >> (3 * PSZC_MTX_TABLE_SHIFT))) & \
1147c478bd9Sstevel@tonic-gate 		(PSZC_MTX_TABLE_SIZE - 1))].pad_mutex
1157c478bd9Sstevel@tonic-gate 
1157c478bd9Sstevel@tonic-gate 
1167c478bd9Sstevel@tonic-gate /*
1177c478bd9Sstevel@tonic-gate  * The vph_mutex[] array  holds the mutexes to protect the vnode chains,
1187c478bd9Sstevel@tonic-gate  * (i.e., the list of pages anchored by v_pages and connected via p_vpprev
1197c478bd9Sstevel@tonic-gate  * and p_vpnext).
1207c478bd9Sstevel@tonic-gate  *
1217c478bd9Sstevel@tonic-gate  * The page_vnode_mutex(vp) function returns the address of the appropriate
1227c478bd9Sstevel@tonic-gate  * mutex from this array given a pointer to a vnode.  It is complicated
1237c478bd9Sstevel@tonic-gate  * by the fact that the kernel's vnode and the swapfs vnode are referenced
1247c478bd9Sstevel@tonic-gate  * frequently enough to warrant their own mutexes.
1257c478bd9Sstevel@tonic-gate  *
1267c478bd9Sstevel@tonic-gate  * The VP_HASH_FUNC returns the index into the vph_mutex array given
1277c478bd9Sstevel@tonic-gate  * an address of a vnode.
1287c478bd9Sstevel@tonic-gate  */
1297c478bd9Sstevel@tonic-gate 
1307c478bd9Sstevel@tonic-gate /*
1317c478bd9Sstevel@tonic-gate  * XX64	VPH_TABLE_SIZE and VP_HASH_FUNC might break in 64 bit world.
1327c478bd9Sstevel@tonic-gate  *	Need to review again.
1337c478bd9Sstevel@tonic-gate  */
134*ac52b00eSqiao #if defined(_LP64)
135*ac52b00eSqiao #define	VPH_TABLE_SIZE  (1 << (VP_SHIFT + 3))
136*ac52b00eSqiao #else	/* 32 bits */
1377c478bd9Sstevel@tonic-gate #define	VPH_TABLE_SIZE	(2 << VP_SHIFT)
138*ac52b00eSqiao #endif
1397c478bd9Sstevel@tonic-gate 
/*
 * Hash a vnode address into vph_mutex[]: sum four right-shifted views
 * of the pointer and mask to the (power-of-two) table size.
 */
1407c478bd9Sstevel@tonic-gate #define	VP_HASH_FUNC(vp) \
1417c478bd9Sstevel@tonic-gate 	((((uintptr_t)(vp) >> 6) + \
1427c478bd9Sstevel@tonic-gate 	    ((uintptr_t)(vp) >> 8) + \
1437c478bd9Sstevel@tonic-gate 	    ((uintptr_t)(vp) >> 10) + \
1447c478bd9Sstevel@tonic-gate 	    ((uintptr_t)(vp) >> 12)) \
1457c478bd9Sstevel@tonic-gate 	    & (VPH_TABLE_SIZE - 1))
1467c478bd9Sstevel@tonic-gate 
1477c478bd9Sstevel@tonic-gate extern	struct vnode	kvp;
1487c478bd9Sstevel@tonic-gate 
149ad23a2dbSjohansen /*
150ad23a2dbSjohansen  * Two slots after VPH_TABLE_SIZE are reserved in vph_mutex for kernel vnodes.
151ad23a2dbSjohansen  * The lock for kvp is VPH_TABLE_SIZE + 0, and the lock for zvp is
152ad23a2dbSjohansen  * VPH_TABLE_SIZE + 1.
153ad23a2dbSjohansen  */
154ad23a2dbSjohansen 
1557c478bd9Sstevel@tonic-gate kmutex_t	vph_mutex[VPH_TABLE_SIZE + 2];
1567c478bd9Sstevel@tonic-gate 
/*
 * Initialize the locks used by the Virtual Memory Management system.
 * Every lock in this file is a statically allocated array element, so
 * there is currently nothing to set up at runtime; the routine exists
 * so startup code has a single initialization entry point.
 */
void
page_lock_init()
{
}
1647c478bd9Sstevel@tonic-gate 
1657c478bd9Sstevel@tonic-gate /*
1667c478bd9Sstevel@tonic-gate  * At present we only use page ownership to aid debugging, so it's
1677c478bd9Sstevel@tonic-gate  * OK if the owner field isn't exact.  In the 32-bit world two thread ids
1687c478bd9Sstevel@tonic-gate  * can map to the same owner because we just 'or' in 0x80000000 and
1697c478bd9Sstevel@tonic-gate  * then clear the second highest bit, so that (for example) 0x2faced00
1707c478bd9Sstevel@tonic-gate  * and 0xafaced00 both map to 0xafaced00.
1717c478bd9Sstevel@tonic-gate  * In the 64-bit world, p_selock may not be large enough to hold a full
1727c478bd9Sstevel@tonic-gate  * thread pointer.  If we ever need precise ownership (e.g. if we implement
1737c478bd9Sstevel@tonic-gate  * priority inheritance for page locks) then p_selock should become a
1747c478bd9Sstevel@tonic-gate  * uintptr_t and SE_WRITER should be -((uintptr_t)curthread >> 2).
1757c478bd9Sstevel@tonic-gate  */
1767c478bd9Sstevel@tonic-gate #define	SE_WRITER	(((selock_t)(ulong_t)curthread | INT_MIN) & ~SE_EWANTED)
1777c478bd9Sstevel@tonic-gate #define	SE_READER	1
1787c478bd9Sstevel@tonic-gate 
1797c478bd9Sstevel@tonic-gate /*
1807c478bd9Sstevel@tonic-gate  * A page that is deleted must be marked as such using the
1817c478bd9Sstevel@tonic-gate  * page_lock_delete() function. The page must be exclusively locked.
1827c478bd9Sstevel@tonic-gate  * The SE_DELETED marker is put in p_selock when this function is called.
1837c478bd9Sstevel@tonic-gate  * SE_DELETED must be distinct from any SE_WRITER value.
1847c478bd9Sstevel@tonic-gate  */
1857c478bd9Sstevel@tonic-gate #define	SE_DELETED	(1 | INT_MIN)
1867c478bd9Sstevel@tonic-gate 
1877c478bd9Sstevel@tonic-gate #ifdef VM_STATS
1887c478bd9Sstevel@tonic-gate uint_t	vph_kvp_count;
1897c478bd9Sstevel@tonic-gate uint_t	vph_swapfsvp_count;
1907c478bd9Sstevel@tonic-gate uint_t	vph_other;
1917c478bd9Sstevel@tonic-gate #endif /* VM_STATS */
1927c478bd9Sstevel@tonic-gate 
1937c478bd9Sstevel@tonic-gate #ifdef VM_STATS
1947c478bd9Sstevel@tonic-gate uint_t	page_lock_count;
1957c478bd9Sstevel@tonic-gate uint_t	page_lock_miss;
1967c478bd9Sstevel@tonic-gate uint_t	page_lock_miss_lock;
1977c478bd9Sstevel@tonic-gate uint_t	page_lock_reclaim;
1987c478bd9Sstevel@tonic-gate uint_t	page_lock_bad_reclaim;
1997c478bd9Sstevel@tonic-gate uint_t	page_lock_same_page;
2007c478bd9Sstevel@tonic-gate uint_t	page_lock_upgrade;
201db874c57Selowe uint_t	page_lock_retired;
2027c478bd9Sstevel@tonic-gate uint_t	page_lock_upgrade_failed;
2037c478bd9Sstevel@tonic-gate uint_t	page_lock_deleted;
2047c478bd9Sstevel@tonic-gate 
2057c478bd9Sstevel@tonic-gate uint_t	page_trylock_locked;
206db874c57Selowe uint_t	page_trylock_failed;
2077c478bd9Sstevel@tonic-gate uint_t	page_trylock_missed;
2087c478bd9Sstevel@tonic-gate 
2097c478bd9Sstevel@tonic-gate uint_t	page_try_reclaim_upgrade;
2107c478bd9Sstevel@tonic-gate #endif /* VM_STATS */
2117c478bd9Sstevel@tonic-gate 
2127c478bd9Sstevel@tonic-gate /*
2137c478bd9Sstevel@tonic-gate  * Acquire the "shared/exclusive" lock on a page.
2147c478bd9Sstevel@tonic-gate  *
2157c478bd9Sstevel@tonic-gate  * Returns 1 on success and locks the page appropriately.
2167c478bd9Sstevel@tonic-gate  *	   0 on failure and does not lock the page.
2177c478bd9Sstevel@tonic-gate  *
2187c478bd9Sstevel@tonic-gate  * If `lock' is non-NULL, it will be dropped and reacquired in the
2197c478bd9Sstevel@tonic-gate  * failure case.  This routine can block, and if it does
2207c478bd9Sstevel@tonic-gate  * it will always return a failure since the page identity [vp, off]
2217c478bd9Sstevel@tonic-gate  * or state may have changed.
2227c478bd9Sstevel@tonic-gate  */
2237c478bd9Sstevel@tonic-gate 
2247c478bd9Sstevel@tonic-gate int
2257c478bd9Sstevel@tonic-gate page_lock(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim)
2267c478bd9Sstevel@tonic-gate {
2277c478bd9Sstevel@tonic-gate 	return (page_lock_es(pp, se, lock, reclaim, 0));
2287c478bd9Sstevel@tonic-gate }
2297c478bd9Sstevel@tonic-gate 
2307c478bd9Sstevel@tonic-gate /*
2317c478bd9Sstevel@tonic-gate  * With the addition of reader-writer lock semantics to page_lock_es,
2327c478bd9Sstevel@tonic-gate  * callers wanting an exclusive (writer) lock may prevent shared-lock
2337c478bd9Sstevel@tonic-gate  * (reader) starvation by setting the es parameter to SE_EXCL_WANTED.
2347c478bd9Sstevel@tonic-gate  * In this case, when an exclusive lock cannot be acquired, p_selock's
235db874c57Selowe  * SE_EWANTED bit is set. Shared-lock (reader) requests are also denied
236db874c57Selowe  * if the page is slated for retirement.
237db874c57Selowe  *
238db874c57Selowe  * The se and es parameters determine if the lock should be granted
239db874c57Selowe  * based on the following decision table:
240db874c57Selowe  *
241db874c57Selowe  * Lock wanted   es flags     p_selock/SE_EWANTED  Action
242db874c57Selowe  * ----------- -------------- -------------------  ---------
243db874c57Selowe  * SE_EXCL        any [1][2]   unlocked/any        grant lock, clear SE_EWANTED
244db874c57Selowe  * SE_EXCL        SE_EWANTED   any lock/any        deny, set SE_EWANTED
245db874c57Selowe  * SE_EXCL        none         any lock/any        deny
2468bc68872Selowe  * SE_SHARED      n/a [2]        shared/0          grant
2478bc68872Selowe  * SE_SHARED      n/a [2]      unlocked/0          grant
248db874c57Selowe  * SE_SHARED      n/a            shared/1          deny
249db874c57Selowe  * SE_SHARED      n/a          unlocked/1          deny
250db874c57Selowe  * SE_SHARED      n/a              excl/any        deny
251db874c57Selowe  *
252db874c57Selowe  * Notes:
253db874c57Selowe  * [1] The code grants an exclusive lock to the caller and clears the bit
254db874c57Selowe  *   SE_EWANTED whenever p_selock is unlocked, regardless of the SE_EWANTED
255db874c57Selowe  *   bit's value.  This was deemed acceptable as we are not concerned about
256db874c57Selowe  *   exclusive-lock starvation. If this ever becomes an issue, a priority or
257db874c57Selowe  *   fifo mechanism should also be implemented. Meantime, the thread that
258db874c57Selowe  *   set SE_EWANTED should be prepared to catch this condition and reset it
259db874c57Selowe  *
260db874c57Selowe  * [2] Retired pages may not be locked at any time, regardless of the
261db874c57Selowe  *   disposition of se, unless the es parameter has SE_RETIRED flag set.
2627c478bd9Sstevel@tonic-gate  *
263db874c57Selowe  * Notes on values of "es":
264db874c57Selowe  *
265db874c57Selowe  *   es & 1: page_lookup_create will attempt page relocation
266db874c57Selowe  *   es & SE_EXCL_WANTED: caller wants SE_EWANTED set (eg. delete
267db874c57Selowe  *       memory thread); this prevents reader-starvation of waiting
268db874c57Selowe  *       writer thread(s) by giving priority to writers over readers.
269db874c57Selowe  *   es & SE_RETIRED: caller wants to lock pages even if they are
270db874c57Selowe  *       retired.  Default is to deny the lock if the page is retired.
271db874c57Selowe  *
272db874c57Selowe  * And yes, we know, the semantics of this function are too complicated.
273db874c57Selowe  * It's on the list to be cleaned up.
2747c478bd9Sstevel@tonic-gate  */
2757c478bd9Sstevel@tonic-gate int
2767c478bd9Sstevel@tonic-gate page_lock_es(page_t *pp, se_t se, kmutex_t *lock, reclaim_t reclaim, int es)
2777c478bd9Sstevel@tonic-gate {
2787c478bd9Sstevel@tonic-gate 	int		retval;
2797c478bd9Sstevel@tonic-gate 	kmutex_t	*pse = PAGE_SE_MUTEX(pp);
2807c478bd9Sstevel@tonic-gate 	int		upgraded;
2817c478bd9Sstevel@tonic-gate 	int		reclaim_it;
2827c478bd9Sstevel@tonic-gate 
2837c478bd9Sstevel@tonic-gate 	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
2847c478bd9Sstevel@tonic-gate 
2857c478bd9Sstevel@tonic-gate 	VM_STAT_ADD(page_lock_count);
2867c478bd9Sstevel@tonic-gate 
2877c478bd9Sstevel@tonic-gate 	upgraded = 0;
2887c478bd9Sstevel@tonic-gate 	reclaim_it = 0;
2897c478bd9Sstevel@tonic-gate 
2907c478bd9Sstevel@tonic-gate 	mutex_enter(pse);
2917c478bd9Sstevel@tonic-gate 
2927c478bd9Sstevel@tonic-gate 	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
293db874c57Selowe 	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));
294db874c57Selowe 
295db874c57Selowe 	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
296db874c57Selowe 		mutex_exit(pse);
297db874c57Selowe 		VM_STAT_ADD(page_lock_retired);
298db874c57Selowe 		return (0);
299db874c57Selowe 	}
3007c478bd9Sstevel@tonic-gate 
3017c478bd9Sstevel@tonic-gate 	if (se == SE_SHARED && es == 1 && pp->p_selock == 0) {
3027c478bd9Sstevel@tonic-gate 		se = SE_EXCL;
3037c478bd9Sstevel@tonic-gate 	}
3047c478bd9Sstevel@tonic-gate 
3057c478bd9Sstevel@tonic-gate 	if ((reclaim == P_RECLAIM) && (PP_ISFREE(pp))) {
3067c478bd9Sstevel@tonic-gate 
3077c478bd9Sstevel@tonic-gate 		reclaim_it = 1;
3087c478bd9Sstevel@tonic-gate 		if (se == SE_SHARED) {
3097c478bd9Sstevel@tonic-gate 			/*
3107c478bd9Sstevel@tonic-gate 			 * This is an interesting situation.
3117c478bd9Sstevel@tonic-gate 			 *
3127c478bd9Sstevel@tonic-gate 			 * Remember that p_free can only change if
3137c478bd9Sstevel@tonic-gate 			 * p_selock < 0.
3147c478bd9Sstevel@tonic-gate 			 * p_free does not depend on our holding `pse'.
3157c478bd9Sstevel@tonic-gate 			 * And, since we hold `pse', p_selock can not change.
3167c478bd9Sstevel@tonic-gate 			 * So, if p_free changes on us, the page is already
3177c478bd9Sstevel@tonic-gate 			 * exclusively held, and we would fail to get p_selock
3187c478bd9Sstevel@tonic-gate 			 * regardless.
3197c478bd9Sstevel@tonic-gate 			 *
3207c478bd9Sstevel@tonic-gate 			 * We want to avoid getting the share
3217c478bd9Sstevel@tonic-gate 			 * lock on a free page that needs to be reclaimed.
3227c478bd9Sstevel@tonic-gate 			 * It is possible that some other thread has the share
3237c478bd9Sstevel@tonic-gate 			 * lock and has left the free page on the cache list.
3247c478bd9Sstevel@tonic-gate 			 * pvn_vplist_dirty() does this for brief periods.
3257c478bd9Sstevel@tonic-gate 			 * If the se_share is currently SE_EXCL, we will fail
3267c478bd9Sstevel@tonic-gate 			 * to acquire p_selock anyway.  Blocking is the
3277c478bd9Sstevel@tonic-gate 			 * right thing to do.
3287c478bd9Sstevel@tonic-gate 			 * If we need to reclaim this page, we must get
3297c478bd9Sstevel@tonic-gate 			 * exclusive access to it, force the upgrade now.
3307c478bd9Sstevel@tonic-gate 			 * Again, we will fail to acquire p_selock if the
3317c478bd9Sstevel@tonic-gate 			 * page is not free and block.
3327c478bd9Sstevel@tonic-gate 			 */
3337c478bd9Sstevel@tonic-gate 			upgraded = 1;
3347c478bd9Sstevel@tonic-gate 			se = SE_EXCL;
3357c478bd9Sstevel@tonic-gate 			VM_STAT_ADD(page_lock_upgrade);
3367c478bd9Sstevel@tonic-gate 		}
3377c478bd9Sstevel@tonic-gate 	}
3387c478bd9Sstevel@tonic-gate 
3397c478bd9Sstevel@tonic-gate 	if (se == SE_EXCL) {
340db874c57Selowe 		if (!(es & SE_EXCL_WANTED) && (pp->p_selock & SE_EWANTED)) {
3417c478bd9Sstevel@tonic-gate 			/*
3427c478bd9Sstevel@tonic-gate 			 * if the caller wants a writer lock (but did not
3437c478bd9Sstevel@tonic-gate 			 * specify exclusive access), and there is a pending
3447c478bd9Sstevel@tonic-gate 			 * writer that wants exclusive access, return failure
3457c478bd9Sstevel@tonic-gate 			 */
3467c478bd9Sstevel@tonic-gate 			retval = 0;
3477c478bd9Sstevel@tonic-gate 		} else if ((pp->p_selock & ~SE_EWANTED) == 0) {
3487c478bd9Sstevel@tonic-gate 			/* no reader/writer lock held */
3497c478bd9Sstevel@tonic-gate 			THREAD_KPRI_REQUEST();
3507c478bd9Sstevel@tonic-gate 			/* this clears our setting of the SE_EWANTED bit */
3517c478bd9Sstevel@tonic-gate 			pp->p_selock = SE_WRITER;
3527c478bd9Sstevel@tonic-gate 			retval = 1;
3537c478bd9Sstevel@tonic-gate 		} else {
3547c478bd9Sstevel@tonic-gate 			/* page is locked */
355db874c57Selowe 			if (es & SE_EXCL_WANTED) {
3567c478bd9Sstevel@tonic-gate 				/* set the SE_EWANTED bit */
3577c478bd9Sstevel@tonic-gate 				pp->p_selock |= SE_EWANTED;
3587c478bd9Sstevel@tonic-gate 			}
3597c478bd9Sstevel@tonic-gate 			retval = 0;
3607c478bd9Sstevel@tonic-gate 		}
3617c478bd9Sstevel@tonic-gate 	} else {
3627c478bd9Sstevel@tonic-gate 		retval = 0;
3637c478bd9Sstevel@tonic-gate 		if (pp->p_selock >= 0) {
364db874c57Selowe 			if ((pp->p_selock & SE_EWANTED) == 0) {
3658bc68872Selowe 				pp->p_selock += SE_READER;
3668bc68872Selowe 				retval = 1;
3677c478bd9Sstevel@tonic-gate 			}
3687c478bd9Sstevel@tonic-gate 		}
3697c478bd9Sstevel@tonic-gate 	}
3707c478bd9Sstevel@tonic-gate 
3717c478bd9Sstevel@tonic-gate 	if (retval == 0) {
3727c478bd9Sstevel@tonic-gate 		if ((pp->p_selock & ~SE_EWANTED) == SE_DELETED) {
3737c478bd9Sstevel@tonic-gate 			VM_STAT_ADD(page_lock_deleted);
3747c478bd9Sstevel@tonic-gate 			mutex_exit(pse);
3757c478bd9Sstevel@tonic-gate 			return (retval);
3767c478bd9Sstevel@tonic-gate 		}
3777c478bd9Sstevel@tonic-gate 
3787c478bd9Sstevel@tonic-gate #ifdef VM_STATS
3797c478bd9Sstevel@tonic-gate 		VM_STAT_ADD(page_lock_miss);
3807c478bd9Sstevel@tonic-gate 		if (upgraded) {
3817c478bd9Sstevel@tonic-gate 			VM_STAT_ADD(page_lock_upgrade_failed);
3827c478bd9Sstevel@tonic-gate 		}
3837c478bd9Sstevel@tonic-gate #endif
3847c478bd9Sstevel@tonic-gate 		if (lock) {
3857c478bd9Sstevel@tonic-gate 			VM_STAT_ADD(page_lock_miss_lock);
3867c478bd9Sstevel@tonic-gate 			mutex_exit(lock);
3877c478bd9Sstevel@tonic-gate 		}
3887c478bd9Sstevel@tonic-gate 
3897c478bd9Sstevel@tonic-gate 		/*
3907c478bd9Sstevel@tonic-gate 		 * Now, wait for the page to be unlocked and
3917c478bd9Sstevel@tonic-gate 		 * release the lock protecting p_cv and p_selock.
3927c478bd9Sstevel@tonic-gate 		 */
3937c478bd9Sstevel@tonic-gate 		cv_wait(&pp->p_cv, pse);
3947c478bd9Sstevel@tonic-gate 		mutex_exit(pse);
3957c478bd9Sstevel@tonic-gate 
3967c478bd9Sstevel@tonic-gate 		/*
3977c478bd9Sstevel@tonic-gate 		 * The page identity may have changed while we were
3987c478bd9Sstevel@tonic-gate 		 * blocked.  If we are willing to depend on "pp"
3997c478bd9Sstevel@tonic-gate 		 * still pointing to a valid page structure (i.e.,
4007c478bd9Sstevel@tonic-gate 		 * assuming page structures are not dynamically allocated
4017c478bd9Sstevel@tonic-gate 		 * or freed), we could try to lock the page if its
4027c478bd9Sstevel@tonic-gate 		 * identity hasn't changed.
4037c478bd9Sstevel@tonic-gate 		 *
4047c478bd9Sstevel@tonic-gate 		 * This needs to be measured, since we come back from
4057c478bd9Sstevel@tonic-gate 		 * cv_wait holding pse (the expensive part of this
4067c478bd9Sstevel@tonic-gate 		 * operation) we might as well try the cheap part.
4077c478bd9Sstevel@tonic-gate 		 * Though we would also have to confirm that dropping
4087c478bd9Sstevel@tonic-gate 		 * `lock' did not cause any grief to the callers.
4097c478bd9Sstevel@tonic-gate 		 */
4107c478bd9Sstevel@tonic-gate 		if (lock) {
4117c478bd9Sstevel@tonic-gate 			mutex_enter(lock);
4127c478bd9Sstevel@tonic-gate 		}
4137c478bd9Sstevel@tonic-gate 	} else {
4147c478bd9Sstevel@tonic-gate 		/*
4157c478bd9Sstevel@tonic-gate 		 * We have the page lock.
4167c478bd9Sstevel@tonic-gate 		 * If we needed to reclaim the page, and the page
4177c478bd9Sstevel@tonic-gate 		 * needed reclaiming (ie, it was free), then we
4187c478bd9Sstevel@tonic-gate 		 * have the page exclusively locked.  We may need
4197c478bd9Sstevel@tonic-gate 		 * to downgrade the page.
4207c478bd9Sstevel@tonic-gate 		 */
4217c478bd9Sstevel@tonic-gate 		ASSERT((upgraded) ?
4227c478bd9Sstevel@tonic-gate 		    ((PP_ISFREE(pp)) && PAGE_EXCL(pp)) : 1);
4237c478bd9Sstevel@tonic-gate 		mutex_exit(pse);
4247c478bd9Sstevel@tonic-gate 
4257c478bd9Sstevel@tonic-gate 		/*
4267c478bd9Sstevel@tonic-gate 		 * We now hold this page's lock, either shared or
4277c478bd9Sstevel@tonic-gate 		 * exclusive.  This will prevent its identity from changing.
4287c478bd9Sstevel@tonic-gate 		 * The page, however, may or may not be free.  If the caller
4297c478bd9Sstevel@tonic-gate 		 * requested, and it is free, go reclaim it from the
4307c478bd9Sstevel@tonic-gate 		 * free list.  If the page can't be reclaimed, return failure
4317c478bd9Sstevel@tonic-gate 		 * so that the caller can start all over again.
4327c478bd9Sstevel@tonic-gate 		 *
4337c478bd9Sstevel@tonic-gate 		 * NOTE:page_reclaim() releases the page lock (p_selock)
4347c478bd9Sstevel@tonic-gate 		 *	if it can't be reclaimed.
4357c478bd9Sstevel@tonic-gate 		 */
4367c478bd9Sstevel@tonic-gate 		if (reclaim_it) {
4377c478bd9Sstevel@tonic-gate 			if (!page_reclaim(pp, lock)) {
4387c478bd9Sstevel@tonic-gate 				VM_STAT_ADD(page_lock_bad_reclaim);
4397c478bd9Sstevel@tonic-gate 				retval = 0;
4407c478bd9Sstevel@tonic-gate 			} else {
4417c478bd9Sstevel@tonic-gate 				VM_STAT_ADD(page_lock_reclaim);
4427c478bd9Sstevel@tonic-gate 				if (upgraded) {
4437c478bd9Sstevel@tonic-gate 					page_downgrade(pp);
4447c478bd9Sstevel@tonic-gate 				}
4457c478bd9Sstevel@tonic-gate 			}
4467c478bd9Sstevel@tonic-gate 		}
4477c478bd9Sstevel@tonic-gate 	}
4487c478bd9Sstevel@tonic-gate 	return (retval);
4497c478bd9Sstevel@tonic-gate }
4507c478bd9Sstevel@tonic-gate 
4517c478bd9Sstevel@tonic-gate /*
4527c478bd9Sstevel@tonic-gate  * Clear the SE_EWANTED bit from p_selock.  This function allows
4537c478bd9Sstevel@tonic-gate  * callers of page_lock_es and page_try_reclaim_lock to clear
4547c478bd9Sstevel@tonic-gate  * their setting of this bit if they decide they no longer wish
4557c478bd9Sstevel@tonic-gate  * to gain exclusive access to the page.  Currently only
4567c478bd9Sstevel@tonic-gate  * delete_memory_thread uses this when the delete memory
4577c478bd9Sstevel@tonic-gate  * operation is cancelled.
4587c478bd9Sstevel@tonic-gate  */
4597c478bd9Sstevel@tonic-gate void
4607c478bd9Sstevel@tonic-gate page_lock_clr_exclwanted(page_t *pp)
4617c478bd9Sstevel@tonic-gate {
4627c478bd9Sstevel@tonic-gate 	kmutex_t *pse = PAGE_SE_MUTEX(pp);
4637c478bd9Sstevel@tonic-gate 
4647c478bd9Sstevel@tonic-gate 	mutex_enter(pse);
4657c478bd9Sstevel@tonic-gate 	pp->p_selock &= ~SE_EWANTED;
4667c478bd9Sstevel@tonic-gate 	if (CV_HAS_WAITERS(&pp->p_cv))
4677c478bd9Sstevel@tonic-gate 		cv_broadcast(&pp->p_cv);
4687c478bd9Sstevel@tonic-gate 	mutex_exit(pse);
4697c478bd9Sstevel@tonic-gate }
4707c478bd9Sstevel@tonic-gate 
4717c478bd9Sstevel@tonic-gate /*
4727c478bd9Sstevel@tonic-gate  * Read the comments inside of page_lock_es() carefully.
4737c478bd9Sstevel@tonic-gate  *
4747c478bd9Sstevel@tonic-gate  * SE_EXCL callers specifying es == SE_EXCL_WANTED will cause the
4757c478bd9Sstevel@tonic-gate  * SE_EWANTED bit of p_selock to be set when the lock cannot be obtained.
4767c478bd9Sstevel@tonic-gate  * This is used by threads subject to reader-starvation (eg. memory delete).
4777c478bd9Sstevel@tonic-gate  *
4787c478bd9Sstevel@tonic-gate  * When a thread using SE_EXCL_WANTED does not obtain the SE_EXCL lock,
4797c478bd9Sstevel@tonic-gate  * it is expected that it will retry at a later time.  Threads that will
4807c478bd9Sstevel@tonic-gate  * not retry the lock *must* call page_lock_clr_exclwanted to clear the
4817c478bd9Sstevel@tonic-gate  * SE_EWANTED bit.  (When a thread using SE_EXCL_WANTED obtains the lock,
4827c478bd9Sstevel@tonic-gate  * the bit is cleared.)
4837c478bd9Sstevel@tonic-gate  */
4847c478bd9Sstevel@tonic-gate int
4857c478bd9Sstevel@tonic-gate page_try_reclaim_lock(page_t *pp, se_t se, int es)
4867c478bd9Sstevel@tonic-gate {
4877c478bd9Sstevel@tonic-gate 	kmutex_t *pse = PAGE_SE_MUTEX(pp);
4887c478bd9Sstevel@tonic-gate 	selock_t old;
4897c478bd9Sstevel@tonic-gate 
4907c478bd9Sstevel@tonic-gate 	mutex_enter(pse);
4917c478bd9Sstevel@tonic-gate 
4927c478bd9Sstevel@tonic-gate 	old = pp->p_selock;
4937c478bd9Sstevel@tonic-gate 
4947c478bd9Sstevel@tonic-gate 	ASSERT(((es & SE_EXCL_WANTED) == 0) ||
495db874c57Selowe 	    ((es & SE_EXCL_WANTED) && (se == SE_EXCL)));
496db874c57Selowe 
497db874c57Selowe 	if (PP_RETIRED(pp) && !(es & SE_RETIRED)) {
498db874c57Selowe 		mutex_exit(pse);
499db874c57Selowe 		VM_STAT_ADD(page_trylock_failed);
500db874c57Selowe 		return (0);
501db874c57Selowe 	}
5027c478bd9Sstevel@tonic-gate 
5037c478bd9Sstevel@tonic-gate 	if (se == SE_SHARED && es == 1 && old == 0) {
5047c478bd9Sstevel@tonic-gate 		se = SE_EXCL;
5057c478bd9Sstevel@tonic-gate 	}
5067c478bd9Sstevel@tonic-gate 
5077c478bd9Sstevel@tonic-gate 	if (se == SE_SHARED) {
5087c478bd9Sstevel@tonic-gate 		if (!PP_ISFREE(pp)) {
5097c478bd9Sstevel@tonic-gate 			if (old >= 0) {
510db874c57Selowe 				/*
511db874c57Selowe 				 * Readers are not allowed when excl wanted
512db874c57Selowe 				 */
513db874c57Selowe 				if ((old & SE_EWANTED) == 0) {
5148bc68872Selowe 					pp->p_selock = old + SE_READER;
5158bc68872Selowe 					mutex_exit(pse);
5168bc68872Selowe 					return (1);
5177c478bd9Sstevel@tonic-gate 				}
5187c478bd9Sstevel@tonic-gate 			}
5197c478bd9Sstevel@tonic-gate 			mutex_exit(pse);
5207c478bd9Sstevel@tonic-gate 			return (0);
5217c478bd9Sstevel@tonic-gate 		}
5227c478bd9Sstevel@tonic-gate 		/*
5237c478bd9Sstevel@tonic-gate 		 * The page is free, so we really want SE_EXCL (below)
5247c478bd9Sstevel@tonic-gate 		 */
5257c478bd9Sstevel@tonic-gate 		VM_STAT_ADD(page_try_reclaim_upgrade);
5267c478bd9Sstevel@tonic-gate 	}
5277c478bd9Sstevel@tonic-gate 
5287c478bd9Sstevel@tonic-gate 	/*
5297c478bd9Sstevel@tonic-gate 	 * The caller wants a writer lock.  We try for it only if
5307c478bd9Sstevel@tonic-gate 	 * SE_EWANTED is not set, or if the caller specified
5317c478bd9Sstevel@tonic-gate 	 * SE_EXCL_WANTED.
5327c478bd9Sstevel@tonic-gate 	 */
533db874c57Selowe 	if (!(old & SE_EWANTED) || (es & SE_EXCL_WANTED)) {
5347c478bd9Sstevel@tonic-gate 		if ((old & ~SE_EWANTED) == 0) {
5357c478bd9Sstevel@tonic-gate 			/* no reader/writer lock held */
5367c478bd9Sstevel@tonic-gate 			THREAD_KPRI_REQUEST();
5377c478bd9Sstevel@tonic-gate 			/* this clears out our setting of the SE_EWANTED bit */
5387c478bd9Sstevel@tonic-gate 			pp->p_selock = SE_WRITER;
5397c478bd9Sstevel@tonic-gate 			mutex_exit(pse);
5407c478bd9Sstevel@tonic-gate 			return (1);
5417c478bd9Sstevel@tonic-gate 		}
5427c478bd9Sstevel@tonic-gate 	}
543db874c57Selowe 	if (es & SE_EXCL_WANTED) {
5447c478bd9Sstevel@tonic-gate 		/* page is locked, set the SE_EWANTED bit */
5457c478bd9Sstevel@tonic-gate 		pp->p_selock |= SE_EWANTED;
5467c478bd9Sstevel@tonic-gate 	}
5477c478bd9Sstevel@tonic-gate 	mutex_exit(pse);
5487c478bd9Sstevel@tonic-gate 	return (0);
5497c478bd9Sstevel@tonic-gate }
5507c478bd9Sstevel@tonic-gate 
/*
 * Acquire a page's "shared/exclusive" lock, but never block.
 * Returns 1 on success, 0 on failure.
 */
int
page_trylock(page_t *pp, se_t se)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	mutex_enter(pse);
	if (pp->p_selock & SE_EWANTED || PP_RETIRED(pp) ||
	    (se == SE_SHARED && PP_PR_NOSHARE(pp))) {
		/*
		 * Fail immediately if another thread has announced that
		 * it wants exclusive access (SE_EWANTED), if the page has
		 * been retired, or if a shared lock is requested on a
		 * page that may not be shared.
		 */
		mutex_exit(pse);
		VM_STAT_ADD(page_trylock_failed);
		return (0);
	}

	if (se == SE_EXCL) {
		/* Exclusive is available only when no lock is held at all. */
		if (pp->p_selock == 0) {
			THREAD_KPRI_REQUEST();
			pp->p_selock = SE_WRITER;
			mutex_exit(pse);
			return (1);
		}
	} else {
		/*
		 * Shared: a non-negative p_selock means no writer holds
		 * the lock; add one reader's worth to the count.
		 */
		if (pp->p_selock >= 0) {
			pp->p_selock += SE_READER;
			mutex_exit(pse);
			return (1);
		}
	}
	mutex_exit(pse);
	return (0);
}
5907c478bd9Sstevel@tonic-gate 
591db874c57Selowe /*
592db874c57Selowe  * Variant of page_unlock() specifically for the page freelist
593db874c57Selowe  * code. The mere existence of this code is a vile hack that
594db874c57Selowe  * has resulted due to the backwards locking order of the page
595db874c57Selowe  * freelist manager; please don't call it.
596db874c57Selowe  */
597db874c57Selowe void
5988b464eb8Smec page_unlock_nocapture(page_t *pp)
599db874c57Selowe {
600db874c57Selowe 	kmutex_t *pse = PAGE_SE_MUTEX(pp);
601db874c57Selowe 	selock_t old;
602db874c57Selowe 
603db874c57Selowe 	mutex_enter(pse);
604db874c57Selowe 
605db874c57Selowe 	old = pp->p_selock;
606db874c57Selowe 	if ((old & ~SE_EWANTED) == SE_READER) {
607db874c57Selowe 		pp->p_selock = old & ~SE_READER;
608db874c57Selowe 		if (CV_HAS_WAITERS(&pp->p_cv))
609db874c57Selowe 			cv_broadcast(&pp->p_cv);
610db874c57Selowe 	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
6118b464eb8Smec 		panic("page_unlock_nocapture: page %p is deleted", pp);
612db874c57Selowe 	} else if (old < 0) {
613db874c57Selowe 		THREAD_KPRI_RELEASE();
614db874c57Selowe 		pp->p_selock &= SE_EWANTED;
615db874c57Selowe 		if (CV_HAS_WAITERS(&pp->p_cv))
616db874c57Selowe 			cv_broadcast(&pp->p_cv);
617db874c57Selowe 	} else if ((old & ~SE_EWANTED) > SE_READER) {
618db874c57Selowe 		pp->p_selock = old - SE_READER;
619db874c57Selowe 	} else {
6208b464eb8Smec 		panic("page_unlock_nocapture: page %p is not locked", pp);
621db874c57Selowe 	}
622db874c57Selowe 
623db874c57Selowe 	mutex_exit(pse);
624db874c57Selowe }
625db874c57Selowe 
6267c478bd9Sstevel@tonic-gate /*
6277c478bd9Sstevel@tonic-gate  * Release the page's "shared/exclusive" lock and wake up anyone
6287c478bd9Sstevel@tonic-gate  * who might be waiting for it.
6297c478bd9Sstevel@tonic-gate  */
6307c478bd9Sstevel@tonic-gate void
6317c478bd9Sstevel@tonic-gate page_unlock(page_t *pp)
6327c478bd9Sstevel@tonic-gate {
6337c478bd9Sstevel@tonic-gate 	kmutex_t *pse = PAGE_SE_MUTEX(pp);
6347c478bd9Sstevel@tonic-gate 	selock_t old;
6357c478bd9Sstevel@tonic-gate 
6367c478bd9Sstevel@tonic-gate 	mutex_enter(pse);
637db874c57Selowe 
6387c478bd9Sstevel@tonic-gate 	old = pp->p_selock;
6397c478bd9Sstevel@tonic-gate 	if ((old & ~SE_EWANTED) == SE_READER) {
6407c478bd9Sstevel@tonic-gate 		pp->p_selock = old & ~SE_READER;
6417c478bd9Sstevel@tonic-gate 		if (CV_HAS_WAITERS(&pp->p_cv))
6427c478bd9Sstevel@tonic-gate 			cv_broadcast(&pp->p_cv);
6437c478bd9Sstevel@tonic-gate 	} else if ((old & ~SE_EWANTED) == SE_DELETED) {
6447c478bd9Sstevel@tonic-gate 		panic("page_unlock: page %p is deleted", pp);
6457c478bd9Sstevel@tonic-gate 	} else if (old < 0) {
6467c478bd9Sstevel@tonic-gate 		THREAD_KPRI_RELEASE();
6477c478bd9Sstevel@tonic-gate 		pp->p_selock &= SE_EWANTED;
6487c478bd9Sstevel@tonic-gate 		if (CV_HAS_WAITERS(&pp->p_cv))
6497c478bd9Sstevel@tonic-gate 			cv_broadcast(&pp->p_cv);
6507c478bd9Sstevel@tonic-gate 	} else if ((old & ~SE_EWANTED) > SE_READER) {
6517c478bd9Sstevel@tonic-gate 		pp->p_selock = old - SE_READER;
6527c478bd9Sstevel@tonic-gate 	} else {
6537c478bd9Sstevel@tonic-gate 		panic("page_unlock: page %p is not locked", pp);
6547c478bd9Sstevel@tonic-gate 	}
655db874c57Selowe 
6568b464eb8Smec 	if (pp->p_selock == 0) {
657db874c57Selowe 		/*
6588b464eb8Smec 		 * If the T_CAPTURING bit is set, that means that we should
6598b464eb8Smec 		 * not try and capture the page again as we could recurse
6608b464eb8Smec 		 * which could lead to a stack overflow panic or spending a
6618b464eb8Smec 		 * relatively long time in the kernel making no progress.
662db874c57Selowe 		 */
6638b464eb8Smec 		if ((pp->p_toxic & PR_CAPTURE) &&
6648b464eb8Smec 		    !(curthread->t_flag & T_CAPTURING) &&
6658b464eb8Smec 		    !PP_RETIRED(pp)) {
666db874c57Selowe 			THREAD_KPRI_REQUEST();
667db874c57Selowe 			pp->p_selock = SE_WRITER;
668db874c57Selowe 			mutex_exit(pse);
6698b464eb8Smec 			page_unlock_capture(pp);
670db874c57Selowe 		} else {
671db874c57Selowe 			mutex_exit(pse);
672db874c57Selowe 		}
673db874c57Selowe 	} else {
674db874c57Selowe 		mutex_exit(pse);
675db874c57Selowe 	}
6767c478bd9Sstevel@tonic-gate }
6777c478bd9Sstevel@tonic-gate 
6787c478bd9Sstevel@tonic-gate /*
6797c478bd9Sstevel@tonic-gate  * Try to upgrade the lock on the page from a "shared" to an
6807c478bd9Sstevel@tonic-gate  * "exclusive" lock.  Since this upgrade operation is done while
6817c478bd9Sstevel@tonic-gate  * holding the mutex protecting this page, no one else can acquire this page's
6827c478bd9Sstevel@tonic-gate  * lock and change the page. Thus, it is safe to drop the "shared"
6837c478bd9Sstevel@tonic-gate  * lock and attempt to acquire the "exclusive" lock.
6847c478bd9Sstevel@tonic-gate  *
6857c478bd9Sstevel@tonic-gate  * Returns 1 on success, 0 on failure.
6867c478bd9Sstevel@tonic-gate  */
6877c478bd9Sstevel@tonic-gate int
6887c478bd9Sstevel@tonic-gate page_tryupgrade(page_t *pp)
6897c478bd9Sstevel@tonic-gate {
6907c478bd9Sstevel@tonic-gate 	kmutex_t *pse = PAGE_SE_MUTEX(pp);
6917c478bd9Sstevel@tonic-gate 
6927c478bd9Sstevel@tonic-gate 	mutex_enter(pse);
6937c478bd9Sstevel@tonic-gate 	if (!(pp->p_selock & SE_EWANTED)) {
6947c478bd9Sstevel@tonic-gate 		/* no threads want exclusive access, try upgrade */
6957c478bd9Sstevel@tonic-gate 		if (pp->p_selock == SE_READER) {
6967c478bd9Sstevel@tonic-gate 			THREAD_KPRI_REQUEST();
6977c478bd9Sstevel@tonic-gate 			/* convert to exclusive lock */
6987c478bd9Sstevel@tonic-gate 			pp->p_selock = SE_WRITER;
6997c478bd9Sstevel@tonic-gate 			mutex_exit(pse);
7007c478bd9Sstevel@tonic-gate 			return (1);
7017c478bd9Sstevel@tonic-gate 		}
7027c478bd9Sstevel@tonic-gate 	}
7037c478bd9Sstevel@tonic-gate 	mutex_exit(pse);
7047c478bd9Sstevel@tonic-gate 	return (0);
7057c478bd9Sstevel@tonic-gate }
7067c478bd9Sstevel@tonic-gate 
/*
 * Downgrade the "exclusive" lock on the page to a "shared" lock
 * while holding the mutex protecting this page's p_selock field.
 * The caller must hold the page exclusively on entry.
 */
void
page_downgrade(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);
	int excl_waiting;

	ASSERT((pp->p_selock & ~SE_EWANTED) != SE_DELETED);
	ASSERT(PAGE_EXCL(pp));

	mutex_enter(pse);
	/* Preserve the SE_EWANTED bit across the downgrade. */
	excl_waiting =  pp->p_selock & SE_EWANTED;
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_READER | excl_waiting;
	/* Wake anyone blocked on the selock; readers can now proceed. */
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
7287c478bd9Sstevel@tonic-gate 
/*
 * Mark the page's shared/exclusive lock as SE_DELETED.  The caller must
 * hold the page exclusively, and the page must already be dissociated
 * from its vnode (p_vnode NULL, p_offset -1) and not on the free list.
 */
void
page_lock_delete(page_t *pp)
{
	kmutex_t *pse = PAGE_SE_MUTEX(pp);

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);
	ASSERT(pp->p_offset == (u_offset_t)-1);
	ASSERT(!PP_ISFREE(pp));

	mutex_enter(pse);
	THREAD_KPRI_RELEASE();
	pp->p_selock = SE_DELETED;
	/* Wake waiters so they can observe the SE_DELETED state. */
	if (CV_HAS_WAITERS(&pp->p_cv))
		cv_broadcast(&pp->p_cv);
	mutex_exit(pse);
}
7467c478bd9Sstevel@tonic-gate 
7478b464eb8Smec int
7488b464eb8Smec page_deleted(page_t *pp)
7498b464eb8Smec {
7508b464eb8Smec 	return (pp->p_selock == SE_DELETED);
7518b464eb8Smec }
7528b464eb8Smec 
/*
 * Implement the io lock for pages
 */
void
page_iolock_init(page_t *pp)
{
	pp->p_iolock_state = 0;	/* lock starts out not held */
	cv_init(&pp->p_io_cv, NULL, CV_DEFAULT, NULL);
}
7627c478bd9Sstevel@tonic-gate 
7637c478bd9Sstevel@tonic-gate /*
7647c478bd9Sstevel@tonic-gate  * Acquire the i/o lock on a page.
7657c478bd9Sstevel@tonic-gate  */
7667c478bd9Sstevel@tonic-gate void
7677c478bd9Sstevel@tonic-gate page_io_lock(page_t *pp)
7687c478bd9Sstevel@tonic-gate {
7697c478bd9Sstevel@tonic-gate 	kmutex_t *pio;
7707c478bd9Sstevel@tonic-gate 
7717c478bd9Sstevel@tonic-gate 	pio = PAGE_IO_MUTEX(pp);
7727c478bd9Sstevel@tonic-gate 	mutex_enter(pio);
7737c478bd9Sstevel@tonic-gate 	while (pp->p_iolock_state & PAGE_IO_INUSE) {
7747c478bd9Sstevel@tonic-gate 		cv_wait(&(pp->p_io_cv), pio);
7757c478bd9Sstevel@tonic-gate 	}
7767c478bd9Sstevel@tonic-gate 	pp->p_iolock_state |= PAGE_IO_INUSE;
7777c478bd9Sstevel@tonic-gate 	mutex_exit(pio);
7787c478bd9Sstevel@tonic-gate }
7797c478bd9Sstevel@tonic-gate 
7807c478bd9Sstevel@tonic-gate /*
7817c478bd9Sstevel@tonic-gate  * Release the i/o lock on a page.
7827c478bd9Sstevel@tonic-gate  */
7837c478bd9Sstevel@tonic-gate void
7847c478bd9Sstevel@tonic-gate page_io_unlock(page_t *pp)
7857c478bd9Sstevel@tonic-gate {
7867c478bd9Sstevel@tonic-gate 	kmutex_t *pio;
7877c478bd9Sstevel@tonic-gate 
7887c478bd9Sstevel@tonic-gate 	pio = PAGE_IO_MUTEX(pp);
7897c478bd9Sstevel@tonic-gate 	mutex_enter(pio);
790a71e32b6Sstans 	cv_broadcast(&pp->p_io_cv);
7917c478bd9Sstevel@tonic-gate 	pp->p_iolock_state &= ~PAGE_IO_INUSE;
7927c478bd9Sstevel@tonic-gate 	mutex_exit(pio);
7937c478bd9Sstevel@tonic-gate }
7947c478bd9Sstevel@tonic-gate 
/*
 * Try to acquire the i/o lock on a page without blocking.
 * Returns 1 on success, 0 on failure.
 */
int
page_io_trylock(page_t *pp)
{
	kmutex_t *pio;

	/*
	 * Racy unlocked peek: if the lock looks busy, fail fast without
	 * paying for the mutex.  A stale read is harmless because the
	 * state is re-checked below with the mutex held.
	 */
	if (pp->p_iolock_state & PAGE_IO_INUSE)
		return (0);

	pio = PAGE_IO_MUTEX(pp);
	mutex_enter(pio);

	if (pp->p_iolock_state & PAGE_IO_INUSE) {
		mutex_exit(pio);
		return (0);
	}
	pp->p_iolock_state |= PAGE_IO_INUSE;
	mutex_exit(pio);

	return (1);
}
8197c478bd9Sstevel@tonic-gate 
820a71e32b6Sstans /*
821a71e32b6Sstans  * Wait until the i/o lock is not held.
822a71e32b6Sstans  */
823a71e32b6Sstans void
824a71e32b6Sstans page_io_wait(page_t *pp)
825a71e32b6Sstans {
826a71e32b6Sstans 	kmutex_t *pio;
827a71e32b6Sstans 
828a71e32b6Sstans 	pio = PAGE_IO_MUTEX(pp);
829a71e32b6Sstans 	mutex_enter(pio);
830a71e32b6Sstans 	while (pp->p_iolock_state & PAGE_IO_INUSE) {
831a71e32b6Sstans 		cv_wait(&(pp->p_io_cv), pio);
832a71e32b6Sstans 	}
833a71e32b6Sstans 	mutex_exit(pio);
834a71e32b6Sstans }
835a71e32b6Sstans 
/*
 * Returns non-zero if the i/o lock on the page is held, 0 otherwise.
 * Note this is an unlocked read of p_iolock_state.
 */
int
page_io_locked(page_t *pp)
{
	return (pp->p_iolock_state & PAGE_IO_INUSE);
}
844a71e32b6Sstans 
/*
 * Assert that the i/o lock on a page is held.
 * Returns non-zero if the lock is held, 0 otherwise.
 */
int
page_iolock_assert(page_t *pp)
{
	/* Thin historical wrapper around page_io_locked(). */
	return (page_io_locked(pp));
}
8547c478bd9Sstevel@tonic-gate 
8557c478bd9Sstevel@tonic-gate /*
8567c478bd9Sstevel@tonic-gate  * Wrapper exported to kernel routines that are built
8577c478bd9Sstevel@tonic-gate  * platform-independent (the macro is platform-dependent;
8587c478bd9Sstevel@tonic-gate  * the size of vph_mutex[] is based on NCPU).
8597c478bd9Sstevel@tonic-gate  *
8607c478bd9Sstevel@tonic-gate  * Note that you can do stress testing on this by setting the
8617c478bd9Sstevel@tonic-gate  * variable page_vnode_mutex_stress to something other than
8627c478bd9Sstevel@tonic-gate  * zero in a DEBUG kernel in a debugger after loading the kernel.
8637c478bd9Sstevel@tonic-gate  * Setting it after the kernel is running may not work correctly.
8647c478bd9Sstevel@tonic-gate  */
#ifdef DEBUG
/* When non-zero, force every vnode onto vph_mutex[0] (stress testing). */
static int page_vnode_mutex_stress = 0;
#endif
8687c478bd9Sstevel@tonic-gate 
/* Return the mutex protecting the page lists of the given vnode. */
kmutex_t *
page_vnode_mutex(vnode_t *vp)
{
	/* kvp and zvp each get a dedicated slot past the hash table. */
	if (vp == &kvp)
		return (&vph_mutex[VPH_TABLE_SIZE + 0]);

	if (vp == &zvp)
		return (&vph_mutex[VPH_TABLE_SIZE + 1]);
#ifdef DEBUG
	/* Stress mode: collapse all vnodes onto a single mutex. */
	if (page_vnode_mutex_stress != 0)
		return (&vph_mutex[0]);
#endif

	return (&vph_mutex[VP_HASH_FUNC(vp)]);
}
8847c478bd9Sstevel@tonic-gate 
/* Exported wrapper around the platform-dependent PAGE_SE_MUTEX() macro. */
kmutex_t *
page_se_mutex(page_t *pp)
{
	return (PAGE_SE_MUTEX(pp));
}
8907c478bd9Sstevel@tonic-gate 
#ifdef VM_STATS
/* page_szc_lock() outcome counters; indices match the VM_STAT_ADDs below. */
uint_t pszclck_stat[4];
#endif
8947c478bd9Sstevel@tonic-gate /*
8957c478bd9Sstevel@tonic-gate  * Find, take and return a mutex held by hat_page_demote().
8967c478bd9Sstevel@tonic-gate  * Called by page_demote_vp_pages() before hat_page_demote() call and by
8977c478bd9Sstevel@tonic-gate  * routines that want to block hat_page_demote() but can't do it
8987c478bd9Sstevel@tonic-gate  * via locking all constituent pages.
8997c478bd9Sstevel@tonic-gate  *
9007c478bd9Sstevel@tonic-gate  * Return NULL if p_szc is 0.
9017c478bd9Sstevel@tonic-gate  *
9027c478bd9Sstevel@tonic-gate  * It should only be used for pages that can be demoted by hat_page_demote()
9037c478bd9Sstevel@tonic-gate  * i.e. non swapfs file system pages.  The logic here is lifted from
9047c478bd9Sstevel@tonic-gate  * sfmmu_mlspl_enter() except there's no need to worry about p_szc increase
9057c478bd9Sstevel@tonic-gate  * since the page is locked and not free.
9067c478bd9Sstevel@tonic-gate  *
9077c478bd9Sstevel@tonic-gate  * Hash of the root page is used to find the lock.
 * To find the root in the presence of hat_page_demote() changing the location
9097c478bd9Sstevel@tonic-gate  * of the root this routine relies on the fact that hat_page_demote() changes
9107c478bd9Sstevel@tonic-gate  * root last.
9117c478bd9Sstevel@tonic-gate  *
9127c478bd9Sstevel@tonic-gate  * If NULL is returned pp's p_szc is guaranteed to be 0. If non NULL is
9137c478bd9Sstevel@tonic-gate  * returned pp's p_szc may be any value.
9147c478bd9Sstevel@tonic-gate  */
kmutex_t *
page_szc_lock(page_t *pp)
{
	kmutex_t	*mtx;
	page_t		*rootpp;
	uint_t		szc;
	uint_t		rszc;
	uint_t		pszc = pp->p_szc;	/* snapshot of pp's size code */

	ASSERT(pp != NULL);
	ASSERT(PAGE_LOCKED(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
	ASSERT(!PP_ISKAS(pp));

again:
	if (pszc == 0) {
		/* Small page: no demotion possible, so no lock is needed. */
		VM_STAT_ADD(pszclck_stat[0]);
		return (NULL);
	}

	/* The lock lives in the root page */

	rootpp = PP_GROUPLEADER(pp, pszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);

	/*
	 * since p_szc can only decrease if pp == rootpp
	 * rootpp will be always the same i.e we have the right root
	 * regardless of rootpp->p_szc.
	 * If location of pp's root didn't change after we took
	 * the lock we have the right root. return mutex hashed off it.
	 */
	if (pp == rootpp || (rszc = rootpp->p_szc) == pszc) {
		VM_STAT_ADD(pszclck_stat[1]);
		return (mtx);
	}

	/*
	 * root location changed because page got demoted.
	 * locate the new root.
	 */
	if (rszc < pszc) {
		/* Demotion finished; retry using pp's (smaller) new szc. */
		szc = pp->p_szc;
		ASSERT(szc < pszc);
		mutex_exit(mtx);
		pszc = szc;
		VM_STAT_ADD(pszclck_stat[2]);
		goto again;
	}

	VM_STAT_ADD(pszclck_stat[3]);
	/*
	 * current hat_page_demote not done yet.
	 * wait for it to finish.
	 */
	mutex_exit(mtx);
	/*
	 * Enter/exit the larger root's mutex purely to block until the
	 * in-progress demotion (which holds that mutex) completes.
	 */
	rootpp = PP_GROUPLEADER(rootpp, rszc);
	mtx = PAGE_SZC_MUTEX(rootpp);
	mutex_enter(mtx);
	mutex_exit(mtx);
	ASSERT(rootpp->p_szc < rszc);
	goto again;
}
9817c478bd9Sstevel@tonic-gate 
/*
 * Returns non-zero if the szc mutex for pp's large-page root is held;
 * intended for use in ASSERTs.
 */
int
page_szc_lock_assert(page_t *pp)
{
	page_t *rootpp = PP_PAGEROOT(pp);
	kmutex_t *mtx = PAGE_SZC_MUTEX(rootpp);

	return (MUTEX_HELD(mtx));
}
990ae115bc7Smrj 
/*
 * memseg locking: taken via memsegs_lock()/memsegs_unlock() below.
 */
static krwlock_t memsegslock;

/*
 * memlist (phys_install, phys_avail) locking, via the
 * memlist_{read,write}_{lock,unlock}() wrappers below.
 */
static krwlock_t memlists_lock;
1000ae115bc7Smrj 
1001ae115bc7Smrj void
1002ae115bc7Smrj memsegs_lock(int writer)
1003ae115bc7Smrj {
1004ae115bc7Smrj 	rw_enter(&memsegslock, writer ? RW_WRITER : RW_READER);
1005ae115bc7Smrj }
1006ae115bc7Smrj 
/*
 * Drop the memsegs lock.  "writer" is unused because rw_exit()
 * releases either mode.
 */
/*ARGSUSED*/
void
memsegs_unlock(int writer)
{
	rw_exit(&memsegslock);
}
1013ae115bc7Smrj 
/*
 * Returns non-zero if the memsegs lock is held, in either mode.
 */
int
memsegs_lock_held(void)
{
	return (RW_LOCK_HELD(&memsegslock));
}
1019ae115bc7Smrj 
/* Take the memlists lock as a reader. */
void
memlist_read_lock(void)
{
	rw_enter(&memlists_lock, RW_READER);
}
1025ae115bc7Smrj 
/* Release the memlists lock taken by memlist_read_lock(). */
void
memlist_read_unlock(void)
{
	rw_exit(&memlists_lock);
}
1031ae115bc7Smrj 
/* Take the memlists lock as a writer. */
void
memlist_write_lock(void)
{
	rw_enter(&memlists_lock, RW_WRITER);
}
1037ae115bc7Smrj 
/* Release the memlists lock taken by memlist_write_lock(). */
void
memlist_write_unlock(void)
{
	rw_exit(&memlists_lock);
}
1043