xref: /illumos-gate/usr/src/uts/common/vm/vm_page.c (revision 727737b4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
24  * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
25  * Copyright 2018 Joyent, Inc.
26  */
27 
28 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T */
29 /* All Rights Reserved */
30 
31 /*
32  * University Copyright- Copyright (c) 1982, 1986, 1988
33  * The Regents of the University of California
34  * All Rights Reserved
35  *
36  * University Acknowledgment- Portions of this document are derived from
37  * software developed by the University of California, Berkeley, and its
38  * contributors.
39  */
40 
41 /*
42  * VM - physical page management.
43  */
44 
45 #include <sys/types.h>
46 #include <sys/t_lock.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/errno.h>
50 #include <sys/time.h>
51 #include <sys/vnode.h>
52 #include <sys/vm.h>
53 #include <sys/vtrace.h>
54 #include <sys/swap.h>
55 #include <sys/cmn_err.h>
56 #include <sys/tuneable.h>
57 #include <sys/sysmacros.h>
58 #include <sys/cpuvar.h>
59 #include <sys/callb.h>
60 #include <sys/debug.h>
61 #include <sys/tnf_probe.h>
62 #include <sys/condvar_impl.h>
63 #include <sys/mem_config.h>
64 #include <sys/mem_cage.h>
65 #include <sys/kmem.h>
66 #include <sys/atomic.h>
67 #include <sys/strlog.h>
68 #include <sys/mman.h>
69 #include <sys/ontrap.h>
70 #include <sys/lgrp.h>
71 #include <sys/vfs.h>
72 
73 #include <vm/hat.h>
74 #include <vm/anon.h>
75 #include <vm/page.h>
76 #include <vm/seg.h>
77 #include <vm/pvn.h>
78 #include <vm/seg_kmem.h>
79 #include <vm/vm_dep.h>
80 #include <sys/vm_usage.h>
81 #include <fs/fs_subr.h>
82 #include <sys/ddi.h>
83 #include <sys/modctl.h>
84 
85 static pgcnt_t max_page_get;	/* max page_get request size in pages */
86 pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */
87 
88 /*
89  * freemem_lock protects all freemem variables:
90  * availrmem. This lock also protects the globals that track
91  * availrmem changes for accurate kernel footprint calculation.
92  * See below for an explanation of these
93  * globals.
94  */
95 kmutex_t freemem_lock;
96 pgcnt_t availrmem;
97 pgcnt_t availrmem_initial;
98 
99 /*
100  * These globals track availrmem changes to get a more accurate
101  * estimate of the kernel size. Historically pp_kernel is used for
102  * kernel size and is based on availrmem. But availrmem is adjusted for
103  * locked pages in the system not just for kernel locked pages.
104  * These new counters will track the pages locked through segvn and
105  * by explicit user locking.
106  *
107  * pages_locked : How many pages are locked because of user specified
108  * locking through mlock or plock.
109  *
110  * pages_useclaim, pages_claimed: These two variables track the
111  * claim adjustments because of the protection changes on a segvn segment.
112  *
113  * All these globals are protected by the same lock which protects availrmem.
114  */
115 pgcnt_t pages_locked = 0;
116 pgcnt_t pages_useclaim = 0;
117 pgcnt_t pages_claimed = 0;
118 
119 
120 /*
121  * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
122  */
123 static kmutex_t	new_freemem_lock;
124 static uint_t	freemem_wait;	/* someone waiting for freemem */
125 static kcondvar_t freemem_cv;
126 
127 /*
128  * The logical page free list is maintained as two lists, the 'free'
129  * and the 'cache' lists.
130  * The free list contains those pages that should be reused first.
131  *
132  * The implementation of the lists is machine dependent.
133  * page_get_freelist(), page_get_cachelist(),
134  * page_list_sub(), and page_list_add()
135  * form the interface to the machine dependent implementation.
136  *
137  * Pages with p_free set are on the cache list.
138  * Pages with p_free and p_age set are on the free list,
139  *
140  * A page may be locked while on either list.
141  */
142 
143 /*
144  * free list accounting stuff.
145  *
146  *
147  * Spread out the value for the number of pages on the
148  * page free and page cache lists.  If there is just one
149  * value, then it must be under just one lock.
150  * The lock contention and cache traffic are a real bother.
151  *
152  * When we acquire and then drop a single pcf lock
153  * we can start in the middle of the array of pcf structures.
154  * If we acquire more than one pcf lock at a time, we need to
155  * start at the front to avoid deadlocking.
156  *
157  * pcf_count holds the number of pages in each pool.
158  *
159  * pcf_block is set when page_create_get_something() has asked the
160  * PSM page freelist and page cachelist routines without specifying
161  * a color and nothing came back.  This is used to block anything
162  * else from moving pages from one list to the other while the
163  * lists are searched again.  If a page is freed while pcf_block is
164  * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
165  * of clearing pcf_block, doing the wakeups, etc.
166  */
167 
168 #define	MAX_PCF_FANOUT NCPU
169 static uint_t pcf_fanout = 1; /* Will get changed at boot time */
170 static uint_t pcf_fanout_mask = 0;
171 
172 struct pcf {
173 	kmutex_t	pcf_lock;	/* protects the structure */
174 	uint_t		pcf_count;	/* page count */
175 	uint_t		pcf_wait;	/* number of waiters */
176 	uint_t		pcf_block;	/* pcgs flag to page_free() */
177 	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
178 	uint_t		pcf_fill[10];	/* to line up on the caches */
179 };
180 
181 /*
182  * The PCF_INDEX hash needs to be dynamic (every so often the hash changes
183  * which bucket a cpu hashes to).  This is done to prevent a drain condition
184  * from happening.  This drain condition will occur when pcf_count decrement
185  * occurs on cpu A and the increment of pcf_count always occurs on cpu B.  An
186  * example of this shows up with device interrupts.  The dma buffer is allocated
187  * by the cpu requesting the IO, thus the pcf_count is decremented based on that.
188  * When the memory is returned by the interrupt thread, the pcf_count will be
189  * incremented based on the cpu servicing the interrupt.
190  */
191 static struct pcf pcf[MAX_PCF_FANOUT];
192 #define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
193 	(randtick() >> 24)) & (pcf_fanout_mask))
194 
195 static int pcf_decrement_bucket(pgcnt_t);
196 static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);
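
/*
 * Editorial sketch, not part of the original source: a minimal
 * illustration of taking one page's worth of count out of the spread
 * pcf counters via PCF_INDEX().  The real logic lives in
 * pcf_decrement_bucket() (declared above); this simplified version
 * handles a single page and ignores the pcf_block/pcf_wait machinery.
 */
#if 0	/* illustrative only; kept out of the build */
static int
pcf_take_one_page_sketch(void)
{
	struct pcf *p = &pcf[PCF_INDEX()];	/* pick a randomized bucket */

	mutex_enter(&p->pcf_lock);
	if (p->pcf_count > 0) {
		p->pcf_count--;			/* claim one page here */
		mutex_exit(&p->pcf_lock);
		return (1);
	}
	mutex_exit(&p->pcf_lock);
	return (0);	/* bucket empty; the real code scans other buckets */
}
#endif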
197 
198 kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
199 kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
200 kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
201 static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
202 
203 #ifdef VM_STATS
204 
205 /*
206  * No locks, but so what, they are only statistics.
207  */
208 
209 static struct page_tcnt {
210 	int	pc_free_cache;		/* free's into cache list */
211 	int	pc_free_dontneed;	/* free's with dontneed */
212 	int	pc_free_pageout;	/* free's from pageout */
213 	int	pc_free_free;		/* free's into free list */
214 	int	pc_free_pages;		/* free's into large page free list */
215 	int	pc_destroy_pages;	/* large page destroy's */
216 	int	pc_get_cache;		/* get's from cache list */
217 	int	pc_get_free;		/* get's from free list */
218 	int	pc_reclaim;		/* reclaim's */
219 	int	pc_abortfree;		/* abort's of free pages */
220 	int	pc_find_hit;		/* find's that find page */
221 	int	pc_find_miss;		/* find's that don't find page */
222 	int	pc_destroy_free;	/* # of free pages destroyed */
223 #define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
224 	int	pc_find_hashlen[PC_HASH_CNT+1];
225 	int	pc_addclaim_pages;
226 	int	pc_subclaim_pages;
227 	int	pc_free_replacement_page[2];
228 	int	pc_try_demote_pages[6];
229 	int	pc_demote_pages[2];
230 } pagecnt;
231 
232 uint_t	hashin_count;
233 uint_t	hashin_not_held;
234 uint_t	hashin_already;
235 
236 uint_t	hashout_count;
237 uint_t	hashout_not_held;
238 
239 uint_t	page_create_count;
240 uint_t	page_create_not_enough;
241 uint_t	page_create_not_enough_again;
242 uint_t	page_create_zero;
243 uint_t	page_create_hashout;
244 uint_t	page_create_page_lock_failed;
245 uint_t	page_create_trylock_failed;
246 uint_t	page_create_found_one;
247 uint_t	page_create_hashin_failed;
248 uint_t	page_create_dropped_phm;
249 
250 uint_t	page_create_new;
251 uint_t	page_create_exists;
252 uint_t	page_create_putbacks;
253 uint_t	page_create_overshoot;
254 
255 uint_t	page_reclaim_zero;
256 uint_t	page_reclaim_zero_locked;
257 
258 uint_t	page_rename_exists;
259 uint_t	page_rename_count;
260 
261 uint_t	page_lookup_cnt[20];
262 uint_t	page_lookup_nowait_cnt[10];
263 uint_t	page_find_cnt;
264 uint_t	page_exists_cnt;
265 uint_t	page_exists_forreal_cnt;
266 uint_t	page_lookup_dev_cnt;
267 uint_t	get_cachelist_cnt;
268 uint_t	page_create_cnt[10];
269 uint_t	alloc_pages[9];
270 uint_t	page_exphcontg[19];
271 uint_t  page_create_large_cnt[10];
272 
273 #endif
274 
275 static inline page_t *
276 page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
277 {
278 	uint_t mylen = 0;
279 	page_t *page;
280 
281 	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
282 		if (page->p_vnode == vnode && page->p_offset == off)
283 			break;
284 
285 #ifdef	VM_STATS
286 	if (page != NULL)
287 		pagecnt.pc_find_hit++;
288 	else
289 		pagecnt.pc_find_miss++;
290 
291 	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
292 #endif
293 
294 	return (page);
295 }
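
/*
 * Editorial note, not part of the original source: page_hash_search()
 * itself takes no locks.  For a stable answer the caller holds
 * PAGE_HASH_MUTEX(index) across the call, as page_find() below does;
 * hint-only callers such as page_exists() skip the mutex.
 */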
296 
297 
298 #ifdef DEBUG
299 #define	MEMSEG_SEARCH_STATS
300 #endif
301 
302 #ifdef MEMSEG_SEARCH_STATS
303 struct memseg_stats {
304     uint_t nsearch;
305     uint_t nlastwon;
306     uint_t nhashwon;
307     uint_t nnotfound;
308 } memseg_stats;
309 
310 #define	MEMSEG_STAT_INCR(v) \
311 	atomic_inc_32(&memseg_stats.v)
312 #else
313 #define	MEMSEG_STAT_INCR(x)
314 #endif
315 
316 struct memseg *memsegs;		/* list of memory segments */
317 
318 /*
319  * /etc/system tunable to control the large page allocation heuristic.
320  *
321  * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
322  * for large page allocation requests.  If a large page is not readily
323  * available on the local freelists, we will go through additional effort
324  * to create a large page, potentially moving smaller pages around to coalesce
325  * larger pages in the local lgroup.
326  * Default value of LPAP_DEFAULT will go to remote freelists if large pages
327  * are not readily available in the local lgroup.
328  */
329 enum lpap {
330 	LPAP_DEFAULT,	/* default large page allocation policy */
331 	LPAP_LOCAL	/* local large page allocation policy */
332 };
333 
334 enum lpap lpg_alloc_prefer = LPAP_DEFAULT;
335 
336 static void page_init_mem_config(void);
337 static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
338 static void page_do_hashout(page_t *);
339 static void page_capture_init();
340 int page_capture_take_action(page_t *, uint_t, void *);
341 
342 static void page_demote_vp_pages(page_t *);
343 
344 
345 void
346 pcf_init(void)
347 {
348 	if (boot_ncpus != -1) {
349 		pcf_fanout = boot_ncpus;
350 	} else {
351 		pcf_fanout = max_ncpus;
352 	}
353 #ifdef sun4v
354 	/*
355 	 * Force at least 4 buckets if possible for sun4v.
356 	 */
357 	pcf_fanout = MAX(pcf_fanout, 4);
358 #endif /* sun4v */
359 
360 	/*
361 	 * Round up to the nearest power of 2.
362 	 */
363 	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
364 	if (!ISP2(pcf_fanout)) {
365 		pcf_fanout = 1 << highbit(pcf_fanout);
366 
367 		if (pcf_fanout > MAX_PCF_FANOUT) {
368 			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
369 		}
370 	}
371 	pcf_fanout_mask = pcf_fanout - 1;
372 }
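
/*
 * Editorial note, not part of the original source: a worked example of
 * the rounding above.  With boot_ncpus == 6 (not a power of two),
 * highbit(6) == 3, so pcf_fanout becomes 1 << 3 == 8 and
 * pcf_fanout_mask becomes 7.
 */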
373 
374 /*
375  * vm subsystem related initialization
376  */
377 void
378 vm_init(void)
379 {
380 	boolean_t callb_vm_cpr(void *, int);
381 
382 	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
383 	page_init_mem_config();
384 	page_retire_init();
385 	vm_usage_init();
386 	page_capture_init();
387 }
388 
389 /*
390  * This function is called at startup and when memory is added or deleted.
391  */
392 void
393 init_pages_pp_maximum()
394 {
395 	static pgcnt_t p_min;
396 	static pgcnt_t pages_pp_maximum_startup;
397 	static pgcnt_t avrmem_delta;
398 	static int init_done;
399 	static int user_set;	/* true if set in /etc/system */
400 
401 	if (init_done == 0) {
402 
403 		/* If the user specified a value, save it */
404 		if (pages_pp_maximum != 0) {
405 			user_set = 1;
406 			pages_pp_maximum_startup = pages_pp_maximum;
407 		}
408 
409 		/*
410 		 * The first time through, pages_pp_maximum is based
411 		 * on the value of availrmem just after the start-up
412 		 * allocations. To preserve this relationship at run
413 		 * time, use a delta from availrmem_initial.
414 		 */
415 		ASSERT(availrmem_initial >= availrmem);
416 		avrmem_delta = availrmem_initial - availrmem;
417 
418 		/* The allowable floor of pages_pp_maximum */
419 		p_min = tune.t_minarmem + 100;
420 
421 		/* Make sure we don't come through here again. */
422 		init_done = 1;
423 	}
424 	/*
425 	 * Determine pages_pp_maximum, the number of currently available
426 	 * pages (availrmem) that can't be `locked'. If not set by
427 	 * the user, we set it to 4% of the currently available memory
428 	 * plus 4MB.
429 	 * But we also insist that it be greater than tune.t_minarmem;
430 	 * otherwise a process could lock down a lot of memory, get swapped
431 	 * out, and never have enough to get swapped back in.
432 	 */
433 	if (user_set)
434 		pages_pp_maximum = pages_pp_maximum_startup;
435 	else
436 		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
437 		    + btop(4 * 1024 * 1024);
438 
439 	if (pages_pp_maximum <= p_min) {
440 		pages_pp_maximum = p_min;
441 	}
442 }
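
/*
 * Editorial note, not part of the original source: a worked example of
 * the default calculation above, assuming 4K pages and about 1GB of
 * available memory (availrmem_initial - avrmem_delta == 262144 pages):
 * 262144 / 25 + btop(4MB) == 10485 + 1024 == 11509 pages (roughly
 * 45MB) that must remain unlocked.
 */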
443 
444 /*
445  * In the past, we limited the maximum pages that could be gotten to essentially
446  * 1/2 of the total pages on the system. However, this is too conservative in
447  * some cases, for example, when we want to host a large virtual machine which
448  * needs to use a significant portion of the system's memory. In practice,
449  * allowing more than 1/2 of the total pages is fine, but becomes problematic
450  * as we approach or exceed 75% of the pages on the system. Thus, we limit the
451  * maximum to 23/32 of the total pages, which is ~72%.
452  */
453 void
454 set_max_page_get(pgcnt_t target_total_pages)
455 {
456 	max_page_get = (target_total_pages >> 5) * 23;
457 	ASSERT3U(max_page_get, >, 0);
458 }
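
/*
 * Editorial note, not part of the original source: e.g. with
 * target_total_pages == 1048576, max_page_get becomes
 * (1048576 >> 5) * 23 == 32768 * 23 == 753664 pages, which is exactly
 * 23/32 (71.875%) of the total.
 */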
459 
460 pgcnt_t
461 get_max_page_get()
462 {
463 	return (max_page_get);
464 }
465 
466 static pgcnt_t pending_delete;
467 
468 /*ARGSUSED*/
469 static void
470 page_mem_config_post_add(
471 	void *arg,
472 	pgcnt_t delta_pages)
473 {
474 	set_max_page_get(total_pages - pending_delete);
475 	init_pages_pp_maximum();
476 }
477 
478 /*ARGSUSED*/
479 static int
480 page_mem_config_pre_del(
481 	void *arg,
482 	pgcnt_t delta_pages)
483 {
484 	pgcnt_t nv;
485 
486 	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
487 	set_max_page_get(total_pages - nv);
488 	return (0);
489 }
490 
491 /*ARGSUSED*/
492 static void
493 page_mem_config_post_del(
494 	void *arg,
495 	pgcnt_t delta_pages,
496 	int cancelled)
497 {
498 	pgcnt_t nv;
499 
500 	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
501 	set_max_page_get(total_pages - nv);
502 	if (!cancelled)
503 		init_pages_pp_maximum();
504 }
505 
506 static kphysm_setup_vector_t page_mem_config_vec = {
507 	KPHYSM_SETUP_VECTOR_VERSION,
508 	page_mem_config_post_add,
509 	page_mem_config_pre_del,
510 	page_mem_config_post_del,
511 };
512 
513 static void
514 page_init_mem_config(void)
515 {
516 	int ret;
517 
518 	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
519 	ASSERT(ret == 0);
520 }
521 
522 /*
523  * Evenly spread out the PCF counters for large free pages
524  */
525 static void
526 page_free_large_ctr(pgcnt_t npages)
527 {
528 	static struct pcf	*p = pcf;
529 	pgcnt_t			lump;
530 
531 	freemem += npages;
532 
533 	lump = roundup(npages, pcf_fanout) / pcf_fanout;
534 
535 	while (npages > 0) {
536 
537 		ASSERT(!p->pcf_block);
538 
539 		if (lump < npages) {
540 			p->pcf_count += (uint_t)lump;
541 			npages -= lump;
542 		} else {
543 			p->pcf_count += (uint_t)npages;
544 			npages = 0;
545 		}
546 
547 		ASSERT(!p->pcf_wait);
548 
549 		if (++p > &pcf[pcf_fanout - 1])
550 			p = pcf;
551 	}
552 
553 	ASSERT(npages == 0);
554 }
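
/*
 * Editorial note, not part of the original source: an example of the
 * lump distribution above.  With npages == 10 and pcf_fanout == 4,
 * lump == roundup(10, 4) / 4 == 3, so successive buckets receive
 * 3, 3, 3 and then the remaining 1 page.
 */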
555 
556 /*
557  * Add a physical chunk of memory to the system free lists during startup.
558  * Platform specific startup() allocates the memory for the page structs.
559  *
560  * num	- number of page structures
561  * pnum - page number (pfn) to be associated with the first page.
562  *
563  * Since we are doing this during startup (i.e. single-threaded), we will
564  * use shortcut routines to avoid any locking overhead while putting all
565  * these pages on the freelists.
566  *
567  * NOTE: Any changes performed to page_free(), must also be performed to
568  *	 add_physmem() since this is how we initialize all page_t's at
569  *	 boot time.
570  */
571 void
572 add_physmem(
573 	page_t	*pp,
574 	pgcnt_t	num,
575 	pfn_t	pnum)
576 {
577 	page_t	*root = NULL;
578 	uint_t	szc = page_num_pagesizes() - 1;
579 	pgcnt_t	large = page_get_pagecnt(szc);
580 	pgcnt_t	cnt = 0;
581 
582 	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
583 	    "add_physmem:pp %p num %lu", pp, num);
584 
585 	/*
586 	 * Arbitrarily limit the max page_get request
587 	 * to 1/2 of the page structs we have.
588 	 */
589 	total_pages += num;
590 	set_max_page_get(total_pages);
591 
592 	PLCNT_MODIFY_MAX(pnum, (long)num);
593 
594 	/*
595 	 * The physical space for the pages array
596 	 * representing ram pages has already been
597 	 * allocated.  Here we initialize each lock
598 	 * in the page structure, and put each on
599 	 * the free list
600 	 */
601 	for (; num; pp++, pnum++, num--) {
602 
603 		/*
604 		 * this needs to fill in the page number
605 		 * and do any other arch specific initialization
606 		 */
607 		add_physmem_cb(pp, pnum);
608 
609 		pp->p_lckcnt = 0;
610 		pp->p_cowcnt = 0;
611 		pp->p_slckcnt = 0;
612 
613 		/*
614 		 * Initialize the page lock as unlocked, since nobody
615 		 * can see or access this page yet.
616 		 */
617 		pp->p_selock = 0;
618 
619 		/*
620 		 * Initialize IO lock
621 		 */
622 		page_iolock_init(pp);
623 
624 		/*
625 		 * initialize other fields in the page_t
626 		 */
627 		PP_SETFREE(pp);
628 		page_clr_all_props(pp);
629 		PP_SETAGED(pp);
630 		pp->p_offset = (u_offset_t)-1;
631 		pp->p_next = pp;
632 		pp->p_prev = pp;
633 
634 		/*
635 		 * Simple case: System doesn't support large pages.
636 		 */
637 		if (szc == 0) {
638 			pp->p_szc = 0;
639 			page_free_at_startup(pp);
640 			continue;
641 		}
642 
643 		/*
644 		 * Handle unaligned pages, we collect them up onto
645 		 * the root page until we have a full large page.
646 		 */
647 		if (!IS_P2ALIGNED(pnum, large)) {
648 
649 			/*
650 			 * If not in a large page,
651 			 * just free as small page.
652 			 */
653 			if (root == NULL) {
654 				pp->p_szc = 0;
655 				page_free_at_startup(pp);
656 				continue;
657 			}
658 
659 			/*
660 			 * Link a constituent page into the large page.
661 			 */
662 			pp->p_szc = szc;
663 			page_list_concat(&root, &pp);
664 
665 			/*
666 			 * When large page is fully formed, free it.
667 			 */
668 			if (++cnt == large) {
669 				page_free_large_ctr(cnt);
670 				page_list_add_pages(root, PG_LIST_ISINIT);
671 				root = NULL;
672 				cnt = 0;
673 			}
674 			continue;
675 		}
676 
677 		/*
678 		 * At this point we have a page number which
679 		 * is aligned. We assert that we aren't already
680 		 * in a different large page.
681 		 */
682 		ASSERT(IS_P2ALIGNED(pnum, large));
683 		ASSERT(root == NULL && cnt == 0);
684 
685 		/*
686 		 * If insufficient number of pages left to form
687 		 * a large page, just free the small page.
688 		 */
689 		if (num < large) {
690 			pp->p_szc = 0;
691 			page_free_at_startup(pp);
692 			continue;
693 		}
694 
695 		/*
696 		 * Otherwise start a new large page.
697 		 */
698 		pp->p_szc = szc;
699 		cnt++;
700 		root = pp;
701 	}
702 	ASSERT(root == NULL && cnt == 0);
703 }
704 
705 /*
706  * Find a page representing the specified [vp, offset].
707  * If we find the page but it is in transit coming in,
708  * it will have an "exclusive" lock and we wait for
709  * the i/o to complete.  A page found on the free list
710  * is always reclaimed and then locked.  On success, the page
711  * is locked, its data is valid and it isn't on the free
712  * list, while a NULL is returned if the page doesn't exist.
713  */
714 page_t *
715 page_lookup(vnode_t *vp, u_offset_t off, se_t se)
716 {
717 	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
718 }
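
/*
 * Editorial sketch, not part of the original source: typical
 * page_lookup() usage.  The vnode and offset are whatever the caller
 * is working with; the point is that on success the page comes back
 * locked as requested (SE_SHARED below), off the free list, with
 * valid data, and must be unlocked by the caller.
 */
#if 0	/* illustrative only; kept out of the build */
static void
page_lookup_usage_sketch(vnode_t *vp, u_offset_t off)
{
	page_t *pp;

	pp = page_lookup(vp, off, SE_SHARED);
	if (pp != NULL) {
		/* ... examine the page's contents ... */
		page_unlock(pp);	/* drop the shared lock */
	}
}
#endif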
719 
720 /*
721  * Find a page representing the specified [vp, offset].
722  * We either return the one we found or, if passed in,
723  * create one with identity of [vp, offset] of the
724  * pre-allocated page. If we find existing page but it is
725  * intransit coming in, it will have an "exclusive" lock
726  * and we wait for the i/o to complete.  A page found on
727  * the free list is always reclaimed and then locked.
728  * On success, the page is locked, its data is valid and
729  * it isn't on the free list, while a NULL is returned
730  * if the page doesn't exist and newpp is NULL.
731  */
732 page_t *
733 page_lookup_create(
734 	vnode_t *vp,
735 	u_offset_t off,
736 	se_t se,
737 	page_t *newpp,
738 	spgcnt_t *nrelocp,
739 	int flags)
740 {
741 	page_t		*pp;
742 	kmutex_t	*phm;
743 	ulong_t		index;
744 	uint_t		hash_locked;
745 	uint_t		es;
746 
747 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
748 	VM_STAT_ADD(page_lookup_cnt[0]);
749 	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);
750 
751 	/*
752 	 * Acquire the appropriate page hash lock since
753 	 * we have to search the hash list.  Pages that
754 	 * hash to this list can't change identity while
755 	 * this lock is held.
756 	 */
757 	hash_locked = 0;
758 	index = PAGE_HASH_FUNC(vp, off);
759 	phm = NULL;
760 top:
761 	pp = page_hash_search(index, vp, off);
762 	if (pp != NULL) {
763 		VM_STAT_ADD(page_lookup_cnt[1]);
764 		es = (newpp != NULL) ? 1 : 0;
765 		es |= flags;
766 		if (!hash_locked) {
767 			VM_STAT_ADD(page_lookup_cnt[2]);
768 			if (!page_try_reclaim_lock(pp, se, es)) {
769 				/*
770 				 * On a miss, acquire the phm.  Then
771 				 * next time, page_lock() will be called,
772 				 * causing a wait if the page is busy.
773 				 * Just looping with page_trylock() would
774 				 * get pretty boring.
775 				 */
776 				VM_STAT_ADD(page_lookup_cnt[3]);
777 				phm = PAGE_HASH_MUTEX(index);
778 				mutex_enter(phm);
779 				hash_locked = 1;
780 				goto top;
781 			}
782 		} else {
783 			VM_STAT_ADD(page_lookup_cnt[4]);
784 			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
785 				VM_STAT_ADD(page_lookup_cnt[5]);
786 				goto top;
787 			}
788 		}
789 
790 		/*
791 		 * Since `pp' is locked it can not change identity now.
792 		 * Reconfirm we locked the correct page.
793 		 *
794 		 * Both the p_vnode and p_offset *must* be cast volatile
795 		 * to force a reload of their values: The page_hash_search
796 		 * function will have stuffed p_vnode and p_offset into
797 		 * registers before calling page_trylock(); another thread,
798 		 * actually holding the hash lock, could have changed the
799 		 * page's identity in memory, but our registers would not
800 		 * be changed, fooling the reconfirmation.  If the hash
801 		 * lock was held during the search, the casting would
802 		 * not be needed.
803 		 */
804 		VM_STAT_ADD(page_lookup_cnt[6]);
805 		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
806 		    ((volatile u_offset_t)(pp->p_offset) != off)) {
807 			VM_STAT_ADD(page_lookup_cnt[7]);
808 			if (hash_locked) {
809 				panic("page_lookup_create: lost page %p",
810 				    (void *)pp);
811 				/*NOTREACHED*/
812 			}
813 			page_unlock(pp);
814 			phm = PAGE_HASH_MUTEX(index);
815 			mutex_enter(phm);
816 			hash_locked = 1;
817 			goto top;
818 		}
819 
820 		/*
821 		 * If page_trylock() was called, then pp may still be on
822 		 * the cachelist (can't be on the free list, it would not
823 		 * have been found in the search).  If it is on the
824 		 * cachelist it must be pulled now. To pull the page from
825 		 * the cachelist, it must be exclusively locked.
826 		 *
827 		 * The other big difference between page_trylock() and
828 		 * page_lock(), is that page_lock() will pull the
829 		 * page from whatever free list (the cache list in this
830 		 * case) the page is on.  If page_trylock() was used
831 		 * above, then we have to do the reclaim ourselves.
832 		 */
833 		if ((!hash_locked) && (PP_ISFREE(pp))) {
834 			ASSERT(PP_ISAGED(pp) == 0);
835 			VM_STAT_ADD(page_lookup_cnt[8]);
836 
837 			/*
838 			 * page_reclaim will ensure that we
839 			 * have this page exclusively
840 			 */
841 
842 			if (!page_reclaim(pp, NULL)) {
843 				/*
844 				 * Page_reclaim dropped whatever lock
845 				 * we held.
846 				 */
847 				VM_STAT_ADD(page_lookup_cnt[9]);
848 				phm = PAGE_HASH_MUTEX(index);
849 				mutex_enter(phm);
850 				hash_locked = 1;
851 				goto top;
852 			} else if (se == SE_SHARED && newpp == NULL) {
853 				VM_STAT_ADD(page_lookup_cnt[10]);
854 				page_downgrade(pp);
855 			}
856 		}
857 
858 		if (hash_locked) {
859 			mutex_exit(phm);
860 		}
861 
862 		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
863 		    PAGE_EXCL(pp) && nrelocp != NULL) {
864 			ASSERT(nrelocp != NULL);
865 			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
866 			    NULL);
867 			if (*nrelocp > 0) {
868 				VM_STAT_COND_ADD(*nrelocp == 1,
869 				    page_lookup_cnt[11]);
870 				VM_STAT_COND_ADD(*nrelocp > 1,
871 				    page_lookup_cnt[12]);
872 				pp = newpp;
873 				se = SE_EXCL;
874 			} else {
875 				if (se == SE_SHARED) {
876 					page_downgrade(pp);
877 				}
878 				VM_STAT_ADD(page_lookup_cnt[13]);
879 			}
880 		} else if (newpp != NULL && nrelocp != NULL) {
881 			if (PAGE_EXCL(pp) && se == SE_SHARED) {
882 				page_downgrade(pp);
883 			}
884 			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
885 			    page_lookup_cnt[14]);
886 			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
887 			    page_lookup_cnt[15]);
888 			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
889 			    page_lookup_cnt[16]);
890 		} else if (newpp != NULL && PAGE_EXCL(pp)) {
891 			se = SE_EXCL;
892 		}
893 	} else if (!hash_locked) {
894 		VM_STAT_ADD(page_lookup_cnt[17]);
895 		phm = PAGE_HASH_MUTEX(index);
896 		mutex_enter(phm);
897 		hash_locked = 1;
898 		goto top;
899 	} else if (newpp != NULL) {
900 		/*
901 		 * If we have a preallocated page then
902 		 * insert it now and basically behave like
903 		 * page_create.
904 		 */
905 		VM_STAT_ADD(page_lookup_cnt[18]);
906 		/*
907 		 * Since we hold the page hash mutex and
908 		 * just searched for this page, page_hashin
909 		 * had better not fail.  If it does, that
910 		 * means some thread did not follow the
911 		 * page hash mutex rules.  Panic now and
912 		 * get it over with.  As usual, go down
913 		 * holding all the locks.
914 		 */
915 		ASSERT(MUTEX_HELD(phm));
916 		if (!page_hashin(newpp, vp, off, phm)) {
917 			ASSERT(MUTEX_HELD(phm));
918 			panic("page_lookup_create: hashin failed %p %p %llx %p",
919 			    (void *)newpp, (void *)vp, off, (void *)phm);
920 			/*NOTREACHED*/
921 		}
922 		ASSERT(MUTEX_HELD(phm));
923 		mutex_exit(phm);
924 		phm = NULL;
925 		page_set_props(newpp, P_REF);
926 		page_io_lock(newpp);
927 		pp = newpp;
928 		se = SE_EXCL;
929 	} else {
930 		VM_STAT_ADD(page_lookup_cnt[19]);
931 		mutex_exit(phm);
932 	}
933 
934 	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
935 
936 	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);
937 
938 	return (pp);
939 }
940 
941 /*
942  * Search the hash list for the page representing the
943  * specified [vp, offset] and return it locked.  Skip
944  * free pages and pages that cannot be locked as requested.
945  * Used while attempting to kluster pages.
946  */
947 page_t *
948 page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
949 {
950 	page_t		*pp;
951 	kmutex_t	*phm;
952 	ulong_t		index;
953 	uint_t		locked;
954 
955 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
956 	VM_STAT_ADD(page_lookup_nowait_cnt[0]);
957 
958 	index = PAGE_HASH_FUNC(vp, off);
959 	pp = page_hash_search(index, vp, off);
960 	locked = 0;
961 	if (pp == NULL) {
962 top:
963 		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
964 		locked = 1;
965 		phm = PAGE_HASH_MUTEX(index);
966 		mutex_enter(phm);
967 		pp = page_hash_search(index, vp, off);
968 	}
969 
970 	if (pp == NULL || PP_ISFREE(pp)) {
971 		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
972 		pp = NULL;
973 	} else {
974 		if (!page_trylock(pp, se)) {
975 			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
976 			pp = NULL;
977 		} else {
978 			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
979 			/*
980 			 * See the comment in page_lookup()
981 			 */
982 			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
983 			    ((u_offset_t)(pp->p_offset) != off)) {
984 				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
985 				if (locked) {
986 					panic("page_lookup_nowait %p",
987 					    (void *)pp);
988 					/*NOTREACHED*/
989 				}
990 				page_unlock(pp);
991 				goto top;
992 			}
993 			if (PP_ISFREE(pp)) {
994 				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
995 				page_unlock(pp);
996 				pp = NULL;
997 			}
998 		}
999 	}
1000 	if (locked) {
1001 		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
1002 		mutex_exit(phm);
1003 	}
1004 
1005 	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);
1006 
1007 	return (pp);
1008 }
1009 
1010 /*
1011  * Search the hash list for a page with the specified [vp, off]
1012  * that is known to exist and is already locked.  This routine
1013  * is typically used by segment SOFTUNLOCK routines.
1014  */
1015 page_t *
1016 page_find(vnode_t *vp, u_offset_t off)
1017 {
1018 	page_t		*pp;
1019 	kmutex_t	*phm;
1020 	ulong_t		index;
1021 
1022 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1023 	VM_STAT_ADD(page_find_cnt);
1024 
1025 	index = PAGE_HASH_FUNC(vp, off);
1026 	phm = PAGE_HASH_MUTEX(index);
1027 
1028 	mutex_enter(phm);
1029 	pp = page_hash_search(index, vp, off);
1030 	mutex_exit(phm);
1031 
1032 	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
1033 	return (pp);
1034 }
1035 
1036 /*
1037  * Determine whether a page with the specified [vp, off]
1038  * currently exists in the system.  Obviously this should
1039  * only be considered as a hint since nothing prevents the
1040  * page from disappearing or appearing immediately after
1041  * the return from this routine. Consequently, we don't
1042  * even bother to lock the list.
1043  */
1044 page_t *
1045 page_exists(vnode_t *vp, u_offset_t off)
1046 {
1047 	ulong_t		index;
1048 
1049 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1050 	VM_STAT_ADD(page_exists_cnt);
1051 
1052 	index = PAGE_HASH_FUNC(vp, off);
1053 
1054 	return (page_hash_search(index, vp, off));
1055 }
1056 
1057 /*
1058  * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
1059  * page_size(szc)) range.  If they exist and ppa is not NULL, fill the ppa
1060  * array with these pages locked SHARED. If necessary, reclaim pages from the
1061  * freelist. Return 1 if contiguous pages exist and 0 otherwise.
1062  *
1063  * If we fail to lock the pages, we still return 1 when they exist and are
1064  * contiguous, but then the return value is just a hint and the ppa array is
1065  * not filled. The caller should initialize ppa[0] to NULL to distinguish the cases.
1066  *
1067  * Returns 0 if pages don't exist or not physically contiguous.
1068  *
1069  * This routine doesn't work for anonymous(swapfs) pages.
1070  */
1071 int
1072 page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
1073 {
1074 	pgcnt_t pages;
1075 	pfn_t pfn;
1076 	page_t *rootpp;
1077 	pgcnt_t i;
1078 	pgcnt_t j;
1079 	u_offset_t save_off = off;
1080 	ulong_t index;
1081 	kmutex_t *phm;
1082 	page_t *pp;
1083 	uint_t pszc;
1084 	int loopcnt = 0;
1085 
1086 	ASSERT(szc != 0);
1087 	ASSERT(vp != NULL);
1088 	ASSERT(!IS_SWAPFSVP(vp));
1089 	ASSERT(!VN_ISKAS(vp));
1090 
1091 again:
1092 	if (++loopcnt > 3) {
1093 		VM_STAT_ADD(page_exphcontg[0]);
1094 		return (0);
1095 	}
1096 
1097 	index = PAGE_HASH_FUNC(vp, off);
1098 	phm = PAGE_HASH_MUTEX(index);
1099 
1100 	mutex_enter(phm);
1101 	pp = page_hash_search(index, vp, off);
1102 	mutex_exit(phm);
1103 
1104 	VM_STAT_ADD(page_exphcontg[1]);
1105 
1106 	if (pp == NULL) {
1107 		VM_STAT_ADD(page_exphcontg[2]);
1108 		return (0);
1109 	}
1110 
1111 	pages = page_get_pagecnt(szc);
1112 	rootpp = pp;
1113 	pfn = rootpp->p_pagenum;
1114 
1115 	if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
1116 		VM_STAT_ADD(page_exphcontg[3]);
1117 		if (!page_trylock(pp, SE_SHARED)) {
1118 			VM_STAT_ADD(page_exphcontg[4]);
1119 			return (1);
1120 		}
1121 		/*
1122 		 * Also check whether p_pagenum was modified by DR.
1123 		 */
1124 		if (pp->p_szc != pszc || pp->p_vnode != vp ||
1125 		    pp->p_offset != off || pp->p_pagenum != pfn) {
1126 			VM_STAT_ADD(page_exphcontg[5]);
1127 			page_unlock(pp);
1128 			off = save_off;
1129 			goto again;
1130 		}
1131 		/*
1132 		 * Since szc was non-zero and the vnode and offset matched
1133 		 * after we locked the page, it can't become free on us.
1134 		 */
1135 		ASSERT(!PP_ISFREE(pp));
1136 		if (!IS_P2ALIGNED(pfn, pages)) {
1137 			page_unlock(pp);
1138 			return (0);
1139 		}
1140 		ppa[0] = pp;
1141 		pp++;
1142 		off += PAGESIZE;
1143 		pfn++;
1144 		for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1145 			if (!page_trylock(pp, SE_SHARED)) {
1146 				VM_STAT_ADD(page_exphcontg[6]);
1147 				pp--;
1148 				while (i-- > 0) {
1149 					page_unlock(pp);
1150 					pp--;
1151 				}
1152 				ppa[0] = NULL;
1153 				return (1);
1154 			}
1155 			if (pp->p_szc != pszc) {
1156 				VM_STAT_ADD(page_exphcontg[7]);
1157 				page_unlock(pp);
1158 				pp--;
1159 				while (i-- > 0) {
1160 					page_unlock(pp);
1161 					pp--;
1162 				}
1163 				ppa[0] = NULL;
1164 				off = save_off;
1165 				goto again;
1166 			}
1167 			/*
1168 			 * The szc is the same as for the previous, already
1169 			 * locked pages with the right identity. Since this page
1170 			 * had the correct szc after we locked it, it can't get
1171 			 * freed or destroyed and must have the expected identity.
1172 			 */
1173 			ASSERT(!PP_ISFREE(pp));
1174 			if (pp->p_vnode != vp ||
1175 			    pp->p_offset != off) {
1176 				panic("page_exists_physcontig: "
1177 				    "large page identity doesn't match");
1178 			}
1179 			ppa[i] = pp;
1180 			ASSERT(pp->p_pagenum == pfn);
1181 		}
1182 		VM_STAT_ADD(page_exphcontg[8]);
1183 		ppa[pages] = NULL;
1184 		return (1);
1185 	} else if (pszc >= szc) {
1186 		VM_STAT_ADD(page_exphcontg[9]);
1187 		if (!IS_P2ALIGNED(pfn, pages)) {
1188 			return (0);
1189 		}
1190 		return (1);
1191 	}
1192 
1193 	if (!IS_P2ALIGNED(pfn, pages)) {
1194 		VM_STAT_ADD(page_exphcontg[10]);
1195 		return (0);
1196 	}
1197 
1198 	if (page_numtomemseg_nolock(pfn) !=
1199 	    page_numtomemseg_nolock(pfn + pages - 1)) {
1200 		VM_STAT_ADD(page_exphcontg[11]);
1201 		return (0);
1202 	}
1203 
1204 	/*
1205 	 * We loop across the pages 4 times to promote the page size.
1206 	 * We're extra cautious to promote page size atomically with respect
1207 	 * to everybody else.  But we can probably optimize into 1 loop if
1208 	 * this becomes an issue.
1209 	 */
1210 
1211 	for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
1212 		if (!page_trylock(pp, SE_EXCL)) {
1213 			VM_STAT_ADD(page_exphcontg[12]);
1214 			break;
1215 		}
1216 		/*
1217 		 * Check whether p_pagenum was modified by DR.
1218 		 */
1219 		if (pp->p_pagenum != pfn) {
1220 			page_unlock(pp);
1221 			break;
1222 		}
1223 		if (pp->p_vnode != vp ||
1224 		    pp->p_offset != off) {
1225 			VM_STAT_ADD(page_exphcontg[13]);
1226 			page_unlock(pp);
1227 			break;
1228 		}
1229 		if (pp->p_szc >= szc) {
1230 			ASSERT(i == 0);
1231 			page_unlock(pp);
1232 			off = save_off;
1233 			goto again;
1234 		}
1235 	}
1236 
1237 	if (i != pages) {
1238 		VM_STAT_ADD(page_exphcontg[14]);
1239 		--pp;
1240 		while (i-- > 0) {
1241 			page_unlock(pp);
1242 			--pp;
1243 		}
1244 		return (0);
1245 	}
1246 
1247 	pp = rootpp;
1248 	for (i = 0; i < pages; i++, pp++) {
1249 		if (PP_ISFREE(pp)) {
1250 			VM_STAT_ADD(page_exphcontg[15]);
1251 			ASSERT(!PP_ISAGED(pp));
1252 			ASSERT(pp->p_szc == 0);
1253 			if (!page_reclaim(pp, NULL)) {
1254 				break;
1255 			}
1256 		} else {
1257 			ASSERT(pp->p_szc < szc);
1258 			VM_STAT_ADD(page_exphcontg[16]);
1259 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
1260 		}
1261 	}
1262 	if (i < pages) {
1263 		VM_STAT_ADD(page_exphcontg[17]);
1264 		/*
1265 		 * page_reclaim failed because we were out of memory.
1266 		 * Drop the rest of the locks and return because this page
1267 		 * must already be reallocated anyway.
1268 		 */
1269 		pp = rootpp;
1270 		for (j = 0; j < pages; j++, pp++) {
1271 			if (j != i) {
1272 				page_unlock(pp);
1273 			}
1274 		}
1275 		return (0);
1276 	}
1277 
1278 	off = save_off;
1279 	pp = rootpp;
1280 	for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
1281 		ASSERT(PAGE_EXCL(pp));
1282 		ASSERT(!PP_ISFREE(pp));
1283 		ASSERT(!hat_page_is_mapped(pp));
1284 		ASSERT(pp->p_vnode == vp);
1285 		ASSERT(pp->p_offset == off);
1286 		pp->p_szc = szc;
1287 	}
1288 	pp = rootpp;
1289 	for (i = 0; i < pages; i++, pp++) {
1290 		if (ppa == NULL) {
1291 			page_unlock(pp);
1292 		} else {
1293 			ppa[i] = pp;
1294 			page_downgrade(ppa[i]);
1295 		}
1296 	}
1297 	if (ppa != NULL) {
1298 		ppa[pages] = NULL;
1299 	}
1300 	VM_STAT_ADD(page_exphcontg[18]);
1301 	ASSERT(vp->v_pages != NULL);
1302 	return (1);
1303 }
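
/*
 * Editorial sketch, not part of the original source: how a caller
 * might use page_exists_physcontig() under the contract described
 * above.  The ppa array needs pages + 1 slots (it is NULL terminated)
 * and ppa[0] is pre-set to NULL so that a return of 1 with
 * ppa[0] == NULL can be recognized as the unlocked "hint" case.
 */
#if 0	/* illustrative only; kept out of the build */
static void
physcontig_usage_sketch(vnode_t *vp, u_offset_t off, uint_t szc)
{
	pgcnt_t npgs = page_get_pagecnt(szc);
	page_t **ppa;
	pgcnt_t i;

	ppa = kmem_zalloc((npgs + 1) * sizeof (page_t *), KM_SLEEP);
	ppa[0] = NULL;
	if (page_exists_physcontig(vp, off, szc, ppa) && ppa[0] != NULL) {
		/* all npgs pages are locked SE_SHARED; use them, then: */
		for (i = 0; i < npgs; i++)
			page_unlock(ppa[i]);
	}
	kmem_free(ppa, (npgs + 1) * sizeof (page_t *));
}
#endif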
1304 
1305 /*
1306  * Determine whether a page with the specified [vp, off]
1307  * currently exists in the system and if so return its
1308  * size code. Obviously this should only be considered as
1309  * a hint since nothing prevents the page from disappearing
1310  * or appearing immediately after the return from this routine.
1311  */
1312 int
1313 page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
1314 {
1315 	page_t		*pp;
1316 	kmutex_t	*phm;
1317 	ulong_t		index;
1318 	int		rc = 0;
1319 
1320 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
1321 	ASSERT(szc != NULL);
1322 	VM_STAT_ADD(page_exists_forreal_cnt);
1323 
1324 	index = PAGE_HASH_FUNC(vp, off);
1325 	phm = PAGE_HASH_MUTEX(index);
1326 
1327 	mutex_enter(phm);
1328 	pp = page_hash_search(index, vp, off);
1329 	if (pp != NULL) {
1330 		*szc = pp->p_szc;
1331 		rc = 1;
1332 	}
1333 	mutex_exit(phm);
1334 	return (rc);
1335 }
1336 
1337 /* wakeup threads waiting for pages in page_create_get_something() */
1338 void
1339 wakeup_pcgs(void)
1340 {
1341 	if (!CV_HAS_WAITERS(&pcgs_cv))
1342 		return;
1343 	cv_broadcast(&pcgs_cv);
1344 }
1345 
1346 /*
1347  * 'freemem' is used all over the kernel as an indication of how many
1348  * pages are free (either on the cache list or on the free page list)
1349  * in the system.  In very few places is a really accurate 'freemem'
1350  * needed.  To avoid contention on the lock protecting the
1351  * single freemem, it was spread out into NCPU buckets.  set_freemem()
1352  * sets freemem to the total of all NCPU buckets.  It is called from
1353  * clock() on each TICK.
1354  */
1355 void
1356 set_freemem(void)
1357 {
1358 	struct pcf	*p;
1359 	ulong_t		t;
1360 	uint_t		i;
1361 
1362 	t = 0;
1363 	p = pcf;
1364 	for (i = 0;  i < pcf_fanout; i++) {
1365 		t += p->pcf_count;
1366 		p++;
1367 	}
1368 	freemem = t;
1369 
1370 	/*
1371 	 * Don't worry about grabbing mutex.  It's not that
1372 	 * critical if we miss a tick or two.  This is
1373 	 * where we wakeup possible delayers in
1374 	 * page_create_get_something().
1375 	 */
1376 	wakeup_pcgs();
1377 }
1378 
1379 ulong_t
1380 get_freemem()
1381 {
1382 	struct pcf	*p;
1383 	ulong_t		t;
1384 	uint_t		i;
1385 
1386 	t = 0;
1387 	p = pcf;
1388 	for (i = 0; i < pcf_fanout; i++) {
1389 		t += p->pcf_count;
1390 		p++;
1391 	}
1392 	/*
1393 	 * We just calculated it, might as well set it.
1394 	 */
1395 	freemem = t;
1396 	return (t);
1397 }
1398 
1399 /*
1400  * Acquire all of the page cache & free (pcf) locks.
1401  */
1402 void
1403 pcf_acquire_all()
1404 {
1405 	struct pcf	*p;
1406 	uint_t		i;
1407 
1408 	p = pcf;
1409 	for (i = 0; i < pcf_fanout; i++) {
1410 		mutex_enter(&p->pcf_lock);
1411 		p++;
1412 	}
1413 }
1414 
1415 /*
1416  * Release all the pcf_locks.
1417  */
1418 void
1419 pcf_release_all()
1420 {
1421 	struct pcf	*p;
1422 	uint_t		i;
1423 
1424 	p = pcf;
1425 	for (i = 0; i < pcf_fanout; i++) {
1426 		mutex_exit(&p->pcf_lock);
1427 		p++;
1428 	}
1429 }
1430 
1431 /*
1432  * Inform the VM system that we need some pages freed up.
1433  * Calls must be symmetric, e.g.:
1434  *
1435  *	page_needfree(100);
1436  *	wait a bit;
1437  *	page_needfree(-100);
1438  */
1439 void
1440 page_needfree(spgcnt_t npages)
1441 {
1442 	mutex_enter(&new_freemem_lock);
1443 	needfree += npages;
1444 	mutex_exit(&new_freemem_lock);
1445 }
1446 
1447 /*
1448  * Throttle for page_create(): try to prevent freemem from dropping
1449  * below throttlefree.  We can't provide a 100% guarantee because
1450  * KM_NOSLEEP allocations, page_reclaim(), and various other things
1451  * nibble away at the freelist.  However, we can block all PG_WAIT
1452  * allocations until memory becomes available.  The motivation is
1453  * that several things can fall apart when there's no free memory:
1454  *
1455  * (1) If pageout() needs memory to push a page, the system deadlocks.
1456  *
1457  * (2) By (broken) specification, timeout(9F) can neither fail nor
1458  *     block, so it has no choice but to panic the system if it
1459  *     cannot allocate a callout structure.
1460  *
1461  * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
1462  *     it panics if it cannot allocate a callback structure.
1463  *
1464  * (4) Untold numbers of third-party drivers have not yet been hardened
1465  *     against KM_NOSLEEP and/or allocb() failures; they simply assume
1466  *     success and panic the system with a data fault on failure.
1467  *     (The long-term solution to this particular problem is to ship
1468  *     hostile fault-injecting DEBUG kernels with the DDK.)
1469  *
1470  * It is theoretically impossible to guarantee success of non-blocking
1471  * allocations, but in practice, this throttle is very hard to break.
1472  */
1473 static int
1474 page_create_throttle(pgcnt_t npages, int flags)
1475 {
1476 	ulong_t	fm;
1477 	uint_t	i;
1478 	pgcnt_t tf;	/* effective value of throttlefree */
1479 
1480 	/*
1481 	 * Normal priority allocations.
1482 	 */
1483 	if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
1484 		ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
1485 		return (freemem >= npages + throttlefree);
1486 	}
1487 
1488 	/*
1489 	 * Never deny pages when:
1490 	 * - it's a thread that cannot block [NOMEMWAIT()]
1491 	 * - the allocation cannot block and must not fail
1492 	 * - the allocation cannot block and has pageout's dispensation (PG_PUSHPAGE)
1493 	 */
1494 	if (NOMEMWAIT() ||
1495 	    ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
1496 	    ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
1497 		return (1);
1498 
1499 	/*
1500 	 * If the allocation can't block, we look favorably upon it
1501 	 * unless we're below pageout_reserve.  In that case we fail
1502 	 * the allocation because we want to make sure there are a few
1503 	 * pages available for pageout.
1504 	 */
1505 	if ((flags & PG_WAIT) == 0)
1506 		return (freemem >= npages + pageout_reserve);
1507 
1508 	/* Calculate the effective throttlefree value */
1509 	tf = throttlefree -
1510 	    ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);
1511 
1512 	cv_signal(&proc_pageout->p_cv);
1513 
1514 	for (;;) {
1515 		fm = 0;
1516 		pcf_acquire_all();
1517 		mutex_enter(&new_freemem_lock);
1518 		for (i = 0; i < pcf_fanout; i++) {
1519 			fm += pcf[i].pcf_count;
1520 			pcf[i].pcf_wait++;
1521 			mutex_exit(&pcf[i].pcf_lock);
1522 		}
1523 		freemem = fm;
1524 		if (freemem >= npages + tf) {
1525 			mutex_exit(&new_freemem_lock);
1526 			break;
1527 		}
1528 		needfree += npages;
1529 		freemem_wait++;
1530 		cv_wait(&freemem_cv, &new_freemem_lock);
1531 		freemem_wait--;
1532 		needfree -= npages;
1533 		mutex_exit(&new_freemem_lock);
1534 	}
1535 	return (1);
1536 }
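
/*
 * Editorial note, not part of the original source: a concrete reading
 * of the thresholds above, assuming throttlefree == 1000 and
 * pageout_reserve == 500.  A PG_NORMALPRI (non-blocking) request
 * succeeds only if freemem >= npages + 1000; other non-blocking
 * requests need freemem >= npages + 500; a plain PG_WAIT request
 * sleeps until freemem >= npages + 1000; and PG_WAIT | PG_PUSHPAGE
 * sleeps only until freemem >= npages + 500.
 */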
1537 
1538 /*
1539  * page_create_wait() is called to either coalesce pages from the
1540  * different pcf buckets or to wait because there simply are not
1541  * enough pages to satisfy the caller's request.
1542  *
1543  * Sadly, this is called from platform/vm/vm_machdep.c
1544  */
1545 int
1546 page_create_wait(pgcnt_t npages, uint_t flags)
1547 {
1548 	pgcnt_t		total;
1549 	uint_t		i;
1550 	struct pcf	*p;
1551 
1552 	/*
1553 	 * Wait until there are enough free pages to satisfy our
1554 	 * entire request.
1555 	 * We set needfree += npages before prodding pageout, to make sure
1556 	 * it does real work when npages > lotsfree > freemem.
1557 	 */
1558 	VM_STAT_ADD(page_create_not_enough);
1559 
1560 	ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
1561 checkagain:
1562 	if ((flags & PG_NORELOC) &&
1563 	    kcage_freemem < kcage_throttlefree + npages)
1564 		(void) kcage_create_throttle(npages, flags);
1565 
1566 	if (freemem < npages + throttlefree)
1567 		if (!page_create_throttle(npages, flags))
1568 			return (0);
1569 
1570 	if (pcf_decrement_bucket(npages) ||
1571 	    pcf_decrement_multiple(&total, npages, 0))
1572 		return (1);
1573 
1574 	/*
1575 	 * All of the pcf locks are held, there are not enough pages
1576 	 * to satisfy the request (total < npages).
1577 	 * Be sure to acquire the new_freemem_lock before dropping
1578 	 * the pcf locks.  This prevents dropping wakeups in page_free().
1579 	 * The order is always pcf_lock then new_freemem_lock.
1580 	 *
1581 	 * Since we hold all the pcf locks, it is a good time to set freemem.
1582 	 *
1583 	 * If the caller does not want to wait, return now.
1584 	 * Else turn the pageout daemon loose to find something
1585 	 * and wait till it does.
1586 	 *
1587 	 */
1588 	freemem = total;
1589 
1590 	if ((flags & PG_WAIT) == 0) {
1591 		pcf_release_all();
1592 
1593 		TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
1594 		"page_create_nomem:npages %ld freemem %ld", npages, freemem);
1595 		return (0);
1596 	}
1597 
1598 	ASSERT(proc_pageout != NULL);
1599 	cv_signal(&proc_pageout->p_cv);
1600 
1601 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
1602 	    "page_create_sleep_start: freemem %ld needfree %ld",
1603 	    freemem, needfree);
1604 
1605 	/*
1606 	 * We are going to wait.
1607 	 * We currently hold all of the pcf_locks,
1608 	 * get the new_freemem_lock (it protects freemem_wait),
1609 	 * before dropping the pcf_locks.
1610 	 */
1611 	mutex_enter(&new_freemem_lock);
1612 
1613 	p = pcf;
1614 	for (i = 0; i < pcf_fanout; i++) {
1615 		p->pcf_wait++;
1616 		mutex_exit(&p->pcf_lock);
1617 		p++;
1618 	}
1619 
1620 	needfree += npages;
1621 	freemem_wait++;
1622 
1623 	cv_wait(&freemem_cv, &new_freemem_lock);
1624 
1625 	freemem_wait--;
1626 	needfree -= npages;
1627 
1628 	mutex_exit(&new_freemem_lock);
1629 
1630 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
1631 	    "page_create_sleep_end: freemem %ld needfree %ld",
1632 	    freemem, needfree);
1633 
1634 	VM_STAT_ADD(page_create_not_enough_again);
1635 	goto checkagain;
1636 }
1637 /*
1638  * A routine to do the opposite of page_create_wait().
1639  */
1640 void
1641 page_create_putback(spgcnt_t npages)
1642 {
1643 	struct pcf	*p;
1644 	pgcnt_t		lump;
1645 	uint_t		*which;
1646 
1647 	/*
1648 	 * When a contiguous lump is broken up, we have to
1649 	 * deal with lots of pages (min 64) so let's spread
1650 	 * the wealth around.
1651 	 */
1652 	lump = roundup(npages, pcf_fanout) / pcf_fanout;
1653 	freemem += npages;
1654 
1655 	for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
1656 		which = &p->pcf_count;
1657 
1658 		mutex_enter(&p->pcf_lock);
1659 
1660 		if (p->pcf_block) {
1661 			which = &p->pcf_reserve;
1662 		}
1663 
1664 		if (lump < npages) {
1665 			*which += (uint_t)lump;
1666 			npages -= lump;
1667 		} else {
1668 			*which += (uint_t)npages;
1669 			npages = 0;
1670 		}
1671 
1672 		if (p->pcf_wait) {
1673 			mutex_enter(&new_freemem_lock);
1674 			/*
1675 			 * Check to see if some other thread
1676 			 * is actually waiting.  Another bucket
1677 			 * may have woken it up by now.  If there
1678 			 * are no waiters, then set our pcf_wait
1679 			 * count to zero to avoid coming in here
1680 			 * next time.
1681 			 */
1682 			if (freemem_wait) {
1683 				if (npages > 1) {
1684 					cv_broadcast(&freemem_cv);
1685 				} else {
1686 					cv_signal(&freemem_cv);
1687 				}
1688 				p->pcf_wait--;
1689 			} else {
1690 				p->pcf_wait = 0;
1691 			}
1692 			mutex_exit(&new_freemem_lock);
1693 		}
1694 		mutex_exit(&p->pcf_lock);
1695 	}
1696 	ASSERT(npages == 0);
1697 }
1698 
1699 /*
1700  * A helper routine for page_create_get_something.
1701  * The indenting got too deep down there.
1702  * Unblock the pcf counters.  Any pages freed after
1703  * pcf_block got set are moved to pcf_count and
1704  * wakeups (cv_broadcast() or cv_signal()) are done as needed.
1705  */
1706 static void
1707 pcgs_unblock(void)
1708 {
1709 	int		i;
1710 	struct pcf	*p;
1711 
1712 	/* Update freemem while we're here. */
1713 	freemem = 0;
1714 	p = pcf;
1715 	for (i = 0; i < pcf_fanout; i++) {
1716 		mutex_enter(&p->pcf_lock);
1717 		ASSERT(p->pcf_count == 0);
1718 		p->pcf_count = p->pcf_reserve;
1719 		p->pcf_block = 0;
1720 		freemem += p->pcf_count;
1721 		if (p->pcf_wait) {
1722 			mutex_enter(&new_freemem_lock);
1723 			if (freemem_wait) {
1724 				if (p->pcf_reserve > 1) {
1725 					cv_broadcast(&freemem_cv);
1726 					p->pcf_wait = 0;
1727 				} else {
1728 					cv_signal(&freemem_cv);
1729 					p->pcf_wait--;
1730 				}
1731 			} else {
1732 				p->pcf_wait = 0;
1733 			}
1734 			mutex_exit(&new_freemem_lock);
1735 		}
1736 		p->pcf_reserve = 0;
1737 		mutex_exit(&p->pcf_lock);
1738 		p++;
1739 	}
1740 }
1741 
1742 /*
1743  * Called from page_create_va() when both the cache and free lists
1744  * have been checked once.
1745  *
1746  * Either returns a page or panics since the accounting was done
1747  * way before we got here.
1748  *
1749  * We don't come here often, so leave the accounting on permanently.
1750  */
1751 
1752 #define	MAX_PCGS	100
1753 
1754 #ifdef	DEBUG
1755 #define	PCGS_TRIES	100
1756 #else	/* DEBUG */
1757 #define	PCGS_TRIES	10
1758 #endif	/* DEBUG */
1759 
1760 #ifdef	VM_STATS
1761 uint_t	pcgs_counts[PCGS_TRIES];
1762 uint_t	pcgs_too_many;
1763 uint_t	pcgs_entered;
1764 uint_t	pcgs_entered_noreloc;
1765 uint_t	pcgs_locked;
1766 uint_t	pcgs_cagelocked;
1767 #endif	/* VM_STATS */
1768 
1769 static page_t *
1770 page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
1771     caddr_t vaddr, uint_t flags)
1772 {
1773 	uint_t		count;
1774 	page_t		*pp;
1775 	uint_t		locked, i;
1776 	struct	pcf	*p;
1777 	lgrp_t		*lgrp;
1778 	int		cagelocked = 0;
1779 
1780 	VM_STAT_ADD(pcgs_entered);
1781 
1782 	/*
1783 	 * Tap any reserve freelists: if we fail now, we'll die
1784 	 * since the page(s) we're looking for have already been
1785 	 * accounted for.
1786 	 */
1787 	flags |= PG_PANIC;
1788 
1789 	if ((flags & PG_NORELOC) != 0) {
1790 		VM_STAT_ADD(pcgs_entered_noreloc);
1791 		/*
1792 		 * Requests for free pages from critical threads
1793 		 * such as pageout still won't throttle here, but
1794 		 * we must try again, to give the cageout thread
1795 		 * another chance to catch up. Since we already
1796 		 * accounted for the pages, we had better get them
1797 		 * this time.
1798 		 *
1799 		 * N.B. All non-critical threads acquire the pcgs_cagelock
1800 		 * to serialize access to the freelists. This implements a
1801 		 * turnstile-type synchronization to avoid starvation of
1802 		 * critical requests for PG_NORELOC memory by non-critical
1803 		 * threads: all non-critical threads must acquire a 'ticket'
1804 		 * before passing through, which entails making sure
1805 		 * kcage_freemem won't fall below minfree prior to grabbing
1806 		 * pages from the freelists.
1807 		 */
1808 		if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
1809 			mutex_enter(&pcgs_cagelock);
1810 			cagelocked = 1;
1811 			VM_STAT_ADD(pcgs_cagelocked);
1812 		}
1813 	}
1814 
1815 	/*
1816 	 * Time to get serious.
1817 	 * We failed to get a `correctly colored' page from both the
1818 	 * free and cache lists.
1819 	 * We escalate in stage.
1820 	 *
1821 	 * First try both lists without worrying about color.
1822 	 *
1823 	 * Then, grab all page accounting locks (i.e. pcf[]) and
1824 	 * steal any pages that they have and set the pcf_block flag to
1825 	 * stop deletions from the lists.  This will help because
1826 	 * a page can get added to the free list while we are looking
1827 	 * at the cache list, then another page could be added to the cache
1828 	 * list allowing the page on the free list to be removed as we
1829 	 * move from looking at the cache list to the free list. This
1830 	 * could happen over and over. We would never find the page
1831 	 * we have accounted for.
1832 	 *
1833 	 * Noreloc pages are a subset of the global (relocatable) page pool.
1834 	 * They are not tracked separately in the pcf bins, so it is
1835 	 * impossible to know when doing pcf accounting if the available
1836 	 * page(s) are noreloc pages or not. When looking for a noreloc page
1837 	 * it is quite easy to end up here even if the global (relocatable)
1838 	 * page pool has plenty of free pages but the noreloc pool is empty.
1839 	 *
1840 	 * When the noreloc pool is empty (or low), additional noreloc pages
1841 	 * are created by converting pages from the global page pool. This
1842 	 * process will stall during pcf accounting if the pcf bins are
1843 	 * already locked. Such is the case when a noreloc allocation is
1844 	 * looping here in page_create_get_something waiting for more noreloc
1845 	 * pages to appear.
1846 	 *
1847 	 * Short of adding a new field to the pcf bins to accurately track
1848 	 * the number of free noreloc pages, we instead do not grab the
1849 	 * pcgs_lock, do not set the pcf blocks and do not timeout when
1850 	 * allocating a noreloc page. This allows noreloc allocations to
1851 	 * loop without blocking global page pool allocations.
1852 	 *
1853 	 * NOTE: the behaviour of page_create_get_something has not changed
1854 	 * for the case of global page pool allocations.
1855 	 */
1856 
1857 	flags &= ~PG_MATCH_COLOR;
1858 	locked = 0;
1859 #if defined(__i386) || defined(__amd64)
1860 	flags = page_create_update_flags_x86(flags);
1861 #endif
1862 
1863 	lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
1864 
1865 	for (count = 0; kcage_on || count < MAX_PCGS; count++) {
1866 		pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
1867 		    flags, lgrp);
1868 		if (pp == NULL) {
1869 			pp = page_get_cachelist(vp, off, seg, vaddr,
1870 			    flags, lgrp);
1871 		}
1872 		if (pp == NULL) {
1873 			/*
1874 			 * Serialize.  Don't fight with other pcgs().
1875 			 */
1876 			if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
1877 				mutex_enter(&pcgs_lock);
1878 				VM_STAT_ADD(pcgs_locked);
1879 				locked = 1;
1880 				p = pcf;
1881 				for (i = 0; i < pcf_fanout; i++) {
1882 					mutex_enter(&p->pcf_lock);
1883 					ASSERT(p->pcf_block == 0);
1884 					p->pcf_block = 1;
1885 					p->pcf_reserve = p->pcf_count;
1886 					p->pcf_count = 0;
1887 					mutex_exit(&p->pcf_lock);
1888 					p++;
1889 				}
1890 				freemem = 0;
1891 			}
1892 
1893 			if (count) {
1894 				/*
1895 				 * Since page_free() puts pages on
1896 				 * a list then accounts for it, we
1897 				 * just have to wait for page_free()
1898 				 * to unlock any page it was working
1899 				 * with. The page_lock()-page_reclaim()
1900 				 * path falls in the same boat.
1901 				 *
1902 				 * We don't need to check on the
1903 				 * PG_WAIT flag, we have already
1904 				 * accounted for the page we are
1905 				 * looking for in page_create_va().
1906 				 *
1907 				 * We just wait a moment to let any
1908 				 * locked pages on the lists free up,
1909 				 * then continue around and try again.
1910 				 *
1911 				 * Will be awakened by set_freemem().
1912 				 */
1913 				mutex_enter(&pcgs_wait_lock);
1914 				cv_wait(&pcgs_cv, &pcgs_wait_lock);
1915 				mutex_exit(&pcgs_wait_lock);
1916 			}
1917 		} else {
1918 #ifdef VM_STATS
1919 			if (count >= PCGS_TRIES) {
1920 				VM_STAT_ADD(pcgs_too_many);
1921 			} else {
1922 				VM_STAT_ADD(pcgs_counts[count]);
1923 			}
1924 #endif
1925 			if (locked) {
1926 				pcgs_unblock();
1927 				mutex_exit(&pcgs_lock);
1928 			}
1929 			if (cagelocked)
1930 				mutex_exit(&pcgs_cagelock);
1931 			return (pp);
1932 		}
1933 	}
1934 	/*
1935 	 * we go down holding the pcf locks.
1936 	 */
1937 	panic("no %spage found %d",
1938 	    ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
1939 	/*NOTREACHED*/
1940 }
1941 
1942 /*
1943  * Create enough pages for "bytes" worth of data starting at
1944  * "off" in "vp".
1945  *
1946  *	Where flag must be one of:
1947  *
1948  *		PG_EXCL:	Exclusive create (fail if any page already
1949  *				exists in the page cache) which does not
1950  *				wait for memory to become available.
1951  *
1952  *		PG_WAIT:	Non-exclusive create which can wait for
1953  *				memory to become available.
1954  *
1955  *		PG_PHYSCONTIG:	Allocate physically contiguous pages.
1956  *				(Not Supported)
1957  *
1958  * A doubly linked list of pages is returned to the caller.  Each page
1959  * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
1960  * lock.
1961  *
1962  * Unable to change the parameters to page_create() in a minor release,
1963  * we renamed page_create() to page_create_va(), changed all known calls
1964  * from page_create() to page_create_va(), and created this wrapper.
1965  *
1966  * Upon a major release, we should break compatibility by deleting this
1967  * wrapper, and replacing all the strings "page_create_va", with "page_create".
1968  *
1969  * NOTE: There is a copy of this interface as page_create_io() in
1970  *	 i86/vm/vm_machdep.c. Any bugs fixed here should be applied
1971  *	 there.
1972  */
1973 page_t *
1974 page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
1975 {
1976 	caddr_t random_vaddr;
1977 	struct seg kseg;
1978 
1979 #ifdef DEBUG
1980 	cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
1981 	    (void *)caller());
1982 #endif
1983 
1984 	random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
1985 	    (uintptr_t)(off >> PAGESHIFT));
1986 	kseg.s_as = &kas;
1987 
1988 	return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
1989 }
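
/*
 * Illustrative sketch, not part of the original source: how a caller
 * migrates from the deprecated page_create() wrapper above to
 * page_create_va().  The helper name is hypothetical; real callers
 * supply their own vnode, offset, segment, and address.
 */
#if 0	/* example only -- not compiled */
static page_t *
example_create_one(vnode_t *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr)
{
	/* Deprecated; warns under DEBUG and invents a vaddr itself: */
	/* return (page_create(vp, off, PAGESIZE, PG_WAIT)); */

	/* Preferred: supply seg/vaddr so lgroup placement can be done. */
	return (page_create_va(vp, off, PAGESIZE, PG_WAIT, seg, vaddr));
}
#endif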
1990 
1991 #ifdef DEBUG
1992 uint32_t pg_alloc_pgs_mtbf = 0;
1993 #endif
1994 
1995 /*
1996  * Used for large page support. It will attempt to allocate
1997  * one or more large pages off the freelist.
1998  *
1999  * Returns nonzero on failure.
2000  */
2001 int
2002 page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
2003     page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags)
2004 {
2005 	pgcnt_t		npgs, curnpgs, totpgs;
2006 	size_t		pgsz;
2007 	page_t		*pplist = NULL, *pp;
2008 	int		err = 0;
2009 	lgrp_t		*lgrp;
2010 
2011 	ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
2012 	ASSERT(pgflags == 0 || pgflags == PG_LOCAL);
2013 
2014 	/*
2015 	 * Check if system heavily prefers local large pages over remote
2016 	 * on systems with multiple lgroups.
2017 	 */
2018 	if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) {
2019 		pgflags = PG_LOCAL;
2020 	}
2021 
2022 	VM_STAT_ADD(alloc_pages[0]);
2023 
2024 #ifdef DEBUG
2025 	if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
2026 		return (ENOMEM);
2027 	}
2028 #endif
2029 
2030 	/*
2031 	 * Exactly one of basepp and ppa must be non-NULL,
2032 	 * and the other must be NULL.
2033 	 */
2034 	ASSERT(basepp != NULL || ppa != NULL);
2035 	ASSERT(basepp == NULL || ppa == NULL);
2036 
2037 #if defined(__i386) || defined(__amd64)
2038 	while (page_chk_freelist(szc) == 0) {
2039 		VM_STAT_ADD(alloc_pages[8]);
2040 		if (anypgsz == 0 || --szc == 0)
2041 			return (ENOMEM);
2042 	}
2043 #endif
2044 
2045 	pgsz = page_get_pagesize(szc);
2046 	totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;
2047 
2048 	ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
2049 
2050 	(void) page_create_wait(npgs, PG_WAIT);
2051 
2052 	while (npgs && szc) {
2053 		lgrp = lgrp_mem_choose(seg, addr, pgsz);
2054 		if (pgflags == PG_LOCAL) {
2055 			pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2056 			    pgflags, lgrp);
2057 			if (pp == NULL) {
2058 				pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2059 				    0, lgrp);
2060 			}
2061 		} else {
2062 			pp = page_get_freelist(vp, 0, seg, addr, pgsz,
2063 			    0, lgrp);
2064 		}
2065 		if (pp != NULL) {
2066 			VM_STAT_ADD(alloc_pages[1]);
2067 			page_list_concat(&pplist, &pp);
2068 			ASSERT(npgs >= curnpgs);
2069 			npgs -= curnpgs;
2070 		} else if (anypgsz) {
2071 			VM_STAT_ADD(alloc_pages[2]);
2072 			szc--;
2073 			pgsz = page_get_pagesize(szc);
2074 			curnpgs = pgsz >> PAGESHIFT;
2075 		} else {
2076 			VM_STAT_ADD(alloc_pages[3]);
2077 			ASSERT(npgs == totpgs);
2078 			page_create_putback(npgs);
2079 			return (ENOMEM);
2080 		}
2081 	}
2082 	if (szc == 0) {
2083 		VM_STAT_ADD(alloc_pages[4]);
2084 		ASSERT(npgs != 0);
2085 		page_create_putback(npgs);
2086 		err = ENOMEM;
2087 	} else if (basepp != NULL) {
2088 		ASSERT(npgs == 0);
2089 		ASSERT(ppa == NULL);
2090 		*basepp = pplist;
2091 	}
2092 
2093 	npgs = totpgs - npgs;
2094 	pp = pplist;
2095 
2096 	/*
2097 	 * Clear the free and age bits. Also, if we were passed in a ppa,
2098 	 * fill it in with all the constituent pages from the large page. But
2099 	 * if we failed to allocate all the pages, just free what we got.
2100 	 */
2101 	while (npgs != 0) {
2102 		ASSERT(PP_ISFREE(pp));
2103 		ASSERT(PP_ISAGED(pp));
2104 		if (ppa != NULL || err != 0) {
2105 			if (err == 0) {
2106 				VM_STAT_ADD(alloc_pages[5]);
2107 				PP_CLRFREE(pp);
2108 				PP_CLRAGED(pp);
2109 				page_sub(&pplist, pp);
2110 				*ppa++ = pp;
2111 				npgs--;
2112 			} else {
2113 				VM_STAT_ADD(alloc_pages[6]);
2114 				ASSERT(pp->p_szc != 0);
2115 				curnpgs = page_get_pagecnt(pp->p_szc);
2116 				page_list_break(&pp, &pplist, curnpgs);
2117 				page_list_add_pages(pp, 0);
2118 				page_create_putback(curnpgs);
2119 				ASSERT(npgs >= curnpgs);
2120 				npgs -= curnpgs;
2121 			}
2122 			pp = pplist;
2123 		} else {
2124 			VM_STAT_ADD(alloc_pages[7]);
2125 			PP_CLRFREE(pp);
2126 			PP_CLRAGED(pp);
2127 			pp = pp->p_next;
2128 			npgs--;
2129 		}
2130 	}
2131 	return (err);
2132 }
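
/*
 * Illustrative sketch, not part of the original source: allocating the
 * constituent pages of one large page through the ppa form of
 * page_alloc_pages().  The szc value of 1 and the helper name are
 * assumptions; a real caller (e.g. segvn) derives szc from its mapping.
 * Exactly one of basepp/ppa may be non-NULL, so basepp is NULL here.
 */
#if 0	/* example only -- not compiled */
static int
example_alloc_large(struct vnode *vp, struct seg *seg, caddr_t addr)
{
	uint_t	szc = 1;			/* assumed large page size */
	pgcnt_t	npgs = page_get_pagecnt(szc);
	page_t	**ppa = kmem_zalloc(npgs * sizeof (page_t *), KM_SLEEP);
	int	err;

	/* anypgsz != 0 permits falling back to smaller page sizes. */
	err = page_alloc_pages(vp, seg, addr, NULL, ppa, szc, 1, 0);
	if (err == 0) {
		/* ... map/use the npgs pages now held in ppa ... */
	}
	kmem_free(ppa, npgs * sizeof (page_t *));
	return (err);				/* nonzero means failure */
}
#endif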
2133 
2134 /*
2135  * Get a single large page off of the freelists, and set it up for use.
2136  * Number of bytes requested must be a supported page size.
2137  *
2138  * Note that this call may fail even if there is sufficient
2139  * memory available or PG_WAIT is set, so the caller must
2140  * be willing to fall back on page_create_va(), block and retry,
2141  * or fail the requester.
2142  */
2143 page_t *
2144 page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
2145     struct seg *seg, caddr_t vaddr, void *arg)
2146 {
2147 	pgcnt_t		npages;
2148 	page_t		*pp;
2149 	page_t		*rootpp;
2150 	lgrp_t		*lgrp;
2151 	lgrp_id_t	*lgrpid = (lgrp_id_t *)arg;
2152 
2153 	ASSERT(vp != NULL);
2154 
2155 	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
2156 	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
2157 	/* but no others */
2158 
2159 	ASSERT((flags & PG_EXCL) == PG_EXCL);
2160 
2161 	npages = btop(bytes);
2162 
2163 	if (!kcage_on || panicstr) {
2164 		/*
2165 		 * Cage is OFF, or we are single threaded in
2166 		 * panic, so make everything a RELOC request.
2167 		 */
2168 		flags &= ~PG_NORELOC;
2169 	}
2170 
2171 	/*
2172 	 * Make sure there's adequate physical memory available.
2173 	 * Note: PG_WAIT is ignored here.
2174 	 */
2175 	if (freemem <= throttlefree + npages) {
2176 		VM_STAT_ADD(page_create_large_cnt[1]);
2177 		return (NULL);
2178 	}
2179 
2180 	/*
2181 	 * If cage is on, dampen draw from cage when available
2182 	 * cage space is low.
2183 	 */
2184 	if ((flags & (PG_NORELOC | PG_WAIT)) ==  (PG_NORELOC | PG_WAIT) &&
2185 	    kcage_freemem < kcage_throttlefree + npages) {
2186 
2187 		/*
2188 		 * The cage is on, the caller wants PG_NORELOC
2189 		 * pages and available cage memory is very low.
2190 		 * Call kcage_create_throttle() to attempt to
2191 		 * control demand on the cage.
2192 		 */
2193 		if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
2194 			VM_STAT_ADD(page_create_large_cnt[2]);
2195 			return (NULL);
2196 		}
2197 	}
2198 
2199 	if (!pcf_decrement_bucket(npages) &&
2200 	    !pcf_decrement_multiple(NULL, npages, 1)) {
2201 		VM_STAT_ADD(page_create_large_cnt[4]);
2202 		return (NULL);
2203 	}
2204 
2205 	/*
2206 	 * This is where this function behaves fundamentally differently
2207 	 * than page_create_va(); since we're intending to map the page
2208 	 * with a single TTE, we have to get it as a physically contiguous
2209 	 * hardware pagesize chunk.  If we can't, we fail.
2210 	 */
2211 	if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
2212 	    LGRP_EXISTS(lgrp_table[*lgrpid]))
2213 		lgrp = lgrp_table[*lgrpid];
2214 	else
2215 		lgrp = lgrp_mem_choose(seg, vaddr, bytes);
2216 
2217 	if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr,
2218 	    bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
2219 		page_create_putback(npages);
2220 		VM_STAT_ADD(page_create_large_cnt[5]);
2221 		return (NULL);
2222 	}
2223 
2224 	/*
2225 	 * If we got the page with the wrong mtype, give it back; this is a
2226 	 * workaround for CR 6249718. Once CR 6249718 is fixed, we will never
2227 	 * get inside the "if" and the workaround becomes just a nop.
2228 	 */
2229 	if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) {
2230 		page_list_add_pages(rootpp, 0);
2231 		page_create_putback(npages);
2232 		VM_STAT_ADD(page_create_large_cnt[6]);
2233 		return (NULL);
2234 	}
2235 
2236 	/*
2237 	 * If satisfying this request has left us with too little
2238 	 * memory, start the wheels turning to get some back.  The
2239 	 * first clause of the test prevents waking up the pageout
2240 	 * daemon in situations where it would decide that there's
2241 	 * nothing to do.
2242 	 */
2243 	if (nscan < desscan && freemem < minfree) {
2244 		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
2245 		    "pageout_cv_signal:freemem %ld", freemem);
2246 		cv_signal(&proc_pageout->p_cv);
2247 	}
2248 
2249 	pp = rootpp;
2250 	while (npages--) {
2251 		ASSERT(PAGE_EXCL(pp));
2252 		ASSERT(pp->p_vnode == NULL);
2253 		ASSERT(!hat_page_is_mapped(pp));
2254 		PP_CLRFREE(pp);
2255 		PP_CLRAGED(pp);
2256 		if (!page_hashin(pp, vp, off, NULL))
2257 			panic("page_create_large: hashin failed: page %p",
2258 			    (void *)pp);
2259 		page_io_lock(pp);
2260 		off += PAGESIZE;
2261 		pp = pp->p_next;
2262 	}
2263 
2264 	VM_STAT_ADD(page_create_large_cnt[0]);
2265 	return (rootpp);
2266 }
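
/*
 * Illustrative sketch, not part of the original source: the fallback
 * pattern the block comment above page_create_va_large() calls for --
 * try for the single large page first and, since that may fail even
 * with memory available, fall back to page_create_va().  The helper
 * name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static page_t *
example_create_large(vnode_t *vp, u_offset_t off, size_t bytes,
    struct seg *seg, caddr_t vaddr)
{
	page_t *pp;

	/* PG_EXCL is required by page_create_va_large(). */
	pp = page_create_va_large(vp, off, bytes, PG_EXCL, seg, vaddr, NULL);
	if (pp == NULL) {
		/* Fall back to ordinary PAGESIZE pages. */
		pp = page_create_va(vp, off, bytes, PG_EXCL | PG_WAIT,
		    seg, vaddr);
	}
	return (pp);
}
#endif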
2267 
2268 page_t *
2269 page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
2270     struct seg *seg, caddr_t vaddr)
2271 {
2272 	page_t		*plist = NULL;
2273 	pgcnt_t		npages;
2274 	pgcnt_t		found_on_free = 0;
2275 	pgcnt_t		pages_req;
2276 	page_t		*npp = NULL;
2277 	struct pcf	*p;
2278 	lgrp_t		*lgrp;
2279 
2280 	TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
2281 	    "page_create_start:vp %p off %llx bytes %lu flags %x",
2282 	    vp, off, bytes, flags);
2283 
2284 	ASSERT(bytes != 0 && vp != NULL);
2285 
2286 	if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) {
2287 		panic("page_create: invalid flags");
2288 		/*NOTREACHED*/
2289 	}
2290 	ASSERT((flags & ~(PG_EXCL | PG_WAIT |
2291 	    PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
2292 	    /* but no others */
2293 
2294 	pages_req = npages = btopr(bytes);
2295 	/*
2296 	 * Try to see whether request is too large to *ever* be
2297 	 * satisfied, in order to prevent deadlock.  We arbitrarily
2298 	 * decide to limit maximum size requests to max_page_get.
2299 	 */
2300 	if (npages >= max_page_get) {
2301 		if ((flags & PG_WAIT) == 0) {
2302 			TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG,
2303 			    "page_create_toobig:vp %p off %llx npages "
2304 			    "%lu max_page_get %lu",
2305 			    vp, off, npages, max_page_get);
2306 			return (NULL);
2307 		} else {
2308 			cmn_err(CE_WARN,
2309 			    "Request for too much kernel memory "
2310 			    "(%lu bytes), will hang forever", bytes);
2311 			for (;;)
2312 				delay(1000000000);
2313 		}
2314 	}
2315 
2316 	if (!kcage_on || panicstr) {
2317 		/*
2318 		 * Cage is OFF, or we are single threaded in
2319 		 * panic, so make everything a RELOC request.
2320 		 */
2321 		flags &= ~PG_NORELOC;
2322 	}
2323 
2324 	if (freemem <= throttlefree + npages)
2325 		if (!page_create_throttle(npages, flags))
2326 			return (NULL);
2327 
2328 	/*
2329 	 * If cage is on, dampen draw from cage when available
2330 	 * cage space is low.
2331 	 */
2332 	if ((flags & PG_NORELOC) &&
2333 	    kcage_freemem < kcage_throttlefree + npages) {
2334 
2335 		/*
2336 		 * The cage is on, the caller wants PG_NORELOC
2337 		 * pages and available cage memory is very low.
2338 		 * Call kcage_create_throttle() to attempt to
2339 		 * control demand on the cage.
2340 		 */
2341 		if (kcage_create_throttle(npages, flags) == KCT_FAILURE)
2342 			return (NULL);
2343 	}
2344 
2345 	VM_STAT_ADD(page_create_cnt[0]);
2346 
2347 	if (!pcf_decrement_bucket(npages)) {
2348 		/*
2349 		 * Have to look harder.  If npages is greater than
2350 		 * one, then we might have to coalesce the counters.
2351 		 *
2352 		 * Go wait.  We come back having accounted
2353 		 * for the memory.
2354 		 */
2355 		VM_STAT_ADD(page_create_cnt[1]);
2356 		if (!page_create_wait(npages, flags)) {
2357 			VM_STAT_ADD(page_create_cnt[2]);
2358 			return (NULL);
2359 		}
2360 	}
2361 
2362 	TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
2363 	    "page_create_success:vp %p off %llx", vp, off);
2364 
2365 	/*
2366 	 * If satisfying this request has left us with too little
2367 	 * memory, start the wheels turning to get some back.  The
2368 	 * first clause of the test prevents waking up the pageout
2369 	 * daemon in situations where it would decide that there's
2370 	 * nothing to do.
2371 	 */
2372 	if (nscan < desscan && freemem < minfree) {
2373 		TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
2374 		    "pageout_cv_signal:freemem %ld", freemem);
2375 		cv_signal(&proc_pageout->p_cv);
2376 	}
2377 
2378 	/*
2379 	 * Loop around collecting the requested number of pages.
2380 	 * Most of the time, we have to `create' a new page. With
2381 	 * this in mind, pull the page off the free list before
2382 	 * getting the hash lock.  This will minimize the hash
2383 	 * lock hold time, nesting, and the like.  If it turns
2384 	 * out we don't need the page, we put it back at the end.
2385 	 */
2386 	while (npages--) {
2387 		page_t		*pp;
2388 		kmutex_t	*phm = NULL;
2389 		ulong_t		index;
2390 
2391 		index = PAGE_HASH_FUNC(vp, off);
2392 top:
2393 		ASSERT(phm == NULL);
2394 		ASSERT(index == PAGE_HASH_FUNC(vp, off));
2395 		ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
2396 
2397 		if (npp == NULL) {
2398 			/*
2399 			 * Try to get a page from the freelist (ie,
2400 			 * a page with no [vp, off] tag).  If that
2401 			 * fails, use the cachelist.
2402 			 *
2403 			 * During the first attempt at both the free
2404 			 * and cache lists we try for the correct color.
2405 			 */
2406 			/*
2407 			 * XXXX - how do we deal with virtually indexed
2408 			 * caches and colors?
2409 			 */
2410 			VM_STAT_ADD(page_create_cnt[4]);
2411 			/*
2412 			 * Get lgroup to allocate next page of shared memory
2413 			 * from and use it to specify where to allocate
2414 			 * the physical memory
2415 			 */
2416 			lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
2417 			npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
2418 			    flags | PG_MATCH_COLOR, lgrp);
2419 			if (npp == NULL) {
2420 				npp = page_get_cachelist(vp, off, seg,
2421 				    vaddr, flags | PG_MATCH_COLOR, lgrp);
2422 				if (npp == NULL) {
2423 					npp = page_create_get_something(vp,
2424 					    off, seg, vaddr,
2425 					    flags & ~PG_MATCH_COLOR);
2426 				}
2427 
2428 				if (PP_ISAGED(npp) == 0) {
2429 					/*
2430 					 * Since this page came from the
2431 					 * cachelist, we must destroy the
2432 					 * old vnode association.
2433 					 */
2434 					page_hashout(npp, NULL);
2435 				}
2436 			}
2437 		}
2438 
2439 		/*
2440 		 * We own this page!
2441 		 */
2442 		ASSERT(PAGE_EXCL(npp));
2443 		ASSERT(npp->p_vnode == NULL);
2444 		ASSERT(!hat_page_is_mapped(npp));
2445 		PP_CLRFREE(npp);
2446 		PP_CLRAGED(npp);
2447 
2448 		/*
2449 		 * Here we have a page in our hot little mitts and are
2450 		 * just waiting to stuff it on the appropriate lists.
2451 		 * Get the mutex and check to see if it really does
2452 		 * not exist.
2453 		 */
2454 		phm = PAGE_HASH_MUTEX(index);
2455 		mutex_enter(phm);
2456 		pp = page_hash_search(index, vp, off);
2457 		if (pp == NULL) {
2458 			VM_STAT_ADD(page_create_new);
2459 			pp = npp;
2460 			npp = NULL;
2461 			if (!page_hashin(pp, vp, off, phm)) {
2462 				/*
2463 				 * Since we hold the page hash mutex and
2464 				 * just searched for this page, page_hashin
2465 				 * had better not fail.  If it does, that
2466 				 * means some thread did not follow the
2467 				 * page hash mutex rules.  Panic now and
2468 				 * get it over with.  As usual, go down
2469 				 * holding all the locks.
2470 				 */
2471 				ASSERT(MUTEX_HELD(phm));
2472 				panic("page_create: "
2473 				    "hashin failed %p %p %llx %p",
2474 				    (void *)pp, (void *)vp, off, (void *)phm);
2475 				/*NOTREACHED*/
2476 			}
2477 			ASSERT(MUTEX_HELD(phm));
2478 			mutex_exit(phm);
2479 			phm = NULL;
2480 
2481 			/*
2482 			 * Hat layer locking need not be done to set
2483 			 * the following bits since the page is not hashed
2484 			 * and was on the free list (i.e., had no mappings).
2485 			 *
2486 			 * Set the reference bit to protect
2487 			 * against immediate pageout
2488 			 *
2489 			 * XXXmh modify freelist code to set reference
2490 			 * bit so we don't have to do it here.
2491 			 */
2492 			page_set_props(pp, P_REF);
2493 			found_on_free++;
2494 		} else {
2495 			VM_STAT_ADD(page_create_exists);
2496 			if (flags & PG_EXCL) {
2497 				/*
2498 				 * Found an existing page, and the caller
2499 				 * wanted all new pages.  Undo all of the work
2500 				 * we have done.
2501 				 */
2502 				mutex_exit(phm);
2503 				phm = NULL;
2504 				while (plist != NULL) {
2505 					pp = plist;
2506 					page_sub(&plist, pp);
2507 					page_io_unlock(pp);
2508 					/* large pages should not end up here */
2509 					ASSERT(pp->p_szc == 0);
2510 					/*LINTED: constant in conditional ctx*/
2511 					VN_DISPOSE(pp, B_INVAL, 0, kcred);
2512 				}
2513 				VM_STAT_ADD(page_create_found_one);
2514 				goto fail;
2515 			}
2516 			ASSERT(flags & PG_WAIT);
2517 			if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) {
2518 				/*
2519 				 * Start all over again if we blocked trying
2520 				 * to lock the page.
2521 				 */
2522 				mutex_exit(phm);
2523 				VM_STAT_ADD(page_create_page_lock_failed);
2524 				phm = NULL;
2525 				goto top;
2526 			}
2527 			mutex_exit(phm);
2528 			phm = NULL;
2529 
2530 			if (PP_ISFREE(pp)) {
2531 				ASSERT(PP_ISAGED(pp) == 0);
2532 				VM_STAT_ADD(pagecnt.pc_get_cache);
2533 				page_list_sub(pp, PG_CACHE_LIST);
2534 				PP_CLRFREE(pp);
2535 				found_on_free++;
2536 			}
2537 		}
2538 
2539 		/*
2540 		 * Got a page!  It is locked.  Acquire the i/o
2541 		 * lock since we are going to use the p_next and
2542 		 * p_prev fields to link the requested pages together.
2543 		 */
2544 		page_io_lock(pp);
2545 		page_add(&plist, pp);
2546 		plist = plist->p_next;
2547 		off += PAGESIZE;
2548 		vaddr += PAGESIZE;
2549 	}
2550 
2551 	ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1);
2552 fail:
2553 	if (npp != NULL) {
2554 		/*
2555 		 * Did not need this page after all.
2556 		 * Put it back on the free list.
2557 		 */
2558 		VM_STAT_ADD(page_create_putbacks);
2559 		PP_SETFREE(npp);
2560 		PP_SETAGED(npp);
2561 		npp->p_offset = (u_offset_t)-1;
2562 		page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
2563 		page_unlock(npp);
2564 
2565 	}
2566 
2567 	ASSERT(pages_req >= found_on_free);
2568 
2569 	{
2570 		uint_t overshoot = (uint_t)(pages_req - found_on_free);
2571 
2572 		if (overshoot) {
2573 			VM_STAT_ADD(page_create_overshoot);
2574 			p = &pcf[PCF_INDEX()];
2575 			mutex_enter(&p->pcf_lock);
2576 			if (p->pcf_block) {
2577 				p->pcf_reserve += overshoot;
2578 			} else {
2579 				p->pcf_count += overshoot;
2580 				if (p->pcf_wait) {
2581 					mutex_enter(&new_freemem_lock);
2582 					if (freemem_wait) {
2583 						cv_signal(&freemem_cv);
2584 						p->pcf_wait--;
2585 					} else {
2586 						p->pcf_wait = 0;
2587 					}
2588 					mutex_exit(&new_freemem_lock);
2589 				}
2590 			}
2591 			mutex_exit(&p->pcf_lock);
2592 			/* freemem is approximate, so this test OK */
2593 			if (!p->pcf_block)
2594 				freemem += overshoot;
2595 		}
2596 	}
2597 
2598 	return (plist);
2599 }
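
/*
 * Illustrative sketch, not part of the original source: consuming the
 * list page_create_va() returns.  Per the block comment above, each
 * page comes back holding both the "exclusive" lock and the i/o lock,
 * so the caller must drop both when it is done.  The helper name is
 * hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_consume(page_t *plist)
{
	page_t *pp;

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);	/* unlink from the returned list */
		/* ... fill in or zero the page here ... */
		page_io_unlock(pp);	/* drop p_iolock */
		page_unlock(pp);	/* drop p_selock */
	}
}
#endif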
2600 
2601 /*
2602  * One or more constituent pages of this large page have been marked
2603  * toxic. Simply demote the large page to PAGESIZE pages and let
2604  * page_free() handle it. This routine should only be called by the
2605  * large page free routines (page_free_pages() and page_destroy_pages()).
2606  * All pages are locked SE_EXCL and have already been marked free.
2607  */
2608 static void
2609 page_free_toxic_pages(page_t *rootpp)
2610 {
2611 	page_t	*tpp;
2612 	pgcnt_t	i, pgcnt = page_get_pagecnt(rootpp->p_szc);
2613 	uint_t	szc = rootpp->p_szc;
2614 
2615 	for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) {
2616 		ASSERT(tpp->p_szc == szc);
2617 		ASSERT((PAGE_EXCL(tpp) &&
2618 		    !page_iolock_assert(tpp)) || panicstr);
2619 		tpp->p_szc = 0;
2620 	}
2621 
2622 	while (rootpp != NULL) {
2623 		tpp = rootpp;
2624 		page_sub(&rootpp, tpp);
2625 		ASSERT(PP_ISFREE(tpp));
2626 		PP_CLRFREE(tpp);
2627 		page_free(tpp, 1);
2628 	}
2629 }
2630 
2631 /*
2632  * Put page on the "free" list.
2633  * The free list is really two lists maintained by
2634  * the PSM of whatever machine we happen to be on.
2635  */
2636 void
2637 page_free(page_t *pp, int dontneed)
2638 {
2639 	struct pcf	*p;
2640 	uint_t		pcf_index;
2641 
2642 	ASSERT((PAGE_EXCL(pp) &&
2643 	    !page_iolock_assert(pp)) || panicstr);
2644 
2645 	if (PP_ISFREE(pp)) {
2646 		panic("page_free: page %p is free", (void *)pp);
2647 	}
2648 
2649 	if (pp->p_szc != 0) {
2650 		if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2651 		    PP_ISKAS(pp)) {
2652 			panic("page_free: anon or kernel "
2653 			    "or no vnode large page %p", (void *)pp);
2654 		}
2655 		page_demote_vp_pages(pp);
2656 		ASSERT(pp->p_szc == 0);
2657 	}
2658 
2659 	/*
2660 	 * The page_struct_lock need not be acquired to examine these
2661 	 * fields since the page has an "exclusive" lock.
2662 	 */
2663 	if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
2664 	    pp->p_slckcnt != 0) {
2665 		panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d "
2666 		    "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt,
2667 		    pp->p_cowcnt, pp->p_slckcnt);
2668 		/*NOTREACHED*/
2669 	}
2670 
2671 	ASSERT(!hat_page_getshare(pp));
2672 
2673 	PP_SETFREE(pp);
2674 	ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
2675 	    !hat_ismod(pp));
2676 	page_clr_all_props(pp);
2677 	ASSERT(!hat_page_getshare(pp));
2678 
2679 	/*
2680 	 * Now we add the page to the head of the free list.
2681 	 * But if this page is associated with a paged vnode
2682 	 * then we adjust the head forward so that the page is
2683 	 * effectively at the end of the list.
2684 	 */
2685 	if (pp->p_vnode == NULL) {
2686 		/*
2687 		 * Page has no identity, put it on the free list.
2688 		 */
2689 		PP_SETAGED(pp);
2690 		pp->p_offset = (u_offset_t)-1;
2691 		page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
2692 		VM_STAT_ADD(pagecnt.pc_free_free);
2693 		TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2694 		    "page_free_free:pp %p", pp);
2695 	} else {
2696 		PP_CLRAGED(pp);
2697 
2698 		if (!dontneed) {
2699 			/* move it to the tail of the list */
2700 			page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
2701 
2702 			VM_STAT_ADD(pagecnt.pc_free_cache);
2703 			TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
2704 			    "page_free_cache_tail:pp %p", pp);
2705 		} else {
2706 			page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
2707 
2708 			VM_STAT_ADD(pagecnt.pc_free_dontneed);
2709 			TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
2710 			    "page_free_cache_head:pp %p", pp);
2711 		}
2712 	}
2713 	page_unlock(pp);
2714 
2715 	/*
2716 	 * Now do the `freemem' accounting.
2717 	 */
2718 	pcf_index = PCF_INDEX();
2719 	p = &pcf[pcf_index];
2720 
2721 	mutex_enter(&p->pcf_lock);
2722 	if (p->pcf_block) {
2723 		p->pcf_reserve += 1;
2724 	} else {
2725 		p->pcf_count += 1;
2726 		if (p->pcf_wait) {
2727 			mutex_enter(&new_freemem_lock);
2728 			/*
2729 			 * Check to see if some other thread
2730 			 * is actually waiting.  Another bucket
2731 			 * may have woken it up by now.  If there
2732 			 * are no waiters, then set our pcf_wait
2733 			 * count to zero to avoid coming in here
2734 			 * next time.  Also, since only one page
2735 			 * was put on the free list, just wake
2736 			 * up one waiter.
2737 			 */
2738 			if (freemem_wait) {
2739 				cv_signal(&freemem_cv);
2740 				p->pcf_wait--;
2741 			} else {
2742 				p->pcf_wait = 0;
2743 			}
2744 			mutex_exit(&new_freemem_lock);
2745 		}
2746 	}
2747 	mutex_exit(&p->pcf_lock);
2748 
2749 	/* freemem is approximate, so this test OK */
2750 	if (!p->pcf_block)
2751 		freemem += 1;
2752 }
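
/*
 * Illustrative sketch, not part of the original source: the usual order
 * of operations when a caller is done with a page it created -- drop
 * the i/o lock first (page_free() asserts it is not held) and then
 * free, which also drops the exclusive lock.  The helper name is
 * hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_dispose(page_t *pp, int dontneed)
{
	ASSERT(PAGE_EXCL(pp));
	page_io_unlock(pp);		/* page_free() asserts no iolock */
	page_free(pp, dontneed);	/* also drops the exclusive lock */
}
#endif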
2753 
2754 /*
2755  * Put page on the "free" list during initial startup.
2756  * This happens during initial single threaded execution.
2757  */
2758 void
2759 page_free_at_startup(page_t *pp)
2760 {
2761 	struct pcf	*p;
2762 	uint_t		pcf_index;
2763 
2764 	page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
2765 	VM_STAT_ADD(pagecnt.pc_free_free);
2766 
2767 	/*
2768 	 * Now do the `freemem' accounting.
2769 	 */
2770 	pcf_index = PCF_INDEX();
2771 	p = &pcf[pcf_index];
2772 
2773 	ASSERT(p->pcf_block == 0);
2774 	ASSERT(p->pcf_wait == 0);
2775 	p->pcf_count += 1;
2776 
2777 	/* freemem is approximate, so this is OK */
2778 	freemem += 1;
2779 }
2780 
2781 void
2782 page_free_pages(page_t *pp)
2783 {
2784 	page_t	*tpp, *rootpp = NULL;
2785 	pgcnt_t	pgcnt = page_get_pagecnt(pp->p_szc);
2786 	pgcnt_t	i;
2787 	uint_t	szc = pp->p_szc;
2788 
2789 	VM_STAT_ADD(pagecnt.pc_free_pages);
2790 	TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
2791 	    "page_free_free:pp %p", pp);
2792 
2793 	ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
2794 	if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
2795 		panic("page_free_pages: not root page %p", (void *)pp);
2796 		/*NOTREACHED*/
2797 	}
2798 
2799 	for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
2800 		ASSERT((PAGE_EXCL(tpp) &&
2801 		    !page_iolock_assert(tpp)) || panicstr);
2802 		if (PP_ISFREE(tpp)) {
2803 			panic("page_free_pages: page %p is free", (void *)tpp);
2804 			/*NOTREACHED*/
2805 		}
2806 		if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 ||
2807 		    tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) {
2808 			panic("page_free_pages %p", (void *)tpp);
2809 			/*NOTREACHED*/
2810 		}
2811 
2812 		ASSERT(!hat_page_getshare(tpp));
2813 		ASSERT(tpp->p_vnode == NULL);
2814 		ASSERT(tpp->p_szc == szc);
2815 
2816 		PP_SETFREE(tpp);
2817 		page_clr_all_props(tpp);
2818 		PP_SETAGED(tpp);
2819 		tpp->p_offset = (u_offset_t)-1;
2820 		ASSERT(tpp->p_next == tpp);
2821 		ASSERT(tpp->p_prev == tpp);
2822 		page_list_concat(&rootpp, &tpp);
2823 	}
2824 	ASSERT(rootpp == pp);
2825 
2826 	page_list_add_pages(rootpp, 0);
2827 	page_create_putback(pgcnt);
2828 }
2829 
2830 int free_pages = 1;
2831 
2832 /*
2833  * This routine attempts to return pages to the cachelist via page_release().
2834  * It does not *have* to be successful in all cases, since the pageout scanner
2835  * will catch any pages it misses.  It does need to be fast and not introduce
2836  * too much overhead.
2837  *
2838  * If a page isn't found on the unlocked sweep of the page_hash bucket, we
2839  * don't lock and retry.  This is ok, since the page scanner will eventually
2840  * find any page we miss in free_vp_pages().
2841  */
2842 void
2843 free_vp_pages(vnode_t *vp, u_offset_t off, size_t len)
2844 {
2845 	page_t *pp;
2846 	u_offset_t eoff;
2847 	extern int swap_in_range(vnode_t *, u_offset_t, size_t);
2848 
2849 	eoff = off + len;
2850 
2851 	if (free_pages == 0)
2852 		return;
2853 	if (swap_in_range(vp, off, len))
2854 		return;
2855 
2856 	for (; off < eoff; off += PAGESIZE) {
2857 
2858 		/*
2859 		 * find the page using a fast, but inexact search. It'll be OK
2860 		 * if a few pages slip through the cracks here.
2861 		 */
2862 		pp = page_exists(vp, off);
2863 
2864 		/*
2865 		 * If we didn't find the page (it may not exist), if it
2866 		 * is free, if it still looks in use (shared), or if we
2867 		 * can't lock it, just give up.
2868 		 */
2869 		if (pp == NULL ||
2870 		    PP_ISFREE(pp) ||
2871 		    page_share_cnt(pp) > 0 ||
2872 		    !page_trylock(pp, SE_EXCL))
2873 			continue;
2874 
2875 		/*
2876 		 * Once we have locked pp, verify that it's still the
2877 		 * correct page and not already free
2878 		 */
2879 		ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
2880 		if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) {
2881 			page_unlock(pp);
2882 			continue;
2883 		}
2884 
2885 		/*
2886 		 * try to release the page...
2887 		 */
2888 		(void) page_release(pp, 1);
2889 	}
2890 }
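
/*
 * Illustrative sketch, not part of the original source: a filesystem
 * hinting, after finishing with a range, that its pages are unlikely
 * to be reused.  The call is best effort by design -- any page it
 * misses will eventually be found by the pageout scanner.  The helper
 * name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_range_done(vnode_t *vp, u_offset_t off, size_t len)
{
	free_vp_pages(vp, off, len);
}
#endif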
2891 
2892 /*
2893  * Reclaim the given page from the free list.
2894  * If pp is part of a large page, only the given constituent page is reclaimed
2895  * and the large page it belonged to will be demoted.  This can only happen
2896  * if the page is not on the cachelist.
2897  *
2898  * Returns 1 on success or 0 on failure.
2899  *
2900  * The page is unlocked if it can't be reclaimed (when freemem == 0).
2901  * If `lock' is non-null, it will be dropped and re-acquired if
2902  * the routine must wait while freemem is 0.
2903  *
2904  * As it turns out, boot_getpages() does this.  It picks a page,
2905  * based on where OBP mapped in some address, gets its pfn, searches
2906  * the memsegs, locks the page, then pulls it off the free list!
2907  */
2908 int
2909 page_reclaim(page_t *pp, kmutex_t *lock)
2910 {
2911 	struct pcf	*p;
2912 	struct cpu	*cpup;
2913 	int		enough;
2914 	uint_t		i;
2915 
2916 	ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
2917 	ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2918 
2919 	/*
2920 	 * If `freemem' is 0, we cannot reclaim this page from the
2921 	 * freelist, so release every lock we might hold: the page,
2922 	 * and the `lock' before blocking.
2923 	 *
2924 	 * The only way `freemem' can become 0 while there are pages
2925 	 * marked free (have their p->p_free bit set) is when the
2926 	 * system is low on memory and doing a page_create().  page_create()
2927 	 * decreased `freemem' by the requested amount up front, in order to
2928 	 * guarantee that once it starts acquiring pages it will be able to
2929 	 * get all that it needs.  So, we need to release this page, and let
2930 	 * page_create() have it.
2931 	 *
2932 	 * Since `freemem' being zero is not supposed to happen, just
2933 	 * use the usual hash stuff as a starting point.  If that bucket
2934 	 * is empty, then assume the worst, and start at the beginning
2935 	 * of the pcf array.  If we always start at the beginning
2936 	 * when acquiring more than one pcf lock, there won't be any
2937 	 * deadlock problems.
2938 	 */
2939 
2940 	/* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
2941 
2942 	if (freemem <= throttlefree && !page_create_throttle(1l, 0)) {
2943 		pcf_acquire_all();
2944 		goto page_reclaim_nomem;
2945 	}
2946 
2947 	enough = pcf_decrement_bucket(1);
2948 
2949 	if (!enough) {
2950 		VM_STAT_ADD(page_reclaim_zero);
2951 		/*
2952 		 * Check again. It's possible that some other thread
2953 		 * could have been right behind us, and added one
2954 		 * to a list somewhere.  Acquire each of the pcf locks
2955 		 * until we find a page.
2956 		 */
2957 		p = pcf;
2958 		for (i = 0; i < pcf_fanout; i++) {
2959 			mutex_enter(&p->pcf_lock);
2960 			if (p->pcf_count >= 1) {
2961 				p->pcf_count -= 1;
2962 				/*
2963 				 * freemem is not protected by any lock. Thus,
2964 				 * we cannot have any assertion containing
2965 				 * freemem here.
2966 				 */
2967 				freemem -= 1;
2968 				enough = 1;
2969 				break;
2970 			}
2971 			p++;
2972 		}
2973 
2974 		if (!enough) {
2975 page_reclaim_nomem:
2976 			/*
2977 			 * We really can't have page `pp'.
2978 			 * Time for the no-memory dance with
2979 			 * page_free().  This is just like
2980 			 * page_create_wait().  Plus the added
2981 			 * attraction of releasing whatever mutex
2982 			 * the caller handed us in `lock'.
2983 			 * Page_unlock() will wakeup any thread
2984 			 * waiting around for this page.
2985 			 */
2986 			if (lock) {
2987 				VM_STAT_ADD(page_reclaim_zero_locked);
2988 				mutex_exit(lock);
2989 			}
2990 			page_unlock(pp);
2991 
2992 			/*
2993 			 * get this before we drop all the pcf locks.
2994 			 */
2995 			mutex_enter(&new_freemem_lock);
2996 
2997 			p = pcf;
2998 			for (i = 0; i < pcf_fanout; i++) {
2999 				p->pcf_wait++;
3000 				mutex_exit(&p->pcf_lock);
3001 				p++;
3002 			}
3003 
3004 			freemem_wait++;
3005 			cv_wait(&freemem_cv, &new_freemem_lock);
3006 			freemem_wait--;
3007 
3008 			mutex_exit(&new_freemem_lock);
3009 
3010 			if (lock) {
3011 				mutex_enter(lock);
3012 			}
3013 			return (0);
3014 		}
3015 
3016 		/*
3017 		 * The pcf accounting has been done and none of the
3018 		 * pcf_wait flags have been set, so drop the locks
3019 		 * and continue on.
3020 		 */
3021 		while (p >= pcf) {
3022 			mutex_exit(&p->pcf_lock);
3023 			p--;
3024 		}
3025 	}
3026 
3027 
3028 	VM_STAT_ADD(pagecnt.pc_reclaim);
3029 
3030 	/*
3031 	 * page_list_sub will handle the case where pp is a large page.
3032 	 * It's possible that the page was promoted while on the freelist
3033 	 */
3034 	if (PP_ISAGED(pp)) {
3035 		page_list_sub(pp, PG_FREE_LIST);
3036 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE,
3037 		    "page_reclaim_free:pp %p", pp);
3038 	} else {
3039 		page_list_sub(pp, PG_CACHE_LIST);
3040 		TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE,
3041 		    "page_reclaim_cache:pp %p", pp);
3042 	}
3043 
3044 	/*
3045 	 * clear the p_free & p_age bits since this page is no longer
3046 	 * on the free list.  Notice that there is a brief window where
3047 	 * a page is marked as free but is not on the list.
3048 	 *
3049 	 * Set the reference bit to protect against immediate pageout.
3050 	 */
3051 	PP_CLRFREE(pp);
3052 	PP_CLRAGED(pp);
3053 	page_set_props(pp, P_REF);
3054 
3055 	CPU_STATS_ENTER_K();
3056 	cpup = CPU;	/* get cpup now that CPU cannot change */
3057 	CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
3058 	CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
3059 	CPU_STATS_EXIT_K();
3060 	ASSERT(pp->p_szc == 0);
3061 
3062 	return (1);
3063 }
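
/*
 * Illustrative sketch, not part of the original source: the
 * page_lock()/page_reclaim() pattern referred to in
 * page_create_get_something() above -- lock a page found free, recheck
 * its state, then pull it off the freelist.  The helper name is
 * hypothetical.
 */
#if 0	/* example only -- not compiled */
static int
example_grab_free_page(page_t *pp)
{
	if (!page_trylock(pp, SE_EXCL))
		return (0);
	if (!PP_ISFREE(pp)) {		/* raced; it is no longer free */
		page_unlock(pp);
		return (0);
	}
	/* page_reclaim() returns 0 (and unlocks pp) if freemem was 0. */
	return (page_reclaim(pp, NULL));
}
#endif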
3064 
3065 /*
3066  * Destroy identity of the page and put it back on
3067  * the page free list.  Assumes that the caller has
3068  * acquired the "exclusive" lock on the page.
3069  */
3070 void
3071 page_destroy(page_t *pp, int dontfree)
3072 {
3073 	ASSERT((PAGE_EXCL(pp) &&
3074 	    !page_iolock_assert(pp)) || panicstr);
3075 	ASSERT(pp->p_slckcnt == 0 || panicstr);
3076 
3077 	if (pp->p_szc != 0) {
3078 		if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
3079 		    PP_ISKAS(pp)) {
3080 			panic("page_destroy: anon or kernel or no vnode "
3081 			    "large page %p", (void *)pp);
3082 		}
3083 		page_demote_vp_pages(pp);
3084 		ASSERT(pp->p_szc == 0);
3085 	}
3086 
3087 	TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp);
3088 
3089 	/*
3090 	 * Unload translations, if any, then hash out the
3091 	 * page to erase its identity.
3092 	 */
3093 	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3094 	page_hashout(pp, NULL);
3095 
3096 	if (!dontfree) {
3097 		/*
3098 		 * Acquire the "freemem_lock" for availrmem.
3099 		 * The page_struct_lock need not be acquired for lckcnt
3100 		 * and cowcnt since the page has an "exclusive" lock.
3101 		 * We are doing a modified version of page_pp_unlock here.
3102 		 */
3103 		if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
3104 			mutex_enter(&freemem_lock);
3105 			if (pp->p_lckcnt != 0) {
3106 				availrmem++;
3107 				pages_locked--;
3108 				pp->p_lckcnt = 0;
3109 			}
3110 			if (pp->p_cowcnt != 0) {
3111 				availrmem += pp->p_cowcnt;
3112 				pages_locked -= pp->p_cowcnt;
3113 				pp->p_cowcnt = 0;
3114 			}
3115 			mutex_exit(&freemem_lock);
3116 		}
3117 		/*
3118 		 * Put the page on the "free" list.
3119 		 */
3120 		page_free(pp, 0);
3121 	}
3122 }
3123 
3124 void
3125 page_destroy_pages(page_t *pp)
3126 {
3127 
3128 	page_t	*tpp, *rootpp = NULL;
3129 	pgcnt_t	pgcnt = page_get_pagecnt(pp->p_szc);
3130 	pgcnt_t	i, pglcks = 0;
3131 	uint_t	szc = pp->p_szc;
3132 
3133 	ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
3134 
3135 	VM_STAT_ADD(pagecnt.pc_destroy_pages);
3136 
3137 	TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp);
3138 
3139 	if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
3140 		panic("page_destroy_pages: not root page %p", (void *)pp);
3141 		/*NOTREACHED*/
3142 	}
3143 
3144 	for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
3145 		ASSERT((PAGE_EXCL(tpp) &&
3146 		    !page_iolock_assert(tpp)) || panicstr);
3147 		ASSERT(tpp->p_slckcnt == 0 || panicstr);
3148 		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
3149 		page_hashout(tpp, NULL);
3150 		ASSERT(tpp->p_offset == (u_offset_t)-1);
3151 		if (tpp->p_lckcnt != 0) {
3152 			pglcks++;
3153 			tpp->p_lckcnt = 0;
3154 		} else if (tpp->p_cowcnt != 0) {
3155 			pglcks += tpp->p_cowcnt;
3156 			tpp->p_cowcnt = 0;
3157 		}
3158 		ASSERT(!hat_page_getshare(tpp));
3159 		ASSERT(tpp->p_vnode == NULL);
3160 		ASSERT(tpp->p_szc == szc);
3161 
3162 		PP_SETFREE(tpp);
3163 		page_clr_all_props(tpp);
3164 		PP_SETAGED(tpp);
3165 		ASSERT(tpp->p_next == tpp);
3166 		ASSERT(tpp->p_prev == tpp);
3167 		page_list_concat(&rootpp, &tpp);
3168 	}
3169 
3170 	ASSERT(rootpp == pp);
3171 	if (pglcks != 0) {
3172 		mutex_enter(&freemem_lock);
3173 		availrmem += pglcks;
3174 		mutex_exit(&freemem_lock);
3175 	}
3176 
3177 	page_list_add_pages(rootpp, 0);
3178 	page_create_putback(pgcnt);
3179 }
3180 
3181 /*
3182  * Similar to page_destroy(), but destroys pages which are
3183  * locked and known to be on the page free list.  Since
3184  * the page is known to be free and locked, no one can access
3185  * it.
3186  *
3187  * Also, the number of free pages does not change.
3188  */
3189 void
3190 page_destroy_free(page_t *pp)
3191 {
3192 	ASSERT(PAGE_EXCL(pp));
3193 	ASSERT(PP_ISFREE(pp));
3194 	ASSERT(pp->p_vnode);
3195 	ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
3196 	ASSERT(!hat_page_is_mapped(pp));
3197 	ASSERT(PP_ISAGED(pp) == 0);
3198 	ASSERT(pp->p_szc == 0);
3199 
3200 	VM_STAT_ADD(pagecnt.pc_destroy_free);
3201 	page_list_sub(pp, PG_CACHE_LIST);
3202 
3203 	page_hashout(pp, NULL);
3204 	ASSERT(pp->p_vnode == NULL);
3205 	ASSERT(pp->p_offset == (u_offset_t)-1);
3206 	ASSERT(pp->p_hash == NULL);
3207 
3208 	PP_SETAGED(pp);
3209 	page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
3210 	page_unlock(pp);
3211 
3212 	mutex_enter(&new_freemem_lock);
3213 	if (freemem_wait) {
3214 		cv_signal(&freemem_cv);
3215 	}
3216 	mutex_exit(&new_freemem_lock);
3217 }
3218 
3219 /*
3220  * Rename the page "opp" to have an identity specified
3221  * by [vp, off].  If a page already exists with this name
3222  * it is locked and destroyed.  Note that the page's
3223  * translations are not unloaded during the rename.
3224  *
3225  * This routine is used by the anon layer to "steal" the
3226  * original page and is not unlike destroying a page and
3227  * creating a new page using the same page frame.
3228  *
3229  * XXX -- Could deadlock if caller 1 tries to rename A to B while
3230  * caller 2 tries to rename B to A.
3231  */
3232 void
3233 page_rename(page_t *opp, vnode_t *vp, u_offset_t off)
3234 {
3235 	page_t		*pp;
3236 	int		olckcnt = 0;
3237 	int		ocowcnt = 0;
3238 	kmutex_t	*phm;
3239 	ulong_t		index;
3240 
3241 	ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
3242 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3243 	ASSERT(PP_ISFREE(opp) == 0);
3244 
3245 	VM_STAT_ADD(page_rename_count);
3246 
3247 	TRACE_3(TR_FAC_VM, TR_PAGE_RENAME,
3248 	    "page rename:pp %p vp %p off %llx", opp, vp, off);
3249 
3250 	/*
3251 	 * CacheFS may call page_rename for a large NFS page
3252 	 * when both CacheFS and NFS mount points are used
3253 	 * by applications. Demote this large page before
3254 	 * renaming it, to ensure that there are no "partial"
3255 	 * large pages left lying around.
3256 	 */
3257 	if (opp->p_szc != 0) {
3258 		vnode_t *ovp = opp->p_vnode;
3259 		ASSERT(ovp != NULL);
3260 		ASSERT(!IS_SWAPFSVP(ovp));
3261 		ASSERT(!VN_ISKAS(ovp));
3262 		page_demote_vp_pages(opp);
3263 		ASSERT(opp->p_szc == 0);
3264 	}
3265 
3266 	page_hashout(opp, NULL);
3267 	PP_CLRAGED(opp);
3268 
3269 	/*
3270 	 * Acquire the appropriate page hash lock, since
3271 	 * we're going to rename the page.
3272 	 */
3273 	index = PAGE_HASH_FUNC(vp, off);
3274 	phm = PAGE_HASH_MUTEX(index);
3275 	mutex_enter(phm);
3276 top:
3277 	/*
3278 	 * Look for an existing page with this name and destroy it if found.
3279 	 * By holding the page hash lock all the way to the page_hashin()
3280 	 * call, we are assured that no page can be created with this
3281 	 * identity.  In the case when the phm lock is dropped to undo any
3282 	 * hat layer mappings, the existing page is held with an "exclusive"
3283 	 * lock, again preventing another page from being created with
3284 	 * this identity.
3285 	 */
3286 	pp = page_hash_search(index, vp, off);
3287 	if (pp != NULL) {
3288 		VM_STAT_ADD(page_rename_exists);
3289 
3290 		/*
3291 		 * As it turns out, this is one of only two places where
3292 		 * page_lock() needs to hold the passed in lock in the
3293 		 * successful case.  In all of the others, the lock could
3294 		 * be dropped as soon as the attempt is made to lock
3295 		 * the page.  It is tempting to add yet another argument,
3296 		 * PL_KEEP or PL_DROP, to let page_lock know what to do.
3297 		 */
3298 		if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
3299 			/*
3300 			 * Went to sleep because the page could not
3301 			 * be locked.  We were woken up when the page
3302 			 * was unlocked, or when the page was destroyed.
3303 			 * In either case, `phm' was dropped while we
3304 			 * slept.  Hence we should not just roar through
3305 			 * this loop.
3306 			 */
3307 			goto top;
3308 		}
3309 
3310 		/*
3311 		 * If an existing page is a large page, then demote
3312 		 * it to ensure that no "partial" large pages are
3313 		 * "created" after page_rename. An existing page
3314 		 * can be a CacheFS page, and can't belong to swapfs.
3315 		 */
3316 		if (hat_page_is_mapped(pp)) {
3317 			/*
3318 			 * Unload translations.  Since we hold the
3319 			 * exclusive lock on this page, the page
3320 			 * can not be changed while we drop phm.
3321 			 * This is also not a lock protocol violation,
3322 			 * but rather the proper way to do things.
3323 			 */
3324 			mutex_exit(phm);
3325 			(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
3326 			if (pp->p_szc != 0) {
3327 				ASSERT(!IS_SWAPFSVP(vp));
3328 				ASSERT(!VN_ISKAS(vp));
3329 				page_demote_vp_pages(pp);
3330 				ASSERT(pp->p_szc == 0);
3331 			}
3332 			mutex_enter(phm);
3333 		} else if (pp->p_szc != 0) {
3334 			ASSERT(!IS_SWAPFSVP(vp));
3335 			ASSERT(!VN_ISKAS(vp));
3336 			mutex_exit(phm);
3337 			page_demote_vp_pages(pp);
3338 			ASSERT(pp->p_szc == 0);
3339 			mutex_enter(phm);
3340 		}
3341 		page_hashout(pp, phm);
3342 	}
3343 	/*
3344 	 * Hash in the page with the new identity.
3345 	 */
3346 	if (!page_hashin(opp, vp, off, phm)) {
3347 		/*
3348 		 * We were holding phm while we searched for [vp, off]
3349 		 * and only dropped phm if we found and locked a page.
3350 		 * If we can't create this page now, then something
3351 		 * is really broken.
3352 		 */
3353 		panic("page_rename: Can't hash in page: %p", (void *)pp);
3354 		/*NOTREACHED*/
3355 	}
3356 
3357 	ASSERT(MUTEX_HELD(phm));
3358 	mutex_exit(phm);
3359 
3360 	/*
3361 	 * Now that we have dropped phm, let's get around to finishing up
3362 	 * with pp.
3363 	 */
3364 	if (pp != NULL) {
3365 		ASSERT(!hat_page_is_mapped(pp));
3366 		/* for now large pages should not end up here */
3367 		ASSERT(pp->p_szc == 0);
3368 		/*
3369 		 * Save the locks for transfer to the new page and then
3370 		 * clear them so page_free doesn't think they're important.
3371 		 * The page_struct_lock need not be acquired for lckcnt and
3372 		 * cowcnt since the page has an "exclusive" lock.
3373 		 */
3374 		olckcnt = pp->p_lckcnt;
3375 		ocowcnt = pp->p_cowcnt;
3376 		pp->p_lckcnt = pp->p_cowcnt = 0;
3377 
3378 		/*
3379 		 * Put the page on the "free" list after we drop
3380 		 * the lock.  The less work under the lock the better.
3381 		 */
3382 		/*LINTED: constant in conditional context*/
3383 		VN_DISPOSE(pp, B_FREE, 0, kcred);
3384 	}
3385 
3386 	/*
3387 	 * Transfer the lock count from the old page (if any).
3388 	 * The page_struct_lock need not be acquired for lckcnt and
3389 	 * cowcnt since the page has an "exclusive" lock.
3390 	 */
3391 	opp->p_lckcnt += olckcnt;
3392 	opp->p_cowcnt += ocowcnt;
3393 }
3394 
3395 /*
3396  * Low-level routine to add page `pp' to the hash and vp chains for [vp, offset].
3397  *
3398  * Pages are normally inserted at the start of a vnode's v_pages list.
3399  * If the vnode is VMODSORT and the page is modified, it goes at the end.
3400  * This can happen when a modified page is relocated for DR.
3401  *
3402  * Returns 1 on success and 0 on failure.
3403  */
3404 static int
3405 page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
3406 {
3407 	page_t		**listp;
3408 	page_t		*tp;
3409 	ulong_t		index;
3410 
3411 	ASSERT(PAGE_EXCL(pp));
3412 	ASSERT(vp != NULL);
3413 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
3414 
3415 	/*
3416 	 * Be sure to set these up before the page is inserted on the hash
3417 	 * list.  As soon as the page is placed on the list some other
3418 	 * thread might get confused and wonder how this page could
3419 	 * possibly hash to this list.
3420 	 */
3421 	pp->p_vnode = vp;
3422 	pp->p_offset = offset;
3423 
3424 	/*
3425 	 * record if this page is on a swap vnode
3426 	 */
3427 	if ((vp->v_flag & VISSWAP) != 0)
3428 		PP_SETSWAP(pp);
3429 
3430 	index = PAGE_HASH_FUNC(vp, offset);
3431 	ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index)));
3432 	listp = &page_hash[index];
3433 
3434 	/*
3435 	 * If this page is already hashed in, fail this attempt to add it.
3436 	 */
3437 	for (tp = *listp; tp != NULL; tp = tp->p_hash) {
3438 		if (tp->p_vnode == vp && tp->p_offset == offset) {
3439 			pp->p_vnode = NULL;
3440 			pp->p_offset = (u_offset_t)(-1);
3441 			return (0);
3442 		}
3443 	}
3444 	pp->p_hash = *listp;
3445 	*listp = pp;
3446 
3447 	/*
3448 	 * Add the page to the vnode's list of pages
3449 	 */
3450 	if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
3451 		listp = &vp->v_pages->p_vpprev->p_vpnext;
3452 	else
3453 		listp = &vp->v_pages;
3454 
3455 	page_vpadd(listp, pp);
3456 
3457 	return (1);
3458 }
3459 
3460 /*
3461  * Add page `pp' to both the hash and vp chains for [vp, offset].
3462  *
3463  * Returns 1 on success and 0 on failure.
3464  * If hold is passed in, it is not dropped.
3465  */
3466 int
3467 page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
3468 {
3469 	kmutex_t	*phm = NULL;
3470 	kmutex_t	*vphm;
3471 	int		rc;
3472 
3473 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3474 	ASSERT(pp->p_fsdata == 0 || panicstr);
3475 
3476 	TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN,
3477 	    "page_hashin:pp %p vp %p offset %llx",
3478 	    pp, vp, offset);
3479 
3480 	VM_STAT_ADD(hashin_count);
3481 
3482 	if (hold != NULL)
3483 		phm = hold;
3484 	else {
3485 		VM_STAT_ADD(hashin_not_held);
3486 		phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset));
3487 		mutex_enter(phm);
3488 	}
3489 
3490 	vphm = page_vnode_mutex(vp);
3491 	mutex_enter(vphm);
3492 	rc = page_do_hashin(pp, vp, offset);
3493 	mutex_exit(vphm);
3494 	if (hold == NULL)
3495 		mutex_exit(phm);
3496 	if (rc == 0)
3497 		VM_STAT_ADD(hashin_already);
3498 	return (rc);
3499 }
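
/*
 * Illustrative sketch, not part of the original source: giving an
 * identity-less page a [vp, off] name with page_hashin() and dropping
 * that name again with page_hashout().  Passing NULL for the hold/phm
 * arguments lets both routines manage the hash mutex themselves.  The
 * helper name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static int
example_name_page(page_t *pp, vnode_t *vp, u_offset_t off)
{
	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_vnode == NULL);

	if (!page_hashin(pp, vp, off, NULL))
		return (0);	/* a [vp, off] page already exists */
	/* ... the page now appears in page_hash and on vp->v_pages ... */
	page_hashout(pp, NULL);	/* sever the association again */
	return (1);
}
#endif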
3500 
3501 /*
3502  * Remove page ``pp'' from the hash and vp chains and remove vp association.
3503  * All mutexes must be held.
3504  */
3505 static void
3506 page_do_hashout(page_t *pp)
3507 {
3508 	page_t	**hpp;
3509 	page_t	*hp;
3510 	vnode_t	*vp = pp->p_vnode;
3511 
3512 	ASSERT(vp != NULL);
3513 	ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
3514 
3515 	/*
3516 	 * First, take pp off of its hash chain.
3517 	 */
3518 	hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];
3519 
3520 	for (;;) {
3521 		hp = *hpp;
3522 		if (hp == pp)
3523 			break;
3524 		if (hp == NULL) {
3525 			panic("page_do_hashout");
3526 			/*NOTREACHED*/
3527 		}
3528 		hpp = &hp->p_hash;
3529 	}
3530 	*hpp = pp->p_hash;
3531 
3532 	/*
3533 	 * Now remove it from its associated vnode.
3534 	 */
3535 	if (vp->v_pages)
3536 		page_vpsub(&vp->v_pages, pp);
3537 
3538 	pp->p_hash = NULL;
3539 	page_clr_all_props(pp);
3540 	PP_CLRSWAP(pp);
3541 	pp->p_vnode = NULL;
3542 	pp->p_offset = (u_offset_t)-1;
3543 	pp->p_fsdata = 0;
3544 }
3545 
3546 /*
3547  * Remove page ``pp'' from the hash and vp chains and remove vp association.
3548  *
3549  * When `phm' is non-NULL it contains the address of the mutex protecting the
3550  * hash list pp is on.  It is not dropped.
3551  */
3552 void
3553 page_hashout(page_t *pp, kmutex_t *phm)
3554 {
3555 	vnode_t		*vp;
3556 	ulong_t		index;
3557 	kmutex_t	*nphm;
3558 	kmutex_t	*vphm;
3559 	kmutex_t	*sep;
3560 
3561 	ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1);
3562 	ASSERT(pp->p_vnode != NULL);
3563 	ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
3564 	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));
3565 
3566 	vp = pp->p_vnode;
3567 
3568 	TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT,
3569 	    "page_hashout:pp %p vp %p", pp, vp);
3570 
3571 	/* Kernel probe */
3572 	TNF_PROBE_2(page_unmap, "vm pagefault", /* CSTYLED */,
3573 	    tnf_opaque, vnode, vp,
3574 	    tnf_offset, offset, pp->p_offset);
3575 
3579 	VM_STAT_ADD(hashout_count);
3580 	index = PAGE_HASH_FUNC(vp, pp->p_offset);
3581 	if (phm == NULL) {
3582 		VM_STAT_ADD(hashout_not_held);
3583 		nphm = PAGE_HASH_MUTEX(index);
3584 		mutex_enter(nphm);
3585 	}
3586 	ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1);
3587 
3588 
3589 	/*
3590 	 * grab page vnode mutex and remove it...
3591 	 */
3592 	vphm = page_vnode_mutex(vp);
3593 	mutex_enter(vphm);
3594 
3595 	page_do_hashout(pp);
3596 
3597 	mutex_exit(vphm);
3598 	if (phm == NULL)
3599 		mutex_exit(nphm);
3600 
3601 	/*
3602 	 * Wake up processes waiting for this page.  The page's
3603 	 * identity has been changed, and is probably not the
3604 	 * desired page any longer.
3605 	 */
3606 	sep = page_se_mutex(pp);
3607 	mutex_enter(sep);
3608 	pp->p_selock &= ~SE_EWANTED;
3609 	if (CV_HAS_WAITERS(&pp->p_cv))
3610 		cv_broadcast(&pp->p_cv);
3611 	mutex_exit(sep);
3612 }
3613 
3614 /*
3615  * Add the page to the front of a linked list of pages
3616  * using the p_next & p_prev pointers for the list.
3617  * The caller is responsible for protecting the list pointers.
3618  */
3619 void
3620 page_add(page_t **ppp, page_t *pp)
3621 {
3622 	ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3623 
3624 	page_add_common(ppp, pp);
3625 }
3626 
3627 
3628 
3629 /*
3630  *  Common code for page_add() and mach_page_add()
3631  */
3632 void
3633 page_add_common(page_t **ppp, page_t *pp)
3634 {
3635 	if (*ppp == NULL) {
3636 		pp->p_next = pp->p_prev = pp;
3637 	} else {
3638 		pp->p_next = *ppp;
3639 		pp->p_prev = (*ppp)->p_prev;
3640 		(*ppp)->p_prev = pp;
3641 		pp->p_prev->p_next = pp;
3642 	}
3643 	*ppp = pp;
3644 }
3645 
3646 
3647 /*
3648  * Remove this page from a linked list of pages
3649  * using the p_next & p_prev pointers for the list.
3650  *
3651  * The caller is responsible for protecting the list pointers.
3652  */
3653 void
3654 page_sub(page_t **ppp, page_t *pp)
3655 {
3656 	ASSERT((PP_ISFREE(pp)) ? 1 :
3657 	    (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
3658 
3659 	if (*ppp == NULL || pp == NULL) {
3660 		panic("page_sub: bad arg(s): pp %p, *ppp %p",
3661 		    (void *)pp, (void *)(*ppp));
3662 		/*NOTREACHED*/
3663 	}
3664 
3665 	page_sub_common(ppp, pp);
3666 }
3667 
3668 
3669 /*
3670  *  Common code for page_sub() and mach_page_sub()
3671  */
3672 void
3673 page_sub_common(page_t **ppp, page_t *pp)
3674 {
3675 	if (*ppp == pp)
3676 		*ppp = pp->p_next;		/* go to next page */
3677 
3678 	if (*ppp == pp)
3679 		*ppp = NULL;			/* page list is gone */
3680 	else {
3681 		pp->p_prev->p_next = pp->p_next;
3682 		pp->p_next->p_prev = pp->p_prev;
3683 	}
3684 	pp->p_prev = pp->p_next = pp;		/* make pp a list of one */
3685 }
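
/*
 * Illustrative sketch, not part of the original source: building up and
 * draining a circular, doubly linked p_next/p_prev list with page_add()
 * and page_sub().  Assumes each page is exclusively locked, as
 * page_add() asserts.  The helper name is hypothetical.
 */
#if 0	/* example only -- not compiled */
static void
example_walk(page_t *pages[], int n)
{
	page_t	*plist = NULL;
	page_t	*pp;
	int	i;

	for (i = 0; i < n; i++)
		page_add(&plist, pages[i]);	/* insert at the front */

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);	/* pp becomes a list of one */
		/* ... process pp ... */
	}
}
#endif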
3686 
3687 
3688 /*
3689  * Break page list oppp into two lists with npages in the first list.
3690  * The tail is returned in nppp.
3691  */
3692 void
3693 page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
3694 {
3695 	page_t *s1pp = *oppp;
3696 	page_t *s2pp;
3697 	page_t *e1pp, *e2pp;
3698 	long n = 0;
3699 
3700 	if (s1pp == NULL) {
3701 		*nppp = NULL;
3702 		return;
3703 	}
3704 	if (npages == 0) {
3705 		*nppp = s1pp;
3706 		*oppp = NULL;
3707 		return;
3708 	}
3709 	for (n = 0, s2pp = *oppp; n < npages; n++) {
3710 		s2pp = s2pp->p_next;
3711 	}
3712 	/* Fix head and tail of new lists */
3713 	e1pp = s2pp->p_prev;
3714 	e2pp = s1pp->p_prev;
3715 	s1pp->p_prev = e1pp;
3716 	e1pp->p_next = s1pp;
3717 	s2pp->p_prev = e2pp;
3718 	e2pp->p_next = s2pp;
3719 
3720 	/* if the walk wrapped all the way around, the second list is empty */
3721 	if (s2pp == s1pp) {
3722 		*oppp = s1pp;
3723 		*nppp = NULL;
3724 	} else {
3725 		*oppp = s1pp;
3726 		*nppp = s2pp;
3727 	}
3728 }
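
/*
 * [Editorial worked example -- not part of the original source.]
 * Given the ring a-b-c-d-e with *oppp == a and npages == 2, the walk
 * leaves s2pp at c, so e1pp == b (tail of the first list) and e2pp == e
 * (tail of the whole ring).  Re-closing both rings yields *oppp == a
 * (ring a-b) and *nppp == c (ring c-d-e).  If npages is at least the
 * list length, the walk wraps back around so s2pp == s1pp and the tail
 * list comes back empty.
 */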
3729 
3730 /*
3731  * Concatenate page list nppp onto the end of list ppp.
3732  */
3733 void
3734 page_list_concat(page_t **ppp, page_t **nppp)
3735 {
3736 	page_t *s1pp, *s2pp, *e1pp, *e2pp;
3737 
3738 	if (*nppp == NULL) {
3739 		return;
3740 	}
3741 	if (*ppp == NULL) {
3742 		*ppp = *nppp;
3743 		return;
3744 	}
3745 	s1pp = *ppp;
3746 	e1pp =  s1pp->p_prev;
3747 	s2pp = *nppp;
3748 	e2pp = s2pp->p_prev;
3749 	s1pp->p_prev = e2pp;
3750 	e2pp->p_next = s1pp;
3751 	e1pp->p_next = s2pp;
3752 	s2pp->p_prev = e1pp;
3753 }
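
/*
 * [Editorial worked example -- not part of the original source.]
 * Given rings a-b (*ppp == a, so e1pp == b) and c-d (*nppp == c, so
 * e2pp == d), the four assignments splice each tail to the opposite
 * head: b->next = c, c->prev = b, d->next = a and a->prev = d, giving
 * the single ring a-b-c-d with *ppp still pointing at a.
 */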
3754 
3755 /*
3756  * Return the next page in the page list.
3757  */
3758 page_t *
3759 page_list_next(page_t *pp)
3760 {
3761 	return (pp->p_next);
3762 }
3763 
3764 
3765 /*
3766  * Add the page to the front of the linked list of pages
3767  * using p_vpnext/p_vpprev pointers for the list.
3768  *
3769  * The caller is responsible for protecting the lists.
3770  */
3771 void
3772 page_vpadd(page_t **ppp, page_t *pp)
3773 {
3774 	if (*ppp == NULL) {
3775 		pp->p_vpnext = pp->p_vpprev = pp;
3776 	} else {
3777 		pp->p_vpnext = *ppp;
3778 		pp->p_vpprev = (*ppp)->p_vpprev;
3779 		(*ppp)->p_vpprev = pp;
3780 		pp->p_vpprev->p_vpnext = pp;
3781 	}
3782 	*ppp = pp;
3783 }
3784 
3785 /*
3786  * Remove this page from the linked list of pages
3787  * using p_vpnext/p_vpprev pointers for the list.
3788  *
3789  * The caller is responsible for protecting the lists.
3790  */
3791 void
3792 page_vpsub(page_t **ppp, page_t *pp)
3793 {
3794 	if (*ppp == NULL || pp == NULL) {
3795 		panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
3796 		    (void *)pp, (void *)(*ppp));
3797 		/*NOTREACHED*/
3798 	}
3799 
3800 	if (*ppp == pp)
3801 		*ppp = pp->p_vpnext;		/* go to next page */
3802 
3803 	if (*ppp == pp)
3804 		*ppp = NULL;			/* page list is gone */
3805 	else {
3806 		pp->p_vpprev->p_vpnext = pp->p_vpnext;
3807 		pp->p_vpnext->p_vpprev = pp->p_vpprev;
3808 	}
3809 	pp->p_vpprev = pp->p_vpnext = pp;	/* make pp a list of one */
3810 }
3811 
3812 /*
3813  * Lock a physical page into memory "long term".  Used to support "lock
3814  * in memory" functions.  Accepts the page to be locked, and a cow variable
3815  * to indicate whether the lock will travel to the new page during
3816  * a potential copy-on-write.
3817  */
3818 int
3819 page_pp_lock(
3820 	page_t *pp,			/* page to be locked */
3821 	int cow,			/* cow lock */
3822 	int kernel)			/* must succeed -- ignore checking */
3823 {
3824 	int r = 0;			/* result -- assume failure */
3825 
3826 	ASSERT(PAGE_LOCKED(pp));
3827 
3828 	page_struct_lock(pp);
3829 	/*
3830 	 * Acquire the "freemem_lock" for availrmem.
3831 	 */
3832 	if (cow) {
3833 		mutex_enter(&freemem_lock);
3834 		if ((availrmem > pages_pp_maximum) &&
3835 		    (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
3836 			availrmem--;
3837 			pages_locked++;
3838 			mutex_exit(&freemem_lock);
3839 			r = 1;
3840 			if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
3841 				cmn_err(CE_WARN,
3842 				    "COW lock limit reached on pfn 0x%lx",
3843 				    page_pptonum(pp));
3844 			}
3845 		} else
3846 			mutex_exit(&freemem_lock);
3847 	} else {
3848 		if (pp->p_lckcnt) {
3849 			if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
3850 				r = 1;
3851 				if (++pp->p_lckcnt ==
3852 				    (ushort_t)PAGE_LOCK_MAXIMUM) {
3853 					cmn_err(CE_WARN, "Page lock limit "
3854 					    "reached on pfn 0x%lx",
3855 					    page_pptonum(pp));
3856 				}
3857 			}
3858 		} else {
3859 			if (kernel) {
3860 				/* availrmem accounting done by caller */
3861 				++pp->p_lckcnt;
3862 				r = 1;
3863 			} else {
3864 				mutex_enter(&freemem_lock);
3865 				if (availrmem > pages_pp_maximum) {
3866 					availrmem--;
3867 					pages_locked++;
3868 					++pp->p_lckcnt;
3869 					r = 1;
3870 				}
3871 				mutex_exit(&freemem_lock);
3872 			}
3873 		}
3874 	}
3875 	page_struct_unlock(pp);
3876 	return (r);
3877 }
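
/*
 * [Editorial sketch -- not part of the original source; compiles as a
 * standalone program.]  A userland model of the non-cow, non-kernel
 * accounting above.  model_availrmem, model_pp_maximum and
 * MODEL_LOCK_MAX are hypothetical stand-ins for availrmem,
 * pages_pp_maximum and PAGE_LOCK_MAXIMUM.  It shows that only the
 * 0 -> 1 transition of the lock count charges availrmem, that re-locks
 * merely bump the count, and that the count saturates (further locks
 * fail) rather than wrapping.
 */
#include <stdio.h>

#define	MODEL_LOCK_MAX	3		/* stands in for PAGE_LOCK_MAXIMUM */

static long model_availrmem = 5;	/* stands in for availrmem */
static long model_pp_maximum = 2;	/* stands in for pages_pp_maximum */

static int
model_pp_lock(unsigned int *lckcnt)
{
	if (*lckcnt != 0) {
		if (*lckcnt < MODEL_LOCK_MAX) {
			(*lckcnt)++;	/* re-lock: no availrmem charge */
			return (1);
		}
		return (0);		/* count saturated, lock refused */
	}
	if (model_availrmem > model_pp_maximum) {
		model_availrmem--;	/* first lock charges one page */
		(*lckcnt)++;
		return (1);
	}
	return (0);			/* would dip below the floor */
}

int
main(void)
{
	unsigned int cnt = 0;
	int i, ok;

	for (i = 0; i < 5; i++) {
		ok = model_pp_lock(&cnt);
		printf("lock -> %d, count %u, availrmem %ld\n",
		    ok, cnt, model_availrmem);
	}
	return (0);
}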
3878 
3879 /*
3880  * Decommit a lock on a physical page frame.  Account for cow locks if
3881  * appropriate.
3882  */
3883 void
3884 page_pp_unlock(
3885 	page_t *pp,			/* page to be unlocked */
3886 	int cow,			/* expect cow lock */
3887 	int kernel)			/* this was a kernel lock */
3888 {
3889 	ASSERT(PAGE_LOCKED(pp));
3890 
3891 	page_struct_lock(pp);
3892 	/*
3893 	 * Acquire the "freemem_lock" for availrmem.
3894 	 * If cowcnt or lckcnt is already 0, do nothing; i.e., we
3895 	 * could be called to unlock even if nothing is locked. This could
3896 	 * happen if locked file pages were truncated (removing the lock)
3897 	 * and the file was grown again and new pages faulted in; the new
3898 	 * pages are unlocked but the segment still thinks they're locked.
3899 	 */
3900 	if (cow) {
3901 		if (pp->p_cowcnt) {
3902 			mutex_enter(&freemem_lock);
3903 			pp->p_cowcnt--;
3904 			availrmem++;
3905 			pages_locked--;
3906 			mutex_exit(&freemem_lock);
3907 		}
3908 	} else {
3909 		if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
3910 			if (!kernel) {
3911 				mutex_enter(&freemem_lock);
3912 				availrmem++;
3913 				pages_locked--;
3914 				mutex_exit(&freemem_lock);
3915 			}
3916 		}
3917 	}
3918 	page_struct_unlock(pp);
3919 }
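
/*
 * [Editorial sketch -- not part of the original source.]  The matching
 * unlock for the model above: only the 1 -> 0 transition refunds the
 * page to availrmem, and unlocking an already-unlocked page is a
 * harmless no-op, which is exactly the truncate-then-regrow case the
 * comment above describes.
 */
static void
model_pp_unlock(unsigned int *lckcnt)
{
	if (*lckcnt != 0 && --*lckcnt == 0)
		model_availrmem++;	/* last unlock refunds the page */
}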
3920 
3921 /*
3922  * This routine reserves availrmem for npages;
3923  *	flags: KM_NOSLEEP or KM_SLEEP
3924  *	returns 1 on success or 0 on failure
3925  */
3926 int
3927 page_resv(pgcnt_t npages, uint_t flags)
3928 {
3929 	mutex_enter(&freemem_lock);
3930 	while (availrmem < tune.t_minarmem + npages) {
3931 		if (flags & KM_NOSLEEP) {
3932 			mutex_exit(&freemem_lock);
3933 			return (0);
3934 		}
3935 		mutex_exit(&freemem_lock);
3936 		page_needfree(npages);
3937 		kmem_reap();
3938 		delay(hz >> 2);
3939 		page_needfree(-(spgcnt_t)npages);
3940 		mutex_enter(&freemem_lock);
3941