/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

/* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/* All Rights Reserved */

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/vm.h>
#include <sys/vtrace.h>
#include <sys/swap.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/mman.h>
#include <sys/ontrap.h>
#include <sys/lgrp.h>
#include <sys/vfs.h>

#include <vm/hat.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/pvn.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/ddi.h>
#include <sys/modctl.h>

static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */
volatile uint64_t n_throttle = 0;

/*
 * freemem_lock protects the freemem accounting variables, chiefly availrmem.
 * It also protects the globals which track availrmem changes for accurate
 * kernel footprint calculation; see below for an explanation of these
 * globals.
 */
kmutex_t	freemem_lock;
pgcnt_t		availrmem;
pgcnt_t		availrmem_initial;

/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size.  Historically pp_kernel is used for
 * kernel size and is based on availrmem.  But availrmem is adjusted for
 * locked pages in the system not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;


/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t new_freemem_lock;
static uint_t freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;

/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */

/*
 * free list accounting stuff.
 *
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists.  If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back.  This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again.  If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented.  pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */

#define	MAX_PCF_FANOUT		NCPU
static uint_t pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};

/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to).  This is done to prevent a drain condition
 * from happening.  This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B.  An
 * example of this shows up with device interrupts.  The dma buffer is
 * allocated by the cpu requesting the IO thus the pcf_count is decremented
 * based on that.  When the memory is returned by the interrupt thread, the
 * pcf_count will be incremented based on the cpu servicing the interrupt.
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))
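
/*
 * Illustrative only: a worked example of how PCF_INDEX() spreads threads
 * across the pcf[] buckets.  Assume, hypothetically, pcf_fanout == 8 so
 * pcf_fanout_mask == 7.  A thread on the CPU with cpu_seqid == 3, at a
 * moment when (randtick() >> 24) == 5, hashes to bucket (3 + 5) & 7 == 0.
 * Some time later the high bits of randtick() have changed, so the same
 * CPU lands in a different bucket; that slow drift is what prevents the
 * drain condition described above from starving any one bucket.
 */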

static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);

kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */

#ifdef VM_STATS

/*
 * No locks, but so what, they are only statistics.
 */

static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;

uint_t	hashin_count;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];

#endif

static inline page_t *
page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
{
	uint_t mylen = 0;
	page_t *page;

	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
		if (page->p_vnode == vnode && page->p_offset == off)
			break;

#ifdef	VM_STATS
	if (page != NULL)
		pagecnt.pc_find_hit++;
	else
		pagecnt.pc_find_miss++;

	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;
#endif

	return (page);
}


#ifdef DEBUG
#define	MEMSEG_SEARCH_STATS
#endif

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_inc_32(&memseg_stats.v)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */

/*
 * /etc/system tunable to control the large page allocation heuristic.
 *
 * Setting it to LPAP_LOCAL will heavily prefer the local lgroup over a remote
 * lgroup for large page allocation requests.  If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * The default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;
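
/*
 * Illustrative only: because LPAP_DEFAULT is 0 and LPAP_LOCAL is 1, the
 * local policy can be requested from /etc/system with a line along the
 * lines of
 *
 *	set lpg_alloc_prefer = 1
 *
 * (a sketch; the exact syntax depends on how the platform exposes the
 * symbol).
 */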
3367c478bd9Sstevel@tonic-gate
3377c478bd9Sstevel@tonic-gate static void page_init_mem_config(void);
3387c478bd9Sstevel@tonic-gate static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
3397c478bd9Sstevel@tonic-gate static void page_do_hashout(page_t *);
3408b464eb8Smec static void page_capture_init();
3418b464eb8Smec int page_capture_take_action(page_t *, uint_t, void *);
3427c478bd9Sstevel@tonic-gate
3437c478bd9Sstevel@tonic-gate static void page_demote_vp_pages(page_t *);
3447c478bd9Sstevel@tonic-gate

void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif /* sun4v */

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);

		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}
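
/*
 * Illustrative only: a worked example of the sizing above.  On a
 * hypothetical machine booted with 6 CPUs, pcf_fanout starts at 6, is not
 * a power of two, and becomes 1 << highbit(6) == 8, giving
 * pcf_fanout_mask == 7.  If the rounded value ever exceeded MAX_PCF_FANOUT
 * (NCPU), the inner clause instead rounds down to the largest power of two
 * not greater than MAX_PCF_FANOUT.
 */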

/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
	page_retire_init();
	vm_usage_init();
	page_capture_init();
}

/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * The first-time setting of pages_pp_maximum is based
		 * on the value of availrmem just after the start-up
		 * allocations.  To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'.  If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}
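
/*
 * Illustrative only: a worked example of the default calculation above,
 * assuming 4 KB pages and a hypothetical
 * (availrmem_initial - avrmem_delta) of 1,000,000 pages (~3.8 GB):
 *
 *	pages_pp_maximum = 1,000,000 / 25 + btop(4 MB)
 *			 = 40,000 + 1,024
 *			 = 41,024 pages (~160 MB)
 *
 * i.e. roughly 4% plus 4 MB of memory must remain unlockable, unless
 * tune.t_minarmem + 100 is larger.
 */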

/*
 * In the past, we limited the maximum pages that could be gotten to essentially
 * 1/2 of the total pages on the system. However, this is too conservative for
 * some cases. For example, if we want to host a large virtual machine which
 * needs to use a significant portion of the system's memory. In practice,
 * allowing more than 1/2 of the total pages is fine, but becomes problematic
 * as we approach or exceed 75% of the pages on the system. Thus, we limit the
 * maximum to 23/32 of the total pages, which is ~72%.
 */
void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = (target_total_pages >> 5) * 23;
	ASSERT3U(max_page_get, >, 0);
}

pgcnt_t
get_max_page_get()
{
	return (max_page_get);
}
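
/*
 * Illustrative only: with a hypothetical target_total_pages of 1,048,576
 * (4 GB of 4 KB pages), max_page_get = (1,048,576 >> 5) * 23
 * = 32,768 * 23 = 753,664 pages, which is exactly 23/32 (71.875%) of the
 * total, matching the limit described above.
 */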

static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	if (!cancelled)
		init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}

/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}
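
/*
 * Illustrative only: a worked example of the spreading loop above.  With a
 * hypothetical npages of 10 and pcf_fanout of 4, lump = roundup(10, 4) / 4
 * = 3, so successive buckets receive 3, 3, 3 and finally 1 page.  Because
 * the cursor `p' is static, the next call starts at the following bucket,
 * so the short remainder does not always land in the same place.
 */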

/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num	- number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
void
add_physmem(
	page_t	*pp,
	pgcnt_t	num,
	pfn_t	pnum)
{
	page_t	*root = NULL;
	uint_t	szc = page_num_pagesizes() - 1;
	pgcnt_t	large = page_get_pagecnt(szc);
	pgcnt_t	cnt = 0;

	TRACE_2(TR_FAC_VM, TR_PAGE_INIT,
	    "add_physmem:pp %p num %lu", pp, num);

	/*
	 * Limit the max page_get request according to the number of
	 * page structs we have (see set_max_page_get()).
	 */
	total_pages += num;
	set_max_page_get(total_pages);

	PLCNT_MODIFY_MAX(pnum, (long)num);

	/*
	 * The physical space for the pages array
	 * representing ram pages has already been
	 * allocated.  Here we initialize each lock
	 * in the page structure, and put each on
	 * the free list
	 */
	for (; num; pp++, pnum++, num--) {

		/*
		 * this needs to fill in the page number
		 * and do any other arch specific initialization
		 */
		add_physmem_cb(pp, pnum);

		pp->p_lckcnt = 0;
		pp->p_cowcnt = 0;
		pp->p_slckcnt = 0;

		/*
		 * Initialize the page lock as unlocked, since nobody
		 * can see or access this page yet.
		 */
		pp->p_selock = 0;

		/*
		 * Initialize IO lock
		 */
		page_iolock_init(pp);

		/*
		 * initialize other fields in the page_t
		 */
		PP_SETFREE(pp);
		page_clr_all_props(pp);
		PP_SETAGED(pp);
		pp->p_offset = (u_offset_t)-1;
		pp->p_next = pp;
		pp->p_prev = pp;

		/*
		 * Simple case: System doesn't support large pages.
		 */
		if (szc == 0) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Handle unaligned pages, we collect them up onto
		 * the root page until we have a full large page.
		 */
		if (!IS_P2ALIGNED(pnum, large)) {

			/*
			 * If not in a large page,
			 * just free as small page.
			 */
			if (root == NULL) {
				pp->p_szc = 0;
				page_free_at_startup(pp);
				continue;
			}

			/*
			 * Link a constituent page into the large page.
			 */
			pp->p_szc = szc;
			page_list_concat(&root, &pp);

			/*
			 * When large page is fully formed, free it.
			 */
			if (++cnt == large) {
				page_free_large_ctr(cnt);
				page_list_add_pages(root, PG_LIST_ISINIT);
				root = NULL;
				cnt = 0;
			}
			continue;
		}

		/*
		 * At this point we have a page number which
		 * is aligned. We assert that we aren't already
		 * in a different large page.
		 */
		ASSERT(IS_P2ALIGNED(pnum, large));
		ASSERT(root == NULL && cnt == 0);

		/*
		 * If insufficient number of pages left to form
		 * a large page, just free the small page.
		 */
		if (num < large) {
			pp->p_szc = 0;
			page_free_at_startup(pp);
			continue;
		}

		/*
		 * Otherwise start a new large page.
		 */
		pp->p_szc = szc;
		cnt++;
		root = pp;
	}
	ASSERT(root == NULL && cnt == 0);
}
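
/*
 * Illustrative only: suppose, hypothetically, that the largest page size
 * has 512 constituent pages (large == 512) and add_physmem() is handed a
 * chunk whose first pfn is not 512-aligned.  The leading unaligned pages
 * are freed one at a time as small pages (root == NULL), the first aligned
 * pfn starts a new root, the next 511 constituents are concatenated onto
 * it, and the completed group goes onto the large-page freelist via
 * page_list_add_pages().  A tail too short to fill a whole group is again
 * freed page by page.
 */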

/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is intransit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete.  A page found on the free list
 * is always reclaimed and then locked.  On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}
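
/*
 * Illustrative only: a minimal sketch of the typical caller-side pattern,
 * assuming the usual pairing with page_unlock() seen elsewhere in this file.
 *
 *	page_t *pp;
 *
 *	if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
 *		...	(not present; caller creates the page or reads it in)
 *	} else {
 *		...	(pp is held SE_SHARED, off the free list, data valid)
 *		page_unlock(pp);
 *	}
 */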

/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page. If we find an existing page but it is
 * intransit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete.  A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL;
 */
page_t *
page_lookup_create(
	vnode_t *vp,
	u_offset_t off,
	se_t se,
	page_t *newpp,
	spgcnt_t *nrelocp,
	int flags)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		hash_locked;
	uint_t		es;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_cnt[0]);
	ASSERT(newpp ? PAGE_EXCL(newpp) : 1);

	/*
	 * Acquire the appropriate page hash lock since
	 * we have to search the hash list.  Pages that
	 * hash to this list can't change identity while
	 * this lock is held.
	 */
	hash_locked = 0;
	index = PAGE_HASH_FUNC(vp, off);
	phm = NULL;
top:
	pp = page_hash_search(index, vp, off);
	if (pp != NULL) {
		VM_STAT_ADD(page_lookup_cnt[1]);
		es = (newpp != NULL) ? 1 : 0;
		es |= flags;
		if (!hash_locked) {
			VM_STAT_ADD(page_lookup_cnt[2]);
			if (!page_try_reclaim_lock(pp, se, es)) {
				/*
				 * On a miss, acquire the phm.  Then
				 * next time, page_lock() will be called,
				 * causing a wait if the page is busy.
				 * just looping with page_trylock() would
				 * get pretty boring.
				 */
				VM_STAT_ADD(page_lookup_cnt[3]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			}
		} else {
			VM_STAT_ADD(page_lookup_cnt[4]);
			if (!page_lock_es(pp, se, phm, P_RECLAIM, es)) {
				VM_STAT_ADD(page_lookup_cnt[5]);
				goto top;
			}
		}

		/*
		 * Since `pp' is locked it can not change identity now.
		 * Reconfirm we locked the correct page.
		 *
		 * Both the p_vnode and p_offset *must* be cast volatile
		 * to force a reload of their values: The page_hash_search
		 * function will have stuffed p_vnode and p_offset into
		 * registers before calling page_trylock(); another thread,
		 * actually holding the hash lock, could have changed the
		 * page's identity in memory, but our registers would not
		 * be changed, fooling the reconfirmation.  If the hash
		 * lock was held during the search, the casting would
		 * not be needed.
		 */
		VM_STAT_ADD(page_lookup_cnt[6]);
		if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
		    ((volatile u_offset_t)(pp->p_offset) != off)) {
			VM_STAT_ADD(page_lookup_cnt[7]);
			if (hash_locked) {
				panic("page_lookup_create: lost page %p",
				    (void *)pp);
				/*NOTREACHED*/
			}
			page_unlock(pp);
			phm = PAGE_HASH_MUTEX(index);
			mutex_enter(phm);
			hash_locked = 1;
			goto top;
		}

		/*
		 * If page_trylock() was called, then pp may still be on
		 * the cachelist (can't be on the free list, it would not
		 * have been found in the search).  If it is on the
		 * cachelist it must be pulled now. To pull the page from
		 * the cachelist, it must be exclusively locked.
		 *
		 * The other big difference between page_trylock() and
		 * page_lock(), is that page_lock() will pull the
		 * page from whatever free list (the cache list in this
		 * case) the page is on.  If page_trylock() was used
		 * above, then we have to do the reclaim ourselves.
		 */
		if ((!hash_locked) && (PP_ISFREE(pp))) {
			ASSERT(PP_ISAGED(pp) == 0);
			VM_STAT_ADD(page_lookup_cnt[8]);

			/*
			 * page_reclaim will ensure that we
			 * have this page exclusively
			 */

			if (!page_reclaim(pp, NULL)) {
				/*
				 * Page_reclaim dropped whatever lock
				 * we held.
				 */
				VM_STAT_ADD(page_lookup_cnt[9]);
				phm = PAGE_HASH_MUTEX(index);
				mutex_enter(phm);
				hash_locked = 1;
				goto top;
			} else if (se == SE_SHARED && newpp == NULL) {
				VM_STAT_ADD(page_lookup_cnt[10]);
				page_downgrade(pp);
			}
		}

		if (hash_locked) {
			mutex_exit(phm);
		}

		if (newpp != NULL && pp->p_szc < newpp->p_szc &&
		    PAGE_EXCL(pp) && nrelocp != NULL) {
			ASSERT(nrelocp != NULL);
			(void) page_relocate(&pp, &newpp, 1, 1, nrelocp,
			    NULL);
			if (*nrelocp > 0) {
				VM_STAT_COND_ADD(*nrelocp == 1,
				    page_lookup_cnt[11]);
				VM_STAT_COND_ADD(*nrelocp > 1,
				    page_lookup_cnt[12]);
				pp = newpp;
				se = SE_EXCL;
			} else {
				if (se == SE_SHARED) {
					page_downgrade(pp);
				}
				VM_STAT_ADD(page_lookup_cnt[13]);
			}
		} else if (newpp != NULL && nrelocp != NULL) {
			if (PAGE_EXCL(pp) && se == SE_SHARED) {
				page_downgrade(pp);
			}
			VM_STAT_COND_ADD(pp->p_szc < newpp->p_szc,
			    page_lookup_cnt[14]);
			VM_STAT_COND_ADD(pp->p_szc == newpp->p_szc,
			    page_lookup_cnt[15]);
			VM_STAT_COND_ADD(pp->p_szc > newpp->p_szc,
			    page_lookup_cnt[16]);
		} else if (newpp != NULL && PAGE_EXCL(pp)) {
			se = SE_EXCL;
		}
	} else if (!hash_locked) {
		VM_STAT_ADD(page_lookup_cnt[17]);
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		hash_locked = 1;
		goto top;
	} else if (newpp != NULL) {
		/*
		 * If we have a preallocated page then
		 * insert it now and basically behave like
		 * page_create.
		 */
		VM_STAT_ADD(page_lookup_cnt[18]);
		/*
		 * Since we hold the page hash mutex and
		 * just searched for this page, page_hashin
		 * had better not fail.  If it does, that
		 * means some thread did not follow the
		 * page hash mutex rules.  Panic now and
		 * get it over with.  As usual, go down
		 * holding all the locks.
		 */
		ASSERT(MUTEX_HELD(phm));
		if (!page_hashin(newpp, vp, off, phm)) {
			ASSERT(MUTEX_HELD(phm));
			panic("page_lookup_create: hashin failed %p %p %llx %p",
			    (void *)newpp, (void *)vp, off, (void *)phm);
			/*NOTREACHED*/
		}
		ASSERT(MUTEX_HELD(phm));
		mutex_exit(phm);
		phm = NULL;
		page_set_props(newpp, P_REF);
		page_io_lock(newpp);
		pp = newpp;
		se = SE_EXCL;
	} else {
		VM_STAT_ADD(page_lookup_cnt[19]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	ASSERT(pp ? ((PP_ISFREE(pp) == 0) && (PP_ISAGED(pp) == 0)) : 1);

	return (pp);
}

/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked.  Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
page_t *
page_lookup_nowait(vnode_t *vp, u_offset_t off, se_t se)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;
	uint_t		locked;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_lookup_nowait_cnt[0]);

	index = PAGE_HASH_FUNC(vp, off);
	pp = page_hash_search(index, vp, off);
	locked = 0;
	if (pp == NULL) {
top:
		VM_STAT_ADD(page_lookup_nowait_cnt[1]);
		locked = 1;
		phm = PAGE_HASH_MUTEX(index);
		mutex_enter(phm);
		pp = page_hash_search(index, vp, off);
	}

	if (pp == NULL || PP_ISFREE(pp)) {
		VM_STAT_ADD(page_lookup_nowait_cnt[2]);
		pp = NULL;
	} else {
		if (!page_trylock(pp, se)) {
			VM_STAT_ADD(page_lookup_nowait_cnt[3]);
			pp = NULL;
		} else {
			VM_STAT_ADD(page_lookup_nowait_cnt[4]);
			/*
			 * See the comment in page_lookup()
			 */
			if (((volatile struct vnode *)(pp->p_vnode) != vp) ||
			    ((u_offset_t)(pp->p_offset) != off)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[5]);
				if (locked) {
					panic("page_lookup_nowait %p",
					    (void *)pp);
					/*NOTREACHED*/
				}
				page_unlock(pp);
				goto top;
			}
			if (PP_ISFREE(pp)) {
				VM_STAT_ADD(page_lookup_nowait_cnt[6]);
				page_unlock(pp);
				pp = NULL;
			}
		}
	}
	if (locked) {
		VM_STAT_ADD(page_lookup_nowait_cnt[7]);
		mutex_exit(phm);
	}

	ASSERT(pp ? PAGE_LOCKED_SE(pp, se) : 1);

	return (pp);
}

/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked.  This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
page_t *
page_find(vnode_t *vp, u_offset_t off)
{
	page_t		*pp;
	kmutex_t	*phm;
	ulong_t		index;

	ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
	VM_STAT_ADD(page_find_cnt);

	index = PAGE_HASH_FUNC(vp, off);
	phm = PAGE_HASH_MUTEX(index);

	mutex_enter(phm);
	pp = page_hash_search(index, vp, off);
	mutex_exit(phm);

	ASSERT(pp == NULL || PAGE_LOCKED(pp) || panicstr);
	return (pp);
}
10367c478bd9Sstevel@tonic-gate
10377c478bd9Sstevel@tonic-gate /*
10387c478bd9Sstevel@tonic-gate * Determine whether a page with the specified [vp, off]
10397c478bd9Sstevel@tonic-gate * currently exists in the system. Obviously this should
10407c478bd9Sstevel@tonic-gate * only be considered as a hint since nothing prevents the
10417c478bd9Sstevel@tonic-gate * page from disappearing or appearing immediately after
10427c478bd9Sstevel@tonic-gate * the return from this routine. Subsequently, we don't
10437c478bd9Sstevel@tonic-gate * even bother to lock the list.
10447c478bd9Sstevel@tonic-gate */
10457c478bd9Sstevel@tonic-gate page_t *
page_exists(vnode_t * vp,u_offset_t off)10467c478bd9Sstevel@tonic-gate page_exists(vnode_t *vp, u_offset_t off)
10477c478bd9Sstevel@tonic-gate {
10487c478bd9Sstevel@tonic-gate ulong_t index;
10497c478bd9Sstevel@tonic-gate
10507c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
10517c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exists_cnt);
10527c478bd9Sstevel@tonic-gate
10537c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off);
10547c478bd9Sstevel@tonic-gate
1055e7c874afSJosef 'Jeff' Sipek return (page_hash_search(index, vp, off));
10567c478bd9Sstevel@tonic-gate }
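
/*
 * Hedged usage sketch (hypothetical, not existing kernel code): because
 * page_exists() is only a hint, a caller that needs the page typically
 * re-verifies under a real lock:
 *
 *	if (page_exists(vp, off) != NULL) {
 *		pp = page_lookup(vp, off, SE_SHARED);
 *		if (pp != NULL) {
 *			... pp is now locked with the right identity ...
 *			page_unlock(pp);
 *		}
 *	}
 */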
10577c478bd9Sstevel@tonic-gate
10587c478bd9Sstevel@tonic-gate /*
10597c478bd9Sstevel@tonic-gate * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
10607c478bd9Sstevel@tonic-gate  * page_size(szc)) range. If they exist and ppa is not NULL, fill the ppa
10617c478bd9Sstevel@tonic-gate  * array with these pages locked SHARED. If necessary, reclaim pages from
10627c478bd9Sstevel@tonic-gate  * the freelist. Return 1 if contiguous pages exist and 0 otherwise.
10637c478bd9Sstevel@tonic-gate  *
10647c478bd9Sstevel@tonic-gate  * If we fail to lock the pages, we still return 1 when the pages exist
10657c478bd9Sstevel@tonic-gate  * and are contiguous, but then the return value is only a hint and the ppa
10667c478bd9Sstevel@tonic-gate  * array won't be filled; initialize ppa[0] to NULL to tell the cases apart.
10677c478bd9Sstevel@tonic-gate *
10687c478bd9Sstevel@tonic-gate * Returns 0 if pages don't exist or not physically contiguous.
10697c478bd9Sstevel@tonic-gate *
10707c478bd9Sstevel@tonic-gate * This routine doesn't work for anonymous(swapfs) pages.
10717c478bd9Sstevel@tonic-gate */
10727c478bd9Sstevel@tonic-gate int
10737c478bd9Sstevel@tonic-gate page_exists_physcontig(vnode_t *vp, u_offset_t off, uint_t szc, page_t *ppa[])
10747c478bd9Sstevel@tonic-gate {
10757c478bd9Sstevel@tonic-gate pgcnt_t pages;
10767c478bd9Sstevel@tonic-gate pfn_t pfn;
10777c478bd9Sstevel@tonic-gate page_t *rootpp;
10787c478bd9Sstevel@tonic-gate pgcnt_t i;
10797c478bd9Sstevel@tonic-gate pgcnt_t j;
10807c478bd9Sstevel@tonic-gate u_offset_t save_off = off;
10817c478bd9Sstevel@tonic-gate ulong_t index;
10827c478bd9Sstevel@tonic-gate kmutex_t *phm;
10837c478bd9Sstevel@tonic-gate page_t *pp;
10847c478bd9Sstevel@tonic-gate uint_t pszc;
10857c478bd9Sstevel@tonic-gate int loopcnt = 0;
10867c478bd9Sstevel@tonic-gate
10877c478bd9Sstevel@tonic-gate ASSERT(szc != 0);
10887c478bd9Sstevel@tonic-gate ASSERT(vp != NULL);
10897c478bd9Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(vp));
1090ad23a2dbSjohansen ASSERT(!VN_ISKAS(vp));
10917c478bd9Sstevel@tonic-gate
10927c478bd9Sstevel@tonic-gate again:
10937c478bd9Sstevel@tonic-gate if (++loopcnt > 3) {
10947c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[0]);
10957c478bd9Sstevel@tonic-gate return (0);
10967c478bd9Sstevel@tonic-gate }
10977c478bd9Sstevel@tonic-gate
10987c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off);
10997c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index);
11007c478bd9Sstevel@tonic-gate
11017c478bd9Sstevel@tonic-gate mutex_enter(phm);
1102e7c874afSJosef 'Jeff' Sipek pp = page_hash_search(index, vp, off);
11037c478bd9Sstevel@tonic-gate mutex_exit(phm);
11047c478bd9Sstevel@tonic-gate
11057c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[1]);
11067c478bd9Sstevel@tonic-gate
11077c478bd9Sstevel@tonic-gate if (pp == NULL) {
11087c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[2]);
11097c478bd9Sstevel@tonic-gate return (0);
11107c478bd9Sstevel@tonic-gate }
11117c478bd9Sstevel@tonic-gate
11127c478bd9Sstevel@tonic-gate pages = page_get_pagecnt(szc);
11137c478bd9Sstevel@tonic-gate rootpp = pp;
11147c478bd9Sstevel@tonic-gate pfn = rootpp->p_pagenum;
11157c478bd9Sstevel@tonic-gate
11167c478bd9Sstevel@tonic-gate if ((pszc = pp->p_szc) >= szc && ppa != NULL) {
11177c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[3]);
11187c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_SHARED)) {
11197c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[4]);
11207c478bd9Sstevel@tonic-gate return (1);
11217c478bd9Sstevel@tonic-gate }
11229853d9e8SJason Beloro /*
11239853d9e8SJason Beloro * Also check whether p_pagenum was modified by DR.
11249853d9e8SJason Beloro */
11257c478bd9Sstevel@tonic-gate if (pp->p_szc != pszc || pp->p_vnode != vp ||
11269853d9e8SJason Beloro pp->p_offset != off || pp->p_pagenum != pfn) {
11277c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[5]);
11287c478bd9Sstevel@tonic-gate page_unlock(pp);
11297c478bd9Sstevel@tonic-gate off = save_off;
11307c478bd9Sstevel@tonic-gate goto again;
11317c478bd9Sstevel@tonic-gate }
11327c478bd9Sstevel@tonic-gate /*
11337c478bd9Sstevel@tonic-gate  * Since szc was non-zero and the vnode and offset matched after we
11347c478bd9Sstevel@tonic-gate  * locked the page, it can't become free on us.
11357c478bd9Sstevel@tonic-gate */
11367c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
11377c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, pages)) {
11387c478bd9Sstevel@tonic-gate page_unlock(pp);
11397c478bd9Sstevel@tonic-gate return (0);
11407c478bd9Sstevel@tonic-gate }
11417c478bd9Sstevel@tonic-gate ppa[0] = pp;
11427c478bd9Sstevel@tonic-gate pp++;
11437c478bd9Sstevel@tonic-gate off += PAGESIZE;
11447c478bd9Sstevel@tonic-gate pfn++;
11457c478bd9Sstevel@tonic-gate for (i = 1; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
11467c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_SHARED)) {
11477c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[6]);
11487c478bd9Sstevel@tonic-gate pp--;
11497c478bd9Sstevel@tonic-gate while (i-- > 0) {
11507c478bd9Sstevel@tonic-gate page_unlock(pp);
11517c478bd9Sstevel@tonic-gate pp--;
11527c478bd9Sstevel@tonic-gate }
11537c478bd9Sstevel@tonic-gate ppa[0] = NULL;
11547c478bd9Sstevel@tonic-gate return (1);
11557c478bd9Sstevel@tonic-gate }
11567c478bd9Sstevel@tonic-gate if (pp->p_szc != pszc) {
11577c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[7]);
11587c478bd9Sstevel@tonic-gate page_unlock(pp);
11597c478bd9Sstevel@tonic-gate pp--;
11607c478bd9Sstevel@tonic-gate while (i-- > 0) {
11617c478bd9Sstevel@tonic-gate page_unlock(pp);
11627c478bd9Sstevel@tonic-gate pp--;
11637c478bd9Sstevel@tonic-gate }
11647c478bd9Sstevel@tonic-gate ppa[0] = NULL;
11657c478bd9Sstevel@tonic-gate off = save_off;
11667c478bd9Sstevel@tonic-gate goto again;
11677c478bd9Sstevel@tonic-gate }
11687c478bd9Sstevel@tonic-gate /*
11697c478bd9Sstevel@tonic-gate  * szc is the same as for the previously locked pages
11707c478bd9Sstevel@tonic-gate  * with the right identity. Since this page had the correct
11717c478bd9Sstevel@tonic-gate  * szc after we locked it, it can't get freed or destroyed
11727c478bd9Sstevel@tonic-gate  * and therefore must have the expected identity.
11737c478bd9Sstevel@tonic-gate */
11747c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
11757c478bd9Sstevel@tonic-gate if (pp->p_vnode != vp ||
11767c478bd9Sstevel@tonic-gate pp->p_offset != off) {
11777c478bd9Sstevel@tonic-gate panic("page_exists_physcontig: "
11787c478bd9Sstevel@tonic-gate "large page identity doesn't match");
11797c478bd9Sstevel@tonic-gate }
11807c478bd9Sstevel@tonic-gate ppa[i] = pp;
11817c478bd9Sstevel@tonic-gate ASSERT(pp->p_pagenum == pfn);
11827c478bd9Sstevel@tonic-gate }
11837c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[8]);
11847c478bd9Sstevel@tonic-gate ppa[pages] = NULL;
11857c478bd9Sstevel@tonic-gate return (1);
11867c478bd9Sstevel@tonic-gate } else if (pszc >= szc) {
11877c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[9]);
11887c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, pages)) {
11897c478bd9Sstevel@tonic-gate return (0);
11907c478bd9Sstevel@tonic-gate }
11917c478bd9Sstevel@tonic-gate return (1);
11927c478bd9Sstevel@tonic-gate }
11937c478bd9Sstevel@tonic-gate
11947c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, pages)) {
11957c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[10]);
11967c478bd9Sstevel@tonic-gate return (0);
11977c478bd9Sstevel@tonic-gate }
11987c478bd9Sstevel@tonic-gate
11997c478bd9Sstevel@tonic-gate if (page_numtomemseg_nolock(pfn) !=
12007c478bd9Sstevel@tonic-gate page_numtomemseg_nolock(pfn + pages - 1)) {
12017c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[11]);
12027c478bd9Sstevel@tonic-gate return (0);
12037c478bd9Sstevel@tonic-gate }
12047c478bd9Sstevel@tonic-gate
12057c478bd9Sstevel@tonic-gate /*
12067c478bd9Sstevel@tonic-gate  * We loop across the pages 4 times to promote the page size.
12077c478bd9Sstevel@tonic-gate * We're extra cautious to promote page size atomically with respect
12087c478bd9Sstevel@tonic-gate * to everybody else. But we can probably optimize into 1 loop if
12097c478bd9Sstevel@tonic-gate * this becomes an issue.
12107c478bd9Sstevel@tonic-gate */
12117c478bd9Sstevel@tonic-gate
12127c478bd9Sstevel@tonic-gate for (i = 0; i < pages; i++, pp++, off += PAGESIZE, pfn++) {
12137c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) {
12147c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[12]);
12157c478bd9Sstevel@tonic-gate break;
12167c478bd9Sstevel@tonic-gate }
12179853d9e8SJason Beloro /*
12189853d9e8SJason Beloro * Check whether p_pagenum was modified by DR.
12199853d9e8SJason Beloro */
12209853d9e8SJason Beloro if (pp->p_pagenum != pfn) {
12219853d9e8SJason Beloro page_unlock(pp);
12229853d9e8SJason Beloro break;
12239853d9e8SJason Beloro }
12247c478bd9Sstevel@tonic-gate if (pp->p_vnode != vp ||
12257c478bd9Sstevel@tonic-gate pp->p_offset != off) {
12267c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[13]);
12277c478bd9Sstevel@tonic-gate page_unlock(pp);
12287c478bd9Sstevel@tonic-gate break;
12297c478bd9Sstevel@tonic-gate }
12307c478bd9Sstevel@tonic-gate if (pp->p_szc >= szc) {
12317c478bd9Sstevel@tonic-gate ASSERT(i == 0);
12327c478bd9Sstevel@tonic-gate page_unlock(pp);
12337c478bd9Sstevel@tonic-gate off = save_off;
12347c478bd9Sstevel@tonic-gate goto again;
12357c478bd9Sstevel@tonic-gate }
12367c478bd9Sstevel@tonic-gate }
12377c478bd9Sstevel@tonic-gate
12387c478bd9Sstevel@tonic-gate if (i != pages) {
12397c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[14]);
12407c478bd9Sstevel@tonic-gate --pp;
12417c478bd9Sstevel@tonic-gate while (i-- > 0) {
12427c478bd9Sstevel@tonic-gate page_unlock(pp);
12437c478bd9Sstevel@tonic-gate --pp;
12447c478bd9Sstevel@tonic-gate }
12457c478bd9Sstevel@tonic-gate return (0);
12467c478bd9Sstevel@tonic-gate }
12477c478bd9Sstevel@tonic-gate
12487c478bd9Sstevel@tonic-gate pp = rootpp;
12497c478bd9Sstevel@tonic-gate for (i = 0; i < pages; i++, pp++) {
12507c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp)) {
12517c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[15]);
12527c478bd9Sstevel@tonic-gate ASSERT(!PP_ISAGED(pp));
12537c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
12547c478bd9Sstevel@tonic-gate if (!page_reclaim(pp, NULL)) {
12557c478bd9Sstevel@tonic-gate break;
12567c478bd9Sstevel@tonic-gate }
12577c478bd9Sstevel@tonic-gate } else {
12587c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc < szc);
12597c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[16]);
12607c478bd9Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
12617c478bd9Sstevel@tonic-gate }
12627c478bd9Sstevel@tonic-gate }
12637c478bd9Sstevel@tonic-gate if (i < pages) {
12647c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[17]);
12657c478bd9Sstevel@tonic-gate /*
12667c478bd9Sstevel@tonic-gate * page_reclaim failed because we were out of memory.
12677c478bd9Sstevel@tonic-gate * drop the rest of the locks and return because this page
12687c478bd9Sstevel@tonic-gate * must be already reallocated anyway.
12697c478bd9Sstevel@tonic-gate */
12707c478bd9Sstevel@tonic-gate pp = rootpp;
12717c478bd9Sstevel@tonic-gate for (j = 0; j < pages; j++, pp++) {
12727c478bd9Sstevel@tonic-gate if (j != i) {
12737c478bd9Sstevel@tonic-gate page_unlock(pp);
12747c478bd9Sstevel@tonic-gate }
12757c478bd9Sstevel@tonic-gate }
12767c478bd9Sstevel@tonic-gate return (0);
12777c478bd9Sstevel@tonic-gate }
12787c478bd9Sstevel@tonic-gate
12797c478bd9Sstevel@tonic-gate off = save_off;
12807c478bd9Sstevel@tonic-gate pp = rootpp;
12817c478bd9Sstevel@tonic-gate for (i = 0; i < pages; i++, pp++, off += PAGESIZE) {
12827c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
12837c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
12847c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp));
12857c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == vp);
12867c478bd9Sstevel@tonic-gate ASSERT(pp->p_offset == off);
12877c478bd9Sstevel@tonic-gate pp->p_szc = szc;
12887c478bd9Sstevel@tonic-gate }
12897c478bd9Sstevel@tonic-gate pp = rootpp;
12907c478bd9Sstevel@tonic-gate for (i = 0; i < pages; i++, pp++) {
12917c478bd9Sstevel@tonic-gate if (ppa == NULL) {
12927c478bd9Sstevel@tonic-gate page_unlock(pp);
12937c478bd9Sstevel@tonic-gate } else {
12947c478bd9Sstevel@tonic-gate ppa[i] = pp;
12957c478bd9Sstevel@tonic-gate page_downgrade(ppa[i]);
12967c478bd9Sstevel@tonic-gate }
12977c478bd9Sstevel@tonic-gate }
12987c478bd9Sstevel@tonic-gate if (ppa != NULL) {
12997c478bd9Sstevel@tonic-gate ppa[pages] = NULL;
13007c478bd9Sstevel@tonic-gate }
13017c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exphcontg[18]);
13027c478bd9Sstevel@tonic-gate ASSERT(vp->v_pages != NULL);
13037c478bd9Sstevel@tonic-gate return (1);
13047c478bd9Sstevel@tonic-gate }
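
/*
 * Hypothetical caller sketch (for illustration only) showing how the
 * ppa[0] == NULL convention distinguishes "locked and filled" from
 * "exists but couldn't be locked":
 *
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa)) {
 *		if (ppa[0] == NULL) {
 *			... pages exist and are contiguous, but could not
 *			... be locked; treat the result only as a hint.
 *		} else {
 *			... ppa[] holds the SE_SHARED locked pages,
 *			... terminated by a NULL entry.
 *			for (i = 0; ppa[i] != NULL; i++)
 *				page_unlock(ppa[i]);
 *		}
 *	}
 */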
13057c478bd9Sstevel@tonic-gate
13067c478bd9Sstevel@tonic-gate /*
13077c478bd9Sstevel@tonic-gate * Determine whether a page with the specified [vp, off]
13087c478bd9Sstevel@tonic-gate * currently exists in the system and if so return its
13097c478bd9Sstevel@tonic-gate * size code. Obviously this should only be considered as
13107c478bd9Sstevel@tonic-gate * a hint since nothing prevents the page from disappearing
13117c478bd9Sstevel@tonic-gate * or appearing immediately after the return from this routine.
13127c478bd9Sstevel@tonic-gate */
13137c478bd9Sstevel@tonic-gate int
13147c478bd9Sstevel@tonic-gate page_exists_forreal(vnode_t *vp, u_offset_t off, uint_t *szc)
13157c478bd9Sstevel@tonic-gate {
13167c478bd9Sstevel@tonic-gate page_t *pp;
13177c478bd9Sstevel@tonic-gate kmutex_t *phm;
13187c478bd9Sstevel@tonic-gate ulong_t index;
13197c478bd9Sstevel@tonic-gate int rc = 0;
13207c478bd9Sstevel@tonic-gate
13217c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
13227c478bd9Sstevel@tonic-gate ASSERT(szc != NULL);
13237c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_exists_forreal_cnt);
13247c478bd9Sstevel@tonic-gate
13257c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off);
13267c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index);
13277c478bd9Sstevel@tonic-gate
13287c478bd9Sstevel@tonic-gate mutex_enter(phm);
1329e7c874afSJosef 'Jeff' Sipek pp = page_hash_search(index, vp, off);
13307c478bd9Sstevel@tonic-gate if (pp != NULL) {
13317c478bd9Sstevel@tonic-gate *szc = pp->p_szc;
13327c478bd9Sstevel@tonic-gate rc = 1;
13337c478bd9Sstevel@tonic-gate }
13347c478bd9Sstevel@tonic-gate mutex_exit(phm);
13357c478bd9Sstevel@tonic-gate return (rc);
13367c478bd9Sstevel@tonic-gate }
13377c478bd9Sstevel@tonic-gate
13387c478bd9Sstevel@tonic-gate /* wakeup threads waiting for pages in page_create_get_something() */
13397c478bd9Sstevel@tonic-gate void
13407c478bd9Sstevel@tonic-gate wakeup_pcgs(void)
13417c478bd9Sstevel@tonic-gate {
13427c478bd9Sstevel@tonic-gate if (!CV_HAS_WAITERS(&pcgs_cv))
13437c478bd9Sstevel@tonic-gate return;
13447c478bd9Sstevel@tonic-gate cv_broadcast(&pcgs_cv);
13457c478bd9Sstevel@tonic-gate }
13467c478bd9Sstevel@tonic-gate
13477c478bd9Sstevel@tonic-gate /*
13487c478bd9Sstevel@tonic-gate * 'freemem' is used all over the kernel as an indication of how many
13497c478bd9Sstevel@tonic-gate * pages are free (either on the cache list or on the free page list)
13507c478bd9Sstevel@tonic-gate * in the system. In very few places is a really accurate 'freemem'
13517c478bd9Sstevel@tonic-gate  * needed. To avoid contention on the lock protecting a
13527c478bd9Sstevel@tonic-gate * single freemem, it was spread out into NCPU buckets. Set_freemem
13537c478bd9Sstevel@tonic-gate * sets freemem to the total of all NCPU buckets. It is called from
13547c478bd9Sstevel@tonic-gate * clock() on each TICK.
13557c478bd9Sstevel@tonic-gate */
13567c478bd9Sstevel@tonic-gate void
1357727737b4SJoshua M. Clulow set_freemem(void)
13587c478bd9Sstevel@tonic-gate {
13597c478bd9Sstevel@tonic-gate struct pcf *p;
13607c478bd9Sstevel@tonic-gate ulong_t t;
13617c478bd9Sstevel@tonic-gate uint_t i;
13627c478bd9Sstevel@tonic-gate
13637c478bd9Sstevel@tonic-gate t = 0;
13647c478bd9Sstevel@tonic-gate p = pcf;
136506fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
13667c478bd9Sstevel@tonic-gate t += p->pcf_count;
13677c478bd9Sstevel@tonic-gate p++;
13687c478bd9Sstevel@tonic-gate }
13697c478bd9Sstevel@tonic-gate freemem = t;
13707c478bd9Sstevel@tonic-gate
13717c478bd9Sstevel@tonic-gate /*
13727c478bd9Sstevel@tonic-gate * Don't worry about grabbing mutex. It's not that
13737c478bd9Sstevel@tonic-gate * critical if we miss a tick or two. This is
13747c478bd9Sstevel@tonic-gate * where we wakeup possible delayers in
13757c478bd9Sstevel@tonic-gate * page_create_get_something().
13767c478bd9Sstevel@tonic-gate */
13777c478bd9Sstevel@tonic-gate wakeup_pcgs();
13787c478bd9Sstevel@tonic-gate }
13797c478bd9Sstevel@tonic-gate
13807c478bd9Sstevel@tonic-gate ulong_t
13817c478bd9Sstevel@tonic-gate get_freemem()
13827c478bd9Sstevel@tonic-gate {
13837c478bd9Sstevel@tonic-gate struct pcf *p;
13847c478bd9Sstevel@tonic-gate ulong_t t;
13857c478bd9Sstevel@tonic-gate uint_t i;
13867c478bd9Sstevel@tonic-gate
13877c478bd9Sstevel@tonic-gate t = 0;
13887c478bd9Sstevel@tonic-gate p = pcf;
138906fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
13907c478bd9Sstevel@tonic-gate t += p->pcf_count;
13917c478bd9Sstevel@tonic-gate p++;
13927c478bd9Sstevel@tonic-gate }
13937c478bd9Sstevel@tonic-gate /*
13947c478bd9Sstevel@tonic-gate * We just calculated it, might as well set it.
13957c478bd9Sstevel@tonic-gate */
13967c478bd9Sstevel@tonic-gate freemem = t;
13977c478bd9Sstevel@tonic-gate return (t);
13987c478bd9Sstevel@tonic-gate }
13997c478bd9Sstevel@tonic-gate
14007c478bd9Sstevel@tonic-gate /*
14017c478bd9Sstevel@tonic-gate * Acquire all of the page cache & free (pcf) locks.
14027c478bd9Sstevel@tonic-gate */
14037c478bd9Sstevel@tonic-gate void
14047c478bd9Sstevel@tonic-gate pcf_acquire_all()
14057c478bd9Sstevel@tonic-gate {
14067c478bd9Sstevel@tonic-gate struct pcf *p;
14077c478bd9Sstevel@tonic-gate uint_t i;
14087c478bd9Sstevel@tonic-gate
14097c478bd9Sstevel@tonic-gate p = pcf;
141006fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
14117c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
14127c478bd9Sstevel@tonic-gate p++;
14137c478bd9Sstevel@tonic-gate }
14147c478bd9Sstevel@tonic-gate }
14157c478bd9Sstevel@tonic-gate
14167c478bd9Sstevel@tonic-gate /*
14177c478bd9Sstevel@tonic-gate * Release all the pcf_locks.
14187c478bd9Sstevel@tonic-gate */
14197c478bd9Sstevel@tonic-gate void
14207c478bd9Sstevel@tonic-gate pcf_release_all()
14217c478bd9Sstevel@tonic-gate {
14227c478bd9Sstevel@tonic-gate struct pcf *p;
14237c478bd9Sstevel@tonic-gate uint_t i;
14247c478bd9Sstevel@tonic-gate
14257c478bd9Sstevel@tonic-gate p = pcf;
142606fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
14277c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
14287c478bd9Sstevel@tonic-gate p++;
14297c478bd9Sstevel@tonic-gate }
14307c478bd9Sstevel@tonic-gate }
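
/*
 * Illustrative pairing of the two routines above (a sketch, not a real
 * caller): hold every pcf bucket lock while taking a consistent snapshot
 * of the free page counts, then drop them all:
 *
 *	total = 0;
 *	pcf_acquire_all();
 *	for (i = 0; i < pcf_fanout; i++)
 *		total += pcf[i].pcf_count;
 *	pcf_release_all();
 */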
14317c478bd9Sstevel@tonic-gate
14327c478bd9Sstevel@tonic-gate /*
14337c478bd9Sstevel@tonic-gate * Inform the VM system that we need some pages freed up.
14347c478bd9Sstevel@tonic-gate * Calls must be symmetric, e.g.:
14357c478bd9Sstevel@tonic-gate *
14367c478bd9Sstevel@tonic-gate * page_needfree(100);
14377c478bd9Sstevel@tonic-gate * wait a bit;
14387c478bd9Sstevel@tonic-gate * page_needfree(-100);
14397c478bd9Sstevel@tonic-gate */
14407c478bd9Sstevel@tonic-gate void
14417c478bd9Sstevel@tonic-gate page_needfree(spgcnt_t npages)
14427c478bd9Sstevel@tonic-gate {
14437c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
14447c478bd9Sstevel@tonic-gate needfree += npages;
14457c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
14467c478bd9Sstevel@tonic-gate }
14477c478bd9Sstevel@tonic-gate
14487c478bd9Sstevel@tonic-gate /*
14497c478bd9Sstevel@tonic-gate * Throttle for page_create(): try to prevent freemem from dropping
14507c478bd9Sstevel@tonic-gate * below throttlefree. We can't provide a 100% guarantee because
14517c478bd9Sstevel@tonic-gate * KM_NOSLEEP allocations, page_reclaim(), and various other things
14527c478bd9Sstevel@tonic-gate * nibble away at the freelist. However, we can block all PG_WAIT
14537c478bd9Sstevel@tonic-gate * allocations until memory becomes available. The motivation is
14547c478bd9Sstevel@tonic-gate * that several things can fall apart when there's no free memory:
14557c478bd9Sstevel@tonic-gate *
14567c478bd9Sstevel@tonic-gate * (1) If pageout() needs memory to push a page, the system deadlocks.
14577c478bd9Sstevel@tonic-gate *
14587c478bd9Sstevel@tonic-gate * (2) By (broken) specification, timeout(9F) can neither fail nor
14597c478bd9Sstevel@tonic-gate * block, so it has no choice but to panic the system if it
14607c478bd9Sstevel@tonic-gate * cannot allocate a callout structure.
14617c478bd9Sstevel@tonic-gate *
14627c478bd9Sstevel@tonic-gate * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
14637c478bd9Sstevel@tonic-gate * it panics if it cannot allocate a callback structure.
14647c478bd9Sstevel@tonic-gate *
14657c478bd9Sstevel@tonic-gate * (4) Untold numbers of third-party drivers have not yet been hardened
14667c478bd9Sstevel@tonic-gate * against KM_NOSLEEP and/or allocb() failures; they simply assume
14677c478bd9Sstevel@tonic-gate * success and panic the system with a data fault on failure.
14687c478bd9Sstevel@tonic-gate * (The long-term solution to this particular problem is to ship
14697c478bd9Sstevel@tonic-gate * hostile fault-injecting DEBUG kernels with the DDK.)
14707c478bd9Sstevel@tonic-gate *
14717c478bd9Sstevel@tonic-gate * It is theoretically impossible to guarantee success of non-blocking
14727c478bd9Sstevel@tonic-gate * allocations, but in practice, this throttle is very hard to break.
14737c478bd9Sstevel@tonic-gate */
14747c478bd9Sstevel@tonic-gate static int
14757c478bd9Sstevel@tonic-gate page_create_throttle(pgcnt_t npages, int flags)
14767c478bd9Sstevel@tonic-gate {
14777c478bd9Sstevel@tonic-gate ulong_t fm;
14787c478bd9Sstevel@tonic-gate uint_t i;
14797c478bd9Sstevel@tonic-gate pgcnt_t tf; /* effective value of throttlefree */
14807c478bd9Sstevel@tonic-gate
1481*338664dfSAndy Fiddaman atomic_inc_64(&n_throttle);
1482*338664dfSAndy Fiddaman
148323a80de1SStan Studzinski /*
148423a80de1SStan Studzinski * Normal priority allocations.
148523a80de1SStan Studzinski */
148623a80de1SStan Studzinski if ((flags & (PG_WAIT | PG_NORMALPRI)) == PG_NORMALPRI) {
148723a80de1SStan Studzinski ASSERT(!(flags & (PG_PANIC | PG_PUSHPAGE)));
148823a80de1SStan Studzinski return (freemem >= npages + throttlefree);
148923a80de1SStan Studzinski }
149023a80de1SStan Studzinski
14917c478bd9Sstevel@tonic-gate /*
14927c478bd9Sstevel@tonic-gate * Never deny pages when:
14937c478bd9Sstevel@tonic-gate * - it's a thread that cannot block [NOMEMWAIT()]
14947c478bd9Sstevel@tonic-gate * - the allocation cannot block and must not fail
14957c478bd9Sstevel@tonic-gate * - the allocation cannot block and is pageout dispensated
14967c478bd9Sstevel@tonic-gate */
14977c478bd9Sstevel@tonic-gate if (NOMEMWAIT() ||
14987c478bd9Sstevel@tonic-gate ((flags & (PG_WAIT | PG_PANIC)) == PG_PANIC) ||
14997c478bd9Sstevel@tonic-gate ((flags & (PG_WAIT | PG_PUSHPAGE)) == PG_PUSHPAGE))
15007c478bd9Sstevel@tonic-gate return (1);
15017c478bd9Sstevel@tonic-gate
15027c478bd9Sstevel@tonic-gate /*
15037c478bd9Sstevel@tonic-gate * If the allocation can't block, we look favorably upon it
15047c478bd9Sstevel@tonic-gate * unless we're below pageout_reserve. In that case we fail
15057c478bd9Sstevel@tonic-gate * the allocation because we want to make sure there are a few
15067c478bd9Sstevel@tonic-gate * pages available for pageout.
15077c478bd9Sstevel@tonic-gate */
15087c478bd9Sstevel@tonic-gate if ((flags & PG_WAIT) == 0)
15097c478bd9Sstevel@tonic-gate return (freemem >= npages + pageout_reserve);
15107c478bd9Sstevel@tonic-gate
15117c478bd9Sstevel@tonic-gate /* Calculate the effective throttlefree value */
15127c478bd9Sstevel@tonic-gate tf = throttlefree -
15137c478bd9Sstevel@tonic-gate ((flags & PG_PUSHPAGE) ? pageout_reserve : 0);
15147c478bd9Sstevel@tonic-gate
1515*338664dfSAndy Fiddaman WAKE_PAGEOUT_SCANNER(page__create__throttle);
15167c478bd9Sstevel@tonic-gate
151778b03d3aSkchow for (;;) {
151878b03d3aSkchow fm = 0;
15197c478bd9Sstevel@tonic-gate pcf_acquire_all();
15207c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
152106fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
15227c478bd9Sstevel@tonic-gate fm += pcf[i].pcf_count;
15237c478bd9Sstevel@tonic-gate pcf[i].pcf_wait++;
15247c478bd9Sstevel@tonic-gate mutex_exit(&pcf[i].pcf_lock);
15257c478bd9Sstevel@tonic-gate }
15267c478bd9Sstevel@tonic-gate freemem = fm;
152778b03d3aSkchow if (freemem >= npages + tf) {
152878b03d3aSkchow mutex_exit(&new_freemem_lock);
152978b03d3aSkchow break;
153078b03d3aSkchow }
15317c478bd9Sstevel@tonic-gate needfree += npages;
15327c478bd9Sstevel@tonic-gate freemem_wait++;
15337c478bd9Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock);
15347c478bd9Sstevel@tonic-gate freemem_wait--;
15357c478bd9Sstevel@tonic-gate needfree -= npages;
15367c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
15377c478bd9Sstevel@tonic-gate }
15387c478bd9Sstevel@tonic-gate return (1);
15397c478bd9Sstevel@tonic-gate }
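
/*
 * Worked example of the throttle arithmetic above (illustrative numbers
 * only): with throttlefree = 1000 and pageout_reserve = 250, a PG_WAIT
 * request for npages = 16 blocks until freemem >= 1016, while a
 * PG_WAIT | PG_PUSHPAGE request only needs freemem >= 766, since
 * tf = throttlefree - pageout_reserve for pushpage allocations.
 */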
15407c478bd9Sstevel@tonic-gate
15417c478bd9Sstevel@tonic-gate /*
1542da6c28aaSamw * page_create_wait() is called to either coalesce pages from the
15437c478bd9Sstevel@tonic-gate * different pcf buckets or to wait because there simply are not
15447c478bd9Sstevel@tonic-gate * enough pages to satisfy the caller's request.
15457c478bd9Sstevel@tonic-gate *
15467c478bd9Sstevel@tonic-gate * Sadly, this is called from platform/vm/vm_machdep.c
15477c478bd9Sstevel@tonic-gate */
15487c478bd9Sstevel@tonic-gate int
154906fb6a36Sdv page_create_wait(pgcnt_t npages, uint_t flags)
15507c478bd9Sstevel@tonic-gate {
15517c478bd9Sstevel@tonic-gate pgcnt_t total;
15527c478bd9Sstevel@tonic-gate uint_t i;
15537c478bd9Sstevel@tonic-gate struct pcf *p;
15547c478bd9Sstevel@tonic-gate
15557c478bd9Sstevel@tonic-gate /*
15567c478bd9Sstevel@tonic-gate * Wait until there are enough free pages to satisfy our
15577c478bd9Sstevel@tonic-gate * entire request.
15587c478bd9Sstevel@tonic-gate * We set needfree += npages before prodding pageout, to make sure
15597c478bd9Sstevel@tonic-gate * it does real work when npages > lotsfree > freemem.
15607c478bd9Sstevel@tonic-gate */
15617c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_not_enough);
15627c478bd9Sstevel@tonic-gate
15637c478bd9Sstevel@tonic-gate ASSERT(!kcage_on ? !(flags & PG_NORELOC) : 1);
15647c478bd9Sstevel@tonic-gate checkagain:
1565d94ffb28Sjmcp if ((flags & PG_NORELOC) &&
1566d94ffb28Sjmcp kcage_freemem < kcage_throttlefree + npages)
1567d94ffb28Sjmcp (void) kcage_create_throttle(npages, flags);
15687c478bd9Sstevel@tonic-gate
15697c478bd9Sstevel@tonic-gate if (freemem < npages + throttlefree)
15707c478bd9Sstevel@tonic-gate if (!page_create_throttle(npages, flags))
15717c478bd9Sstevel@tonic-gate return (0);
15727c478bd9Sstevel@tonic-gate
157306fb6a36Sdv if (pcf_decrement_bucket(npages) ||
157406fb6a36Sdv pcf_decrement_multiple(&total, npages, 0))
157506fb6a36Sdv return (1);
15767c478bd9Sstevel@tonic-gate
15777c478bd9Sstevel@tonic-gate /*
15787c478bd9Sstevel@tonic-gate * All of the pcf locks are held, there are not enough pages
15797c478bd9Sstevel@tonic-gate * to satisfy the request (npages < total).
15807c478bd9Sstevel@tonic-gate * Be sure to acquire the new_freemem_lock before dropping
15817c478bd9Sstevel@tonic-gate * the pcf locks. This prevents dropping wakeups in page_free().
15827c478bd9Sstevel@tonic-gate * The order is always pcf_lock then new_freemem_lock.
15837c478bd9Sstevel@tonic-gate *
15847c478bd9Sstevel@tonic-gate * Since we hold all the pcf locks, it is a good time to set freemem.
15857c478bd9Sstevel@tonic-gate *
15867c478bd9Sstevel@tonic-gate * If the caller does not want to wait, return now.
15877c478bd9Sstevel@tonic-gate * Else turn the pageout daemon loose to find something
15887c478bd9Sstevel@tonic-gate * and wait till it does.
15897c478bd9Sstevel@tonic-gate *
15907c478bd9Sstevel@tonic-gate */
15917c478bd9Sstevel@tonic-gate freemem = total;
15927c478bd9Sstevel@tonic-gate
15937c478bd9Sstevel@tonic-gate if ((flags & PG_WAIT) == 0) {
15947c478bd9Sstevel@tonic-gate pcf_release_all();
15957c478bd9Sstevel@tonic-gate
15967c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_NOMEM,
15977c478bd9Sstevel@tonic-gate "page_create_nomem:npages %ld freemem %ld", npages, freemem);
15987c478bd9Sstevel@tonic-gate return (0);
15997c478bd9Sstevel@tonic-gate }
16007c478bd9Sstevel@tonic-gate
16017c478bd9Sstevel@tonic-gate ASSERT(proc_pageout != NULL);
1602*338664dfSAndy Fiddaman WAKE_PAGEOUT_SCANNER(page__create__wait);
16037c478bd9Sstevel@tonic-gate
16047c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_START,
16057c478bd9Sstevel@tonic-gate "page_create_sleep_start: freemem %ld needfree %ld",
16067c478bd9Sstevel@tonic-gate freemem, needfree);
16077c478bd9Sstevel@tonic-gate
16087c478bd9Sstevel@tonic-gate /*
16097c478bd9Sstevel@tonic-gate * We are going to wait.
16107c478bd9Sstevel@tonic-gate * We currently hold all of the pcf_locks,
16117c478bd9Sstevel@tonic-gate * get the new_freemem_lock (it protects freemem_wait),
16127c478bd9Sstevel@tonic-gate * before dropping the pcf_locks.
16137c478bd9Sstevel@tonic-gate */
16147c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
16157c478bd9Sstevel@tonic-gate
16167c478bd9Sstevel@tonic-gate p = pcf;
161706fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
16187c478bd9Sstevel@tonic-gate p->pcf_wait++;
16197c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
16207c478bd9Sstevel@tonic-gate p++;
16217c478bd9Sstevel@tonic-gate }
16227c478bd9Sstevel@tonic-gate
16237c478bd9Sstevel@tonic-gate needfree += npages;
16247c478bd9Sstevel@tonic-gate freemem_wait++;
16257c478bd9Sstevel@tonic-gate
16267c478bd9Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock);
16277c478bd9Sstevel@tonic-gate
16287c478bd9Sstevel@tonic-gate freemem_wait--;
16297c478bd9Sstevel@tonic-gate needfree -= npages;
16307c478bd9Sstevel@tonic-gate
16317c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
16327c478bd9Sstevel@tonic-gate
16337c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SLEEP_END,
16347c478bd9Sstevel@tonic-gate "page_create_sleep_end: freemem %ld needfree %ld",
16357c478bd9Sstevel@tonic-gate freemem, needfree);
16367c478bd9Sstevel@tonic-gate
16377c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_not_enough_again);
16387c478bd9Sstevel@tonic-gate goto checkagain;
16397c478bd9Sstevel@tonic-gate }
16407c478bd9Sstevel@tonic-gate /*
16417c478bd9Sstevel@tonic-gate * A routine to do the opposite of page_create_wait().
16427c478bd9Sstevel@tonic-gate */
16437c478bd9Sstevel@tonic-gate void
16447c478bd9Sstevel@tonic-gate page_create_putback(spgcnt_t npages)
16457c478bd9Sstevel@tonic-gate {
16467c478bd9Sstevel@tonic-gate struct pcf *p;
16477c478bd9Sstevel@tonic-gate pgcnt_t lump;
16487c478bd9Sstevel@tonic-gate uint_t *which;
16497c478bd9Sstevel@tonic-gate
16507c478bd9Sstevel@tonic-gate /*
16517c478bd9Sstevel@tonic-gate * When a contiguous lump is broken up, we have to
16527c478bd9Sstevel@tonic-gate  * deal with lots of pages (min 64) so let's spread
16537c478bd9Sstevel@tonic-gate * the wealth around.
16547c478bd9Sstevel@tonic-gate */
165506fb6a36Sdv lump = roundup(npages, pcf_fanout) / pcf_fanout;
16567c478bd9Sstevel@tonic-gate freemem += npages;
16577c478bd9Sstevel@tonic-gate
165806fb6a36Sdv for (p = pcf; (npages > 0) && (p < &pcf[pcf_fanout]); p++) {
16597c478bd9Sstevel@tonic-gate which = &p->pcf_count;
16607c478bd9Sstevel@tonic-gate
16617c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
16627c478bd9Sstevel@tonic-gate
16637c478bd9Sstevel@tonic-gate if (p->pcf_block) {
16647c478bd9Sstevel@tonic-gate which = &p->pcf_reserve;
16657c478bd9Sstevel@tonic-gate }
16667c478bd9Sstevel@tonic-gate
16677c478bd9Sstevel@tonic-gate if (lump < npages) {
16687c478bd9Sstevel@tonic-gate *which += (uint_t)lump;
16697c478bd9Sstevel@tonic-gate npages -= lump;
16707c478bd9Sstevel@tonic-gate } else {
16717c478bd9Sstevel@tonic-gate *which += (uint_t)npages;
16727c478bd9Sstevel@tonic-gate npages = 0;
16737c478bd9Sstevel@tonic-gate }
16747c478bd9Sstevel@tonic-gate
16757c478bd9Sstevel@tonic-gate if (p->pcf_wait) {
16767c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
16777c478bd9Sstevel@tonic-gate /*
16787c478bd9Sstevel@tonic-gate * Check to see if some other thread
16797c478bd9Sstevel@tonic-gate * is actually waiting. Another bucket
16807c478bd9Sstevel@tonic-gate * may have woken it up by now. If there
16817c478bd9Sstevel@tonic-gate * are no waiters, then set our pcf_wait
16827c478bd9Sstevel@tonic-gate * count to zero to avoid coming in here
16837c478bd9Sstevel@tonic-gate * next time.
16847c478bd9Sstevel@tonic-gate */
16857c478bd9Sstevel@tonic-gate if (freemem_wait) {
16867c478bd9Sstevel@tonic-gate if (npages > 1) {
16877c478bd9Sstevel@tonic-gate cv_broadcast(&freemem_cv);
16887c478bd9Sstevel@tonic-gate } else {
16897c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv);
16907c478bd9Sstevel@tonic-gate }
16917c478bd9Sstevel@tonic-gate p->pcf_wait--;
16927c478bd9Sstevel@tonic-gate } else {
16937c478bd9Sstevel@tonic-gate p->pcf_wait = 0;
16947c478bd9Sstevel@tonic-gate }
16957c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
16967c478bd9Sstevel@tonic-gate }
16977c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
16987c478bd9Sstevel@tonic-gate }
16997c478bd9Sstevel@tonic-gate ASSERT(npages == 0);
17007c478bd9Sstevel@tonic-gate }
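
/*
 * Sketch of the usual pairing of page_create_wait() and
 * page_create_putback() (illustrative only; nfailed is a hypothetical
 * count of pages that were reserved but not actually allocated):
 *
 *	if (!page_create_wait(npgs, PG_WAIT))
 *		return (ENOMEM);
 *	... allocate from the freelists ...
 *	if (nfailed != 0)
 *		page_create_putback(nfailed);
 */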
17017c478bd9Sstevel@tonic-gate
17027c478bd9Sstevel@tonic-gate /*
17037c478bd9Sstevel@tonic-gate * A helper routine for page_create_get_something.
17047c478bd9Sstevel@tonic-gate  * The indenting got too deep down there.
17057c478bd9Sstevel@tonic-gate * Unblock the pcf counters. Any pages freed after
17067c478bd9Sstevel@tonic-gate * pcf_block got set are moved to pcf_count and
17077c478bd9Sstevel@tonic-gate * wakeups (cv_broadcast() or cv_signal()) are done as needed.
17087c478bd9Sstevel@tonic-gate */
17097c478bd9Sstevel@tonic-gate static void
17107c478bd9Sstevel@tonic-gate pcgs_unblock(void)
17117c478bd9Sstevel@tonic-gate {
17127c478bd9Sstevel@tonic-gate int i;
17137c478bd9Sstevel@tonic-gate struct pcf *p;
17147c478bd9Sstevel@tonic-gate
17157c478bd9Sstevel@tonic-gate /* Update freemem while we're here. */
17167c478bd9Sstevel@tonic-gate freemem = 0;
17177c478bd9Sstevel@tonic-gate p = pcf;
171806fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
17197c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
17207c478bd9Sstevel@tonic-gate ASSERT(p->pcf_count == 0);
17217c478bd9Sstevel@tonic-gate p->pcf_count = p->pcf_reserve;
17227c478bd9Sstevel@tonic-gate p->pcf_block = 0;
17237c478bd9Sstevel@tonic-gate freemem += p->pcf_count;
17247c478bd9Sstevel@tonic-gate if (p->pcf_wait) {
17257c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
17267c478bd9Sstevel@tonic-gate if (freemem_wait) {
17277c478bd9Sstevel@tonic-gate if (p->pcf_reserve > 1) {
17287c478bd9Sstevel@tonic-gate cv_broadcast(&freemem_cv);
17297c478bd9Sstevel@tonic-gate p->pcf_wait = 0;
17307c478bd9Sstevel@tonic-gate } else {
17317c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv);
17327c478bd9Sstevel@tonic-gate p->pcf_wait--;
17337c478bd9Sstevel@tonic-gate }
17347c478bd9Sstevel@tonic-gate } else {
17357c478bd9Sstevel@tonic-gate p->pcf_wait = 0;
17367c478bd9Sstevel@tonic-gate }
17377c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
17387c478bd9Sstevel@tonic-gate }
17397c478bd9Sstevel@tonic-gate p->pcf_reserve = 0;
17407c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
17417c478bd9Sstevel@tonic-gate p++;
17427c478bd9Sstevel@tonic-gate }
17437c478bd9Sstevel@tonic-gate }
17447c478bd9Sstevel@tonic-gate
17457c478bd9Sstevel@tonic-gate /*
17467c478bd9Sstevel@tonic-gate * Called from page_create_va() when both the cache and free lists
17477c478bd9Sstevel@tonic-gate * have been checked once.
17487c478bd9Sstevel@tonic-gate *
17497c478bd9Sstevel@tonic-gate * Either returns a page or panics since the accounting was done
17507c478bd9Sstevel@tonic-gate * way before we got here.
17517c478bd9Sstevel@tonic-gate *
17527c478bd9Sstevel@tonic-gate * We don't come here often, so leave the accounting on permanently.
17537c478bd9Sstevel@tonic-gate */
17547c478bd9Sstevel@tonic-gate
17557c478bd9Sstevel@tonic-gate #define MAX_PCGS 100
17567c478bd9Sstevel@tonic-gate
17577c478bd9Sstevel@tonic-gate #ifdef DEBUG
17587c478bd9Sstevel@tonic-gate #define PCGS_TRIES 100
17597c478bd9Sstevel@tonic-gate #else /* DEBUG */
17607c478bd9Sstevel@tonic-gate #define PCGS_TRIES 10
17617c478bd9Sstevel@tonic-gate #endif /* DEBUG */
17627c478bd9Sstevel@tonic-gate
17637c478bd9Sstevel@tonic-gate #ifdef VM_STATS
17647c478bd9Sstevel@tonic-gate uint_t pcgs_counts[PCGS_TRIES];
17657c478bd9Sstevel@tonic-gate uint_t pcgs_too_many;
17667c478bd9Sstevel@tonic-gate uint_t pcgs_entered;
17677c478bd9Sstevel@tonic-gate uint_t pcgs_entered_noreloc;
17687c478bd9Sstevel@tonic-gate uint_t pcgs_locked;
17697c478bd9Sstevel@tonic-gate uint_t pcgs_cagelocked;
17707c478bd9Sstevel@tonic-gate #endif /* VM_STATS */
17717c478bd9Sstevel@tonic-gate
17727c478bd9Sstevel@tonic-gate static page_t *
17737c478bd9Sstevel@tonic-gate page_create_get_something(vnode_t *vp, u_offset_t off, struct seg *seg,
17747c478bd9Sstevel@tonic-gate caddr_t vaddr, uint_t flags)
17757c478bd9Sstevel@tonic-gate {
17767c478bd9Sstevel@tonic-gate uint_t count;
17777c478bd9Sstevel@tonic-gate page_t *pp;
17787c478bd9Sstevel@tonic-gate uint_t locked, i;
17797c478bd9Sstevel@tonic-gate struct pcf *p;
17807c478bd9Sstevel@tonic-gate lgrp_t *lgrp;
17817c478bd9Sstevel@tonic-gate int cagelocked = 0;
17827c478bd9Sstevel@tonic-gate
17837c478bd9Sstevel@tonic-gate VM_STAT_ADD(pcgs_entered);
17847c478bd9Sstevel@tonic-gate
17857c478bd9Sstevel@tonic-gate /*
17867c478bd9Sstevel@tonic-gate * Tap any reserve freelists: if we fail now, we'll die
17877c478bd9Sstevel@tonic-gate * since the page(s) we're looking for have already been
17887c478bd9Sstevel@tonic-gate * accounted for.
17897c478bd9Sstevel@tonic-gate */
17907c478bd9Sstevel@tonic-gate flags |= PG_PANIC;
17917c478bd9Sstevel@tonic-gate
1792d94ffb28Sjmcp if ((flags & PG_NORELOC) != 0) {
17937c478bd9Sstevel@tonic-gate VM_STAT_ADD(pcgs_entered_noreloc);
17947c478bd9Sstevel@tonic-gate /*
17957c478bd9Sstevel@tonic-gate * Requests for free pages from critical threads
17967c478bd9Sstevel@tonic-gate * such as pageout still won't throttle here, but
17977c478bd9Sstevel@tonic-gate * we must try again, to give the cageout thread
17987c478bd9Sstevel@tonic-gate * another chance to catch up. Since we already
17997c478bd9Sstevel@tonic-gate * accounted for the pages, we had better get them
18007c478bd9Sstevel@tonic-gate * this time.
18017c478bd9Sstevel@tonic-gate *
18027c478bd9Sstevel@tonic-gate * N.B. All non-critical threads acquire the pcgs_cagelock
18037c478bd9Sstevel@tonic-gate * to serialize access to the freelists. This implements a
18047c478bd9Sstevel@tonic-gate  * turnstile-type synchronization to avoid starvation of
18057c478bd9Sstevel@tonic-gate * critical requests for PG_NORELOC memory by non-critical
18067c478bd9Sstevel@tonic-gate * threads: all non-critical threads must acquire a 'ticket'
18077c478bd9Sstevel@tonic-gate * before passing through, which entails making sure
18087c478bd9Sstevel@tonic-gate * kcage_freemem won't fall below minfree prior to grabbing
18097c478bd9Sstevel@tonic-gate * pages from the freelists.
18107c478bd9Sstevel@tonic-gate */
1811d94ffb28Sjmcp if (kcage_create_throttle(1, flags) == KCT_NONCRIT) {
18127c478bd9Sstevel@tonic-gate mutex_enter(&pcgs_cagelock);
18137c478bd9Sstevel@tonic-gate cagelocked = 1;
18147c478bd9Sstevel@tonic-gate VM_STAT_ADD(pcgs_cagelocked);
18157c478bd9Sstevel@tonic-gate }
18167c478bd9Sstevel@tonic-gate }
18177c478bd9Sstevel@tonic-gate
18187c478bd9Sstevel@tonic-gate /*
18197c478bd9Sstevel@tonic-gate * Time to get serious.
18207c478bd9Sstevel@tonic-gate * We failed to get a `correctly colored' page from both the
18217c478bd9Sstevel@tonic-gate * free and cache lists.
18227c478bd9Sstevel@tonic-gate  * We escalate in stages.
18237c478bd9Sstevel@tonic-gate *
18247c478bd9Sstevel@tonic-gate  * First try both lists without worrying about color.
18257c478bd9Sstevel@tonic-gate *
18267c478bd9Sstevel@tonic-gate * Then, grab all page accounting locks (ie. pcf[]) and
18277c478bd9Sstevel@tonic-gate * steal any pages that they have and set the pcf_block flag to
18287c478bd9Sstevel@tonic-gate * stop deletions from the lists. This will help because
18297c478bd9Sstevel@tonic-gate * a page can get added to the free list while we are looking
18307c478bd9Sstevel@tonic-gate * at the cache list, then another page could be added to the cache
18317c478bd9Sstevel@tonic-gate * list allowing the page on the free list to be removed as we
18327c478bd9Sstevel@tonic-gate * move from looking at the cache list to the free list. This
18337c478bd9Sstevel@tonic-gate * could happen over and over. We would never find the page
18347c478bd9Sstevel@tonic-gate * we have accounted for.
18357c478bd9Sstevel@tonic-gate *
18367c478bd9Sstevel@tonic-gate * Noreloc pages are a subset of the global (relocatable) page pool.
18377c478bd9Sstevel@tonic-gate * They are not tracked separately in the pcf bins, so it is
18387c478bd9Sstevel@tonic-gate * impossible to know when doing pcf accounting if the available
18397c478bd9Sstevel@tonic-gate * page(s) are noreloc pages or not. When looking for a noreloc page
18407c478bd9Sstevel@tonic-gate * it is quite easy to end up here even if the global (relocatable)
18417c478bd9Sstevel@tonic-gate * page pool has plenty of free pages but the noreloc pool is empty.
18427c478bd9Sstevel@tonic-gate *
18437c478bd9Sstevel@tonic-gate * When the noreloc pool is empty (or low), additional noreloc pages
18447c478bd9Sstevel@tonic-gate * are created by converting pages from the global page pool. This
18457c478bd9Sstevel@tonic-gate * process will stall during pcf accounting if the pcf bins are
18467c478bd9Sstevel@tonic-gate * already locked. Such is the case when a noreloc allocation is
18477c478bd9Sstevel@tonic-gate * looping here in page_create_get_something waiting for more noreloc
18487c478bd9Sstevel@tonic-gate * pages to appear.
18497c478bd9Sstevel@tonic-gate *
18507c478bd9Sstevel@tonic-gate * Short of adding a new field to the pcf bins to accurately track
18517c478bd9Sstevel@tonic-gate * the number of free noreloc pages, we instead do not grab the
18527c478bd9Sstevel@tonic-gate * pcgs_lock, do not set the pcf blocks and do not timeout when
18537c478bd9Sstevel@tonic-gate * allocating a noreloc page. This allows noreloc allocations to
18547c478bd9Sstevel@tonic-gate * loop without blocking global page pool allocations.
18557c478bd9Sstevel@tonic-gate *
18567c478bd9Sstevel@tonic-gate * NOTE: the behaviour of page_create_get_something has not changed
18577c478bd9Sstevel@tonic-gate * for the case of global page pool allocations.
18587c478bd9Sstevel@tonic-gate */
18597c478bd9Sstevel@tonic-gate
18607c478bd9Sstevel@tonic-gate flags &= ~PG_MATCH_COLOR;
18617c478bd9Sstevel@tonic-gate locked = 0;
186286ef0a63SRichard Lowe #if defined(__x86)
1863843e1988Sjohnlev flags = page_create_update_flags_x86(flags);
18647c478bd9Sstevel@tonic-gate #endif
18657c478bd9Sstevel@tonic-gate
18667c478bd9Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
18677c478bd9Sstevel@tonic-gate
1868d94ffb28Sjmcp for (count = 0; kcage_on || count < MAX_PCGS; count++) {
1869d94ffb28Sjmcp pp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
18707c478bd9Sstevel@tonic-gate flags, lgrp);
18717c478bd9Sstevel@tonic-gate if (pp == NULL) {
18727c478bd9Sstevel@tonic-gate pp = page_get_cachelist(vp, off, seg, vaddr,
18736e4dd838Smec flags, lgrp);
18747c478bd9Sstevel@tonic-gate }
18757c478bd9Sstevel@tonic-gate if (pp == NULL) {
18767c478bd9Sstevel@tonic-gate /*
18777c478bd9Sstevel@tonic-gate * Serialize. Don't fight with other pcgs().
18787c478bd9Sstevel@tonic-gate */
1879d94ffb28Sjmcp if (!locked && (!kcage_on || !(flags & PG_NORELOC))) {
18807c478bd9Sstevel@tonic-gate mutex_enter(&pcgs_lock);
18817c478bd9Sstevel@tonic-gate VM_STAT_ADD(pcgs_locked);
18827c478bd9Sstevel@tonic-gate locked = 1;
18837c478bd9Sstevel@tonic-gate p = pcf;
188406fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
18857c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
18867c478bd9Sstevel@tonic-gate ASSERT(p->pcf_block == 0);
18877c478bd9Sstevel@tonic-gate p->pcf_block = 1;
18887c478bd9Sstevel@tonic-gate p->pcf_reserve = p->pcf_count;
18897c478bd9Sstevel@tonic-gate p->pcf_count = 0;
18907c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
18917c478bd9Sstevel@tonic-gate p++;
18927c478bd9Sstevel@tonic-gate }
18937c478bd9Sstevel@tonic-gate freemem = 0;
18947c478bd9Sstevel@tonic-gate }
18957c478bd9Sstevel@tonic-gate
18967c478bd9Sstevel@tonic-gate if (count) {
18977c478bd9Sstevel@tonic-gate /*
18987c478bd9Sstevel@tonic-gate * Since page_free() puts pages on
18997c478bd9Sstevel@tonic-gate * a list then accounts for it, we
19007c478bd9Sstevel@tonic-gate * just have to wait for page_free()
19017c478bd9Sstevel@tonic-gate * to unlock any page it was working
19027c478bd9Sstevel@tonic-gate * with. The page_lock()-page_reclaim()
19037c478bd9Sstevel@tonic-gate * path falls in the same boat.
19047c478bd9Sstevel@tonic-gate *
19057c478bd9Sstevel@tonic-gate * We don't need to check on the
19067c478bd9Sstevel@tonic-gate * PG_WAIT flag, we have already
19077c478bd9Sstevel@tonic-gate * accounted for the page we are
19087c478bd9Sstevel@tonic-gate * looking for in page_create_va().
19097c478bd9Sstevel@tonic-gate *
19107c478bd9Sstevel@tonic-gate * We just wait a moment to let any
19117c478bd9Sstevel@tonic-gate * locked pages on the lists free up,
19127c478bd9Sstevel@tonic-gate * then continue around and try again.
19137c478bd9Sstevel@tonic-gate *
19147c478bd9Sstevel@tonic-gate * Will be awakened by set_freemem().
19157c478bd9Sstevel@tonic-gate */
19167c478bd9Sstevel@tonic-gate mutex_enter(&pcgs_wait_lock);
19177c478bd9Sstevel@tonic-gate cv_wait(&pcgs_cv, &pcgs_wait_lock);
19187c478bd9Sstevel@tonic-gate mutex_exit(&pcgs_wait_lock);
19197c478bd9Sstevel@tonic-gate }
19207c478bd9Sstevel@tonic-gate } else {
19217c478bd9Sstevel@tonic-gate #ifdef VM_STATS
19227c478bd9Sstevel@tonic-gate if (count >= PCGS_TRIES) {
19237c478bd9Sstevel@tonic-gate VM_STAT_ADD(pcgs_too_many);
19247c478bd9Sstevel@tonic-gate } else {
19257c478bd9Sstevel@tonic-gate VM_STAT_ADD(pcgs_counts[count]);
19267c478bd9Sstevel@tonic-gate }
19277c478bd9Sstevel@tonic-gate #endif
19287c478bd9Sstevel@tonic-gate if (locked) {
19297c478bd9Sstevel@tonic-gate pcgs_unblock();
19307c478bd9Sstevel@tonic-gate mutex_exit(&pcgs_lock);
19317c478bd9Sstevel@tonic-gate }
19327c478bd9Sstevel@tonic-gate if (cagelocked)
19337c478bd9Sstevel@tonic-gate mutex_exit(&pcgs_cagelock);
19347c478bd9Sstevel@tonic-gate return (pp);
19357c478bd9Sstevel@tonic-gate }
19367c478bd9Sstevel@tonic-gate }
19377c478bd9Sstevel@tonic-gate /*
19387c478bd9Sstevel@tonic-gate * we go down holding the pcf locks.
19397c478bd9Sstevel@tonic-gate */
19407c478bd9Sstevel@tonic-gate panic("no %spage found %d",
19417c478bd9Sstevel@tonic-gate ((flags & PG_NORELOC) ? "non-reloc " : ""), count);
19427c478bd9Sstevel@tonic-gate /*NOTREACHED*/
19437c478bd9Sstevel@tonic-gate }
19447c478bd9Sstevel@tonic-gate
19457c478bd9Sstevel@tonic-gate /*
19467c478bd9Sstevel@tonic-gate * Create enough pages for "bytes" worth of data starting at
19477c478bd9Sstevel@tonic-gate * "off" in "vp".
19487c478bd9Sstevel@tonic-gate *
19497c478bd9Sstevel@tonic-gate * Where flag must be one of:
19507c478bd9Sstevel@tonic-gate *
19517c478bd9Sstevel@tonic-gate * PG_EXCL: Exclusive create (fail if any page already
19527c478bd9Sstevel@tonic-gate * exists in the page cache) which does not
19537c478bd9Sstevel@tonic-gate * wait for memory to become available.
19547c478bd9Sstevel@tonic-gate *
19557c478bd9Sstevel@tonic-gate * PG_WAIT: Non-exclusive create which can wait for
19567c478bd9Sstevel@tonic-gate * memory to become available.
19577c478bd9Sstevel@tonic-gate *
19587c478bd9Sstevel@tonic-gate * PG_PHYSCONTIG: Allocate physically contiguous pages.
19597c478bd9Sstevel@tonic-gate * (Not Supported)
19607c478bd9Sstevel@tonic-gate *
19617c478bd9Sstevel@tonic-gate * A doubly linked list of pages is returned to the caller. Each page
19627c478bd9Sstevel@tonic-gate * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
19637c478bd9Sstevel@tonic-gate * lock.
19647c478bd9Sstevel@tonic-gate *
19657c478bd9Sstevel@tonic-gate * Unable to change the parameters to page_create() in a minor release,
19667c478bd9Sstevel@tonic-gate * we renamed page_create() to page_create_va(), changed all known calls
19677c478bd9Sstevel@tonic-gate * from page_create() to page_create_va(), and created this wrapper.
19687c478bd9Sstevel@tonic-gate *
19697c478bd9Sstevel@tonic-gate * Upon a major release, we should break compatibility by deleting this
19707c478bd9Sstevel@tonic-gate * wrapper, and replacing all the strings "page_create_va", with "page_create".
19717c478bd9Sstevel@tonic-gate *
19727c478bd9Sstevel@tonic-gate * NOTE: There is a copy of this interface as page_create_io() in
19737c478bd9Sstevel@tonic-gate * i86/vm/vm_machdep.c. Any bugs fixed here should be applied
19747c478bd9Sstevel@tonic-gate * there.
19757c478bd9Sstevel@tonic-gate */
19767c478bd9Sstevel@tonic-gate page_t *
19777c478bd9Sstevel@tonic-gate page_create(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags)
19787c478bd9Sstevel@tonic-gate {
19797c478bd9Sstevel@tonic-gate caddr_t random_vaddr;
19807c478bd9Sstevel@tonic-gate struct seg kseg;
19817c478bd9Sstevel@tonic-gate
19827c478bd9Sstevel@tonic-gate #ifdef DEBUG
19837c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "Using deprecated interface page_create: caller %p",
19847c478bd9Sstevel@tonic-gate (void *)caller());
19857c478bd9Sstevel@tonic-gate #endif
19867c478bd9Sstevel@tonic-gate
19877c478bd9Sstevel@tonic-gate random_vaddr = (caddr_t)(((uintptr_t)vp >> 7) ^
19887c478bd9Sstevel@tonic-gate (uintptr_t)(off >> PAGESHIFT));
19897c478bd9Sstevel@tonic-gate kseg.s_as = &kas;
19907c478bd9Sstevel@tonic-gate
19917c478bd9Sstevel@tonic-gate return (page_create_va(vp, off, bytes, flags, &kseg, random_vaddr));
19927c478bd9Sstevel@tonic-gate }
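
/*
 * Illustrative (hypothetical) call through the preferred interface,
 * page_create_va(), rather than this deprecated wrapper; seg and vaddr
 * are caller-provided placement hints:
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT, seg, vaddr);
 */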
19937c478bd9Sstevel@tonic-gate
19947c478bd9Sstevel@tonic-gate #ifdef DEBUG
19957c478bd9Sstevel@tonic-gate uint32_t pg_alloc_pgs_mtbf = 0;
19967c478bd9Sstevel@tonic-gate #endif
19977c478bd9Sstevel@tonic-gate
19987c478bd9Sstevel@tonic-gate /*
19997c478bd9Sstevel@tonic-gate * Used for large page support. It will attempt to allocate
20007c478bd9Sstevel@tonic-gate * a large page(s) off the freelist.
20017c478bd9Sstevel@tonic-gate *
20027c478bd9Sstevel@tonic-gate  * Returns non-zero on failure.
20037c478bd9Sstevel@tonic-gate */
20047c478bd9Sstevel@tonic-gate int
2005e44bd21cSsusans page_alloc_pages(struct vnode *vp, struct seg *seg, caddr_t addr,
20062cb27123Saguzovsk page_t **basepp, page_t *ppa[], uint_t szc, int anypgsz, int pgflags)
20077c478bd9Sstevel@tonic-gate {
20087c478bd9Sstevel@tonic-gate pgcnt_t npgs, curnpgs, totpgs;
20097c478bd9Sstevel@tonic-gate size_t pgsz;
20107c478bd9Sstevel@tonic-gate page_t *pplist = NULL, *pp;
20117c478bd9Sstevel@tonic-gate int err = 0;
20127c478bd9Sstevel@tonic-gate lgrp_t *lgrp;
20137c478bd9Sstevel@tonic-gate
20147c478bd9Sstevel@tonic-gate ASSERT(szc != 0 && szc <= (page_num_pagesizes() - 1));
20152cb27123Saguzovsk ASSERT(pgflags == 0 || pgflags == PG_LOCAL);
20167c478bd9Sstevel@tonic-gate
20172be2af34Smec /*
20182be2af34Smec * Check if system heavily prefers local large pages over remote
20192be2af34Smec * on systems with multiple lgroups.
20202be2af34Smec */
20212be2af34Smec if (lpg_alloc_prefer == LPAP_LOCAL && nlgrps > 1) {
20222be2af34Smec pgflags = PG_LOCAL;
20232be2af34Smec }
20242be2af34Smec
20257c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[0]);
20267c478bd9Sstevel@tonic-gate
20277c478bd9Sstevel@tonic-gate #ifdef DEBUG
20287c478bd9Sstevel@tonic-gate if (pg_alloc_pgs_mtbf && !(gethrtime() % pg_alloc_pgs_mtbf)) {
20297c478bd9Sstevel@tonic-gate return (ENOMEM);
20307c478bd9Sstevel@tonic-gate }
20317c478bd9Sstevel@tonic-gate #endif
20327c478bd9Sstevel@tonic-gate
20337c478bd9Sstevel@tonic-gate /*
20347c478bd9Sstevel@tonic-gate  * Exactly one of basepp and ppa must be NULL;
20357c478bd9Sstevel@tonic-gate  * the other must be non-NULL.
20367c478bd9Sstevel@tonic-gate */
20377c478bd9Sstevel@tonic-gate ASSERT(basepp != NULL || ppa != NULL);
20387c478bd9Sstevel@tonic-gate ASSERT(basepp == NULL || ppa == NULL);
20397c478bd9Sstevel@tonic-gate
204086ef0a63SRichard Lowe #if defined(__x86)
204178b03d3aSkchow while (page_chk_freelist(szc) == 0) {
204278b03d3aSkchow VM_STAT_ADD(alloc_pages[8]);
204378b03d3aSkchow if (anypgsz == 0 || --szc == 0)
204478b03d3aSkchow return (ENOMEM);
204578b03d3aSkchow }
204678b03d3aSkchow #endif
204778b03d3aSkchow
204878b03d3aSkchow pgsz = page_get_pagesize(szc);
204978b03d3aSkchow totpgs = curnpgs = npgs = pgsz >> PAGESHIFT;
205078b03d3aSkchow
205178b03d3aSkchow ASSERT(((uintptr_t)addr & (pgsz - 1)) == 0);
205278b03d3aSkchow
20537c478bd9Sstevel@tonic-gate (void) page_create_wait(npgs, PG_WAIT);
20547c478bd9Sstevel@tonic-gate
20557c478bd9Sstevel@tonic-gate while (npgs && szc) {
20567c478bd9Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, addr, pgsz);
20572cb27123Saguzovsk if (pgflags == PG_LOCAL) {
2058d94ffb28Sjmcp pp = page_get_freelist(vp, 0, seg, addr, pgsz,
20592cb27123Saguzovsk pgflags, lgrp);
20602cb27123Saguzovsk if (pp == NULL) {
2061d94ffb28Sjmcp pp = page_get_freelist(vp, 0, seg, addr, pgsz,
20622cb27123Saguzovsk 0, lgrp);
20632cb27123Saguzovsk }
20642cb27123Saguzovsk } else {
2065d94ffb28Sjmcp pp = page_get_freelist(vp, 0, seg, addr, pgsz,
20662cb27123Saguzovsk 0, lgrp);
20672cb27123Saguzovsk }
20687c478bd9Sstevel@tonic-gate if (pp != NULL) {
20697c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[1]);
20707c478bd9Sstevel@tonic-gate page_list_concat(&pplist, &pp);
20717c478bd9Sstevel@tonic-gate ASSERT(npgs >= curnpgs);
20727c478bd9Sstevel@tonic-gate npgs -= curnpgs;
20737c478bd9Sstevel@tonic-gate } else if (anypgsz) {
20747c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[2]);
20757c478bd9Sstevel@tonic-gate szc--;
20767c478bd9Sstevel@tonic-gate pgsz = page_get_pagesize(szc);
20777c478bd9Sstevel@tonic-gate curnpgs = pgsz >> PAGESHIFT;
20787c478bd9Sstevel@tonic-gate } else {
20797c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[3]);
20807c478bd9Sstevel@tonic-gate ASSERT(npgs == totpgs);
20817c478bd9Sstevel@tonic-gate page_create_putback(npgs);
20827c478bd9Sstevel@tonic-gate return (ENOMEM);
20837c478bd9Sstevel@tonic-gate }
20847c478bd9Sstevel@tonic-gate }
20857c478bd9Sstevel@tonic-gate if (szc == 0) {
20867c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[4]);
20877c478bd9Sstevel@tonic-gate ASSERT(npgs != 0);
20887c478bd9Sstevel@tonic-gate page_create_putback(npgs);
20897c478bd9Sstevel@tonic-gate err = ENOMEM;
20907c478bd9Sstevel@tonic-gate } else if (basepp != NULL) {
20917c478bd9Sstevel@tonic-gate ASSERT(npgs == 0);
20927c478bd9Sstevel@tonic-gate ASSERT(ppa == NULL);
20937c478bd9Sstevel@tonic-gate *basepp = pplist;
20947c478bd9Sstevel@tonic-gate }
20957c478bd9Sstevel@tonic-gate
20967c478bd9Sstevel@tonic-gate npgs = totpgs - npgs;
20977c478bd9Sstevel@tonic-gate pp = pplist;
20987c478bd9Sstevel@tonic-gate
20997c478bd9Sstevel@tonic-gate /*
21007c478bd9Sstevel@tonic-gate * Clear the free and age bits. Also if we were passed in a ppa then
21017c478bd9Sstevel@tonic-gate * fill it in with all the constituent pages from the large page. But
21027c478bd9Sstevel@tonic-gate * if we failed to allocate all the pages just free what we got.
21037c478bd9Sstevel@tonic-gate */
21047c478bd9Sstevel@tonic-gate while (npgs != 0) {
21057c478bd9Sstevel@tonic-gate ASSERT(PP_ISFREE(pp));
21067c478bd9Sstevel@tonic-gate ASSERT(PP_ISAGED(pp));
21077c478bd9Sstevel@tonic-gate if (ppa != NULL || err != 0) {
21087c478bd9Sstevel@tonic-gate if (err == 0) {
21097c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[5]);
21107c478bd9Sstevel@tonic-gate PP_CLRFREE(pp);
21117c478bd9Sstevel@tonic-gate PP_CLRAGED(pp);
21127c478bd9Sstevel@tonic-gate page_sub(&pplist, pp);
21137c478bd9Sstevel@tonic-gate *ppa++ = pp;
21147c478bd9Sstevel@tonic-gate npgs--;
21157c478bd9Sstevel@tonic-gate } else {
21167c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[6]);
21177c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc != 0);
21187c478bd9Sstevel@tonic-gate curnpgs = page_get_pagecnt(pp->p_szc);
21197c478bd9Sstevel@tonic-gate page_list_break(&pp, &pplist, curnpgs);
21207c478bd9Sstevel@tonic-gate page_list_add_pages(pp, 0);
21217c478bd9Sstevel@tonic-gate page_create_putback(curnpgs);
21227c478bd9Sstevel@tonic-gate ASSERT(npgs >= curnpgs);
21237c478bd9Sstevel@tonic-gate npgs -= curnpgs;
21247c478bd9Sstevel@tonic-gate }
21257c478bd9Sstevel@tonic-gate pp = pplist;
21267c478bd9Sstevel@tonic-gate } else {
21277c478bd9Sstevel@tonic-gate VM_STAT_ADD(alloc_pages[7]);
21287c478bd9Sstevel@tonic-gate PP_CLRFREE(pp);
21297c478bd9Sstevel@tonic-gate PP_CLRAGED(pp);
21307c478bd9Sstevel@tonic-gate pp = pp->p_next;
21317c478bd9Sstevel@tonic-gate npgs--;
21327c478bd9Sstevel@tonic-gate }
21337c478bd9Sstevel@tonic-gate }
21347c478bd9Sstevel@tonic-gate return (err);
21357c478bd9Sstevel@tonic-gate }
21367c478bd9Sstevel@tonic-gate
21377c478bd9Sstevel@tonic-gate /*
21387c478bd9Sstevel@tonic-gate * Get a single large page off of the freelists, and set it up for use.
21397c478bd9Sstevel@tonic-gate * Number of bytes requested must be a supported page size.
21407c478bd9Sstevel@tonic-gate *
21417c478bd9Sstevel@tonic-gate * Note that this call may fail even if there is sufficient
21427c478bd9Sstevel@tonic-gate * memory available or PG_WAIT is set, so the caller must
21437c478bd9Sstevel@tonic-gate  * be willing to fall back on page_create_va(), block and retry,
21447c478bd9Sstevel@tonic-gate * or fail the requester.
21457c478bd9Sstevel@tonic-gate */
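/*
 * A minimal caller-side sketch (hypothetical, not taken from this file):
 * try the contiguous large page first with the required PG_EXCL flag and
 * fall back to page_create_va() when the freelist cannot supply one:
 *
 *	pp = page_create_va_large(vp, off, pgsz, PG_EXCL | PG_WAIT,
 *	    seg, vaddr, NULL);
 *	if (pp == NULL)
 *		pp = page_create_va(vp, off, pgsz, PG_EXCL | PG_WAIT,
 *		    seg, vaddr);
 */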
21467c478bd9Sstevel@tonic-gate page_t *
21477c478bd9Sstevel@tonic-gate page_create_va_large(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
21487c478bd9Sstevel@tonic-gate struct seg *seg, caddr_t vaddr, void *arg)
21497c478bd9Sstevel@tonic-gate {
215006fb6a36Sdv pgcnt_t npages;
21517c478bd9Sstevel@tonic-gate page_t *pp;
21527c478bd9Sstevel@tonic-gate page_t *rootpp;
21537c478bd9Sstevel@tonic-gate lgrp_t *lgrp;
21547c478bd9Sstevel@tonic-gate lgrp_id_t *lgrpid = (lgrp_id_t *)arg;
21557c478bd9Sstevel@tonic-gate
21567c478bd9Sstevel@tonic-gate ASSERT(vp != NULL);
21577c478bd9Sstevel@tonic-gate
21587c478bd9Sstevel@tonic-gate ASSERT((flags & ~(PG_EXCL | PG_WAIT |
215923a80de1SStan Studzinski PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
21607c478bd9Sstevel@tonic-gate /* but no others */
21617c478bd9Sstevel@tonic-gate
21627c478bd9Sstevel@tonic-gate ASSERT((flags & PG_EXCL) == PG_EXCL);
21637c478bd9Sstevel@tonic-gate
21647c478bd9Sstevel@tonic-gate npages = btop(bytes);
21657c478bd9Sstevel@tonic-gate
21667c478bd9Sstevel@tonic-gate if (!kcage_on || panicstr) {
21677c478bd9Sstevel@tonic-gate /*
2168d94ffb28Sjmcp * Cage is OFF, or we are single threaded in
2169d94ffb28Sjmcp * panic, so make everything a RELOC request.
21707c478bd9Sstevel@tonic-gate */
21717c478bd9Sstevel@tonic-gate flags &= ~PG_NORELOC;
21727c478bd9Sstevel@tonic-gate }
21737c478bd9Sstevel@tonic-gate
21747c478bd9Sstevel@tonic-gate /*
21757c478bd9Sstevel@tonic-gate * Make sure there's adequate physical memory available.
21767c478bd9Sstevel@tonic-gate * Note: PG_WAIT is ignored here.
21777c478bd9Sstevel@tonic-gate */
21787c478bd9Sstevel@tonic-gate if (freemem <= throttlefree + npages) {
21797c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[1]);
21807c478bd9Sstevel@tonic-gate return (NULL);
21817c478bd9Sstevel@tonic-gate }
21827c478bd9Sstevel@tonic-gate
21837c478bd9Sstevel@tonic-gate /*
2184d94ffb28Sjmcp * If cage is on, dampen draw from cage when available
2185d94ffb28Sjmcp * cage space is low.
21867c478bd9Sstevel@tonic-gate */
2187d94ffb28Sjmcp if ((flags & (PG_NORELOC | PG_WAIT)) == (PG_NORELOC | PG_WAIT) &&
2188d94ffb28Sjmcp kcage_freemem < kcage_throttlefree + npages) {
2189d94ffb28Sjmcp
2190d94ffb28Sjmcp /*
2191d94ffb28Sjmcp * The cage is on, the caller wants PG_NORELOC
2192d94ffb28Sjmcp * pages and available cage memory is very low.
2193d94ffb28Sjmcp * Call kcage_create_throttle() to attempt to
2194d94ffb28Sjmcp * control demand on the cage.
2195d94ffb28Sjmcp */
2196d94ffb28Sjmcp if (kcage_create_throttle(npages, flags) == KCT_FAILURE) {
2197d94ffb28Sjmcp VM_STAT_ADD(page_create_large_cnt[2]);
2198d94ffb28Sjmcp return (NULL);
2199d94ffb28Sjmcp }
22007c478bd9Sstevel@tonic-gate }
22017c478bd9Sstevel@tonic-gate
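	/*
	 * Reserve npages worth of memory from the pcf counters up front.
	 * If neither a single bucket nor a sweep across the buckets can
	 * cover the request, fail; PG_WAIT is deliberately not honored here.
	 */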
220206fb6a36Sdv if (!pcf_decrement_bucket(npages) &&
220306fb6a36Sdv !pcf_decrement_multiple(NULL, npages, 1)) {
220406fb6a36Sdv VM_STAT_ADD(page_create_large_cnt[4]);
220506fb6a36Sdv return (NULL);
22067c478bd9Sstevel@tonic-gate }
22077c478bd9Sstevel@tonic-gate
22087c478bd9Sstevel@tonic-gate /*
22097c478bd9Sstevel@tonic-gate * This is where this function behaves fundamentally differently
22107c478bd9Sstevel@tonic-gate * than page_create_va(); since we're intending to map the page
22117c478bd9Sstevel@tonic-gate * with a single TTE, we have to get it as a physically contiguous
22127c478bd9Sstevel@tonic-gate * hardware pagesize chunk. If we can't, we fail.
22137c478bd9Sstevel@tonic-gate */
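	/*
	 * If the caller passed a valid lgroup id through `arg', allocate
	 * from that lgroup; otherwise let the lgroup framework pick one
	 * based on the segment and virtual address.
	 */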
22147c478bd9Sstevel@tonic-gate if (lgrpid != NULL && *lgrpid >= 0 && *lgrpid <= lgrp_alloc_max &&
22156e4dd838Smec LGRP_EXISTS(lgrp_table[*lgrpid]))
22167c478bd9Sstevel@tonic-gate lgrp = lgrp_table[*lgrpid];
22177c478bd9Sstevel@tonic-gate else
22187c478bd9Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, bytes);
22197c478bd9Sstevel@tonic-gate
2220d94ffb28Sjmcp if ((rootpp = page_get_freelist(&kvp, off, seg, vaddr,
2221d94ffb28Sjmcp bytes, flags & ~PG_MATCH_COLOR, lgrp)) == NULL) {
22227c478bd9Sstevel@tonic-gate page_create_putback(npages);
22237c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[5]);
22247c478bd9Sstevel@tonic-gate return (NULL);
22257c478bd9Sstevel@tonic-gate }
22267c478bd9Sstevel@tonic-gate
22277c478bd9Sstevel@tonic-gate /*
22287c478bd9Sstevel@tonic-gate 	 * If we got the page with the wrong mtype, give it back; this is a
22297c478bd9Sstevel@tonic-gate 	 * workaround for CR 6249718. Once CR 6249718 is fixed we will never
22307c478bd9Sstevel@tonic-gate 	 * get inside the "if" and the workaround becomes just a nop.
22317c478bd9Sstevel@tonic-gate */
22327c478bd9Sstevel@tonic-gate if (kcage_on && (flags & PG_NORELOC) && !PP_ISNORELOC(rootpp)) {
22337c478bd9Sstevel@tonic-gate page_list_add_pages(rootpp, 0);
22347c478bd9Sstevel@tonic-gate page_create_putback(npages);
22357c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[6]);
22367c478bd9Sstevel@tonic-gate return (NULL);
22377c478bd9Sstevel@tonic-gate }
22387c478bd9Sstevel@tonic-gate
22397c478bd9Sstevel@tonic-gate /*
22407c478bd9Sstevel@tonic-gate * If satisfying this request has left us with too little
22417c478bd9Sstevel@tonic-gate * memory, start the wheels turning to get some back. The
22427c478bd9Sstevel@tonic-gate * first clause of the test prevents waking up the pageout
22437c478bd9Sstevel@tonic-gate * daemon in situations where it would decide that there's
22447c478bd9Sstevel@tonic-gate * nothing to do.
22457c478bd9Sstevel@tonic-gate */
22467c478bd9Sstevel@tonic-gate if (nscan < desscan && freemem < minfree) {
22477c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
22487c478bd9Sstevel@tonic-gate "pageout_cv_signal:freemem %ld", freemem);
2249*338664dfSAndy Fiddaman WAKE_PAGEOUT_SCANNER(va__large);
22507c478bd9Sstevel@tonic-gate }
22517c478bd9Sstevel@tonic-gate
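	/*
	 * Set up each constituent page: clear the free and aged bits, hash
	 * the page into vp at the next offset, and take its i/o lock.
	 */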
22527c478bd9Sstevel@tonic-gate pp = rootpp;
22537c478bd9Sstevel@tonic-gate while (npages--) {
22547c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
22557c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL);
22567c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp));
22577c478bd9Sstevel@tonic-gate PP_CLRFREE(pp);
22587c478bd9Sstevel@tonic-gate PP_CLRAGED(pp);
22597c478bd9Sstevel@tonic-gate if (!page_hashin(pp, vp, off, NULL))
22607c478bd9Sstevel@tonic-gate panic("page_create_large: hashin failed: page %p",
22617c478bd9Sstevel@tonic-gate (void *)pp);
22627c478bd9Sstevel@tonic-gate page_io_lock(pp);
22637c478bd9Sstevel@tonic-gate off += PAGESIZE;
22647c478bd9Sstevel@tonic-gate pp = pp->p_next;
22657c478bd9Sstevel@tonic-gate }
22667c478bd9Sstevel@tonic-gate
22677c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_large_cnt[0]);
22687c478bd9Sstevel@tonic-gate return (rootpp);
22697c478bd9Sstevel@tonic-gate }
22707c478bd9Sstevel@tonic-gate
22717c478bd9Sstevel@tonic-gate page_t *
22727c478bd9Sstevel@tonic-gate page_create_va(vnode_t *vp, u_offset_t off, size_t bytes, uint_t flags,
22737c478bd9Sstevel@tonic-gate struct seg *seg, caddr_t vaddr)
22747c478bd9Sstevel@tonic-gate {
22757c478bd9Sstevel@tonic-gate page_t *plist = NULL;
22767c478bd9Sstevel@tonic-gate pgcnt_t npages;
22777c478bd9Sstevel@tonic-gate pgcnt_t found_on_free = 0;
22787c478bd9Sstevel@tonic-gate pgcnt_t pages_req;
22797c478bd9Sstevel@tonic-gate page_t *npp = NULL;
22807c478bd9Sstevel@tonic-gate struct pcf *p;
22817c478bd9Sstevel@tonic-gate lgrp_t *lgrp;
22827c478bd9Sstevel@tonic-gate
22837c478bd9Sstevel@tonic-gate TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_START,
22846e4dd838Smec "page_create_start:vp %p off %llx bytes %lu flags %x",
22856e4dd838Smec vp, off, bytes, flags);
22867c478bd9Sstevel@tonic-gate
22877c478bd9Sstevel@tonic-gate ASSERT(bytes != 0 && vp != NULL);
22887c478bd9Sstevel@tonic-gate
22897c478bd9Sstevel@tonic-gate if ((flags & PG_EXCL) == 0 && (flags & PG_WAIT) == 0) {
22907c478bd9Sstevel@tonic-gate panic("page_create: invalid flags");
22917c478bd9Sstevel@tonic-gate /*NOTREACHED*/
22927c478bd9Sstevel@tonic-gate }
22937c478bd9Sstevel@tonic-gate ASSERT((flags & ~(PG_EXCL | PG_WAIT |
229423a80de1SStan Studzinski PG_NORELOC | PG_PANIC | PG_PUSHPAGE | PG_NORMALPRI)) == 0);
22957c478bd9Sstevel@tonic-gate /* but no others */
22967c478bd9Sstevel@tonic-gate
22977c478bd9Sstevel@tonic-gate pages_req = npages = btopr(bytes);
22987c478bd9Sstevel@tonic-gate /*
22997c478bd9Sstevel@tonic-gate * Try to see whether request is too large to *ever* be
23007c478bd9Sstevel@tonic-gate * satisfied, in order to prevent deadlock. We arbitrarily
23017c478bd9Sstevel@tonic-gate * decide to limit maximum size requests to max_page_get.
23027c478bd9Sstevel@tonic-gate */
23037c478bd9Sstevel@tonic-gate if (npages >= max_page_get) {
23047c478bd9Sstevel@tonic-gate if ((flags & PG_WAIT) == 0) {
23057c478bd9Sstevel@tonic-gate TRACE_4(TR_FAC_VM, TR_PAGE_CREATE_TOOBIG,
23067c478bd9Sstevel@tonic-gate "page_create_toobig:vp %p off %llx npages "
23077c478bd9Sstevel@tonic-gate "%lu max_page_get %lu",
23087c478bd9Sstevel@tonic-gate vp, off, npages, max_page_get);
23097c478bd9Sstevel@tonic-gate return (NULL);
23107c478bd9Sstevel@tonic-gate } else {
23117c478bd9Sstevel@tonic-gate cmn_err(CE_WARN,
23127c478bd9Sstevel@tonic-gate "Request for too much kernel memory "
23137c478bd9Sstevel@tonic-gate "(%lu bytes), will hang forever", bytes);
23147c478bd9Sstevel@tonic-gate for (;;)
23157c478bd9Sstevel@tonic-gate delay(1000000000);
23167c478bd9Sstevel@tonic-gate }
23177c478bd9Sstevel@tonic-gate }
23187c478bd9Sstevel@tonic-gate
23197c478bd9Sstevel@tonic-gate if (!kcage_on || panicstr) {
23207c478bd9Sstevel@tonic-gate /*
2321d94ffb28Sjmcp * Cage is OFF, or we are single threaded in
2322d94ffb28Sjmcp * panic, so make everything a RELOC request.
23237c478bd9Sstevel@tonic-gate */
23247c478bd9Sstevel@tonic-gate flags &= ~PG_NORELOC;
23257c478bd9Sstevel@tonic-gate }
23267c478bd9Sstevel@tonic-gate
2327d94ffb28Sjmcp if (freemem <= throttlefree + npages)
2328d94ffb28Sjmcp if (!page_create_throttle(npages, flags))
23297c478bd9Sstevel@tonic-gate return (NULL);
23307c478bd9Sstevel@tonic-gate
23317c478bd9Sstevel@tonic-gate /*
2332d94ffb28Sjmcp * If cage is on, dampen draw from cage when available
2333d94ffb28Sjmcp * cage space is low.
23347c478bd9Sstevel@tonic-gate */
2335d94ffb28Sjmcp if ((flags & PG_NORELOC) &&
2336d94ffb28Sjmcp kcage_freemem < kcage_throttlefree + npages) {
23377c478bd9Sstevel@tonic-gate
2338d94ffb28Sjmcp /*
2339d94ffb28Sjmcp * The cage is on, the caller wants PG_NORELOC
2340d94ffb28Sjmcp * pages and available cage memory is very low.
2341d94ffb28Sjmcp * Call kcage_create_throttle() to attempt to
2342d94ffb28Sjmcp * control demand on the cage.
2343d94ffb28Sjmcp */
2344d94ffb28Sjmcp if (kcage_create_throttle(npages, flags) == KCT_FAILURE)
2345d94ffb28Sjmcp return (NULL);
23467c478bd9Sstevel@tonic-gate }
23477c478bd9Sstevel@tonic-gate
23487c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[0]);
23497c478bd9Sstevel@tonic-gate
235006fb6a36Sdv if (!pcf_decrement_bucket(npages)) {
23517c478bd9Sstevel@tonic-gate /*
23527c478bd9Sstevel@tonic-gate * Have to look harder. If npages is greater than
2353da6c28aaSamw * one, then we might have to coalesce the counters.
23547c478bd9Sstevel@tonic-gate *
23557c478bd9Sstevel@tonic-gate * Go wait. We come back having accounted
23567c478bd9Sstevel@tonic-gate * for the memory.
23577c478bd9Sstevel@tonic-gate */
23587c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[1]);
23597c478bd9Sstevel@tonic-gate if (!page_create_wait(npages, flags)) {
23607c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[2]);
23617c478bd9Sstevel@tonic-gate return (NULL);
23627c478bd9Sstevel@tonic-gate }
23637c478bd9Sstevel@tonic-gate }
23647c478bd9Sstevel@tonic-gate
23657c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_CREATE_SUCCESS,
23666e4dd838Smec "page_create_success:vp %p off %llx", vp, off);
23677c478bd9Sstevel@tonic-gate
23687c478bd9Sstevel@tonic-gate /*
23697c478bd9Sstevel@tonic-gate * If satisfying this request has left us with too little
23707c478bd9Sstevel@tonic-gate * memory, start the wheels turning to get some back. The
23717c478bd9Sstevel@tonic-gate * first clause of the test prevents waking up the pageout
23727c478bd9Sstevel@tonic-gate * daemon in situations where it would decide that there's
23737c478bd9Sstevel@tonic-gate * nothing to do.
23747c478bd9Sstevel@tonic-gate */
23757c478bd9Sstevel@tonic-gate if (nscan < desscan && freemem < minfree) {
23767c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGEOUT_CV_SIGNAL,
23776e4dd838Smec "pageout_cv_signal:freemem %ld", freemem);
2378*338664dfSAndy Fiddaman WAKE_PAGEOUT_SCANNER(va);
23797c478bd9Sstevel@tonic-gate }
23807c478bd9Sstevel@tonic-gate
23817c478bd9Sstevel@tonic-gate /*
23827c478bd9Sstevel@tonic-gate * Loop around collecting the requested number of pages.
23837c478bd9Sstevel@tonic-gate * Most of the time, we have to `create' a new page. With
23847c478bd9Sstevel@tonic-gate * this in mind, pull the page off the free list before
23857c478bd9Sstevel@tonic-gate * getting the hash lock. This will minimize the hash
23867c478bd9Sstevel@tonic-gate * lock hold time, nesting, and the like. If it turns
23877c478bd9Sstevel@tonic-gate * out we don't need the page, we put it back at the end.
23887c478bd9Sstevel@tonic-gate */
23897c478bd9Sstevel@tonic-gate while (npages--) {
23907c478bd9Sstevel@tonic-gate page_t *pp;
23917c478bd9Sstevel@tonic-gate kmutex_t *phm = NULL;
23927c478bd9Sstevel@tonic-gate ulong_t index;
23937c478bd9Sstevel@tonic-gate
23947c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off);
23957c478bd9Sstevel@tonic-gate top:
23967c478bd9Sstevel@tonic-gate ASSERT(phm == NULL);
23977c478bd9Sstevel@tonic-gate ASSERT(index == PAGE_HASH_FUNC(vp, off));
23987c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
23997c478bd9Sstevel@tonic-gate
24007c478bd9Sstevel@tonic-gate if (npp == NULL) {
24017c478bd9Sstevel@tonic-gate /*
24027c478bd9Sstevel@tonic-gate * Try to get a page from the freelist (ie,
24037c478bd9Sstevel@tonic-gate * a page with no [vp, off] tag). If that
24047c478bd9Sstevel@tonic-gate * fails, use the cachelist.
24057c478bd9Sstevel@tonic-gate *
24067c478bd9Sstevel@tonic-gate * During the first attempt at both the free
24077c478bd9Sstevel@tonic-gate * and cache lists we try for the correct color.
24087c478bd9Sstevel@tonic-gate */
24097c478bd9Sstevel@tonic-gate /*
24107c478bd9Sstevel@tonic-gate * XXXX-how do we deal with virtual indexed
24117c478bd9Sstevel@tonic-gate 			 * caches and colors?
24127c478bd9Sstevel@tonic-gate */
24137c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_cnt[4]);
24147c478bd9Sstevel@tonic-gate /*
24157c478bd9Sstevel@tonic-gate * Get lgroup to allocate next page of shared memory
24167c478bd9Sstevel@tonic-gate * from and use it to specify where to allocate
24177c478bd9Sstevel@tonic-gate * the physical memory
24187c478bd9Sstevel@tonic-gate */
24197c478bd9Sstevel@tonic-gate lgrp = lgrp_mem_choose(seg, vaddr, PAGESIZE);
2420d94ffb28Sjmcp npp = page_get_freelist(vp, off, seg, vaddr, PAGESIZE,
24217c478bd9Sstevel@tonic-gate flags | PG_MATCH_COLOR, lgrp);
24227c478bd9Sstevel@tonic-gate if (npp == NULL) {
24237c478bd9Sstevel@tonic-gate npp = page_get_cachelist(vp, off, seg,
24247c478bd9Sstevel@tonic-gate vaddr, flags | PG_MATCH_COLOR, lgrp);
24257c478bd9Sstevel@tonic-gate if (npp == NULL) {
24267c478bd9Sstevel@tonic-gate npp = page_create_get_something(vp,
24277c478bd9Sstevel@tonic-gate off, seg, vaddr,
24287c478bd9Sstevel@tonic-gate flags & ~PG_MATCH_COLOR);
24297c478bd9Sstevel@tonic-gate }
24307c478bd9Sstevel@tonic-gate
24317c478bd9Sstevel@tonic-gate if (PP_ISAGED(npp) == 0) {
24327c478bd9Sstevel@tonic-gate /*
24337c478bd9Sstevel@tonic-gate * Since this page came from the
24347c478bd9Sstevel@tonic-gate * cachelist, we must destroy the
24357c478bd9Sstevel@tonic-gate * old vnode association.
24367c478bd9Sstevel@tonic-gate */
24377c478bd9Sstevel@tonic-gate page_hashout(npp, NULL);
24387c478bd9Sstevel@tonic-gate }
24397c478bd9Sstevel@tonic-gate }
24407c478bd9Sstevel@tonic-gate }
24417c478bd9Sstevel@tonic-gate
24427c478bd9Sstevel@tonic-gate /*
24437c478bd9Sstevel@tonic-gate * We own this page!
24447c478bd9Sstevel@tonic-gate */
24457c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(npp));
24467c478bd9Sstevel@tonic-gate ASSERT(npp->p_vnode == NULL);
24477c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(npp));
24487c478bd9Sstevel@tonic-gate PP_CLRFREE(npp);
24497c478bd9Sstevel@tonic-gate PP_CLRAGED(npp);
24507c478bd9Sstevel@tonic-gate
24517c478bd9Sstevel@tonic-gate /*
24527c478bd9Sstevel@tonic-gate 		 * Here we have a page in our hot little mitts and are
24537c478bd9Sstevel@tonic-gate * just waiting to stuff it on the appropriate lists.
24547c478bd9Sstevel@tonic-gate * Get the mutex and check to see if it really does
24557c478bd9Sstevel@tonic-gate * not exist.
24567c478bd9Sstevel@tonic-gate */
24577c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index);
24587c478bd9Sstevel@tonic-gate mutex_enter(phm);
2459e7c874afSJosef 'Jeff' Sipek pp = page_hash_search(index, vp, off);
24607c478bd9Sstevel@tonic-gate if (pp == NULL) {
24617c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_new);
24627c478bd9Sstevel@tonic-gate pp = npp;
24637c478bd9Sstevel@tonic-gate npp = NULL;
24647c478bd9Sstevel@tonic-gate if (!page_hashin(pp, vp, off, phm)) {
24657c478bd9Sstevel@tonic-gate /*
24667c478bd9Sstevel@tonic-gate * Since we hold the page hash mutex and
24677c478bd9Sstevel@tonic-gate * just searched for this page, page_hashin
24687c478bd9Sstevel@tonic-gate * had better not fail. If it does, that
24697c478bd9Sstevel@tonic-gate 				 * means some thread did not follow the
24707c478bd9Sstevel@tonic-gate * page hash mutex rules. Panic now and
24717c478bd9Sstevel@tonic-gate * get it over with. As usual, go down
24727c478bd9Sstevel@tonic-gate * holding all the locks.
24737c478bd9Sstevel@tonic-gate */
24747c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm));
24757c478bd9Sstevel@tonic-gate panic("page_create: "
24767c478bd9Sstevel@tonic-gate "hashin failed %p %p %llx %p",
24777c478bd9Sstevel@tonic-gate (void *)pp, (void *)vp, off, (void *)phm);
24787c478bd9Sstevel@tonic-gate /*NOTREACHED*/
24797c478bd9Sstevel@tonic-gate }
24807c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm));
24817c478bd9Sstevel@tonic-gate mutex_exit(phm);
24827c478bd9Sstevel@tonic-gate phm = NULL;
24837c478bd9Sstevel@tonic-gate
24847c478bd9Sstevel@tonic-gate /*
24857c478bd9Sstevel@tonic-gate * Hat layer locking need not be done to set
24867c478bd9Sstevel@tonic-gate * the following bits since the page is not hashed
24877c478bd9Sstevel@tonic-gate * and was on the free list (i.e., had no mappings).
24887c478bd9Sstevel@tonic-gate *
24897c478bd9Sstevel@tonic-gate * Set the reference bit to protect
24907c478bd9Sstevel@tonic-gate * against immediate pageout
24917c478bd9Sstevel@tonic-gate *
24927c478bd9Sstevel@tonic-gate * XXXmh modify freelist code to set reference
24937c478bd9Sstevel@tonic-gate * bit so we don't have to do it here.
24947c478bd9Sstevel@tonic-gate */
24957c478bd9Sstevel@tonic-gate page_set_props(pp, P_REF);
24967c478bd9Sstevel@tonic-gate found_on_free++;
24977c478bd9Sstevel@tonic-gate } else {
24987c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_exists);
24997c478bd9Sstevel@tonic-gate if (flags & PG_EXCL) {
25007c478bd9Sstevel@tonic-gate /*
25017c478bd9Sstevel@tonic-gate * Found an existing page, and the caller
25027c478bd9Sstevel@tonic-gate * wanted all new pages. Undo all of the work
25037c478bd9Sstevel@tonic-gate * we have done.
25047c478bd9Sstevel@tonic-gate */
25057c478bd9Sstevel@tonic-gate mutex_exit(phm);
25067c478bd9Sstevel@tonic-gate phm = NULL;
25077c478bd9Sstevel@tonic-gate while (plist != NULL) {
25087c478bd9Sstevel@tonic-gate pp = plist;
25097c478bd9Sstevel@tonic-gate page_sub(&plist, pp);
25107c478bd9Sstevel@tonic-gate page_io_unlock(pp);
25117c478bd9Sstevel@tonic-gate /* large pages should not end up here */
25127c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
25137c478bd9Sstevel@tonic-gate /*LINTED: constant in conditional ctx*/
25147c478bd9Sstevel@tonic-gate VN_DISPOSE(pp, B_INVAL, 0, kcred);
25157c478bd9Sstevel@tonic-gate }
25167c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_found_one);
25177c478bd9Sstevel@tonic-gate goto fail;
25187c478bd9Sstevel@tonic-gate }
25197c478bd9Sstevel@tonic-gate ASSERT(flags & PG_WAIT);
25207c478bd9Sstevel@tonic-gate if (!page_lock(pp, SE_EXCL, phm, P_NO_RECLAIM)) {
25217c478bd9Sstevel@tonic-gate /*
25227c478bd9Sstevel@tonic-gate * Start all over again if we blocked trying
25237c478bd9Sstevel@tonic-gate * to lock the page.
25247c478bd9Sstevel@tonic-gate */
25257c478bd9Sstevel@tonic-gate mutex_exit(phm);
25267c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_page_lock_failed);
25277c478bd9Sstevel@tonic-gate phm = NULL;
25287c478bd9Sstevel@tonic-gate goto top;
25297c478bd9Sstevel@tonic-gate }
25307c478bd9Sstevel@tonic-gate mutex_exit(phm);
25317c478bd9Sstevel@tonic-gate phm = NULL;
25327c478bd9Sstevel@tonic-gate
25337c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp)) {
25347c478bd9Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0);
25357c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_get_cache);
25367c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST);
25377c478bd9Sstevel@tonic-gate PP_CLRFREE(pp);
25387c478bd9Sstevel@tonic-gate found_on_free++;
25397c478bd9Sstevel@tonic-gate }
25407c478bd9Sstevel@tonic-gate }
25417c478bd9Sstevel@tonic-gate
25427c478bd9Sstevel@tonic-gate /*
25437c478bd9Sstevel@tonic-gate * Got a page! It is locked. Acquire the i/o
25447c478bd9Sstevel@tonic-gate * lock since we are going to use the p_next and
25457c478bd9Sstevel@tonic-gate * p_prev fields to link the requested pages together.
25467c478bd9Sstevel@tonic-gate */
25477c478bd9Sstevel@tonic-gate page_io_lock(pp);
25487c478bd9Sstevel@tonic-gate page_add(&plist, pp);
25497c478bd9Sstevel@tonic-gate plist = plist->p_next;
25507c478bd9Sstevel@tonic-gate off += PAGESIZE;
25517c478bd9Sstevel@tonic-gate vaddr += PAGESIZE;
25527c478bd9Sstevel@tonic-gate }
25537c478bd9Sstevel@tonic-gate
25547c478bd9Sstevel@tonic-gate ASSERT((flags & PG_EXCL) ? (found_on_free == pages_req) : 1);
25557c478bd9Sstevel@tonic-gate fail:
25567c478bd9Sstevel@tonic-gate if (npp != NULL) {
25577c478bd9Sstevel@tonic-gate /*
25587c478bd9Sstevel@tonic-gate * Did not need this page after all.
25597c478bd9Sstevel@tonic-gate * Put it back on the free list.
25607c478bd9Sstevel@tonic-gate */
25617c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_putbacks);
25627c478bd9Sstevel@tonic-gate PP_SETFREE(npp);
25637c478bd9Sstevel@tonic-gate PP_SETAGED(npp);
25647c478bd9Sstevel@tonic-gate npp->p_offset = (u_offset_t)-1;
25657c478bd9Sstevel@tonic-gate page_list_add(npp, PG_FREE_LIST | PG_LIST_TAIL);
25667c478bd9Sstevel@tonic-gate page_unlock(npp);
2567d94ffb28Sjmcp
25687c478bd9Sstevel@tonic-gate }
25697c478bd9Sstevel@tonic-gate
25707c478bd9Sstevel@tonic-gate ASSERT(pages_req >= found_on_free);
25717c478bd9Sstevel@tonic-gate
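	/*
	 * If we collected fewer pages than we reserved (some already existed
	 * and were found in the hash), return the excess reservation to a
	 * pcf bucket and wake one waiter if any thread is blocked on memory.
	 */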
25727c478bd9Sstevel@tonic-gate {
25737c478bd9Sstevel@tonic-gate uint_t overshoot = (uint_t)(pages_req - found_on_free);
25747c478bd9Sstevel@tonic-gate
25757c478bd9Sstevel@tonic-gate if (overshoot) {
25767c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_create_overshoot);
257706fb6a36Sdv p = &pcf[PCF_INDEX()];
25787c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
25797c478bd9Sstevel@tonic-gate if (p->pcf_block) {
25807c478bd9Sstevel@tonic-gate p->pcf_reserve += overshoot;
25817c478bd9Sstevel@tonic-gate } else {
25827c478bd9Sstevel@tonic-gate p->pcf_count += overshoot;
25837c478bd9Sstevel@tonic-gate if (p->pcf_wait) {
25847c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
25857c478bd9Sstevel@tonic-gate if (freemem_wait) {
25867c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv);
25877c478bd9Sstevel@tonic-gate p->pcf_wait--;
25887c478bd9Sstevel@tonic-gate } else {
25897c478bd9Sstevel@tonic-gate p->pcf_wait = 0;
25907c478bd9Sstevel@tonic-gate }
25917c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
25927c478bd9Sstevel@tonic-gate }
25937c478bd9Sstevel@tonic-gate }
25947c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
25957c478bd9Sstevel@tonic-gate 			/* freemem is approximate, so this test is OK */
25967c478bd9Sstevel@tonic-gate if (!p->pcf_block)
25977c478bd9Sstevel@tonic-gate freemem += overshoot;
25987c478bd9Sstevel@tonic-gate }
25997c478bd9Sstevel@tonic-gate }
26007c478bd9Sstevel@tonic-gate
26017c478bd9Sstevel@tonic-gate return (plist);
26027c478bd9Sstevel@tonic-gate }
26037c478bd9Sstevel@tonic-gate
26047c478bd9Sstevel@tonic-gate /*
26057c478bd9Sstevel@tonic-gate * One or more constituent pages of this large page has been marked
26067c478bd9Sstevel@tonic-gate * toxic. Simply demote the large page to PAGESIZE pages and let
26077c478bd9Sstevel@tonic-gate * page_free() handle it. This routine should only be called by
26087c478bd9Sstevel@tonic-gate  * large page free routines (page_free_pages() and page_destroy_pages()).
26097c478bd9Sstevel@tonic-gate * All pages are locked SE_EXCL and have already been marked free.
26107c478bd9Sstevel@tonic-gate */
26117c478bd9Sstevel@tonic-gate static void
26127c478bd9Sstevel@tonic-gate page_free_toxic_pages(page_t *rootpp)
26137c478bd9Sstevel@tonic-gate {
26147c478bd9Sstevel@tonic-gate page_t *tpp;
26157c478bd9Sstevel@tonic-gate pgcnt_t i, pgcnt = page_get_pagecnt(rootpp->p_szc);
26167c478bd9Sstevel@tonic-gate uint_t szc = rootpp->p_szc;
26177c478bd9Sstevel@tonic-gate
26187c478bd9Sstevel@tonic-gate for (i = 0, tpp = rootpp; i < pgcnt; i++, tpp = tpp->p_next) {
26197c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc);
26207c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) &&
26217c478bd9Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr);
26227c478bd9Sstevel@tonic-gate tpp->p_szc = 0;
26237c478bd9Sstevel@tonic-gate }
26247c478bd9Sstevel@tonic-gate
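	/*
	 * Every constituent page now has a zero size code; peel them off
	 * one at a time and free each as an ordinary PAGESIZE page.
	 */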
26257c478bd9Sstevel@tonic-gate while (rootpp != NULL) {
26267c478bd9Sstevel@tonic-gate tpp = rootpp;
26277c478bd9Sstevel@tonic-gate page_sub(&rootpp, tpp);
26287c478bd9Sstevel@tonic-gate ASSERT(PP_ISFREE(tpp));
26297c478bd9Sstevel@tonic-gate PP_CLRFREE(tpp);
26307c478bd9Sstevel@tonic-gate page_free(tpp, 1);
26317c478bd9Sstevel@tonic-gate }
26327c478bd9Sstevel@tonic-gate }
26337c478bd9Sstevel@tonic-gate
26347c478bd9Sstevel@tonic-gate /*
26357c478bd9Sstevel@tonic-gate * Put page on the "free" list.
26367c478bd9Sstevel@tonic-gate * The free list is really two lists maintained by
26377c478bd9Sstevel@tonic-gate * the PSM of whatever machine we happen to be on.
26387c478bd9Sstevel@tonic-gate */
26397c478bd9Sstevel@tonic-gate void
26407c478bd9Sstevel@tonic-gate page_free(page_t *pp, int dontneed)
26417c478bd9Sstevel@tonic-gate {
26427c478bd9Sstevel@tonic-gate struct pcf *p;
26437c478bd9Sstevel@tonic-gate uint_t pcf_index;
26447c478bd9Sstevel@tonic-gate
26457c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) &&
26467c478bd9Sstevel@tonic-gate !page_iolock_assert(pp)) || panicstr);
26477c478bd9Sstevel@tonic-gate
26487c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp)) {
26497c478bd9Sstevel@tonic-gate panic("page_free: page %p is free", (void *)pp);
26507c478bd9Sstevel@tonic-gate }
26517c478bd9Sstevel@tonic-gate
26527c478bd9Sstevel@tonic-gate if (pp->p_szc != 0) {
26537c478bd9Sstevel@tonic-gate if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
2654ad23a2dbSjohansen PP_ISKAS(pp)) {
26557c478bd9Sstevel@tonic-gate panic("page_free: anon or kernel "
26567c478bd9Sstevel@tonic-gate "or no vnode large page %p", (void *)pp);
26577c478bd9Sstevel@tonic-gate }
26587c478bd9Sstevel@tonic-gate page_demote_vp_pages(pp);
26597c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
26607c478bd9Sstevel@tonic-gate }
26617c478bd9Sstevel@tonic-gate
26627c478bd9Sstevel@tonic-gate /*
26637c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired to examine these
26647c478bd9Sstevel@tonic-gate * fields since the page has an "exclusive" lock.
26657c478bd9Sstevel@tonic-gate */
266607b65a64Saguzovsk if (hat_page_is_mapped(pp) || pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
266707b65a64Saguzovsk pp->p_slckcnt != 0) {
266807b65a64Saguzovsk panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d "
26698793b36bSNick Todd "slckcnt = %d", (void *)pp, page_pptonum(pp), pp->p_lckcnt,
267007b65a64Saguzovsk pp->p_cowcnt, pp->p_slckcnt);
26717c478bd9Sstevel@tonic-gate /*NOTREACHED*/
26727c478bd9Sstevel@tonic-gate }
26737c478bd9Sstevel@tonic-gate
26747c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(pp));
26757c478bd9Sstevel@tonic-gate
26767c478bd9Sstevel@tonic-gate PP_SETFREE(pp);
26777c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL || !IS_VMODSORT(pp->p_vnode) ||
26787c478bd9Sstevel@tonic-gate !hat_ismod(pp));
26799d0d62adSJason Beloro page_clr_all_props(pp);
26807c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(pp));
26817c478bd9Sstevel@tonic-gate
26827c478bd9Sstevel@tonic-gate /*
26837c478bd9Sstevel@tonic-gate * Now we add the page to the head of the free list.
26847c478bd9Sstevel@tonic-gate * But if this page is associated with a paged vnode
26857c478bd9Sstevel@tonic-gate * then we adjust the head forward so that the page is
26867c478bd9Sstevel@tonic-gate * effectively at the end of the list.
26877c478bd9Sstevel@tonic-gate */
26887c478bd9Sstevel@tonic-gate if (pp->p_vnode == NULL) {
26897c478bd9Sstevel@tonic-gate /*
26907c478bd9Sstevel@tonic-gate * Page has no identity, put it on the free list.
26917c478bd9Sstevel@tonic-gate */
26927c478bd9Sstevel@tonic-gate PP_SETAGED(pp);
26937c478bd9Sstevel@tonic-gate pp->p_offset = (u_offset_t)-1;
26947c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
26957c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_free);
26967c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
26977c478bd9Sstevel@tonic-gate "page_free_free:pp %p", pp);
26987c478bd9Sstevel@tonic-gate } else {
26997c478bd9Sstevel@tonic-gate PP_CLRAGED(pp);
27007c478bd9Sstevel@tonic-gate
27018d4235fbSJosef 'Jeff' Sipek if (!dontneed) {
27027c478bd9Sstevel@tonic-gate /* move it to the tail of the list */
27037c478bd9Sstevel@tonic-gate page_list_add(pp, PG_CACHE_LIST | PG_LIST_TAIL);
27047c478bd9Sstevel@tonic-gate
27057c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_cache);
27067c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_TAIL,
27077c478bd9Sstevel@tonic-gate "page_free_cache_tail:pp %p", pp);
27087c478bd9Sstevel@tonic-gate } else {
27097c478bd9Sstevel@tonic-gate page_list_add(pp, PG_CACHE_LIST | PG_LIST_HEAD);
27107c478bd9Sstevel@tonic-gate
27117c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_dontneed);
27127c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_CACHE_HEAD,
27137c478bd9Sstevel@tonic-gate "page_free_cache_head:pp %p", pp);
27147c478bd9Sstevel@tonic-gate }
27157c478bd9Sstevel@tonic-gate }
27167c478bd9Sstevel@tonic-gate page_unlock(pp);
27177c478bd9Sstevel@tonic-gate
27187c478bd9Sstevel@tonic-gate /*
27197c478bd9Sstevel@tonic-gate * Now do the `freemem' accounting.
27207c478bd9Sstevel@tonic-gate */
27217c478bd9Sstevel@tonic-gate pcf_index = PCF_INDEX();
27227c478bd9Sstevel@tonic-gate p = &pcf[pcf_index];
27237c478bd9Sstevel@tonic-gate
27247c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
27257c478bd9Sstevel@tonic-gate if (p->pcf_block) {
27267c478bd9Sstevel@tonic-gate p->pcf_reserve += 1;
27277c478bd9Sstevel@tonic-gate } else {
27287c478bd9Sstevel@tonic-gate p->pcf_count += 1;
27297c478bd9Sstevel@tonic-gate if (p->pcf_wait) {
27307c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
27317c478bd9Sstevel@tonic-gate /*
27327c478bd9Sstevel@tonic-gate * Check to see if some other thread
27337c478bd9Sstevel@tonic-gate * is actually waiting. Another bucket
27347c478bd9Sstevel@tonic-gate * may have woken it up by now. If there
27357c478bd9Sstevel@tonic-gate * are no waiters, then set our pcf_wait
27367c478bd9Sstevel@tonic-gate * count to zero to avoid coming in here
27377c478bd9Sstevel@tonic-gate * next time. Also, since only one page
27387c478bd9Sstevel@tonic-gate * was put on the free list, just wake
27397c478bd9Sstevel@tonic-gate * up one waiter.
27407c478bd9Sstevel@tonic-gate */
27417c478bd9Sstevel@tonic-gate if (freemem_wait) {
27427c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv);
27437c478bd9Sstevel@tonic-gate p->pcf_wait--;
27447c478bd9Sstevel@tonic-gate } else {
27457c478bd9Sstevel@tonic-gate p->pcf_wait = 0;
27467c478bd9Sstevel@tonic-gate }
27477c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
27487c478bd9Sstevel@tonic-gate }
27497c478bd9Sstevel@tonic-gate }
27507c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
27517c478bd9Sstevel@tonic-gate
27527c478bd9Sstevel@tonic-gate 	/* freemem is approximate, so this test is OK */
27537c478bd9Sstevel@tonic-gate if (!p->pcf_block)
27547c478bd9Sstevel@tonic-gate freemem += 1;
27557c478bd9Sstevel@tonic-gate }
27567c478bd9Sstevel@tonic-gate
27577c478bd9Sstevel@tonic-gate /*
27587c478bd9Sstevel@tonic-gate  * Put page on the "free" list during initial startup.
27597c478bd9Sstevel@tonic-gate * This happens during initial single threaded execution.
27607c478bd9Sstevel@tonic-gate */
27617c478bd9Sstevel@tonic-gate void
27627c478bd9Sstevel@tonic-gate page_free_at_startup(page_t *pp)
27637c478bd9Sstevel@tonic-gate {
27647c478bd9Sstevel@tonic-gate struct pcf *p;
27657c478bd9Sstevel@tonic-gate uint_t pcf_index;
27667c478bd9Sstevel@tonic-gate
27677c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_HEAD | PG_LIST_ISINIT);
27687c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_free);
27697c478bd9Sstevel@tonic-gate
27707c478bd9Sstevel@tonic-gate /*
27717c478bd9Sstevel@tonic-gate * Now do the `freemem' accounting.
27727c478bd9Sstevel@tonic-gate */
27737c478bd9Sstevel@tonic-gate pcf_index = PCF_INDEX();
27747c478bd9Sstevel@tonic-gate p = &pcf[pcf_index];
27757c478bd9Sstevel@tonic-gate
27767c478bd9Sstevel@tonic-gate ASSERT(p->pcf_block == 0);
27777c478bd9Sstevel@tonic-gate ASSERT(p->pcf_wait == 0);
27787c478bd9Sstevel@tonic-gate p->pcf_count += 1;
27797c478bd9Sstevel@tonic-gate
27807c478bd9Sstevel@tonic-gate /* freemem is approximate, so this is OK */
27817c478bd9Sstevel@tonic-gate freemem += 1;
27827c478bd9Sstevel@tonic-gate }
27837c478bd9Sstevel@tonic-gate
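/*
 * Free a large page. `pp' must be the root constituent page and every
 * constituent must be exclusively locked, unmapped and not already free;
 * the whole chunk is returned to the free list via page_list_add_pages().
 */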
27847c478bd9Sstevel@tonic-gate void
27857c478bd9Sstevel@tonic-gate page_free_pages(page_t *pp)
27867c478bd9Sstevel@tonic-gate {
27877c478bd9Sstevel@tonic-gate page_t *tpp, *rootpp = NULL;
27887c478bd9Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
27897c478bd9Sstevel@tonic-gate pgcnt_t i;
27907c478bd9Sstevel@tonic-gate uint_t szc = pp->p_szc;
27917c478bd9Sstevel@tonic-gate
27927c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_pages);
27937c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_FREE_FREE,
27947c478bd9Sstevel@tonic-gate "page_free_free:pp %p", pp);
27957c478bd9Sstevel@tonic-gate
27967c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
27977c478bd9Sstevel@tonic-gate if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
27987c478bd9Sstevel@tonic-gate panic("page_free_pages: not root page %p", (void *)pp);
27997c478bd9Sstevel@tonic-gate /*NOTREACHED*/
28007c478bd9Sstevel@tonic-gate }
28017c478bd9Sstevel@tonic-gate
2802affbd3ccSkchow for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
28037c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) &&
28047c478bd9Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr);
28057c478bd9Sstevel@tonic-gate if (PP_ISFREE(tpp)) {
28067c478bd9Sstevel@tonic-gate panic("page_free_pages: page %p is free", (void *)tpp);
28077c478bd9Sstevel@tonic-gate /*NOTREACHED*/
28087c478bd9Sstevel@tonic-gate }
28097c478bd9Sstevel@tonic-gate if (hat_page_is_mapped(tpp) || tpp->p_lckcnt != 0 ||
281007b65a64Saguzovsk tpp->p_cowcnt != 0 || tpp->p_slckcnt != 0) {
28117c478bd9Sstevel@tonic-gate panic("page_free_pages %p", (void *)tpp);
28127c478bd9Sstevel@tonic-gate /*NOTREACHED*/
28137c478bd9Sstevel@tonic-gate }
28147c478bd9Sstevel@tonic-gate
28157c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(tpp));
28167c478bd9Sstevel@tonic-gate ASSERT(tpp->p_vnode == NULL);
28177c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc);
28187c478bd9Sstevel@tonic-gate
28197c478bd9Sstevel@tonic-gate PP_SETFREE(tpp);
28209d0d62adSJason Beloro page_clr_all_props(tpp);
28217c478bd9Sstevel@tonic-gate PP_SETAGED(tpp);
28227c478bd9Sstevel@tonic-gate tpp->p_offset = (u_offset_t)-1;
28237c478bd9Sstevel@tonic-gate ASSERT(tpp->p_next == tpp);
28247c478bd9Sstevel@tonic-gate ASSERT(tpp->p_prev == tpp);
28257c478bd9Sstevel@tonic-gate page_list_concat(&rootpp, &tpp);
28267c478bd9Sstevel@tonic-gate }
28277c478bd9Sstevel@tonic-gate ASSERT(rootpp == pp);
28287c478bd9Sstevel@tonic-gate
28297c478bd9Sstevel@tonic-gate page_list_add_pages(rootpp, 0);
28307c478bd9Sstevel@tonic-gate page_create_putback(pgcnt);
28317c478bd9Sstevel@tonic-gate }
28327c478bd9Sstevel@tonic-gate
28337c478bd9Sstevel@tonic-gate int free_pages = 1;
28347c478bd9Sstevel@tonic-gate
28357c478bd9Sstevel@tonic-gate /*
28367c478bd9Sstevel@tonic-gate * This routine attempts to return pages to the cachelist via page_release().
28377c478bd9Sstevel@tonic-gate * It does not *have* to be successful in all cases, since the pageout scanner
28387c478bd9Sstevel@tonic-gate * will catch any pages it misses. It does need to be fast and not introduce
28397c478bd9Sstevel@tonic-gate * too much overhead.
28407c478bd9Sstevel@tonic-gate *
28417c478bd9Sstevel@tonic-gate * If a page isn't found on the unlocked sweep of the page_hash bucket, we
28427c478bd9Sstevel@tonic-gate * don't lock and retry. This is ok, since the page scanner will eventually
28437c478bd9Sstevel@tonic-gate * find any page we miss in free_vp_pages().
28447c478bd9Sstevel@tonic-gate */
28457c478bd9Sstevel@tonic-gate void
28467c478bd9Sstevel@tonic-gate free_vp_pages(vnode_t *vp, u_offset_t off, size_t len)
28477c478bd9Sstevel@tonic-gate {
28487c478bd9Sstevel@tonic-gate page_t *pp;
28497c478bd9Sstevel@tonic-gate u_offset_t eoff;
28507c478bd9Sstevel@tonic-gate extern int swap_in_range(vnode_t *, u_offset_t, size_t);
28517c478bd9Sstevel@tonic-gate
28527c478bd9Sstevel@tonic-gate eoff = off + len;
28537c478bd9Sstevel@tonic-gate
28547c478bd9Sstevel@tonic-gate if (free_pages == 0)
28557c478bd9Sstevel@tonic-gate return;
28567c478bd9Sstevel@tonic-gate if (swap_in_range(vp, off, len))
28577c478bd9Sstevel@tonic-gate return;
28587c478bd9Sstevel@tonic-gate
28597c478bd9Sstevel@tonic-gate for (; off < eoff; off += PAGESIZE) {
28607c478bd9Sstevel@tonic-gate
28617c478bd9Sstevel@tonic-gate /*
28627c478bd9Sstevel@tonic-gate * find the page using a fast, but inexact search. It'll be OK
28637c478bd9Sstevel@tonic-gate * if a few pages slip through the cracks here.
28647c478bd9Sstevel@tonic-gate */
28657c478bd9Sstevel@tonic-gate pp = page_exists(vp, off);
28667c478bd9Sstevel@tonic-gate
28677c478bd9Sstevel@tonic-gate /*
28687c478bd9Sstevel@tonic-gate * If we didn't find the page (it may not exist), the page
28697c478bd9Sstevel@tonic-gate 		 * is free, still appears to be in use (shared), or we can't lock it,
28707c478bd9Sstevel@tonic-gate * just give up.
28717c478bd9Sstevel@tonic-gate */
28727c478bd9Sstevel@tonic-gate if (pp == NULL ||
28737c478bd9Sstevel@tonic-gate PP_ISFREE(pp) ||
28747c478bd9Sstevel@tonic-gate page_share_cnt(pp) > 0 ||
28757c478bd9Sstevel@tonic-gate !page_trylock(pp, SE_EXCL))
28767c478bd9Sstevel@tonic-gate continue;
28777c478bd9Sstevel@tonic-gate
28787c478bd9Sstevel@tonic-gate /*
28797c478bd9Sstevel@tonic-gate * Once we have locked pp, verify that it's still the
28807c478bd9Sstevel@tonic-gate * correct page and not already free
28817c478bd9Sstevel@tonic-gate */
28827c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED_SE(pp, SE_EXCL));
28837c478bd9Sstevel@tonic-gate if (pp->p_vnode != vp || pp->p_offset != off || PP_ISFREE(pp)) {
28847c478bd9Sstevel@tonic-gate page_unlock(pp);
28857c478bd9Sstevel@tonic-gate continue;
28867c478bd9Sstevel@tonic-gate }
28877c478bd9Sstevel@tonic-gate
28887c478bd9Sstevel@tonic-gate /*
28897c478bd9Sstevel@tonic-gate * try to release the page...
28907c478bd9Sstevel@tonic-gate */
28917c478bd9Sstevel@tonic-gate (void) page_release(pp, 1);
28927c478bd9Sstevel@tonic-gate }
28937c478bd9Sstevel@tonic-gate }
28947c478bd9Sstevel@tonic-gate
28957c478bd9Sstevel@tonic-gate /*
28967c478bd9Sstevel@tonic-gate * Reclaim the given page from the free list.
28976e4dd838Smec  * If pp is part of a large page, only the given constituent page is reclaimed
28986e4dd838Smec * and the large page it belonged to will be demoted. This can only happen
28996e4dd838Smec * if the page is not on the cachelist.
29006e4dd838Smec *
29017c478bd9Sstevel@tonic-gate * Returns 1 on success or 0 on failure.
29027c478bd9Sstevel@tonic-gate *
29037c478bd9Sstevel@tonic-gate * The page is unlocked if it can't be reclaimed (when freemem == 0).
29047c478bd9Sstevel@tonic-gate * If `lock' is non-null, it will be dropped and re-acquired if
29057c478bd9Sstevel@tonic-gate * the routine must wait while freemem is 0.
29067c478bd9Sstevel@tonic-gate *
29077c478bd9Sstevel@tonic-gate * As it turns out, boot_getpages() does this. It picks a page,
29087c478bd9Sstevel@tonic-gate * based on where OBP mapped in some address, gets its pfn, searches
29097c478bd9Sstevel@tonic-gate * the memsegs, locks the page, then pulls it off the free list!
29107c478bd9Sstevel@tonic-gate */
29117c478bd9Sstevel@tonic-gate int
29127c478bd9Sstevel@tonic-gate page_reclaim(page_t *pp, kmutex_t *lock)
29137c478bd9Sstevel@tonic-gate {
29147c478bd9Sstevel@tonic-gate struct pcf *p;
29157c478bd9Sstevel@tonic-gate struct cpu *cpup;
29166e4dd838Smec int enough;
29177c478bd9Sstevel@tonic-gate uint_t i;
29187c478bd9Sstevel@tonic-gate
29197c478bd9Sstevel@tonic-gate ASSERT(lock != NULL ? MUTEX_HELD(lock) : 1);
29207c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp) && PP_ISFREE(pp));
2921db874c57Selowe
29227c478bd9Sstevel@tonic-gate /*
29237c478bd9Sstevel@tonic-gate * If `freemem' is 0, we cannot reclaim this page from the
29247c478bd9Sstevel@tonic-gate * freelist, so release every lock we might hold: the page,
29257c478bd9Sstevel@tonic-gate * and the `lock' before blocking.
29267c478bd9Sstevel@tonic-gate *
29277c478bd9Sstevel@tonic-gate * The only way `freemem' can become 0 while there are pages
29287c478bd9Sstevel@tonic-gate * marked free (have their p->p_free bit set) is when the
29297c478bd9Sstevel@tonic-gate 	 * system is low on memory and doing a page_create(). To
29307c478bd9Sstevel@tonic-gate 	 * guarantee that once page_create() starts acquiring pages
29317c478bd9Sstevel@tonic-gate 	 * it will be able to get all that it needs, `freemem' was
29327c478bd9Sstevel@tonic-gate 	 * already decreased by the requested amount. So, we need to
29337c478bd9Sstevel@tonic-gate 	 * release this page, and let page_create() have it.
29347c478bd9Sstevel@tonic-gate *
29357c478bd9Sstevel@tonic-gate * Since `freemem' being zero is not supposed to happen, just
29367c478bd9Sstevel@tonic-gate * use the usual hash stuff as a starting point. If that bucket
29377c478bd9Sstevel@tonic-gate * is empty, then assume the worst, and start at the beginning
29387c478bd9Sstevel@tonic-gate * of the pcf array. If we always start at the beginning
29397c478bd9Sstevel@tonic-gate * when acquiring more than one pcf lock, there won't be any
29407c478bd9Sstevel@tonic-gate * deadlock problems.
29417c478bd9Sstevel@tonic-gate */
29427c478bd9Sstevel@tonic-gate
29437c478bd9Sstevel@tonic-gate /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
29447c478bd9Sstevel@tonic-gate
29456e4dd838Smec if (freemem <= throttlefree && !page_create_throttle(1l, 0)) {
29467c478bd9Sstevel@tonic-gate pcf_acquire_all();
29477c478bd9Sstevel@tonic-gate goto page_reclaim_nomem;
29487c478bd9Sstevel@tonic-gate }
29497c478bd9Sstevel@tonic-gate
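	/*
	 * Fast path: try to account for the single page against our own
	 * pcf bucket before sweeping all of the buckets below.
	 */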
295006fb6a36Sdv enough = pcf_decrement_bucket(1);
29517c478bd9Sstevel@tonic-gate
29526e4dd838Smec if (!enough) {
29537c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_reclaim_zero);
29547c478bd9Sstevel@tonic-gate /*
29557c478bd9Sstevel@tonic-gate 		 * Check again. It's possible that some other thread
29567c478bd9Sstevel@tonic-gate * could have been right behind us, and added one
29577c478bd9Sstevel@tonic-gate * to a list somewhere. Acquire each of the pcf locks
29587c478bd9Sstevel@tonic-gate * until we find a page.
29597c478bd9Sstevel@tonic-gate */
29607c478bd9Sstevel@tonic-gate p = pcf;
296106fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
29627c478bd9Sstevel@tonic-gate mutex_enter(&p->pcf_lock);
29636e4dd838Smec if (p->pcf_count >= 1) {
29646e4dd838Smec p->pcf_count -= 1;
29655797d5ddSDavid Valin /*
29665797d5ddSDavid Valin * freemem is not protected by any lock. Thus,
29675797d5ddSDavid Valin * we cannot have any assertion containing
29685797d5ddSDavid Valin * freemem here.
29695797d5ddSDavid Valin */
29705797d5ddSDavid Valin freemem -= 1;
29716e4dd838Smec enough = 1;
29726e4dd838Smec break;
29737c478bd9Sstevel@tonic-gate }
29747c478bd9Sstevel@tonic-gate p++;
29757c478bd9Sstevel@tonic-gate }
29767c478bd9Sstevel@tonic-gate
29776e4dd838Smec if (!enough) {
29787c478bd9Sstevel@tonic-gate page_reclaim_nomem:
29797c478bd9Sstevel@tonic-gate /*
29807c478bd9Sstevel@tonic-gate * We really can't have page `pp'.
29817c478bd9Sstevel@tonic-gate * Time for the no-memory dance with
29827c478bd9Sstevel@tonic-gate * page_free(). This is just like
29837c478bd9Sstevel@tonic-gate * page_create_wait(). Plus the added
29847c478bd9Sstevel@tonic-gate * attraction of releasing whatever mutex
29857c478bd9Sstevel@tonic-gate 			 * we were holding when called, passed in as `lock'.
29867c478bd9Sstevel@tonic-gate 			 * Page_unlock() will wake up any thread
29877c478bd9Sstevel@tonic-gate * waiting around for this page.
29887c478bd9Sstevel@tonic-gate */
29897c478bd9Sstevel@tonic-gate if (lock) {
29907c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_reclaim_zero_locked);
29917c478bd9Sstevel@tonic-gate mutex_exit(lock);
29927c478bd9Sstevel@tonic-gate }
29937c478bd9Sstevel@tonic-gate page_unlock(pp);
29947c478bd9Sstevel@tonic-gate
29957c478bd9Sstevel@tonic-gate /*
29967c478bd9Sstevel@tonic-gate * get this before we drop all the pcf locks.
29977c478bd9Sstevel@tonic-gate */
29987c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
29997c478bd9Sstevel@tonic-gate
30007c478bd9Sstevel@tonic-gate p = pcf;
300106fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
30027c478bd9Sstevel@tonic-gate p->pcf_wait++;
30037c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
30047c478bd9Sstevel@tonic-gate p++;
30057c478bd9Sstevel@tonic-gate }
30067c478bd9Sstevel@tonic-gate
30077c478bd9Sstevel@tonic-gate freemem_wait++;
30087c478bd9Sstevel@tonic-gate cv_wait(&freemem_cv, &new_freemem_lock);
30097c478bd9Sstevel@tonic-gate freemem_wait--;
30107c478bd9Sstevel@tonic-gate
30117c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
30127c478bd9Sstevel@tonic-gate
30137c478bd9Sstevel@tonic-gate if (lock) {
30147c478bd9Sstevel@tonic-gate mutex_enter(lock);
30157c478bd9Sstevel@tonic-gate }
30167c478bd9Sstevel@tonic-gate return (0);
30177c478bd9Sstevel@tonic-gate }
30187c478bd9Sstevel@tonic-gate
30197c478bd9Sstevel@tonic-gate /*
30207c478bd9Sstevel@tonic-gate * The pcf accounting has been done,
30217c478bd9Sstevel@tonic-gate * though none of the pcf_wait flags have been set,
30227c478bd9Sstevel@tonic-gate * drop the locks and continue on.
30237c478bd9Sstevel@tonic-gate */
30247c478bd9Sstevel@tonic-gate while (p >= pcf) {
30257c478bd9Sstevel@tonic-gate mutex_exit(&p->pcf_lock);
30267c478bd9Sstevel@tonic-gate p--;
30277c478bd9Sstevel@tonic-gate }
30287c478bd9Sstevel@tonic-gate }
30297c478bd9Sstevel@tonic-gate
30307c478bd9Sstevel@tonic-gate
30317c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_reclaim);
30326e4dd838Smec
30336e4dd838Smec /*
30346e4dd838Smec * page_list_sub will handle the case where pp is a large page.
30356e4dd838Smec * It's possible that the page was promoted while on the freelist
30366e4dd838Smec */
30377c478bd9Sstevel@tonic-gate if (PP_ISAGED(pp)) {
30386e4dd838Smec page_list_sub(pp, PG_FREE_LIST);
30397c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_FREE,
30407c478bd9Sstevel@tonic-gate "page_reclaim_free:pp %p", pp);
30417c478bd9Sstevel@tonic-gate } else {
30427c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST);
30437c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_UNFREE_CACHE,
30447c478bd9Sstevel@tonic-gate "page_reclaim_cache:pp %p", pp);
30457c478bd9Sstevel@tonic-gate }
30467c478bd9Sstevel@tonic-gate
30477c478bd9Sstevel@tonic-gate /*
30487c478bd9Sstevel@tonic-gate * clear the p_free & p_age bits since this page is no longer
30497c478bd9Sstevel@tonic-gate * on the free list. Notice that there was a brief time where
30507c478bd9Sstevel@tonic-gate * a page is marked as free, but is not on the list.
30517c478bd9Sstevel@tonic-gate *
30527c478bd9Sstevel@tonic-gate * Set the reference bit to protect against immediate pageout.
30537c478bd9Sstevel@tonic-gate */
30546e4dd838Smec PP_CLRFREE(pp);
30556e4dd838Smec PP_CLRAGED(pp);
30566e4dd838Smec page_set_props(pp, P_REF);
30577c478bd9Sstevel@tonic-gate
30587c478bd9Sstevel@tonic-gate CPU_STATS_ENTER_K();
30597c478bd9Sstevel@tonic-gate cpup = CPU; /* get cpup now that CPU cannot change */
30607c478bd9Sstevel@tonic-gate CPU_STATS_ADDQ(cpup, vm, pgrec, 1);
30617c478bd9Sstevel@tonic-gate CPU_STATS_ADDQ(cpup, vm, pgfrec, 1);
30627c478bd9Sstevel@tonic-gate CPU_STATS_EXIT_K();
30636e4dd838Smec ASSERT(pp->p_szc == 0);
30647c478bd9Sstevel@tonic-gate
30657c478bd9Sstevel@tonic-gate return (1);
30667c478bd9Sstevel@tonic-gate }
30677c478bd9Sstevel@tonic-gate
30687c478bd9Sstevel@tonic-gate /*
30697c478bd9Sstevel@tonic-gate * Destroy identity of the page and put it back on
30707c478bd9Sstevel@tonic-gate * the page free list. Assumes that the caller has
30717c478bd9Sstevel@tonic-gate * acquired the "exclusive" lock on the page.
30727c478bd9Sstevel@tonic-gate */
30737c478bd9Sstevel@tonic-gate void
30747c478bd9Sstevel@tonic-gate page_destroy(page_t *pp, int dontfree)
30757c478bd9Sstevel@tonic-gate {
30767c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) &&
30777c478bd9Sstevel@tonic-gate !page_iolock_assert(pp)) || panicstr);
307807b65a64Saguzovsk ASSERT(pp->p_slckcnt == 0 || panicstr);
30797c478bd9Sstevel@tonic-gate
30807c478bd9Sstevel@tonic-gate if (pp->p_szc != 0) {
30817c478bd9Sstevel@tonic-gate if (pp->p_vnode == NULL || IS_SWAPFSVP(pp->p_vnode) ||
3082ad23a2dbSjohansen PP_ISKAS(pp)) {
30837c478bd9Sstevel@tonic-gate panic("page_destroy: anon or kernel or no vnode "
30847c478bd9Sstevel@tonic-gate "large page %p", (void *)pp);
30857c478bd9Sstevel@tonic-gate }
30867c478bd9Sstevel@tonic-gate page_demote_vp_pages(pp);
30877c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
30887c478bd9Sstevel@tonic-gate }
30897c478bd9Sstevel@tonic-gate
30907c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy:pp %p", pp);
30917c478bd9Sstevel@tonic-gate
30927c478bd9Sstevel@tonic-gate /*
30937c478bd9Sstevel@tonic-gate * Unload translations, if any, then hash out the
30947c478bd9Sstevel@tonic-gate * page to erase its identity.
30957c478bd9Sstevel@tonic-gate */
30967c478bd9Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
30977c478bd9Sstevel@tonic-gate page_hashout(pp, NULL);
30987c478bd9Sstevel@tonic-gate
30997c478bd9Sstevel@tonic-gate if (!dontfree) {
31007c478bd9Sstevel@tonic-gate /*
31017c478bd9Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem.
31027c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt
31037c478bd9Sstevel@tonic-gate * and cowcnt since the page has an "exclusive" lock.
3104552507c5SGangadhar Mylapuram * We are doing a modified version of page_pp_unlock here.
31057c478bd9Sstevel@tonic-gate */
31067c478bd9Sstevel@tonic-gate if ((pp->p_lckcnt != 0) || (pp->p_cowcnt != 0)) {
31077c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
31087c478bd9Sstevel@tonic-gate if (pp->p_lckcnt != 0) {
31097c478bd9Sstevel@tonic-gate availrmem++;
3110552507c5SGangadhar Mylapuram pages_locked--;
31117c478bd9Sstevel@tonic-gate pp->p_lckcnt = 0;
31127c478bd9Sstevel@tonic-gate }
31137c478bd9Sstevel@tonic-gate if (pp->p_cowcnt != 0) {
31147c478bd9Sstevel@tonic-gate availrmem += pp->p_cowcnt;
3115552507c5SGangadhar Mylapuram pages_locked -= pp->p_cowcnt;
31167c478bd9Sstevel@tonic-gate pp->p_cowcnt = 0;
31177c478bd9Sstevel@tonic-gate }
31187c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
31197c478bd9Sstevel@tonic-gate }
31207c478bd9Sstevel@tonic-gate /*
31217c478bd9Sstevel@tonic-gate * Put the page on the "free" list.
31227c478bd9Sstevel@tonic-gate */
31237c478bd9Sstevel@tonic-gate page_free(pp, 0);
31247c478bd9Sstevel@tonic-gate }
31257c478bd9Sstevel@tonic-gate }
31267c478bd9Sstevel@tonic-gate
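/*
 * Large page analogue of page_destroy(): unload any translations, hash out
 * each constituent page to erase its identity, return any lock counts to
 * availrmem, and put the entire large page back on the free list.
 */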
31277c478bd9Sstevel@tonic-gate void
31287c478bd9Sstevel@tonic-gate page_destroy_pages(page_t *pp)
31297c478bd9Sstevel@tonic-gate {
31307c478bd9Sstevel@tonic-gate
31317c478bd9Sstevel@tonic-gate page_t *tpp, *rootpp = NULL;
31327c478bd9Sstevel@tonic-gate pgcnt_t pgcnt = page_get_pagecnt(pp->p_szc);
31337c478bd9Sstevel@tonic-gate pgcnt_t i, pglcks = 0;
31347c478bd9Sstevel@tonic-gate uint_t szc = pp->p_szc;
31357c478bd9Sstevel@tonic-gate
31367c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc != 0 && pp->p_szc < page_num_pagesizes());
31377c478bd9Sstevel@tonic-gate
31387c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_destroy_pages);
31397c478bd9Sstevel@tonic-gate
31407c478bd9Sstevel@tonic-gate TRACE_1(TR_FAC_VM, TR_PAGE_DESTROY, "page_destroy_pages:pp %p", pp);
31417c478bd9Sstevel@tonic-gate
31427c478bd9Sstevel@tonic-gate if ((page_pptonum(pp) & (pgcnt - 1)) != 0) {
31437c478bd9Sstevel@tonic-gate panic("page_destroy_pages: not root page %p", (void *)pp);
31447c478bd9Sstevel@tonic-gate /*NOTREACHED*/
31457c478bd9Sstevel@tonic-gate }
31467c478bd9Sstevel@tonic-gate
3147affbd3ccSkchow for (i = 0, tpp = pp; i < pgcnt; i++, tpp++) {
31487c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(tpp) &&
31497c478bd9Sstevel@tonic-gate !page_iolock_assert(tpp)) || panicstr);
315007b65a64Saguzovsk ASSERT(tpp->p_slckcnt == 0 || panicstr);
31517c478bd9Sstevel@tonic-gate (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
31527c478bd9Sstevel@tonic-gate page_hashout(tpp, NULL);
31537c478bd9Sstevel@tonic-gate ASSERT(tpp->p_offset == (u_offset_t)-1);
31547c478bd9Sstevel@tonic-gate if (tpp->p_lckcnt != 0) {
31557c478bd9Sstevel@tonic-gate pglcks++;
31567c478bd9Sstevel@tonic-gate tpp->p_lckcnt = 0;
31577c478bd9Sstevel@tonic-gate } else if (tpp->p_cowcnt != 0) {
31587c478bd9Sstevel@tonic-gate pglcks += tpp->p_cowcnt;
31597c478bd9Sstevel@tonic-gate tpp->p_cowcnt = 0;
31607c478bd9Sstevel@tonic-gate }
31617c478bd9Sstevel@tonic-gate ASSERT(!hat_page_getshare(tpp));
31627c478bd9Sstevel@tonic-gate ASSERT(tpp->p_vnode == NULL);
31637c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc);
31647c478bd9Sstevel@tonic-gate
31657c478bd9Sstevel@tonic-gate PP_SETFREE(tpp);
31669d0d62adSJason Beloro page_clr_all_props(tpp);
31677c478bd9Sstevel@tonic-gate PP_SETAGED(tpp);
31687c478bd9Sstevel@tonic-gate ASSERT(tpp->p_next == tpp);
31697c478bd9Sstevel@tonic-gate ASSERT(tpp->p_prev == tpp);
31707c478bd9Sstevel@tonic-gate page_list_concat(&rootpp, &tpp);
31717c478bd9Sstevel@tonic-gate }
31727c478bd9Sstevel@tonic-gate
31737c478bd9Sstevel@tonic-gate ASSERT(rootpp == pp);
31747c478bd9Sstevel@tonic-gate if (pglcks != 0) {
31757c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
31767c478bd9Sstevel@tonic-gate availrmem += pglcks;
31777c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
31787c478bd9Sstevel@tonic-gate }
31797c478bd9Sstevel@tonic-gate
31807c478bd9Sstevel@tonic-gate page_list_add_pages(rootpp, 0);
31817c478bd9Sstevel@tonic-gate page_create_putback(pgcnt);
31827c478bd9Sstevel@tonic-gate }
31837c478bd9Sstevel@tonic-gate
31847c478bd9Sstevel@tonic-gate /*
31857c478bd9Sstevel@tonic-gate * Similar to page_destroy(), but destroys pages which are
31867c478bd9Sstevel@tonic-gate * locked and known to be on the page free list. Since
31877c478bd9Sstevel@tonic-gate * the page is known to be free and locked, no one can access
31887c478bd9Sstevel@tonic-gate * it.
31897c478bd9Sstevel@tonic-gate *
31907c478bd9Sstevel@tonic-gate * Also, the number of free pages does not change.
31917c478bd9Sstevel@tonic-gate */
31927c478bd9Sstevel@tonic-gate void
31937c478bd9Sstevel@tonic-gate page_destroy_free(page_t *pp)
31947c478bd9Sstevel@tonic-gate {
31957c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
31967c478bd9Sstevel@tonic-gate ASSERT(PP_ISFREE(pp));
31977c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode);
31987c478bd9Sstevel@tonic-gate ASSERT(hat_page_getattr(pp, P_MOD | P_REF | P_RO) == 0);
31997c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp));
32007c478bd9Sstevel@tonic-gate ASSERT(PP_ISAGED(pp) == 0);
32017c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
32027c478bd9Sstevel@tonic-gate
32037c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_destroy_free);
32047c478bd9Sstevel@tonic-gate page_list_sub(pp, PG_CACHE_LIST);
32057c478bd9Sstevel@tonic-gate
32067c478bd9Sstevel@tonic-gate page_hashout(pp, NULL);
32077c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode == NULL);
32087c478bd9Sstevel@tonic-gate ASSERT(pp->p_offset == (u_offset_t)-1);
32097c478bd9Sstevel@tonic-gate ASSERT(pp->p_hash == NULL);
32107c478bd9Sstevel@tonic-gate
32117c478bd9Sstevel@tonic-gate PP_SETAGED(pp);
32127c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
32137c478bd9Sstevel@tonic-gate page_unlock(pp);
32147c478bd9Sstevel@tonic-gate
32157c478bd9Sstevel@tonic-gate mutex_enter(&new_freemem_lock);
32167c478bd9Sstevel@tonic-gate if (freemem_wait) {
32177c478bd9Sstevel@tonic-gate cv_signal(&freemem_cv);
32187c478bd9Sstevel@tonic-gate }
32197c478bd9Sstevel@tonic-gate mutex_exit(&new_freemem_lock);
32207c478bd9Sstevel@tonic-gate }
32217c478bd9Sstevel@tonic-gate
32227c478bd9Sstevel@tonic-gate /*
32237c478bd9Sstevel@tonic-gate * Rename the page "opp" to have an identity specified
32247c478bd9Sstevel@tonic-gate * by [vp, off]. If a page already exists with this name
32257c478bd9Sstevel@tonic-gate * it is locked and destroyed. Note that the page's
32267c478bd9Sstevel@tonic-gate * translations are not unloaded during the rename.
32277c478bd9Sstevel@tonic-gate *
32287c478bd9Sstevel@tonic-gate * This routine is used by the anon layer to "steal" the
32297c478bd9Sstevel@tonic-gate * original page and is not unlike destroying a page and
32307c478bd9Sstevel@tonic-gate * creating a new page using the same page frame.
32317c478bd9Sstevel@tonic-gate *
32327c478bd9Sstevel@tonic-gate * XXX -- Could deadlock if caller 1 tries to rename A to B while
32337c478bd9Sstevel@tonic-gate * caller 2 tries to rename B to A.
32347c478bd9Sstevel@tonic-gate */
32357c478bd9Sstevel@tonic-gate void
32367c478bd9Sstevel@tonic-gate page_rename(page_t *opp, vnode_t *vp, u_offset_t off)
32377c478bd9Sstevel@tonic-gate {
32387c478bd9Sstevel@tonic-gate page_t *pp;
32397c478bd9Sstevel@tonic-gate int olckcnt = 0;
32407c478bd9Sstevel@tonic-gate int ocowcnt = 0;
32417c478bd9Sstevel@tonic-gate kmutex_t *phm;
32427c478bd9Sstevel@tonic-gate ulong_t index;
32437c478bd9Sstevel@tonic-gate
32447c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(opp) && !page_iolock_assert(opp));
32457c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
32467c478bd9Sstevel@tonic-gate ASSERT(PP_ISFREE(opp) == 0);
32477c478bd9Sstevel@tonic-gate
32487c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_rename_count);
32497c478bd9Sstevel@tonic-gate
32507c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_VM, TR_PAGE_RENAME,
32516e4dd838Smec "page rename:pp %p vp %p off %llx", opp, vp, off);
32527c478bd9Sstevel@tonic-gate
325337fbc076Saguzovsk /*
325437fbc076Saguzovsk * CacheFS may call page_rename for a large NFS page
325537fbc076Saguzovsk * when both CacheFS and NFS mount points are used
325637fbc076Saguzovsk * by applications. Demote this large page before
325737fbc076Saguzovsk * renaming it, to ensure that there are no "partial"
325837fbc076Saguzovsk * large pages left lying around.
325937fbc076Saguzovsk */
326037fbc076Saguzovsk if (opp->p_szc != 0) {
326137fbc076Saguzovsk vnode_t *ovp = opp->p_vnode;
326237fbc076Saguzovsk ASSERT(ovp != NULL);
326337fbc076Saguzovsk ASSERT(!IS_SWAPFSVP(ovp));
3264ad23a2dbSjohansen ASSERT(!VN_ISKAS(ovp));
326537fbc076Saguzovsk page_demote_vp_pages(opp);
326637fbc076Saguzovsk ASSERT(opp->p_szc == 0);
326737fbc076Saguzovsk }
326837fbc076Saguzovsk
32697c478bd9Sstevel@tonic-gate page_hashout(opp, NULL);
32707c478bd9Sstevel@tonic-gate PP_CLRAGED(opp);
32717c478bd9Sstevel@tonic-gate
32727c478bd9Sstevel@tonic-gate /*
32737c478bd9Sstevel@tonic-gate * Acquire the appropriate page hash lock, since
32747c478bd9Sstevel@tonic-gate * we're going to rename the page.
32757c478bd9Sstevel@tonic-gate */
32767c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, off);
32777c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(index);
32787c478bd9Sstevel@tonic-gate mutex_enter(phm);
32797c478bd9Sstevel@tonic-gate top:
32807c478bd9Sstevel@tonic-gate /*
32817c478bd9Sstevel@tonic-gate * Look for an existing page with this name and destroy it if found.
32827c478bd9Sstevel@tonic-gate * By holding the page hash lock all the way to the page_hashin()
32837c478bd9Sstevel@tonic-gate * call, we are assured that no page can be created with this
32847c478bd9Sstevel@tonic-gate * identity. In the case when the phm lock is dropped to undo any
32857c478bd9Sstevel@tonic-gate * hat layer mappings, the existing page is held with an "exclusive"
32867c478bd9Sstevel@tonic-gate * lock, again preventing another page from being created with
32877c478bd9Sstevel@tonic-gate * this identity.
32887c478bd9Sstevel@tonic-gate */
3289e7c874afSJosef 'Jeff' Sipek pp = page_hash_search(index, vp, off);
32907c478bd9Sstevel@tonic-gate if (pp != NULL) {
32917c478bd9Sstevel@tonic-gate VM_STAT_ADD(page_rename_exists);
32927c478bd9Sstevel@tonic-gate
32937c478bd9Sstevel@tonic-gate /*
32947c478bd9Sstevel@tonic-gate * As it turns out, this is one of only two places where
32957c478bd9Sstevel@tonic-gate * page_lock() needs to hold the passed in lock in the
32967c478bd9Sstevel@tonic-gate * successful case. In all of the others, the lock could
32977c478bd9Sstevel@tonic-gate * be dropped as soon as the attempt is made to lock
32987c478bd9Sstevel@tonic-gate 		 * the page. It is tempting to add yet another argument,
32997c478bd9Sstevel@tonic-gate * PL_KEEP or PL_DROP, to let page_lock know what to do.
33007c478bd9Sstevel@tonic-gate */
33017c478bd9Sstevel@tonic-gate if (!page_lock(pp, SE_EXCL, phm, P_RECLAIM)) {
33027c478bd9Sstevel@tonic-gate /*
33037c478bd9Sstevel@tonic-gate * Went to sleep because the page could not
33047c478bd9Sstevel@tonic-gate * be locked. We were woken up when the page
33057c478bd9Sstevel@tonic-gate * was unlocked, or when the page was destroyed.
33067c478bd9Sstevel@tonic-gate * In either case, `phm' was dropped while we
33077c478bd9Sstevel@tonic-gate * slept. Hence we should not just roar through
33087c478bd9Sstevel@tonic-gate * this loop.
33097c478bd9Sstevel@tonic-gate */
33107c478bd9Sstevel@tonic-gate goto top;
33117c478bd9Sstevel@tonic-gate }
33127c478bd9Sstevel@tonic-gate
331337fbc076Saguzovsk /*
331437fbc076Saguzovsk * If an existing page is a large page, then demote
331537fbc076Saguzovsk * it to ensure that no "partial" large pages are
331637fbc076Saguzovsk * "created" after page_rename. An existing page
331737fbc076Saguzovsk * can be a CacheFS page, and can't belong to swapfs.
331837fbc076Saguzovsk */
33197c478bd9Sstevel@tonic-gate if (hat_page_is_mapped(pp)) {
33207c478bd9Sstevel@tonic-gate /*
33217c478bd9Sstevel@tonic-gate * Unload translations. Since we hold the
33227c478bd9Sstevel@tonic-gate * exclusive lock on this page, the page
33237c478bd9Sstevel@tonic-gate * can not be changed while we drop phm.
33247c478bd9Sstevel@tonic-gate * This is also not a lock protocol violation,
33257c478bd9Sstevel@tonic-gate * but rather the proper way to do things.
33267c478bd9Sstevel@tonic-gate */
33277c478bd9Sstevel@tonic-gate mutex_exit(phm);
33287c478bd9Sstevel@tonic-gate (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
332937fbc076Saguzovsk if (pp->p_szc != 0) {
333037fbc076Saguzovsk ASSERT(!IS_SWAPFSVP(vp));
3331ad23a2dbSjohansen ASSERT(!VN_ISKAS(vp));
333237fbc076Saguzovsk page_demote_vp_pages(pp);
333337fbc076Saguzovsk ASSERT(pp->p_szc == 0);
333437fbc076Saguzovsk }
333537fbc076Saguzovsk mutex_enter(phm);
333637fbc076Saguzovsk } else if (pp->p_szc != 0) {
333737fbc076Saguzovsk ASSERT(!IS_SWAPFSVP(vp));
3338ad23a2dbSjohansen ASSERT(!VN_ISKAS(vp));
333937fbc076Saguzovsk mutex_exit(phm);
334037fbc076Saguzovsk page_demote_vp_pages(pp);
334137fbc076Saguzovsk ASSERT(pp->p_szc == 0);
33427c478bd9Sstevel@tonic-gate mutex_enter(phm);
33437c478bd9Sstevel@tonic-gate }
33447c478bd9Sstevel@tonic-gate page_hashout(pp, phm);
33457c478bd9Sstevel@tonic-gate }
33467c478bd9Sstevel@tonic-gate /*
33477c478bd9Sstevel@tonic-gate * Hash in the page with the new identity.
33487c478bd9Sstevel@tonic-gate */
33497c478bd9Sstevel@tonic-gate if (!page_hashin(opp, vp, off, phm)) {
33507c478bd9Sstevel@tonic-gate /*
33517c478bd9Sstevel@tonic-gate * We were holding phm while we searched for [vp, off]
33527c478bd9Sstevel@tonic-gate * and only dropped phm if we found and locked a page.
33537c478bd9Sstevel@tonic-gate 		 * If we can't create this page now, then something
33547c478bd9Sstevel@tonic-gate * is really broken.
33557c478bd9Sstevel@tonic-gate */
33567c478bd9Sstevel@tonic-gate panic("page_rename: Can't hash in page: %p", (void *)pp);
33577c478bd9Sstevel@tonic-gate /*NOTREACHED*/
33587c478bd9Sstevel@tonic-gate }
33597c478bd9Sstevel@tonic-gate
33607c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(phm));
33617c478bd9Sstevel@tonic-gate mutex_exit(phm);
33627c478bd9Sstevel@tonic-gate
33637c478bd9Sstevel@tonic-gate /*
33647c478bd9Sstevel@tonic-gate 	 * Now that we have dropped phm, let's get around to finishing up
33657c478bd9Sstevel@tonic-gate * with pp.
33667c478bd9Sstevel@tonic-gate */
33677c478bd9Sstevel@tonic-gate if (pp != NULL) {
33687c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(pp));
33697c478bd9Sstevel@tonic-gate /* for now large pages should not end up here */
33707c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
33717c478bd9Sstevel@tonic-gate /*
33727c478bd9Sstevel@tonic-gate * Save the locks for transfer to the new page and then
33737c478bd9Sstevel@tonic-gate * clear them so page_free doesn't think they're important.
33747c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and
33757c478bd9Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock.
33767c478bd9Sstevel@tonic-gate */
33777c478bd9Sstevel@tonic-gate olckcnt = pp->p_lckcnt;
33787c478bd9Sstevel@tonic-gate ocowcnt = pp->p_cowcnt;
33797c478bd9Sstevel@tonic-gate pp->p_lckcnt = pp->p_cowcnt = 0;
33807c478bd9Sstevel@tonic-gate
33817c478bd9Sstevel@tonic-gate /*
33827c478bd9Sstevel@tonic-gate * Put the page on the "free" list after we drop
33837c478bd9Sstevel@tonic-gate * the lock. The less work under the lock the better.
33847c478bd9Sstevel@tonic-gate */
33857c478bd9Sstevel@tonic-gate /*LINTED: constant in conditional context*/
33867c478bd9Sstevel@tonic-gate VN_DISPOSE(pp, B_FREE, 0, kcred);
33877c478bd9Sstevel@tonic-gate }
33887c478bd9Sstevel@tonic-gate
33897c478bd9Sstevel@tonic-gate /*
33907c478bd9Sstevel@tonic-gate * Transfer the lock count from the old page (if any).
33917c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and
33927c478bd9Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock.
33937c478bd9Sstevel@tonic-gate */
33947c478bd9Sstevel@tonic-gate opp->p_lckcnt += olckcnt;
33957c478bd9Sstevel@tonic-gate opp->p_cowcnt += ocowcnt;
33967c478bd9Sstevel@tonic-gate }
33977c478bd9Sstevel@tonic-gate
33987c478bd9Sstevel@tonic-gate /*
33997c478bd9Sstevel@tonic-gate * low level routine to add page `pp' to the hash and vp chains for [vp, offset]
34007c478bd9Sstevel@tonic-gate *
34017c478bd9Sstevel@tonic-gate * Pages are normally inserted at the start of a vnode's v_pages list.
34027c478bd9Sstevel@tonic-gate * If the vnode is VMODSORT and the page is modified, it goes at the end.
34037c478bd9Sstevel@tonic-gate * This can happen when a modified page is relocated for DR.
34047c478bd9Sstevel@tonic-gate *
34057c478bd9Sstevel@tonic-gate * Returns 1 on success and 0 on failure.
34067c478bd9Sstevel@tonic-gate */
34077c478bd9Sstevel@tonic-gate static int
34087c478bd9Sstevel@tonic-gate page_do_hashin(page_t *pp, vnode_t *vp, u_offset_t offset)
34097c478bd9Sstevel@tonic-gate {
34107c478bd9Sstevel@tonic-gate page_t **listp;
34117c478bd9Sstevel@tonic-gate page_t *tp;
34127c478bd9Sstevel@tonic-gate ulong_t index;
34137c478bd9Sstevel@tonic-gate
34147c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
34157c478bd9Sstevel@tonic-gate ASSERT(vp != NULL);
34167c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
34177c478bd9Sstevel@tonic-gate
34187c478bd9Sstevel@tonic-gate /*
34197c478bd9Sstevel@tonic-gate * Be sure to set these up before the page is inserted on the hash
34207c478bd9Sstevel@tonic-gate * list. As soon as the page is placed on the list some other
34217c478bd9Sstevel@tonic-gate * thread might get confused and wonder how this page could
34227c478bd9Sstevel@tonic-gate * possibly hash to this list.
34237c478bd9Sstevel@tonic-gate */
34247c478bd9Sstevel@tonic-gate pp->p_vnode = vp;
34257c478bd9Sstevel@tonic-gate pp->p_offset = offset;
34267c478bd9Sstevel@tonic-gate
34277c478bd9Sstevel@tonic-gate /*
34287c478bd9Sstevel@tonic-gate * record if this page is on a swap vnode
34297c478bd9Sstevel@tonic-gate */
34307c478bd9Sstevel@tonic-gate if ((vp->v_flag & VISSWAP) != 0)
34317c478bd9Sstevel@tonic-gate PP_SETSWAP(pp);
34327c478bd9Sstevel@tonic-gate
34337c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, offset);
34347c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index)));
34357c478bd9Sstevel@tonic-gate listp = &page_hash[index];
34367c478bd9Sstevel@tonic-gate
34377c478bd9Sstevel@tonic-gate /*
34387c478bd9Sstevel@tonic-gate * If this page is already hashed in, fail this attempt to add it.
34397c478bd9Sstevel@tonic-gate */
34407c478bd9Sstevel@tonic-gate for (tp = *listp; tp != NULL; tp = tp->p_hash) {
34417c478bd9Sstevel@tonic-gate if (tp->p_vnode == vp && tp->p_offset == offset) {
34427c478bd9Sstevel@tonic-gate pp->p_vnode = NULL;
34437c478bd9Sstevel@tonic-gate pp->p_offset = (u_offset_t)(-1);
34447c478bd9Sstevel@tonic-gate return (0);
34457c478bd9Sstevel@tonic-gate }
34467c478bd9Sstevel@tonic-gate }
34477c478bd9Sstevel@tonic-gate pp->p_hash = *listp;
34487c478bd9Sstevel@tonic-gate *listp = pp;
34497c478bd9Sstevel@tonic-gate
34507c478bd9Sstevel@tonic-gate /*
34517c478bd9Sstevel@tonic-gate * Add the page to the vnode's list of pages
34527c478bd9Sstevel@tonic-gate */
34537c478bd9Sstevel@tonic-gate if (vp->v_pages != NULL && IS_VMODSORT(vp) && hat_ismod(pp))
34547c478bd9Sstevel@tonic-gate listp = &vp->v_pages->p_vpprev->p_vpnext;
34557c478bd9Sstevel@tonic-gate else
34567c478bd9Sstevel@tonic-gate listp = &vp->v_pages;
34577c478bd9Sstevel@tonic-gate
34587c478bd9Sstevel@tonic-gate page_vpadd(listp, pp);
34597c478bd9Sstevel@tonic-gate
34607c478bd9Sstevel@tonic-gate return (1);
34617c478bd9Sstevel@tonic-gate }
34627c478bd9Sstevel@tonic-gate
34637c478bd9Sstevel@tonic-gate /*
34647c478bd9Sstevel@tonic-gate * Add page `pp' to both the hash and vp chains for [vp, offset].
34657c478bd9Sstevel@tonic-gate *
34667c478bd9Sstevel@tonic-gate * Returns 1 on success and 0 on failure.
34677c478bd9Sstevel@tonic-gate * If hold is passed in, it is not dropped.
34687c478bd9Sstevel@tonic-gate */
34697c478bd9Sstevel@tonic-gate int
34707c478bd9Sstevel@tonic-gate page_hashin(page_t *pp, vnode_t *vp, u_offset_t offset, kmutex_t *hold)
34717c478bd9Sstevel@tonic-gate {
34727c478bd9Sstevel@tonic-gate kmutex_t *phm = NULL;
34737c478bd9Sstevel@tonic-gate kmutex_t *vphm;
34747c478bd9Sstevel@tonic-gate int rc;
34757c478bd9Sstevel@tonic-gate
34767c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp)));
3477c7531c7fSPrakash Sangappa ASSERT(pp->p_fsdata == 0 || panicstr);
34787c478bd9Sstevel@tonic-gate
34797c478bd9Sstevel@tonic-gate TRACE_3(TR_FAC_VM, TR_PAGE_HASHIN,
34806e4dd838Smec "page_hashin:pp %p vp %p offset %llx",
34816e4dd838Smec pp, vp, offset);
34827c478bd9Sstevel@tonic-gate
34837c478bd9Sstevel@tonic-gate VM_STAT_ADD(hashin_count);
34847c478bd9Sstevel@tonic-gate
34857c478bd9Sstevel@tonic-gate if (hold != NULL)
34867c478bd9Sstevel@tonic-gate phm = hold;
34877c478bd9Sstevel@tonic-gate else {
34887c478bd9Sstevel@tonic-gate VM_STAT_ADD(hashin_not_held);
34897c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, offset));
34907c478bd9Sstevel@tonic-gate mutex_enter(phm);
34917c478bd9Sstevel@tonic-gate }
34927c478bd9Sstevel@tonic-gate
34937c478bd9Sstevel@tonic-gate vphm = page_vnode_mutex(vp);
34947c478bd9Sstevel@tonic-gate mutex_enter(vphm);
34957c478bd9Sstevel@tonic-gate rc = page_do_hashin(pp, vp, offset);
34967c478bd9Sstevel@tonic-gate mutex_exit(vphm);
34977c478bd9Sstevel@tonic-gate if (hold == NULL)
34987c478bd9Sstevel@tonic-gate mutex_exit(phm);
3499d94ffb28Sjmcp if (rc == 0)
35007c478bd9Sstevel@tonic-gate VM_STAT_ADD(hashin_already);
35017c478bd9Sstevel@tonic-gate return (rc);
35027c478bd9Sstevel@tonic-gate }
35037c478bd9Sstevel@tonic-gate
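/*
 * Example (hypothetical caller; only the page_*, PAGE_HASH_* and mutex_*
 * names are real): a caller that needs its search-then-insert sequence to
 * be atomic takes the hash mutex itself and passes it in as `hold', in
 * which case page_hashin() leaves the mutex held, much as page_rename()
 * above does.  The page must be held SE_EXCL throughout.
 *
 *	ulong_t index = PAGE_HASH_FUNC(vp, off);
 *	kmutex_t *phm = PAGE_HASH_MUTEX(index);
 *
 *	mutex_enter(phm);
 *	if (page_hash_search(index, vp, off) == NULL &&
 *	    !page_hashin(pp, vp, off, phm))
 *		panic("identity appeared while phm was held");
 *	mutex_exit(phm);
 */
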
35047c478bd9Sstevel@tonic-gate /*
35057c478bd9Sstevel@tonic-gate * Remove page ``pp'' from the hash and vp chains and remove vp association.
35067c478bd9Sstevel@tonic-gate  * Both the page hash mutex and the vnode's page mutex must be held.
35077c478bd9Sstevel@tonic-gate */
35087c478bd9Sstevel@tonic-gate static void
35097c478bd9Sstevel@tonic-gate page_do_hashout(page_t *pp)
35107c478bd9Sstevel@tonic-gate {
35117c478bd9Sstevel@tonic-gate page_t **hpp;
35127c478bd9Sstevel@tonic-gate page_t *hp;
35137c478bd9Sstevel@tonic-gate vnode_t *vp = pp->p_vnode;
35147c478bd9Sstevel@tonic-gate
35157c478bd9Sstevel@tonic-gate ASSERT(vp != NULL);
35167c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
35177c478bd9Sstevel@tonic-gate
35187c478bd9Sstevel@tonic-gate /*
35197c478bd9Sstevel@tonic-gate * First, take pp off of its hash chain.
35207c478bd9Sstevel@tonic-gate */
35217c478bd9Sstevel@tonic-gate hpp = &page_hash[PAGE_HASH_FUNC(vp, pp->p_offset)];
35227c478bd9Sstevel@tonic-gate
35237c478bd9Sstevel@tonic-gate for (;;) {
35247c478bd9Sstevel@tonic-gate hp = *hpp;
35257c478bd9Sstevel@tonic-gate if (hp == pp)
35267c478bd9Sstevel@tonic-gate break;
35277c478bd9Sstevel@tonic-gate if (hp == NULL) {
35287c478bd9Sstevel@tonic-gate panic("page_do_hashout");
35297c478bd9Sstevel@tonic-gate /*NOTREACHED*/
35307c478bd9Sstevel@tonic-gate }
35317c478bd9Sstevel@tonic-gate hpp = &hp->p_hash;
35327c478bd9Sstevel@tonic-gate }
35337c478bd9Sstevel@tonic-gate *hpp = pp->p_hash;
35347c478bd9Sstevel@tonic-gate
35357c478bd9Sstevel@tonic-gate /*
35367c478bd9Sstevel@tonic-gate * Now remove it from its associated vnode.
35377c478bd9Sstevel@tonic-gate */
35387c478bd9Sstevel@tonic-gate if (vp->v_pages)
35397c478bd9Sstevel@tonic-gate page_vpsub(&vp->v_pages, pp);
35407c478bd9Sstevel@tonic-gate
35417c478bd9Sstevel@tonic-gate pp->p_hash = NULL;
35429d0d62adSJason Beloro page_clr_all_props(pp);
35437c478bd9Sstevel@tonic-gate PP_CLRSWAP(pp);
35447c478bd9Sstevel@tonic-gate pp->p_vnode = NULL;
35457c478bd9Sstevel@tonic-gate pp->p_offset = (u_offset_t)-1;
3546c7531c7fSPrakash Sangappa pp->p_fsdata = 0;
35477c478bd9Sstevel@tonic-gate }
35487c478bd9Sstevel@tonic-gate
35497c478bd9Sstevel@tonic-gate /*
35507c478bd9Sstevel@tonic-gate * Remove page ``pp'' from the hash and vp chains and remove vp association.
35517c478bd9Sstevel@tonic-gate *
35527c478bd9Sstevel@tonic-gate * When `phm' is non-NULL it contains the address of the mutex protecting the
35537c478bd9Sstevel@tonic-gate * hash list pp is on. It is not dropped.
35547c478bd9Sstevel@tonic-gate */
35557c478bd9Sstevel@tonic-gate void
35567c478bd9Sstevel@tonic-gate page_hashout(page_t *pp, kmutex_t *phm)
35577c478bd9Sstevel@tonic-gate {
35587c478bd9Sstevel@tonic-gate vnode_t *vp;
35597c478bd9Sstevel@tonic-gate ulong_t index;
35607c478bd9Sstevel@tonic-gate kmutex_t *nphm;
35617c478bd9Sstevel@tonic-gate kmutex_t *vphm;
35627c478bd9Sstevel@tonic-gate kmutex_t *sep;
35637c478bd9Sstevel@tonic-gate
35647c478bd9Sstevel@tonic-gate ASSERT(phm != NULL ? MUTEX_HELD(phm) : 1);
35657c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode != NULL);
35667c478bd9Sstevel@tonic-gate ASSERT((PAGE_EXCL(pp) && !page_iolock_assert(pp)) || panicstr);
35677c478bd9Sstevel@tonic-gate ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp->p_vnode)));
35687c478bd9Sstevel@tonic-gate
35697c478bd9Sstevel@tonic-gate vp = pp->p_vnode;
35707c478bd9Sstevel@tonic-gate
35717c478bd9Sstevel@tonic-gate TRACE_2(TR_FAC_VM, TR_PAGE_HASHOUT,
35726e4dd838Smec "page_hashout:pp %p vp %p", pp, vp);
35737c478bd9Sstevel@tonic-gate
35747c478bd9Sstevel@tonic-gate /*
35757c478bd9Sstevel@tonic-gate *
35767c478bd9Sstevel@tonic-gate */
35777c478bd9Sstevel@tonic-gate VM_STAT_ADD(hashout_count);
35787c478bd9Sstevel@tonic-gate index = PAGE_HASH_FUNC(vp, pp->p_offset);
35797c478bd9Sstevel@tonic-gate if (phm == NULL) {
35807c478bd9Sstevel@tonic-gate VM_STAT_ADD(hashout_not_held);
35817c478bd9Sstevel@tonic-gate nphm = PAGE_HASH_MUTEX(index);
35827c478bd9Sstevel@tonic-gate mutex_enter(nphm);
35837c478bd9Sstevel@tonic-gate }
35847c478bd9Sstevel@tonic-gate ASSERT(phm ? phm == PAGE_HASH_MUTEX(index) : 1);
35857c478bd9Sstevel@tonic-gate
35867c478bd9Sstevel@tonic-gate
35877c478bd9Sstevel@tonic-gate /*
35887c478bd9Sstevel@tonic-gate 	 * Grab the page vnode mutex and remove the page from its chains.
35897c478bd9Sstevel@tonic-gate */
35907c478bd9Sstevel@tonic-gate vphm = page_vnode_mutex(vp);
35917c478bd9Sstevel@tonic-gate mutex_enter(vphm);
35927c478bd9Sstevel@tonic-gate
35937c478bd9Sstevel@tonic-gate page_do_hashout(pp);
35947c478bd9Sstevel@tonic-gate
35957c478bd9Sstevel@tonic-gate mutex_exit(vphm);
35967c478bd9Sstevel@tonic-gate if (phm == NULL)
35977c478bd9Sstevel@tonic-gate mutex_exit(nphm);
35987c478bd9Sstevel@tonic-gate
35997c478bd9Sstevel@tonic-gate /*
36007c478bd9Sstevel@tonic-gate * Wake up processes waiting for this page. The page's
36017c478bd9Sstevel@tonic-gate 	 * identity has been changed, so it is probably no longer
36027c478bd9Sstevel@tonic-gate 	 * the desired page.
36037c478bd9Sstevel@tonic-gate */
36047c478bd9Sstevel@tonic-gate sep = page_se_mutex(pp);
36057c478bd9Sstevel@tonic-gate mutex_enter(sep);
360642787a71Sstans pp->p_selock &= ~SE_EWANTED;
36077c478bd9Sstevel@tonic-gate if (CV_HAS_WAITERS(&pp->p_cv))
36087c478bd9Sstevel@tonic-gate cv_broadcast(&pp->p_cv);
36097c478bd9Sstevel@tonic-gate mutex_exit(sep);
36107c478bd9Sstevel@tonic-gate }
36117c478bd9Sstevel@tonic-gate
36127c478bd9Sstevel@tonic-gate /*
36137c478bd9Sstevel@tonic-gate * Add the page to the front of a linked list of pages
36147c478bd9Sstevel@tonic-gate * using the p_next & p_prev pointers for the list.
36157c478bd9Sstevel@tonic-gate * The caller is responsible for protecting the list pointers.
36167c478bd9Sstevel@tonic-gate */
36177c478bd9Sstevel@tonic-gate void
36187c478bd9Sstevel@tonic-gate page_add(page_t **ppp, page_t *pp)
36197c478bd9Sstevel@tonic-gate {
36207c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
36217c478bd9Sstevel@tonic-gate
36227c478bd9Sstevel@tonic-gate page_add_common(ppp, pp);
36237c478bd9Sstevel@tonic-gate }
36247c478bd9Sstevel@tonic-gate
36257c478bd9Sstevel@tonic-gate
36267c478bd9Sstevel@tonic-gate
36277c478bd9Sstevel@tonic-gate /*
36287c478bd9Sstevel@tonic-gate * Common code for page_add() and mach_page_add()
36297c478bd9Sstevel@tonic-gate */
36307c478bd9Sstevel@tonic-gate void
36317c478bd9Sstevel@tonic-gate page_add_common(page_t **ppp, page_t *pp)
36327c478bd9Sstevel@tonic-gate {
36337c478bd9Sstevel@tonic-gate if (*ppp == NULL) {
36347c478bd9Sstevel@tonic-gate pp->p_next = pp->p_prev = pp;
36357c478bd9Sstevel@tonic-gate } else {
36367c478bd9Sstevel@tonic-gate pp->p_next = *ppp;
36377c478bd9Sstevel@tonic-gate pp->p_prev = (*ppp)->p_prev;
36387c478bd9Sstevel@tonic-gate (*ppp)->p_prev = pp;
36397c478bd9Sstevel@tonic-gate pp->p_prev->p_next = pp;
36407c478bd9Sstevel@tonic-gate }
36417c478bd9Sstevel@tonic-gate *ppp = pp;
36427c478bd9Sstevel@tonic-gate }
36437c478bd9Sstevel@tonic-gate
36447c478bd9Sstevel@tonic-gate
36457c478bd9Sstevel@tonic-gate /*
36467c478bd9Sstevel@tonic-gate * Remove this page from a linked list of pages
36477c478bd9Sstevel@tonic-gate * using the p_next & p_prev pointers for the list.
36487c478bd9Sstevel@tonic-gate *
36497c478bd9Sstevel@tonic-gate * The caller is responsible for protecting the list pointers.
36507c478bd9Sstevel@tonic-gate */
36517c478bd9Sstevel@tonic-gate void
36527c478bd9Sstevel@tonic-gate page_sub(page_t **ppp, page_t *pp)
36537c478bd9Sstevel@tonic-gate {
36543df2e8b2SRobert Mustacchi ASSERT(pp != NULL && (PP_ISFREE(pp)) ? 1 :
36557c478bd9Sstevel@tonic-gate (PAGE_EXCL(pp)) || (PAGE_SHARED(pp) && page_iolock_assert(pp)));
36567c478bd9Sstevel@tonic-gate
36577c478bd9Sstevel@tonic-gate if (*ppp == NULL || pp == NULL) {
36587c478bd9Sstevel@tonic-gate panic("page_sub: bad arg(s): pp %p, *ppp %p",
36597c478bd9Sstevel@tonic-gate (void *)pp, (void *)(*ppp));
36607c478bd9Sstevel@tonic-gate /*NOTREACHED*/
36617c478bd9Sstevel@tonic-gate }
36627c478bd9Sstevel@tonic-gate
36637c478bd9Sstevel@tonic-gate page_sub_common(ppp, pp);
36647c478bd9Sstevel@tonic-gate }
36657c478bd9Sstevel@tonic-gate
36667c478bd9Sstevel@tonic-gate
36677c478bd9Sstevel@tonic-gate /*
36687c478bd9Sstevel@tonic-gate * Common code for page_sub() and mach_page_sub()
36697c478bd9Sstevel@tonic-gate */
36707c478bd9Sstevel@tonic-gate void
36717c478bd9Sstevel@tonic-gate page_sub_common(page_t **ppp, page_t *pp)
36727c478bd9Sstevel@tonic-gate {
36737c478bd9Sstevel@tonic-gate if (*ppp == pp)
36747c478bd9Sstevel@tonic-gate *ppp = pp->p_next; /* go to next page */
36757c478bd9Sstevel@tonic-gate
36767c478bd9Sstevel@tonic-gate if (*ppp == pp)
36777c478bd9Sstevel@tonic-gate *ppp = NULL; /* page list is gone */
36787c478bd9Sstevel@tonic-gate else {
36797c478bd9Sstevel@tonic-gate pp->p_prev->p_next = pp->p_next;
36807c478bd9Sstevel@tonic-gate pp->p_next->p_prev = pp->p_prev;
36817c478bd9Sstevel@tonic-gate }
36827c478bd9Sstevel@tonic-gate pp->p_prev = pp->p_next = pp; /* make pp a list of one */
36837c478bd9Sstevel@tonic-gate }
36847c478bd9Sstevel@tonic-gate
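/*
 * Example (hypothetical; plist and the pages are caller-supplied): the
 * p_next/p_prev list maintained by page_add()/page_sub() is circular, so
 * a walk ends when it returns to the head rather than on NULL.
 *
 *	page_t *plist = NULL;
 *	page_add(&plist, pp1);		list is { pp1 }
 *	page_add(&plist, pp2);		list is { pp2, pp1 }, head is pp2
 *
 *	page_t *pp = plist;
 *	do {
 *		...operate on pp...
 *		pp = pp->p_next;
 *	} while (pp != plist);
 *
 *	page_sub(&plist, pp2);		pp2 becomes a list of one again
 */
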
36857c478bd9Sstevel@tonic-gate
36867c478bd9Sstevel@tonic-gate /*
36877c478bd9Sstevel@tonic-gate * Break page list cppp into two lists with npages in the first list.
36887c478bd9Sstevel@tonic-gate * The tail is returned in nppp.
36897c478bd9Sstevel@tonic-gate */
36907c478bd9Sstevel@tonic-gate void
36917c478bd9Sstevel@tonic-gate page_list_break(page_t **oppp, page_t **nppp, pgcnt_t npages)
36927c478bd9Sstevel@tonic-gate {
36937c478bd9Sstevel@tonic-gate page_t *s1pp = *oppp;
36947c478bd9Sstevel@tonic-gate page_t *s2pp;
36957c478bd9Sstevel@tonic-gate page_t *e1pp, *e2pp;
36967c478bd9Sstevel@tonic-gate long n = 0;
36977c478bd9Sstevel@tonic-gate
36987c478bd9Sstevel@tonic-gate if (s1pp == NULL) {
36997c478bd9Sstevel@tonic-gate *nppp = NULL;
37007c478bd9Sstevel@tonic-gate return;
37017c478bd9Sstevel@tonic-gate }
37027c478bd9Sstevel@tonic-gate if (npages == 0) {
37037c478bd9Sstevel@tonic-gate *nppp = s1pp;
37047c478bd9Sstevel@tonic-gate *oppp = NULL;
37057c478bd9Sstevel@tonic-gate return;
37067c478bd9Sstevel@tonic-gate }
37077c478bd9Sstevel@tonic-gate for (n = 0, s2pp = *oppp; n < npages; n++) {
37087c478bd9Sstevel@tonic-gate s2pp = s2pp->p_next;
37097c478bd9Sstevel@tonic-gate }
37107c478bd9Sstevel@tonic-gate /* Fix head and tail of new lists */
37117c478bd9Sstevel@tonic-gate e1pp = s2pp->p_prev;
37127c478bd9Sstevel@tonic-gate e2pp = s1pp->p_prev;
37137c478bd9Sstevel@tonic-gate s1pp->p_prev = e1pp;
37147c478bd9Sstevel@tonic-gate e1pp->p_next = s1pp;
37157c478bd9Sstevel@tonic-gate s2pp->p_prev = e2pp;
37167c478bd9Sstevel@tonic-gate e2pp->p_next = s2pp;
37177c478bd9Sstevel@tonic-gate
37187c478bd9Sstevel@tonic-gate /* second list empty */
37197c478bd9Sstevel@tonic-gate if (s2pp == s1pp) {
37207c478bd9Sstevel@tonic-gate *oppp = s1pp;
37217c478bd9Sstevel@tonic-gate *nppp = NULL;
37227c478bd9Sstevel@tonic-gate } else {
37237c478bd9Sstevel@tonic-gate *oppp = s1pp;
37247c478bd9Sstevel@tonic-gate *nppp = s2pp;
37257c478bd9Sstevel@tonic-gate }
37267c478bd9Sstevel@tonic-gate }
37277c478bd9Sstevel@tonic-gate
37287c478bd9Sstevel@tonic-gate /*
37297c478bd9Sstevel@tonic-gate * Concatenate page list nppp onto the end of list ppp.
37307c478bd9Sstevel@tonic-gate */
37317c478bd9Sstevel@tonic-gate void
37327c478bd9Sstevel@tonic-gate page_list_concat(page_t **ppp, page_t **nppp)
37337c478bd9Sstevel@tonic-gate {
37347c478bd9Sstevel@tonic-gate page_t *s1pp, *s2pp, *e1pp, *e2pp;
37357c478bd9Sstevel@tonic-gate
37367c478bd9Sstevel@tonic-gate if (*nppp == NULL) {
37377c478bd9Sstevel@tonic-gate return;
37387c478bd9Sstevel@tonic-gate }
37397c478bd9Sstevel@tonic-gate if (*ppp == NULL) {
37407c478bd9Sstevel@tonic-gate *ppp = *nppp;
37417c478bd9Sstevel@tonic-gate return;
37427c478bd9Sstevel@tonic-gate }
37437c478bd9Sstevel@tonic-gate s1pp = *ppp;
37447c478bd9Sstevel@tonic-gate e1pp = s1pp->p_prev;
37457c478bd9Sstevel@tonic-gate s2pp = *nppp;
37467c478bd9Sstevel@tonic-gate e2pp = s2pp->p_prev;
37477c478bd9Sstevel@tonic-gate s1pp->p_prev = e2pp;
37487c478bd9Sstevel@tonic-gate e2pp->p_next = s1pp;
37497c478bd9Sstevel@tonic-gate e1pp->p_next = s2pp;
37507c478bd9Sstevel@tonic-gate s2pp->p_prev = e1pp;
37517c478bd9Sstevel@tonic-gate }
37527c478bd9Sstevel@tonic-gate
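/*
 * Example (hypothetical; plist, n and the ellipses are caller-supplied):
 * splitting the first n pages off a list and later rejoining the
 * remainder is a break/concat round trip.  Both lists stay circular; an
 * empty remainder leaves `tail' NULL, which page_list_concat() treats as
 * a no-op.
 *
 *	page_t *head = plist, *tail = NULL;
 *
 *	page_list_break(&head, &tail, n);	head: first n pages
 *	...hand `head' off, e.g. for I/O...	tail: the rest (or NULL)
 *	page_list_concat(&head, &tail);		rejoin tail behind head
 */
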
37537c478bd9Sstevel@tonic-gate /*
37547c478bd9Sstevel@tonic-gate * return the next page in the page list
37557c478bd9Sstevel@tonic-gate */
37567c478bd9Sstevel@tonic-gate page_t *
37577c478bd9Sstevel@tonic-gate page_list_next(page_t *pp)
37587c478bd9Sstevel@tonic-gate {
37597c478bd9Sstevel@tonic-gate return (pp->p_next);
37607c478bd9Sstevel@tonic-gate }
37617c478bd9Sstevel@tonic-gate
37627c478bd9Sstevel@tonic-gate
37637c478bd9Sstevel@tonic-gate /*
37647c478bd9Sstevel@tonic-gate * Add the page to the front of the linked list of pages
37657c478bd9Sstevel@tonic-gate * using p_vpnext/p_vpprev pointers for the list.
37667c478bd9Sstevel@tonic-gate *
37677c478bd9Sstevel@tonic-gate * The caller is responsible for protecting the lists.
37687c478bd9Sstevel@tonic-gate */
37697c478bd9Sstevel@tonic-gate void
37707c478bd9Sstevel@tonic-gate page_vpadd(page_t **ppp, page_t *pp)
37717c478bd9Sstevel@tonic-gate {
37727c478bd9Sstevel@tonic-gate if (*ppp == NULL) {
37737c478bd9Sstevel@tonic-gate pp->p_vpnext = pp->p_vpprev = pp;
37747c478bd9Sstevel@tonic-gate } else {
37757c478bd9Sstevel@tonic-gate pp->p_vpnext = *ppp;
37767c478bd9Sstevel@tonic-gate pp->p_vpprev = (*ppp)->p_vpprev;
37777c478bd9Sstevel@tonic-gate (*ppp)->p_vpprev = pp;
37787c478bd9Sstevel@tonic-gate pp->p_vpprev->p_vpnext = pp;
37797c478bd9Sstevel@tonic-gate }
37807c478bd9Sstevel@tonic-gate *ppp = pp;
37817c478bd9Sstevel@tonic-gate }
37827c478bd9Sstevel@tonic-gate
37837c478bd9Sstevel@tonic-gate /*
37847c478bd9Sstevel@tonic-gate * Remove this page from the linked list of pages
37857c478bd9Sstevel@tonic-gate * using p_vpnext/p_vpprev pointers for the list.
37867c478bd9Sstevel@tonic-gate *
37877c478bd9Sstevel@tonic-gate * The caller is responsible for protecting the lists.
37887c478bd9Sstevel@tonic-gate */
37897c478bd9Sstevel@tonic-gate void
37907c478bd9Sstevel@tonic-gate page_vpsub(page_t **ppp, page_t *pp)
37917c478bd9Sstevel@tonic-gate {
37927c478bd9Sstevel@tonic-gate if (*ppp == NULL || pp == NULL) {
37937c478bd9Sstevel@tonic-gate panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
37947c478bd9Sstevel@tonic-gate (void *)pp, (void *)(*ppp));
37957c478bd9Sstevel@tonic-gate /*NOTREACHED*/
37967c478bd9Sstevel@tonic-gate }
37977c478bd9Sstevel@tonic-gate
37987c478bd9Sstevel@tonic-gate if (*ppp == pp)
37997c478bd9Sstevel@tonic-gate *ppp = pp->p_vpnext; /* go to next page */
38007c478bd9Sstevel@tonic-gate
38017c478bd9Sstevel@tonic-gate if (*ppp == pp)
38027c478bd9Sstevel@tonic-gate *ppp = NULL; /* page list is gone */
38037c478bd9Sstevel@tonic-gate else {
38047c478bd9Sstevel@tonic-gate pp->p_vpprev->p_vpnext = pp->p_vpnext;
38057c478bd9Sstevel@tonic-gate pp->p_vpnext->p_vpprev = pp->p_vpprev;
38067c478bd9Sstevel@tonic-gate }
38077c478bd9Sstevel@tonic-gate pp->p_vpprev = pp->p_vpnext = pp; /* make pp a list of one */
38087c478bd9Sstevel@tonic-gate }
38097c478bd9Sstevel@tonic-gate
38107c478bd9Sstevel@tonic-gate /*
38117c478bd9Sstevel@tonic-gate * Lock a physical page into memory "long term". Used to support "lock
38127c478bd9Sstevel@tonic-gate * in memory" functions. Accepts the page to be locked, and a cow variable
38137c478bd9Sstevel@tonic-gate  * to indicate whether the lock will travel to the new page during
38147c478bd9Sstevel@tonic-gate * a potential copy-on-write.
38157c478bd9Sstevel@tonic-gate */
38167c478bd9Sstevel@tonic-gate int
38177c478bd9Sstevel@tonic-gate page_pp_lock(
38187c478bd9Sstevel@tonic-gate page_t *pp, /* page to be locked */
38197c478bd9Sstevel@tonic-gate int cow, /* cow lock */
38207c478bd9Sstevel@tonic-gate int kernel) /* must succeed -- ignore checking */
38217c478bd9Sstevel@tonic-gate {
38227c478bd9Sstevel@tonic-gate int r = 0; /* result -- assume failure */
38237c478bd9Sstevel@tonic-gate
38247c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp));
38257c478bd9Sstevel@tonic-gate
38267c478bd9Sstevel@tonic-gate page_struct_lock(pp);
38277c478bd9Sstevel@tonic-gate /*
38287c478bd9Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem.
38297c478bd9Sstevel@tonic-gate */
38307c478bd9Sstevel@tonic-gate if (cow) {
38317c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
38327c478bd9Sstevel@tonic-gate if ((availrmem > pages_pp_maximum) &&
38337c478bd9Sstevel@tonic-gate (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
38347c478bd9Sstevel@tonic-gate availrmem--;
38357c478bd9Sstevel@tonic-gate pages_locked++;
38367c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
38377c478bd9Sstevel@tonic-gate r = 1;
38387c478bd9Sstevel@tonic-gate if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
38397c478bd9Sstevel@tonic-gate cmn_err(CE_WARN,
38407c478bd9Sstevel@tonic-gate "COW lock limit reached on pfn 0x%lx",
38417c478bd9Sstevel@tonic-gate page_pptonum(pp));
38427c478bd9Sstevel@tonic-gate }
38437c478bd9Sstevel@tonic-gate } else
38447c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
38457c478bd9Sstevel@tonic-gate } else {
38467c478bd9Sstevel@tonic-gate if (pp->p_lckcnt) {
38477c478bd9Sstevel@tonic-gate if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
38487c478bd9Sstevel@tonic-gate r = 1;
38497c478bd9Sstevel@tonic-gate if (++pp->p_lckcnt ==
38507c478bd9Sstevel@tonic-gate (ushort_t)PAGE_LOCK_MAXIMUM) {
38517c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "Page lock limit "
38527c478bd9Sstevel@tonic-gate "reached on pfn 0x%lx",
38537c478bd9Sstevel@tonic-gate page_pptonum(pp));
38547c478bd9Sstevel@tonic-gate }
38557c478bd9Sstevel@tonic-gate }
38567c478bd9Sstevel@tonic-gate } else {
38577c478bd9Sstevel@tonic-gate if (kernel) {
38587c478bd9Sstevel@tonic-gate /* availrmem accounting done by caller */
38597c478bd9Sstevel@tonic-gate ++pp->p_lckcnt;
38607c478bd9Sstevel@tonic-gate r = 1;
38617c478bd9Sstevel@tonic-gate } else {
38627c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
38637c478bd9Sstevel@tonic-gate if (availrmem > pages_pp_maximum) {
38647c478bd9Sstevel@tonic-gate availrmem--;
38657c478bd9Sstevel@tonic-gate pages_locked++;
38667c478bd9Sstevel@tonic-gate ++pp->p_lckcnt;
38677c478bd9Sstevel@tonic-gate r = 1;
38687c478bd9Sstevel@tonic-gate }
38697c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
38707c478bd9Sstevel@tonic-gate }
38717c478bd9Sstevel@tonic-gate }
38727c478bd9Sstevel@tonic-gate }
38737c478bd9Sstevel@tonic-gate page_struct_unlock(pp);
38747c478bd9Sstevel@tonic-gate return (r);
38757c478bd9Sstevel@tonic-gate }
38767c478bd9Sstevel@tonic-gate
38777c478bd9Sstevel@tonic-gate /*
38787c478bd9Sstevel@tonic-gate * Decommit a lock on a physical page frame. Account for cow locks if
38797c478bd9Sstevel@tonic-gate * appropriate.
38807c478bd9Sstevel@tonic-gate */
38817c478bd9Sstevel@tonic-gate void
38827c478bd9Sstevel@tonic-gate page_pp_unlock(
38837c478bd9Sstevel@tonic-gate page_t *pp, /* page to be unlocked */
38847c478bd9Sstevel@tonic-gate int cow, /* expect cow lock */
38857c478bd9Sstevel@tonic-gate int kernel) /* this was a kernel lock */
38867c478bd9Sstevel@tonic-gate {
38877c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp));
38887c478bd9Sstevel@tonic-gate
38897c478bd9Sstevel@tonic-gate page_struct_lock(pp);
38907c478bd9Sstevel@tonic-gate /*
38917c478bd9Sstevel@tonic-gate * Acquire the "freemem_lock" for availrmem.
38927c478bd9Sstevel@tonic-gate 	 * If cowcnt or lckcnt is already 0, do nothing; i.e., we
38937c478bd9Sstevel@tonic-gate * could be called to unlock even if nothing is locked. This could
38947c478bd9Sstevel@tonic-gate * happen if locked file pages were truncated (removing the lock)
38957c478bd9Sstevel@tonic-gate * and the file was grown again and new pages faulted in; the new
38967c478bd9Sstevel@tonic-gate * pages are unlocked but the segment still thinks they're locked.
38977c478bd9Sstevel@tonic-gate */
38987c478bd9Sstevel@tonic-gate if (cow) {
38997c478bd9Sstevel@tonic-gate if (pp->p_cowcnt) {
39007c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
39017c478bd9Sstevel@tonic-gate pp->p_cowcnt--;
39027c478bd9Sstevel@tonic-gate availrmem++;
39037c478bd9Sstevel@tonic-gate pages_locked--;
39047c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
39057c478bd9Sstevel@tonic-gate }
39067c478bd9Sstevel@tonic-gate } else {
39077c478bd9Sstevel@tonic-gate if (pp->p_lckcnt && --pp->p_lckcnt == 0) {
39087c478bd9Sstevel@tonic-gate if (!kernel) {
39097c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
39107c478bd9Sstevel@tonic-gate availrmem++;
39117c478bd9Sstevel@tonic-gate pages_locked--;
39127c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
39137c478bd9Sstevel@tonic-gate }
39147c478bd9Sstevel@tonic-gate }
39157c478bd9Sstevel@tonic-gate }
39167c478bd9Sstevel@tonic-gate page_struct_unlock(pp);
39177c478bd9Sstevel@tonic-gate }
39187c478bd9Sstevel@tonic-gate
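/*
 * Example (hypothetical caller): page_pp_lock() and page_pp_unlock()
 * should be paired with the same `cow' and `kernel' arguments so that
 * the availrmem/pages_locked accounting done on lock is undone on
 * unlock.  The page must be held (shared or exclusive) across each call.
 *
 *	if (page_pp_lock(pp, 0, 0) == 0)
 *		return (EAGAIN);	reservation failed, page not locked
 *	...the page frame is now locked in memory...
 *	page_pp_unlock(pp, 0, 0);
 */
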
39197c478bd9Sstevel@tonic-gate /*
3920b57f5d3eSPatrick Mooney * This routine reserves availrmem for npages.
3921b57f5d3eSPatrick Mooney * It returns 1 on success or 0 on failure.
3922b57f5d3eSPatrick Mooney *
3923b57f5d3eSPatrick Mooney * flags: KM_NOSLEEP or KM_SLEEP
3924b57f5d3eSPatrick Mooney * cb_wait: called to induce delay when KM_SLEEP reservation requires kmem
3925b57f5d3eSPatrick Mooney * reaping to potentially succeed. If the callback returns 0, the
3926b57f5d3eSPatrick Mooney * reservation attempts will cease to repeat and page_xresv() may
3927b57f5d3eSPatrick Mooney  *		report a failure. If cb_wait is NULL, the traditional delay(hz >> 2)
3928b57f5d3eSPatrick Mooney * behavior will be used while waiting for a reap.
39297c478bd9Sstevel@tonic-gate */
39307c478bd9Sstevel@tonic-gate int
3931b57f5d3eSPatrick Mooney page_xresv(pgcnt_t npages, uint_t flags, int (*cb_wait)(void))
39327c478bd9Sstevel@tonic-gate {
39337c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
3934b57f5d3eSPatrick Mooney if (availrmem >= tune.t_minarmem + npages) {
3935b57f5d3eSPatrick Mooney availrmem -= npages;
39367c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
3937b57f5d3eSPatrick Mooney return (1);
3938b57f5d3eSPatrick Mooney } else if ((flags & KM_NOSLEEP) != 0) {
3939b57f5d3eSPatrick Mooney mutex_exit(&freemem_lock);
3940b57f5d3eSPatrick Mooney return (0);
39417c478bd9Sstevel@tonic-gate }
39427c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
3943b57f5d3eSPatrick Mooney
3944b57f5d3eSPatrick Mooney /*
3945b57f5d3eSPatrick Mooney * We signal memory pressure to the system by elevating 'needfree'.
3946b57f5d3eSPatrick Mooney * Processes such as kmem reaping, pageout, and ZFS ARC shrinking can
3947b57f5d3eSPatrick Mooney * then respond to said pressure by freeing pages.
3948b57f5d3eSPatrick Mooney */
3949b57f5d3eSPatrick Mooney page_needfree(npages);
3950b57f5d3eSPatrick Mooney int nobail = 1;
3951b57f5d3eSPatrick Mooney do {
3952b57f5d3eSPatrick Mooney kmem_reap();
3953b57f5d3eSPatrick Mooney if (cb_wait == NULL) {
3954b57f5d3eSPatrick Mooney delay(hz >> 2);
3955b57f5d3eSPatrick Mooney } else {
3956b57f5d3eSPatrick Mooney nobail = cb_wait();
3957b57f5d3eSPatrick Mooney }
3958b57f5d3eSPatrick Mooney
3959b57f5d3eSPatrick Mooney mutex_enter(&freemem_lock);
3960b57f5d3eSPatrick Mooney if (availrmem >= tune.t_minarmem + npages) {
3961b57f5d3eSPatrick Mooney availrmem -= npages;
3962b57f5d3eSPatrick Mooney mutex_exit(&freemem_lock);
3963b57f5d3eSPatrick Mooney page_needfree(-(spgcnt_t)npages);
3964b57f5d3eSPatrick Mooney return (1);
3965b57f5d3eSPatrick Mooney }
3966b57f5d3eSPatrick Mooney mutex_exit(&freemem_lock);
3967b57f5d3eSPatrick Mooney } while (nobail != 0);
3968b57f5d3eSPatrick Mooney page_needfree(-(spgcnt_t)npages);
3969b57f5d3eSPatrick Mooney
3970b57f5d3eSPatrick Mooney return (0);
3971b57f5d3eSPatrick Mooney }
3972b57f5d3eSPatrick Mooney
3973b57f5d3eSPatrick Mooney /*
3974b57f5d3eSPatrick Mooney * This routine reserves availrmem for npages;
3975b57f5d3eSPatrick Mooney * flags: KM_NOSLEEP or KM_SLEEP
3976b57f5d3eSPatrick Mooney * returns 1 on success or 0 on failure
3977b57f5d3eSPatrick Mooney */
3978b57f5d3eSPatrick Mooney int
3979b57f5d3eSPatrick Mooney page_resv(pgcnt_t npages, uint_t flags)
3980b57f5d3eSPatrick Mooney {
3981b57f5d3eSPatrick Mooney return (page_xresv(npages, flags, NULL));
39827c478bd9Sstevel@tonic-gate }
39837c478bd9Sstevel@tonic-gate
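/*
 * Example (hypothetical; the example_* names are illustrative only): a
 * KM_SLEEP caller that is unwilling to wait forever can bound the retry
 * loop through cb_wait, for instance by giving up after a fixed number
 * of reap attempts.  Returning 0 from the callback stops page_xresv().
 *
 *	static int example_tries = 10;
 *
 *	static int
 *	example_wait(void)
 *	{
 *		delay(hz >> 2);
 *		return (--example_tries > 0);
 *	}
 *
 *	if (page_xresv(npages, KM_SLEEP, example_wait) == 0)
 *		return (ENOMEM);
 *	...
 *	page_unresv(npages);		release the reservation when done
 */
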
39847c478bd9Sstevel@tonic-gate /*
39857c478bd9Sstevel@tonic-gate * This routine unreserves availrmem for npages;
39867c478bd9Sstevel@tonic-gate */
39877c478bd9Sstevel@tonic-gate void
39887c478bd9Sstevel@tonic-gate page_unresv(pgcnt_t npages)
39897c478bd9Sstevel@tonic-gate {
39907c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
39917c478bd9Sstevel@tonic-gate availrmem += npages;
39927c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
39937c478bd9Sstevel@tonic-gate }
39947c478bd9Sstevel@tonic-gate
39957c478bd9Sstevel@tonic-gate /*
39967c478bd9Sstevel@tonic-gate * See Statement at the beginning of segvn_lockop() regarding
39977c478bd9Sstevel@tonic-gate * the way we handle cowcnts and lckcnts.
39987c478bd9Sstevel@tonic-gate *
39997c478bd9Sstevel@tonic-gate * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
40007c478bd9Sstevel@tonic-gate * that breaks COW has PROT_WRITE.
40017c478bd9Sstevel@tonic-gate *
40027c478bd9Sstevel@tonic-gate  * Note that we may also break COW in case we are softlocking
40037c478bd9Sstevel@tonic-gate * on read access during physio;
40047c478bd9Sstevel@tonic-gate * in this softlock case, the vpage may not have PROT_WRITE.
40057c478bd9Sstevel@tonic-gate * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
40067c478bd9Sstevel@tonic-gate * if the vpage doesn't have PROT_WRITE.
40077c478bd9Sstevel@tonic-gate *
40087c478bd9Sstevel@tonic-gate * This routine is never called if we are stealing a page
40097c478bd9Sstevel@tonic-gate * in anon_private.
40107c478bd9Sstevel@tonic-gate *
40117c478bd9Sstevel@tonic-gate  * The caller subtracted from availrmem for a read-only mapping;
40127c478bd9Sstevel@tonic-gate  * if lckcnt is 1, increment availrmem.
40137c478bd9Sstevel@tonic-gate */
40147c478bd9Sstevel@tonic-gate void
40157c478bd9Sstevel@tonic-gate page_pp_useclaim(
40167c478bd9Sstevel@tonic-gate page_t *opp, /* original page frame losing lock */
40177c478bd9Sstevel@tonic-gate page_t *npp, /* new page frame gaining lock */
4018727737b4SJoshua M. Clulow uint_t write_perm) /* set if vpage has PROT_WRITE */
40197c478bd9Sstevel@tonic-gate {
40207c478bd9Sstevel@tonic-gate int payback = 0;
4021cb15d5d9SPeter Rival int nidx, oidx;
40227c478bd9Sstevel@tonic-gate
40237c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(opp));
40247c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(npp));
40257c478bd9Sstevel@tonic-gate
4026cb15d5d9SPeter Rival /*
4027cb15d5d9SPeter Rival * Since we have two pages we probably have two locks. We need to take
4028cb15d5d9SPeter Rival * them in a defined order to avoid deadlocks. It's also possible they
4029cb15d5d9SPeter Rival 	 * both hash to the same lock, in which case this is a non-issue.
4030cb15d5d9SPeter Rival */
4031cb15d5d9SPeter Rival nidx = PAGE_LLOCK_HASH(PP_PAGEROOT(npp));
4032cb15d5d9SPeter Rival oidx = PAGE_LLOCK_HASH(PP_PAGEROOT(opp));
4033cb15d5d9SPeter Rival if (nidx < oidx) {
4034cb15d5d9SPeter Rival page_struct_lock(npp);
4035cb15d5d9SPeter Rival page_struct_lock(opp);
4036cb15d5d9SPeter Rival } else if (oidx < nidx) {
4037cb15d5d9SPeter Rival page_struct_lock(opp);
4038cb15d5d9SPeter Rival page_struct_lock(npp);
4039cb15d5d9SPeter Rival } else { /* The pages hash to the same lock */
4040cb15d5d9SPeter Rival page_struct_lock(npp);
4041cb15d5d9SPeter Rival }
40427c478bd9Sstevel@tonic-gate
40437c478bd9Sstevel@tonic-gate ASSERT(npp->p_cowcnt == 0);
40447c478bd9Sstevel@tonic-gate ASSERT(npp->p_lckcnt == 0);
40457c478bd9Sstevel@tonic-gate
40467c478bd9Sstevel@tonic-gate /* Don't use claim if nothing is locked (see page_pp_unlock above) */
40477c478bd9Sstevel@tonic-gate if ((write_perm && opp->p_cowcnt != 0) ||
40487c478bd9Sstevel@tonic-gate (!write_perm && opp->p_lckcnt != 0)) {
40497c478bd9Sstevel@tonic-gate
40507c478bd9Sstevel@tonic-gate if (write_perm) {
40517c478bd9Sstevel@tonic-gate npp->p_cowcnt++;
40527c478bd9Sstevel@tonic-gate ASSERT(opp->p_cowcnt != 0);
40537c478bd9Sstevel@tonic-gate opp->p_cowcnt--;
40547c478bd9Sstevel@tonic-gate } else {
40557c478bd9Sstevel@tonic-gate
40567c478bd9Sstevel@tonic-gate ASSERT(opp->p_lckcnt != 0);
40577c478bd9Sstevel@tonic-gate
40587c478bd9Sstevel@tonic-gate /*
40597c478bd9Sstevel@tonic-gate * We didn't need availrmem decremented if p_lckcnt on
40607c478bd9Sstevel@tonic-gate 			 * the original page is 1. Here, we are unlocking the
40617c478bd9Sstevel@tonic-gate 			 * read-only copy belonging to the original page and
40627c478bd9Sstevel@tonic-gate 			 * locking a copy belonging to the new page.
40637c478bd9Sstevel@tonic-gate */
40647c478bd9Sstevel@tonic-gate if (opp->p_lckcnt == 1)
40657c478bd9Sstevel@tonic-gate payback = 1;
40667c478bd9Sstevel@tonic-gate
40677c478bd9Sstevel@tonic-gate npp->p_lckcnt++;
40687c478bd9Sstevel@tonic-gate opp->p_lckcnt--;
40697c478bd9Sstevel@tonic-gate }
40707c478bd9Sstevel@tonic-gate }
40717c478bd9Sstevel@tonic-gate if (payback) {
40727c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
40737c478bd9Sstevel@tonic-gate availrmem++;
40747c478bd9Sstevel@tonic-gate pages_useclaim--;
40757c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
40767c478bd9Sstevel@tonic-gate }
4077cb15d5d9SPeter Rival
4078cb15d5d9SPeter Rival if (nidx < oidx) {
4079cb15d5d9SPeter Rival page_struct_unlock(opp);
4080cb15d5d9SPeter Rival page_struct_unlock(npp);
4081cb15d5d9SPeter Rival } else if (oidx < nidx) {
4082cb15d5d9SPeter Rival page_struct_unlock(npp);
4083cb15d5d9SPeter Rival page_struct_unlock(opp);
4084cb15d5d9SPeter Rival } else { /* The pages hash to the same lock */
4085cb15d5d9SPeter Rival page_struct_unlock(npp);
4086cb15d5d9SPeter Rival }
40877c478bd9Sstevel@tonic-gate }
40887c478bd9Sstevel@tonic-gate
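/*
 * Sketch (generic, not tied to any particular caller): other code that
 * must hold the page_struct_lock for two different pages at once should
 * use the same index-ordered acquisition as page_pp_useclaim() above to
 * avoid an ABBA deadlock.
 *
 *	int aidx = PAGE_LLOCK_HASH(PP_PAGEROOT(a));
 *	int bidx = PAGE_LLOCK_HASH(PP_PAGEROOT(b));
 *
 *	if (aidx == bidx) {
 *		page_struct_lock(a);		both hash to one lock
 *	} else if (aidx < bidx) {
 *		page_struct_lock(a);
 *		page_struct_lock(b);
 *	} else {
 *		page_struct_lock(b);
 *		page_struct_lock(a);
 *	}
 *	...
 *	(release in the opposite order)
 */
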
40897c478bd9Sstevel@tonic-gate /*
40907c478bd9Sstevel@tonic-gate * Simple claim adjust functions -- used to support changes in
40917c478bd9Sstevel@tonic-gate * claims due to changes in access permissions. Used by segvn_setprot().
40927c478bd9Sstevel@tonic-gate */
40937c478bd9Sstevel@tonic-gate int
40947c478bd9Sstevel@tonic-gate page_addclaim(page_t *pp)
40957c478bd9Sstevel@tonic-gate {
40967c478bd9Sstevel@tonic-gate int r = 0; /* result */
40977c478bd9Sstevel@tonic-gate
40987c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp));
40997c478bd9Sstevel@tonic-gate
41007c478bd9Sstevel@tonic-gate page_struct_lock(pp);
41017c478bd9Sstevel@tonic-gate ASSERT(pp->p_lckcnt != 0);
41027c478bd9Sstevel@tonic-gate
41037c478bd9Sstevel@tonic-gate if (pp->p_lckcnt == 1) {
41047c478bd9Sstevel@tonic-gate if (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
41057c478bd9Sstevel@tonic-gate --pp->p_lckcnt;
41067c478bd9Sstevel@tonic-gate r = 1;
41077c478bd9Sstevel@tonic-gate if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41087c478bd9Sstevel@tonic-gate cmn_err(CE_WARN,
41097c478bd9Sstevel@tonic-gate "COW lock limit reached on pfn 0x%lx",
41107c478bd9Sstevel@tonic-gate page_pptonum(pp));
41117c478bd9Sstevel@tonic-gate }
41127c478bd9Sstevel@tonic-gate }
41137c478bd9Sstevel@tonic-gate } else {
41147c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
41157c478bd9Sstevel@tonic-gate if ((availrmem > pages_pp_maximum) &&
41167c478bd9Sstevel@tonic-gate (pp->p_cowcnt < (ushort_t)PAGE_LOCK_MAXIMUM)) {
41177c478bd9Sstevel@tonic-gate --availrmem;
41187c478bd9Sstevel@tonic-gate ++pages_claimed;
41197c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
41207c478bd9Sstevel@tonic-gate --pp->p_lckcnt;
41217c478bd9Sstevel@tonic-gate r = 1;
41227c478bd9Sstevel@tonic-gate if (++pp->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41237c478bd9Sstevel@tonic-gate cmn_err(CE_WARN,
41247c478bd9Sstevel@tonic-gate "COW lock limit reached on pfn 0x%lx",
41257c478bd9Sstevel@tonic-gate page_pptonum(pp));
41267c478bd9Sstevel@tonic-gate }
41277c478bd9Sstevel@tonic-gate } else
41287c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
41297c478bd9Sstevel@tonic-gate }
41307c478bd9Sstevel@tonic-gate page_struct_unlock(pp);
41317c478bd9Sstevel@tonic-gate return (r);
41327c478bd9Sstevel@tonic-gate }
41337c478bd9Sstevel@tonic-gate
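/*
 * Example (hypothetical caller; `becoming_writable' is illustrative): a
 * caller adjusting claims when a locked mapping's protections change,
 * in the style of segvn_setprot(), converts between the two claim types.
 *
 *	if (becoming_writable) {
 *		if (page_addclaim(pp) == 0)	lckcnt claim -> cowcnt claim
 *			return (EAGAIN);
 *	} else {
 *		if (page_subclaim(pp) == 0)	cowcnt claim -> lckcnt claim
 *			return (EAGAIN);
 *	}
 */
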
41347c478bd9Sstevel@tonic-gate int
41357c478bd9Sstevel@tonic-gate page_subclaim(page_t *pp)
41367c478bd9Sstevel@tonic-gate {
41377c478bd9Sstevel@tonic-gate int r = 0;
41387c478bd9Sstevel@tonic-gate
41397c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp));
41407c478bd9Sstevel@tonic-gate
41417c478bd9Sstevel@tonic-gate page_struct_lock(pp);
41427c478bd9Sstevel@tonic-gate ASSERT(pp->p_cowcnt != 0);
41437c478bd9Sstevel@tonic-gate
41447c478bd9Sstevel@tonic-gate if (pp->p_lckcnt) {
41457c478bd9Sstevel@tonic-gate if (pp->p_lckcnt < (ushort_t)PAGE_LOCK_MAXIMUM) {
41467c478bd9Sstevel@tonic-gate r = 1;
41477c478bd9Sstevel@tonic-gate /*
41487c478bd9Sstevel@tonic-gate * for availrmem
41497c478bd9Sstevel@tonic-gate */
41507c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
41517c478bd9Sstevel@tonic-gate availrmem++;
41527c478bd9Sstevel@tonic-gate pages_claimed--;
41537c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
41547c478bd9Sstevel@tonic-gate
41557c478bd9Sstevel@tonic-gate pp->p_cowcnt--;
41567c478bd9Sstevel@tonic-gate
41577c478bd9Sstevel@tonic-gate if (++pp->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
41587c478bd9Sstevel@tonic-gate cmn_err(CE_WARN,
41597c478bd9Sstevel@tonic-gate "Page lock limit reached on pfn 0x%lx",
41607c478bd9Sstevel@tonic-gate page_pptonum(pp));
41617c478bd9Sstevel@tonic-gate }
41627c478bd9Sstevel@tonic-gate }
41637c478bd9Sstevel@tonic-gate } else {
41647c478bd9Sstevel@tonic-gate r = 1;
41657c478bd9Sstevel@tonic-gate pp->p_cowcnt--;
41667c478bd9Sstevel@tonic-gate pp->p_lckcnt++;
41677c478bd9Sstevel@tonic-gate }
41687c478bd9Sstevel@tonic-gate page_struct_unlock(pp);
41697c478bd9Sstevel@tonic-gate return (r);
41707c478bd9Sstevel@tonic-gate }
41717c478bd9Sstevel@tonic-gate
4172cb15d5d9SPeter Rival /*
4173cb15d5d9SPeter Rival * Variant of page_addclaim(), where ppa[] contains the pages of a single large
4174cb15d5d9SPeter Rival * page.
4175cb15d5d9SPeter Rival */
41767c478bd9Sstevel@tonic-gate int
41777c478bd9Sstevel@tonic-gate page_addclaim_pages(page_t **ppa)
41787c478bd9Sstevel@tonic-gate {
41797c478bd9Sstevel@tonic-gate pgcnt_t lckpgs = 0, pg_idx;
41807c478bd9Sstevel@tonic-gate
41817c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_addclaim_pages);
41827c478bd9Sstevel@tonic-gate
4183cb15d5d9SPeter Rival /*
4184cb15d5d9SPeter Rival * Only need to take the page struct lock on the large page root.
4185cb15d5d9SPeter Rival */
4186cb15d5d9SPeter Rival page_struct_lock(ppa[0]);
41877c478bd9Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
41887c478bd9Sstevel@tonic-gate
41897c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[pg_idx]));
41907c478bd9Sstevel@tonic-gate ASSERT(ppa[pg_idx]->p_lckcnt != 0);
41917c478bd9Sstevel@tonic-gate if (ppa[pg_idx]->p_cowcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4192cb15d5d9SPeter Rival page_struct_unlock(ppa[0]);
41937c478bd9Sstevel@tonic-gate return (0);
41947c478bd9Sstevel@tonic-gate }
41957c478bd9Sstevel@tonic-gate if (ppa[pg_idx]->p_lckcnt > 1)
41967c478bd9Sstevel@tonic-gate lckpgs++;
41977c478bd9Sstevel@tonic-gate }
41987c478bd9Sstevel@tonic-gate
41997c478bd9Sstevel@tonic-gate if (lckpgs != 0) {
42007c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
42017c478bd9Sstevel@tonic-gate if (availrmem >= pages_pp_maximum + lckpgs) {
42027c478bd9Sstevel@tonic-gate availrmem -= lckpgs;
42037c478bd9Sstevel@tonic-gate pages_claimed += lckpgs;
42047c478bd9Sstevel@tonic-gate } else {
42057c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
4206cb15d5d9SPeter Rival page_struct_unlock(ppa[0]);
42077c478bd9Sstevel@tonic-gate return (0);
42087c478bd9Sstevel@tonic-gate }
42097c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
42107c478bd9Sstevel@tonic-gate }
42117c478bd9Sstevel@tonic-gate
42127c478bd9Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
42137c478bd9Sstevel@tonic-gate ppa[pg_idx]->p_lckcnt--;
42147c478bd9Sstevel@tonic-gate ppa[pg_idx]->p_cowcnt++;
42157c478bd9Sstevel@tonic-gate }
4216cb15d5d9SPeter Rival page_struct_unlock(ppa[0]);
42177c478bd9Sstevel@tonic-gate return (1);
42187c478bd9Sstevel@tonic-gate }
42197c478bd9Sstevel@tonic-gate
4220cb15d5d9SPeter Rival /*
4221cb15d5d9SPeter Rival * Variant of page_subclaim(), where ppa[] contains the pages of a single large
4222cb15d5d9SPeter Rival * page.
4223cb15d5d9SPeter Rival */
42247c478bd9Sstevel@tonic-gate int
42257c478bd9Sstevel@tonic-gate page_subclaim_pages(page_t **ppa)
42267c478bd9Sstevel@tonic-gate {
42277c478bd9Sstevel@tonic-gate pgcnt_t ulckpgs = 0, pg_idx;
42287c478bd9Sstevel@tonic-gate
42297c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_subclaim_pages);
42307c478bd9Sstevel@tonic-gate
4231cb15d5d9SPeter Rival /*
4232cb15d5d9SPeter Rival * Only need to take the page struct lock on the large page root.
4233cb15d5d9SPeter Rival */
4234cb15d5d9SPeter Rival page_struct_lock(ppa[0]);
42357c478bd9Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
42367c478bd9Sstevel@tonic-gate
42377c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[pg_idx]));
42387c478bd9Sstevel@tonic-gate ASSERT(ppa[pg_idx]->p_cowcnt != 0);
42397c478bd9Sstevel@tonic-gate if (ppa[pg_idx]->p_lckcnt == (ushort_t)PAGE_LOCK_MAXIMUM) {
4240cb15d5d9SPeter Rival page_struct_unlock(ppa[0]);
42417c478bd9Sstevel@tonic-gate return (0);
42427c478bd9Sstevel@tonic-gate }
42437c478bd9Sstevel@tonic-gate if (ppa[pg_idx]->p_lckcnt != 0)
42447c478bd9Sstevel@tonic-gate ulckpgs++;
42457c478bd9Sstevel@tonic-gate }
42467c478bd9Sstevel@tonic-gate
42477c478bd9Sstevel@tonic-gate if (ulckpgs != 0) {
42487c478bd9Sstevel@tonic-gate mutex_enter(&freemem_lock);
42497c478bd9Sstevel@tonic-gate availrmem += ulckpgs;
42507c478bd9Sstevel@tonic-gate pages_claimed -= ulckpgs;
42517c478bd9Sstevel@tonic-gate mutex_exit(&freemem_lock);
42527c478bd9Sstevel@tonic-gate }
42537c478bd9Sstevel@tonic-gate
42547c478bd9Sstevel@tonic-gate for (pg_idx = 0; ppa[pg_idx] != NULL; pg_idx++) {
42557c478bd9Sstevel@tonic-gate ppa[pg_idx]->p_cowcnt--;
42567c478bd9Sstevel@tonic-gate ppa[pg_idx]->p_lckcnt++;
42577c478bd9Sstevel@tonic-gate
42587c478bd9Sstevel@tonic-gate }
4259cb15d5d9SPeter Rival page_struct_unlock(ppa[0]);
42607c478bd9Sstevel@tonic-gate return (1);
42617c478bd9Sstevel@tonic-gate }
42627c478bd9Sstevel@tonic-gate
42637c478bd9Sstevel@tonic-gate page_t *
42647c478bd9Sstevel@tonic-gate page_numtopp(pfn_t pfnum, se_t se)
42657c478bd9Sstevel@tonic-gate {
42667c478bd9Sstevel@tonic-gate page_t *pp;
42677c478bd9Sstevel@tonic-gate
42687c478bd9Sstevel@tonic-gate retry:
42697c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfnum);
42707c478bd9Sstevel@tonic-gate if (pp == NULL) {
42717c478bd9Sstevel@tonic-gate return ((page_t *)NULL);
42727c478bd9Sstevel@tonic-gate }
42737c478bd9Sstevel@tonic-gate
42747c478bd9Sstevel@tonic-gate /*
42757c478bd9Sstevel@tonic-gate * Acquire the appropriate lock on the page.
42767c478bd9Sstevel@tonic-gate */
42777c478bd9Sstevel@tonic-gate while (!page_lock(pp, se, (kmutex_t *)NULL, P_RECLAIM)) {
42787c478bd9Sstevel@tonic-gate if (page_pptonum(pp) != pfnum)
42797c478bd9Sstevel@tonic-gate goto retry;
42807c478bd9Sstevel@tonic-gate continue;
42817c478bd9Sstevel@tonic-gate }
42827c478bd9Sstevel@tonic-gate
42837c478bd9Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) {
42847c478bd9Sstevel@tonic-gate page_unlock(pp);
42857c478bd9Sstevel@tonic-gate goto retry;
42867c478bd9Sstevel@tonic-gate }
42877c478bd9Sstevel@tonic-gate
42887c478bd9Sstevel@tonic-gate return (pp);
42897c478bd9Sstevel@tonic-gate }
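
/*
 * Illustrative sketch, not part of the original source: a typical caller of
 * page_numtopp() looks a page up by page frame number and keeps the returned
 * page locked until it is finished with it.  The pfn used here is a
 * hypothetical value supplied by the caller.
 *
 *	page_t *pp;
 *
 *	if ((pp = page_numtopp(pfn, SE_SHARED)) != NULL) {
 *		... examine pp; its identity cannot change while locked ...
 *		page_unlock(pp);
 *	}
 */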
42907c478bd9Sstevel@tonic-gate
42917c478bd9Sstevel@tonic-gate page_t *
42927c478bd9Sstevel@tonic-gate page_numtopp_noreclaim(pfn_t pfnum, se_t se)
42937c478bd9Sstevel@tonic-gate {
42947c478bd9Sstevel@tonic-gate page_t *pp;
42957c478bd9Sstevel@tonic-gate
42967c478bd9Sstevel@tonic-gate retry:
42977c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfnum);
42987c478bd9Sstevel@tonic-gate if (pp == NULL) {
42997c478bd9Sstevel@tonic-gate return ((page_t *)NULL);
43007c478bd9Sstevel@tonic-gate }
43017c478bd9Sstevel@tonic-gate
43027c478bd9Sstevel@tonic-gate /*
43037c478bd9Sstevel@tonic-gate * Acquire the appropriate lock on the page.
43047c478bd9Sstevel@tonic-gate */
43057c478bd9Sstevel@tonic-gate while (!page_lock(pp, se, (kmutex_t *)NULL, P_NO_RECLAIM)) {
43067c478bd9Sstevel@tonic-gate if (page_pptonum(pp) != pfnum)
43077c478bd9Sstevel@tonic-gate goto retry;
43087c478bd9Sstevel@tonic-gate continue;
43097c478bd9Sstevel@tonic-gate }
43107c478bd9Sstevel@tonic-gate
43117c478bd9Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) {
43127c478bd9Sstevel@tonic-gate page_unlock(pp);
43137c478bd9Sstevel@tonic-gate goto retry;
43147c478bd9Sstevel@tonic-gate }
43157c478bd9Sstevel@tonic-gate
43167c478bd9Sstevel@tonic-gate return (pp);
43177c478bd9Sstevel@tonic-gate }
43187c478bd9Sstevel@tonic-gate
43197c478bd9Sstevel@tonic-gate /*
43207c478bd9Sstevel@tonic-gate * This routine is like page_numtopp, but will only return page structs
43217c478bd9Sstevel@tonic-gate * for pages which are ok for loading into hardware using the page struct.
43227c478bd9Sstevel@tonic-gate */
43237c478bd9Sstevel@tonic-gate page_t *
43247c478bd9Sstevel@tonic-gate page_numtopp_nowait(pfn_t pfnum, se_t se)
43257c478bd9Sstevel@tonic-gate {
43267c478bd9Sstevel@tonic-gate page_t *pp;
43277c478bd9Sstevel@tonic-gate
43287c478bd9Sstevel@tonic-gate retry:
43297c478bd9Sstevel@tonic-gate pp = page_numtopp_nolock(pfnum);
43307c478bd9Sstevel@tonic-gate if (pp == NULL) {
43317c478bd9Sstevel@tonic-gate return ((page_t *)NULL);
43327c478bd9Sstevel@tonic-gate }
43337c478bd9Sstevel@tonic-gate
43347c478bd9Sstevel@tonic-gate /*
43357c478bd9Sstevel@tonic-gate * Try to acquire the appropriate lock on the page.
43367c478bd9Sstevel@tonic-gate */
43377c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp))
43387c478bd9Sstevel@tonic-gate pp = NULL;
43397c478bd9Sstevel@tonic-gate else {
43407c478bd9Sstevel@tonic-gate if (!page_trylock(pp, se))
43417c478bd9Sstevel@tonic-gate pp = NULL;
43427c478bd9Sstevel@tonic-gate else {
43437c478bd9Sstevel@tonic-gate if (page_pptonum(pp) != pfnum) {
43447c478bd9Sstevel@tonic-gate page_unlock(pp);
43457c478bd9Sstevel@tonic-gate goto retry;
43467c478bd9Sstevel@tonic-gate }
43477c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp)) {
43487c478bd9Sstevel@tonic-gate page_unlock(pp);
43497c478bd9Sstevel@tonic-gate pp = NULL;
43507c478bd9Sstevel@tonic-gate }
43517c478bd9Sstevel@tonic-gate }
43527c478bd9Sstevel@tonic-gate }
43537c478bd9Sstevel@tonic-gate return (pp);
43547c478bd9Sstevel@tonic-gate }
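
/*
 * Illustrative sketch, not part of the original source: page_numtopp_nowait()
 * never sleeps, so a caller that must not block simply treats a NULL return
 * (page busy or free) as "skip this frame for now".
 *
 *	if ((pp = page_numtopp_nowait(pfn, SE_SHARED)) == NULL)
 *		return;			... busy or free; try again later ...
 *	... use pp ...
 *	page_unlock(pp);
 */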
43557c478bd9Sstevel@tonic-gate
43567c478bd9Sstevel@tonic-gate /*
43577c478bd9Sstevel@tonic-gate * Returns a count of dirty pages that are in the process
43587c478bd9Sstevel@tonic-gate * of being written out. If 'cleanit' is set, try to push the page.
43597c478bd9Sstevel@tonic-gate */
43607c478bd9Sstevel@tonic-gate pgcnt_t
43617c478bd9Sstevel@tonic-gate page_busy(int cleanit)
43627c478bd9Sstevel@tonic-gate {
43637c478bd9Sstevel@tonic-gate page_t *page0 = page_first();
43647c478bd9Sstevel@tonic-gate page_t *pp = page0;
43657c478bd9Sstevel@tonic-gate pgcnt_t nppbusy = 0;
43667c478bd9Sstevel@tonic-gate u_offset_t off;
43677c478bd9Sstevel@tonic-gate
43687c478bd9Sstevel@tonic-gate do {
43697c478bd9Sstevel@tonic-gate vnode_t *vp = pp->p_vnode;
43707c478bd9Sstevel@tonic-gate /*
43717c478bd9Sstevel@tonic-gate * A page is a candidate for syncing if it is:
43727c478bd9Sstevel@tonic-gate *
43737c478bd9Sstevel@tonic-gate * (a) On neither the freelist nor the cachelist
43747c478bd9Sstevel@tonic-gate * (b) Hashed onto a vnode
43757c478bd9Sstevel@tonic-gate * (c) Not a kernel page
43767c478bd9Sstevel@tonic-gate * (d) Dirty
43777c478bd9Sstevel@tonic-gate * (e) Not part of a swapfile
43787c478bd9Sstevel@tonic-gate 		 * (f) Belongs to a real vnode, i.e. has a non-null
43797c478bd9Sstevel@tonic-gate * v_vfsp pointer.
43807c478bd9Sstevel@tonic-gate * (g) Backed by a filesystem which doesn't have a
43817c478bd9Sstevel@tonic-gate * stubbed-out sync operation
43827c478bd9Sstevel@tonic-gate */
4383ad23a2dbSjohansen if (!PP_ISFREE(pp) && vp != NULL && !VN_ISKAS(vp) &&
43847c478bd9Sstevel@tonic-gate hat_ismod(pp) && !IS_SWAPVP(vp) && vp->v_vfsp != NULL &&
43857c478bd9Sstevel@tonic-gate vfs_can_sync(vp->v_vfsp)) {
43867c478bd9Sstevel@tonic-gate nppbusy++;
43877c478bd9Sstevel@tonic-gate
43887c478bd9Sstevel@tonic-gate if (!cleanit)
43897c478bd9Sstevel@tonic-gate continue;
43907c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL))
43917c478bd9Sstevel@tonic-gate continue;
43927c478bd9Sstevel@tonic-gate
43937c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp) || vp == NULL || IS_SWAPVP(vp) ||
43947c478bd9Sstevel@tonic-gate pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
43957c478bd9Sstevel@tonic-gate !(hat_pagesync(pp,
43967c478bd9Sstevel@tonic-gate HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD) & P_MOD)) {
43977c478bd9Sstevel@tonic-gate page_unlock(pp);
43987c478bd9Sstevel@tonic-gate continue;
43997c478bd9Sstevel@tonic-gate }
44007c478bd9Sstevel@tonic-gate off = pp->p_offset;
44017c478bd9Sstevel@tonic-gate VN_HOLD(vp);
44027c478bd9Sstevel@tonic-gate page_unlock(pp);
44037c478bd9Sstevel@tonic-gate (void) VOP_PUTPAGE(vp, off, PAGESIZE,
4404da6c28aaSamw B_ASYNC | B_FREE, kcred, NULL);
44057c478bd9Sstevel@tonic-gate VN_RELE(vp);
44067c478bd9Sstevel@tonic-gate }
44077c478bd9Sstevel@tonic-gate } while ((pp = page_next(pp)) != page0);
44087c478bd9Sstevel@tonic-gate
44097c478bd9Sstevel@tonic-gate return (nppbusy);
44107c478bd9Sstevel@tonic-gate }
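
/*
 * Illustrative sketch, not part of the original source: a suspend-style
 * caller could use page_busy() to start pushing dirty pages and then poll
 * until the busy count settles, giving up after a bounded number of passes.
 * The retry limit below is an arbitrary example value.
 *
 *	int tries = 0;
 *
 *	while (page_busy(1) != 0 && tries++ < 10)
 *		delay(hz);		... let the async putpages finish ...
 */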
44117c478bd9Sstevel@tonic-gate
44127c478bd9Sstevel@tonic-gate void page_invalidate_pages(void);
44137c478bd9Sstevel@tonic-gate
44147c478bd9Sstevel@tonic-gate /*
44157c478bd9Sstevel@tonic-gate * callback handler to vm sub-system
44167c478bd9Sstevel@tonic-gate *
44177c478bd9Sstevel@tonic-gate  * Callers must ensure that this function is not entered recursively.
44187c478bd9Sstevel@tonic-gate */
44197c478bd9Sstevel@tonic-gate /*ARGSUSED*/
44207c478bd9Sstevel@tonic-gate boolean_t
44217c478bd9Sstevel@tonic-gate callb_vm_cpr(void *arg, int code)
44227c478bd9Sstevel@tonic-gate {
44237c478bd9Sstevel@tonic-gate if (code == CB_CODE_CPR_CHKPT)
44247c478bd9Sstevel@tonic-gate page_invalidate_pages();
44257c478bd9Sstevel@tonic-gate return (B_TRUE);
44267c478bd9Sstevel@tonic-gate }
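
/*
 * Illustrative sketch, not part of the original source: a handler like
 * callb_vm_cpr() would be registered with the callback framework during
 * boot so that CPR invalidates pages before taking a checkpoint.  The
 * callback class and name below are assumptions for illustration only.
 *
 *	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
 */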
44277c478bd9Sstevel@tonic-gate
44287c478bd9Sstevel@tonic-gate /*
44297c478bd9Sstevel@tonic-gate * Invalidate all pages of the system.
44307c478bd9Sstevel@tonic-gate  * It should not be called until all user page activity has stopped.
44317c478bd9Sstevel@tonic-gate */
44327c478bd9Sstevel@tonic-gate void
44337c478bd9Sstevel@tonic-gate page_invalidate_pages()
44347c478bd9Sstevel@tonic-gate {
44357c478bd9Sstevel@tonic-gate page_t *pp;
44367c478bd9Sstevel@tonic-gate page_t *page0;
44377c478bd9Sstevel@tonic-gate pgcnt_t nbusypages;
44387c478bd9Sstevel@tonic-gate int retry = 0;
44397c478bd9Sstevel@tonic-gate const int MAXRETRIES = 4;
44407c478bd9Sstevel@tonic-gate top:
44417c478bd9Sstevel@tonic-gate /*
44428b464eb8Smec * Flush dirty pages and destroy the clean ones.
44437c478bd9Sstevel@tonic-gate */
44447c478bd9Sstevel@tonic-gate nbusypages = 0;
44457c478bd9Sstevel@tonic-gate
44467c478bd9Sstevel@tonic-gate pp = page0 = page_first();
44477c478bd9Sstevel@tonic-gate do {
44487c478bd9Sstevel@tonic-gate struct vnode *vp;
44497c478bd9Sstevel@tonic-gate u_offset_t offset;
44507c478bd9Sstevel@tonic-gate int mod;
44517c478bd9Sstevel@tonic-gate
44527c478bd9Sstevel@tonic-gate /*
44537c478bd9Sstevel@tonic-gate 		 * Skip the page if it has no vnode, or if it is associated
44547c478bd9Sstevel@tonic-gate 		 * with the kernel vnode or prom-allocated kernel memory.
44557c478bd9Sstevel@tonic-gate */
4456ad23a2dbSjohansen if ((vp = pp->p_vnode) == NULL || VN_ISKAS(vp))
44577c478bd9Sstevel@tonic-gate continue;
44587c478bd9Sstevel@tonic-gate
44597c478bd9Sstevel@tonic-gate /*
44607c478bd9Sstevel@tonic-gate 		 * Skip the page if it has already been freed and invalidated.
44617c478bd9Sstevel@tonic-gate */
44627c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp) && PP_ISAGED(pp))
44637c478bd9Sstevel@tonic-gate continue;
44647c478bd9Sstevel@tonic-gate
44657c478bd9Sstevel@tonic-gate /*
44667c478bd9Sstevel@tonic-gate * skip pages that are already locked or can't be "exclusively"
44677c478bd9Sstevel@tonic-gate * locked or are already free. After we lock the page, check
44682e0ea4c4SMichael Corcoran * the free and age bits again to be sure it's not destroyed
44697c478bd9Sstevel@tonic-gate * yet.
44707c478bd9Sstevel@tonic-gate * To achieve max. parallelization, we use page_trylock instead
44717c478bd9Sstevel@tonic-gate 		 * of page_lock so that we don't block on individual pages
44727c478bd9Sstevel@tonic-gate * while we have thousands of other pages to process.
44737c478bd9Sstevel@tonic-gate */
44747c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) {
44757c478bd9Sstevel@tonic-gate nbusypages++;
44767c478bd9Sstevel@tonic-gate continue;
44777c478bd9Sstevel@tonic-gate } else if (PP_ISFREE(pp)) {
44787c478bd9Sstevel@tonic-gate if (!PP_ISAGED(pp)) {
44797c478bd9Sstevel@tonic-gate page_destroy_free(pp);
44807c478bd9Sstevel@tonic-gate } else {
44817c478bd9Sstevel@tonic-gate page_unlock(pp);
44827c478bd9Sstevel@tonic-gate }
44837c478bd9Sstevel@tonic-gate continue;
44847c478bd9Sstevel@tonic-gate }
44857c478bd9Sstevel@tonic-gate /*
44867c478bd9Sstevel@tonic-gate * Is this page involved in some I/O? shared?
44877c478bd9Sstevel@tonic-gate *
44887c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired to
44897c478bd9Sstevel@tonic-gate * examine these fields since the page has an
44907c478bd9Sstevel@tonic-gate * "exclusive" lock.
44917c478bd9Sstevel@tonic-gate */
44927c478bd9Sstevel@tonic-gate if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
44937c478bd9Sstevel@tonic-gate page_unlock(pp);
44947c478bd9Sstevel@tonic-gate continue;
44957c478bd9Sstevel@tonic-gate }
44967c478bd9Sstevel@tonic-gate
44977c478bd9Sstevel@tonic-gate if (vp->v_type == VCHR) {
44987c478bd9Sstevel@tonic-gate panic("vp->v_type == VCHR");
44997c478bd9Sstevel@tonic-gate /*NOTREACHED*/
45007c478bd9Sstevel@tonic-gate }
45017c478bd9Sstevel@tonic-gate
45027c478bd9Sstevel@tonic-gate if (!page_try_demote_pages(pp)) {
45037c478bd9Sstevel@tonic-gate page_unlock(pp);
45047c478bd9Sstevel@tonic-gate continue;
45057c478bd9Sstevel@tonic-gate }
45067c478bd9Sstevel@tonic-gate
45077c478bd9Sstevel@tonic-gate /*
45087c478bd9Sstevel@tonic-gate * Check the modified bit. Leave the bits alone in hardware
45097c478bd9Sstevel@tonic-gate * (they will be modified if we do the putpage).
45107c478bd9Sstevel@tonic-gate */
45117c478bd9Sstevel@tonic-gate mod = (hat_pagesync(pp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD)
45126e4dd838Smec & P_MOD);
45137c478bd9Sstevel@tonic-gate if (mod) {
45147c478bd9Sstevel@tonic-gate offset = pp->p_offset;
45157c478bd9Sstevel@tonic-gate /*
45167c478bd9Sstevel@tonic-gate * Hold the vnode before releasing the page lock
45177c478bd9Sstevel@tonic-gate * to prevent it from being freed and re-used by
45187c478bd9Sstevel@tonic-gate * some other thread.
45197c478bd9Sstevel@tonic-gate */
45207c478bd9Sstevel@tonic-gate VN_HOLD(vp);
45217c478bd9Sstevel@tonic-gate page_unlock(pp);
45227c478bd9Sstevel@tonic-gate /*
45237c478bd9Sstevel@tonic-gate * No error return is checked here. Callers such as
45247c478bd9Sstevel@tonic-gate 			 * cpr deal with the dirty pages at dump time
45257c478bd9Sstevel@tonic-gate * if this putpage fails.
45267c478bd9Sstevel@tonic-gate */
45277c478bd9Sstevel@tonic-gate (void) VOP_PUTPAGE(vp, offset, PAGESIZE, B_INVAL,
4528da6c28aaSamw kcred, NULL);
45297c478bd9Sstevel@tonic-gate VN_RELE(vp);
45307c478bd9Sstevel@tonic-gate } else {
45312e0ea4c4SMichael Corcoran /*LINTED: constant in conditional context*/
45322e0ea4c4SMichael Corcoran VN_DISPOSE(pp, B_INVAL, 0, kcred);
45337c478bd9Sstevel@tonic-gate }
45347c478bd9Sstevel@tonic-gate } while ((pp = page_next(pp)) != page0);
45357c478bd9Sstevel@tonic-gate if (nbusypages && retry++ < MAXRETRIES) {
45367c478bd9Sstevel@tonic-gate delay(1);
45377c478bd9Sstevel@tonic-gate goto top;
45387c478bd9Sstevel@tonic-gate }
45397c478bd9Sstevel@tonic-gate }
45407c478bd9Sstevel@tonic-gate
45417c478bd9Sstevel@tonic-gate /*
45427c478bd9Sstevel@tonic-gate * Replace the page "old" with the page "new" on the page hash and vnode lists
45437c478bd9Sstevel@tonic-gate *
4544da6c28aaSamw  * The replacement must be done in place, i.e. the equivalent sequence:
45457c478bd9Sstevel@tonic-gate *
45467c478bd9Sstevel@tonic-gate * vp = old->p_vnode;
45477c478bd9Sstevel@tonic-gate * off = old->p_offset;
45487c478bd9Sstevel@tonic-gate * page_do_hashout(old)
45497c478bd9Sstevel@tonic-gate * page_do_hashin(new, vp, off)
45507c478bd9Sstevel@tonic-gate *
45517c478bd9Sstevel@tonic-gate * doesn't work, since
45527c478bd9Sstevel@tonic-gate * 1) if old is the only page on the vnode, the v_pages list has a window
45537c478bd9Sstevel@tonic-gate * where it looks empty. This will break file system assumptions.
45547c478bd9Sstevel@tonic-gate * and
45557c478bd9Sstevel@tonic-gate * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
45567c478bd9Sstevel@tonic-gate */
45577c478bd9Sstevel@tonic-gate static void
45587c478bd9Sstevel@tonic-gate page_do_relocate_hash(page_t *new, page_t *old)
45597c478bd9Sstevel@tonic-gate {
45607c478bd9Sstevel@tonic-gate page_t **hash_list;
45617c478bd9Sstevel@tonic-gate vnode_t *vp = old->p_vnode;
45627c478bd9Sstevel@tonic-gate kmutex_t *sep;
45637c478bd9Sstevel@tonic-gate
45647c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(old));
45657c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(new));
45667c478bd9Sstevel@tonic-gate ASSERT(vp != NULL);
45677c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(page_vnode_mutex(vp)));
45687c478bd9Sstevel@tonic-gate ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, old->p_offset))));
45697c478bd9Sstevel@tonic-gate
45707c478bd9Sstevel@tonic-gate /*
45717c478bd9Sstevel@tonic-gate * First find old page on the page hash list
45727c478bd9Sstevel@tonic-gate */
45737c478bd9Sstevel@tonic-gate hash_list = &page_hash[PAGE_HASH_FUNC(vp, old->p_offset)];
45747c478bd9Sstevel@tonic-gate
45757c478bd9Sstevel@tonic-gate for (;;) {
45767c478bd9Sstevel@tonic-gate if (*hash_list == old)
45777c478bd9Sstevel@tonic-gate break;
45787c478bd9Sstevel@tonic-gate if (*hash_list == NULL) {
45797c478bd9Sstevel@tonic-gate panic("page_do_hashout");
45807c478bd9Sstevel@tonic-gate /*NOTREACHED*/
45817c478bd9Sstevel@tonic-gate }
45827c478bd9Sstevel@tonic-gate hash_list = &(*hash_list)->p_hash;
45837c478bd9Sstevel@tonic-gate }
45847c478bd9Sstevel@tonic-gate
45857c478bd9Sstevel@tonic-gate /*
45867c478bd9Sstevel@tonic-gate * update new and replace old with new on the page hash list
45877c478bd9Sstevel@tonic-gate */
45887c478bd9Sstevel@tonic-gate new->p_vnode = old->p_vnode;
45897c478bd9Sstevel@tonic-gate new->p_offset = old->p_offset;
45907c478bd9Sstevel@tonic-gate new->p_hash = old->p_hash;
45917c478bd9Sstevel@tonic-gate *hash_list = new;
45927c478bd9Sstevel@tonic-gate
45937c478bd9Sstevel@tonic-gate if ((new->p_vnode->v_flag & VISSWAP) != 0)
45947c478bd9Sstevel@tonic-gate PP_SETSWAP(new);
45957c478bd9Sstevel@tonic-gate
45967c478bd9Sstevel@tonic-gate /*
45977c478bd9Sstevel@tonic-gate * replace old with new on the vnode's page list
45987c478bd9Sstevel@tonic-gate */
45997c478bd9Sstevel@tonic-gate if (old->p_vpnext == old) {
46007c478bd9Sstevel@tonic-gate new->p_vpnext = new;
46017c478bd9Sstevel@tonic-gate new->p_vpprev = new;
46027c478bd9Sstevel@tonic-gate } else {
46037c478bd9Sstevel@tonic-gate new->p_vpnext = old->p_vpnext;
46047c478bd9Sstevel@tonic-gate new->p_vpprev = old->p_vpprev;
46057c478bd9Sstevel@tonic-gate new->p_vpnext->p_vpprev = new;
46067c478bd9Sstevel@tonic-gate new->p_vpprev->p_vpnext = new;
46077c478bd9Sstevel@tonic-gate }
46087c478bd9Sstevel@tonic-gate if (vp->v_pages == old)
46097c478bd9Sstevel@tonic-gate vp->v_pages = new;
46107c478bd9Sstevel@tonic-gate
46117c478bd9Sstevel@tonic-gate /*
46127c478bd9Sstevel@tonic-gate * clear out the old page
46137c478bd9Sstevel@tonic-gate */
46147c478bd9Sstevel@tonic-gate old->p_hash = NULL;
46157c478bd9Sstevel@tonic-gate old->p_vpnext = NULL;
46167c478bd9Sstevel@tonic-gate old->p_vpprev = NULL;
46177c478bd9Sstevel@tonic-gate old->p_vnode = NULL;
46187c478bd9Sstevel@tonic-gate PP_CLRSWAP(old);
46197c478bd9Sstevel@tonic-gate old->p_offset = (u_offset_t)-1;
46209d0d62adSJason Beloro page_clr_all_props(old);
46217c478bd9Sstevel@tonic-gate
46227c478bd9Sstevel@tonic-gate /*
46237c478bd9Sstevel@tonic-gate * Wake up processes waiting for this page. The page's
46247c478bd9Sstevel@tonic-gate * identity has been changed, and is probably not the
46257c478bd9Sstevel@tonic-gate * desired page any longer.
46267c478bd9Sstevel@tonic-gate */
46277c478bd9Sstevel@tonic-gate sep = page_se_mutex(old);
46287c478bd9Sstevel@tonic-gate mutex_enter(sep);
462942787a71Sstans old->p_selock &= ~SE_EWANTED;
46307c478bd9Sstevel@tonic-gate if (CV_HAS_WAITERS(&old->p_cv))
46317c478bd9Sstevel@tonic-gate cv_broadcast(&old->p_cv);
46327c478bd9Sstevel@tonic-gate mutex_exit(sep);
46337c478bd9Sstevel@tonic-gate }
46347c478bd9Sstevel@tonic-gate
46357c478bd9Sstevel@tonic-gate /*
46367c478bd9Sstevel@tonic-gate * This function moves the identity of page "pp_old" to page "pp_new".
46377c478bd9Sstevel@tonic-gate * Both pages must be locked on entry. "pp_new" is free, has no identity,
46387c478bd9Sstevel@tonic-gate * and need not be hashed out from anywhere.
46397c478bd9Sstevel@tonic-gate */
46407c478bd9Sstevel@tonic-gate void
46417c478bd9Sstevel@tonic-gate page_relocate_hash(page_t *pp_new, page_t *pp_old)
46427c478bd9Sstevel@tonic-gate {
46437c478bd9Sstevel@tonic-gate vnode_t *vp = pp_old->p_vnode;
46447c478bd9Sstevel@tonic-gate u_offset_t off = pp_old->p_offset;
46457c478bd9Sstevel@tonic-gate kmutex_t *phm, *vphm;
46467c478bd9Sstevel@tonic-gate
46477c478bd9Sstevel@tonic-gate /*
46487c478bd9Sstevel@tonic-gate * Rehash two pages
46497c478bd9Sstevel@tonic-gate */
46507c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp_old));
46517c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp_new));
46527c478bd9Sstevel@tonic-gate ASSERT(vp != NULL);
46537c478bd9Sstevel@tonic-gate ASSERT(pp_new->p_vnode == NULL);
46547c478bd9Sstevel@tonic-gate
46557c478bd9Sstevel@tonic-gate /*
46567c478bd9Sstevel@tonic-gate * hashout then hashin while holding the mutexes
46577c478bd9Sstevel@tonic-gate */
46587c478bd9Sstevel@tonic-gate phm = PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp, off));
46597c478bd9Sstevel@tonic-gate mutex_enter(phm);
46607c478bd9Sstevel@tonic-gate vphm = page_vnode_mutex(vp);
46617c478bd9Sstevel@tonic-gate mutex_enter(vphm);
46627c478bd9Sstevel@tonic-gate
46637c478bd9Sstevel@tonic-gate page_do_relocate_hash(pp_new, pp_old);
46647c478bd9Sstevel@tonic-gate
4665c7531c7fSPrakash Sangappa /* The following comment preserved from page_flip(). */
4666c7531c7fSPrakash Sangappa pp_new->p_fsdata = pp_old->p_fsdata;
4667c7531c7fSPrakash Sangappa pp_old->p_fsdata = 0;
46687c478bd9Sstevel@tonic-gate mutex_exit(vphm);
46697c478bd9Sstevel@tonic-gate mutex_exit(phm);
46707c478bd9Sstevel@tonic-gate
46717c478bd9Sstevel@tonic-gate /*
46727c478bd9Sstevel@tonic-gate * The page_struct_lock need not be acquired for lckcnt and
46737c478bd9Sstevel@tonic-gate * cowcnt since the page has an "exclusive" lock.
46747c478bd9Sstevel@tonic-gate */
46757c478bd9Sstevel@tonic-gate ASSERT(pp_new->p_lckcnt == 0);
46767c478bd9Sstevel@tonic-gate ASSERT(pp_new->p_cowcnt == 0);
46777c478bd9Sstevel@tonic-gate pp_new->p_lckcnt = pp_old->p_lckcnt;
46787c478bd9Sstevel@tonic-gate pp_new->p_cowcnt = pp_old->p_cowcnt;
46797c478bd9Sstevel@tonic-gate pp_old->p_lckcnt = pp_old->p_cowcnt = 0;
46807c478bd9Sstevel@tonic-gate
46817c478bd9Sstevel@tonic-gate }
46827c478bd9Sstevel@tonic-gate
46837c478bd9Sstevel@tonic-gate /*
46847c478bd9Sstevel@tonic-gate * Helper routine used to lock all remaining members of a
46857c478bd9Sstevel@tonic-gate * large page. The caller is responsible for passing in a locked
46867c478bd9Sstevel@tonic-gate * pp. If pp is a large page, then it succeeds in locking all the
46877c478bd9Sstevel@tonic-gate * remaining constituent pages or it returns with only the
46887c478bd9Sstevel@tonic-gate * original page locked.
46897c478bd9Sstevel@tonic-gate *
46907c478bd9Sstevel@tonic-gate * Returns 1 on success, 0 on failure.
46917c478bd9Sstevel@tonic-gate *
4692da6c28aaSamw  * If success is returned, this routine guarantees that p_szc cannot change
46937c478bd9Sstevel@tonic-gate  * for any constituent page of the large page pp belongs to. To achieve this we
46947c478bd9Sstevel@tonic-gate * recheck szc of pp after locking all constituent pages and retry if szc
46957c478bd9Sstevel@tonic-gate * changed (it could only decrease). Since hat_page_demote() needs an EXCL
46967c478bd9Sstevel@tonic-gate * lock on one of constituent pages it can't be running after all constituent
46977c478bd9Sstevel@tonic-gate * pages are locked. hat_page_demote() with a lock on a constituent page
46987c478bd9Sstevel@tonic-gate * outside of this large page (i.e. pp belonged to a larger large page) is
46997c478bd9Sstevel@tonic-gate * already done with all constituent pages of pp since the root's p_szc is
4700da6c28aaSamw * changed last. Therefore no need to synchronize with hat_page_demote() that
47017c478bd9Sstevel@tonic-gate * locked a constituent page outside of pp's current large page.
47027c478bd9Sstevel@tonic-gate */
47037c478bd9Sstevel@tonic-gate #ifdef DEBUG
47047c478bd9Sstevel@tonic-gate uint32_t gpg_trylock_mtbf = 0;
47057c478bd9Sstevel@tonic-gate #endif
47067c478bd9Sstevel@tonic-gate
47077c478bd9Sstevel@tonic-gate int
47087c478bd9Sstevel@tonic-gate group_page_trylock(page_t *pp, se_t se)
47097c478bd9Sstevel@tonic-gate {
47107c478bd9Sstevel@tonic-gate page_t *tpp;
47117c478bd9Sstevel@tonic-gate pgcnt_t npgs, i, j;
47127c478bd9Sstevel@tonic-gate uint_t pszc = pp->p_szc;
47137c478bd9Sstevel@tonic-gate
47147c478bd9Sstevel@tonic-gate #ifdef DEBUG
47157c478bd9Sstevel@tonic-gate if (gpg_trylock_mtbf && !(gethrtime() % gpg_trylock_mtbf)) {
47167c478bd9Sstevel@tonic-gate return (0);
47177c478bd9Sstevel@tonic-gate }
47187c478bd9Sstevel@tonic-gate #endif
47197c478bd9Sstevel@tonic-gate
47207c478bd9Sstevel@tonic-gate if (pp != PP_GROUPLEADER(pp, pszc)) {
47217c478bd9Sstevel@tonic-gate return (0);
47227c478bd9Sstevel@tonic-gate }
47237c478bd9Sstevel@tonic-gate
47247c478bd9Sstevel@tonic-gate retry:
47257c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED_SE(pp, se));
47267c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
47277c478bd9Sstevel@tonic-gate if (pszc == 0) {
47287c478bd9Sstevel@tonic-gate return (1);
47297c478bd9Sstevel@tonic-gate }
47307c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(pszc);
47317c478bd9Sstevel@tonic-gate tpp = pp + 1;
47327c478bd9Sstevel@tonic-gate for (i = 1; i < npgs; i++, tpp++) {
47337c478bd9Sstevel@tonic-gate if (!page_trylock(tpp, se)) {
47347c478bd9Sstevel@tonic-gate tpp = pp + 1;
47357c478bd9Sstevel@tonic-gate for (j = 1; j < i; j++, tpp++) {
47367c478bd9Sstevel@tonic-gate page_unlock(tpp);
47377c478bd9Sstevel@tonic-gate }
47387c478bd9Sstevel@tonic-gate return (0);
47397c478bd9Sstevel@tonic-gate }
47407c478bd9Sstevel@tonic-gate }
47417c478bd9Sstevel@tonic-gate if (pp->p_szc != pszc) {
47427c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc < pszc);
4743ad23a2dbSjohansen ASSERT(pp->p_vnode != NULL && !PP_ISKAS(pp) &&
47447c478bd9Sstevel@tonic-gate !IS_SWAPFSVP(pp->p_vnode));
47457c478bd9Sstevel@tonic-gate tpp = pp + 1;
47467c478bd9Sstevel@tonic-gate for (i = 1; i < npgs; i++, tpp++) {
47477c478bd9Sstevel@tonic-gate page_unlock(tpp);
47487c478bd9Sstevel@tonic-gate }
47497c478bd9Sstevel@tonic-gate pszc = pp->p_szc;
47507c478bd9Sstevel@tonic-gate goto retry;
47517c478bd9Sstevel@tonic-gate }
47527c478bd9Sstevel@tonic-gate return (1);
47537c478bd9Sstevel@tonic-gate }
47547c478bd9Sstevel@tonic-gate
47557c478bd9Sstevel@tonic-gate void
47567c478bd9Sstevel@tonic-gate group_page_unlock(page_t *pp)
47577c478bd9Sstevel@tonic-gate {
47587c478bd9Sstevel@tonic-gate page_t *tpp;
47597c478bd9Sstevel@tonic-gate pgcnt_t npgs, i;
47607c478bd9Sstevel@tonic-gate
47617c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp));
47627c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
47637c478bd9Sstevel@tonic-gate ASSERT(pp == PP_PAGEROOT(pp));
47647c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(pp->p_szc);
47657c478bd9Sstevel@tonic-gate for (i = 1, tpp = pp + 1; i < npgs; i++, tpp++) {
47667c478bd9Sstevel@tonic-gate page_unlock(tpp);
47677c478bd9Sstevel@tonic-gate }
47687c478bd9Sstevel@tonic-gate }
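
/*
 * Illustrative sketch, not part of the original source: pairing
 * group_page_trylock() with group_page_unlock() around work that needs a
 * stable p_szc.  Here pp is assumed to be the group leader and to be locked
 * by the caller already, as group_page_trylock() requires.
 *
 *	if (group_page_trylock(pp, SE_EXCL)) {
 *		... every constituent page is now held SE_EXCL ...
 *		group_page_unlock(pp);	... drops all but pp's own lock ...
 *	}
 */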
47697c478bd9Sstevel@tonic-gate
47707c478bd9Sstevel@tonic-gate /*
47717c478bd9Sstevel@tonic-gate * returns
4772727737b4SJoshua M. Clulow * 0 : on success and *nrelocp is number of relocated PAGESIZE pages
47737c478bd9Sstevel@tonic-gate * ERANGE : this is not a base page
47747c478bd9Sstevel@tonic-gate * EBUSY : failure to get locks on the page/pages
47757c478bd9Sstevel@tonic-gate * ENOMEM : failure to obtain replacement pages
47767c478bd9Sstevel@tonic-gate * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
47778b464eb8Smec * EIO : An error occurred while trying to copy the page data
47787c478bd9Sstevel@tonic-gate *
47797c478bd9Sstevel@tonic-gate * Return with all constituent members of target and replacement
47807c478bd9Sstevel@tonic-gate  * SE_EXCL locked. It is the caller's responsibility to drop the
47817c478bd9Sstevel@tonic-gate * locks.
47827c478bd9Sstevel@tonic-gate */
47837c478bd9Sstevel@tonic-gate int
47847c478bd9Sstevel@tonic-gate do_page_relocate(
47857c478bd9Sstevel@tonic-gate page_t **target,
47867c478bd9Sstevel@tonic-gate page_t **replacement,
47877c478bd9Sstevel@tonic-gate int grouplock,
47887c478bd9Sstevel@tonic-gate spgcnt_t *nrelocp,
47897c478bd9Sstevel@tonic-gate lgrp_t *lgrp)
47907c478bd9Sstevel@tonic-gate {
47917c478bd9Sstevel@tonic-gate page_t *first_repl;
47927c478bd9Sstevel@tonic-gate page_t *repl;
47937c478bd9Sstevel@tonic-gate page_t *targ;
47947c478bd9Sstevel@tonic-gate page_t *pl = NULL;
47957c478bd9Sstevel@tonic-gate uint_t ppattr;
47967c478bd9Sstevel@tonic-gate pfn_t pfn, repl_pfn;
47977c478bd9Sstevel@tonic-gate uint_t szc;
47987c478bd9Sstevel@tonic-gate spgcnt_t npgs, i;
47997c478bd9Sstevel@tonic-gate int repl_contig = 0;
48007c478bd9Sstevel@tonic-gate uint_t flags = 0;
48017c478bd9Sstevel@tonic-gate spgcnt_t dofree = 0;
48027c478bd9Sstevel@tonic-gate
48037c478bd9Sstevel@tonic-gate *nrelocp = 0;
48047c478bd9Sstevel@tonic-gate
48057c478bd9Sstevel@tonic-gate #if defined(__sparc)
48067c478bd9Sstevel@tonic-gate /*
48077c478bd9Sstevel@tonic-gate * We need to wait till OBP has completed
48087c478bd9Sstevel@tonic-gate * its boot-time handoff of its resources to the kernel
48097c478bd9Sstevel@tonic-gate * before we allow page relocation
48107c478bd9Sstevel@tonic-gate */
48117c478bd9Sstevel@tonic-gate if (page_relocate_ready == 0) {
48127c478bd9Sstevel@tonic-gate return (EAGAIN);
48137c478bd9Sstevel@tonic-gate }
48147c478bd9Sstevel@tonic-gate #endif
48157c478bd9Sstevel@tonic-gate
48167c478bd9Sstevel@tonic-gate /*
48177c478bd9Sstevel@tonic-gate * If this is not a base page,
48187c478bd9Sstevel@tonic-gate * just return with 0x0 pages relocated.
48197c478bd9Sstevel@tonic-gate */
48207c478bd9Sstevel@tonic-gate targ = *target;
48217c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(targ));
48227c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(targ));
48237c478bd9Sstevel@tonic-gate szc = targ->p_szc;
48247c478bd9Sstevel@tonic-gate ASSERT(szc < mmu_page_sizes);
48257c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
48267c478bd9Sstevel@tonic-gate pfn = targ->p_pagenum;
48277c478bd9Sstevel@tonic-gate if (pfn != PFN_BASE(pfn, szc)) {
48287c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnoroot[szc]);
48297c478bd9Sstevel@tonic-gate return (ERANGE);
48307c478bd9Sstevel@tonic-gate }
48317c478bd9Sstevel@tonic-gate
48327c478bd9Sstevel@tonic-gate if ((repl = *replacement) != NULL && repl->p_szc >= szc) {
48337c478bd9Sstevel@tonic-gate repl_pfn = repl->p_pagenum;
48347c478bd9Sstevel@tonic-gate if (repl_pfn != PFN_BASE(repl_pfn, szc)) {
48357c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc_replnoroot[szc]);
48367c478bd9Sstevel@tonic-gate return (ERANGE);
48377c478bd9Sstevel@tonic-gate }
48387c478bd9Sstevel@tonic-gate repl_contig = 1;
48397c478bd9Sstevel@tonic-gate }
48407c478bd9Sstevel@tonic-gate
48417c478bd9Sstevel@tonic-gate /*
48427c478bd9Sstevel@tonic-gate * We must lock all members of this large page or we cannot
48437c478bd9Sstevel@tonic-gate * relocate any part of it.
48447c478bd9Sstevel@tonic-gate */
48457c478bd9Sstevel@tonic-gate if (grouplock != 0 && !group_page_trylock(targ, SE_EXCL)) {
48467c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnolock[targ->p_szc]);
48477c478bd9Sstevel@tonic-gate return (EBUSY);
48487c478bd9Sstevel@tonic-gate }
48497c478bd9Sstevel@tonic-gate
48507c478bd9Sstevel@tonic-gate /*
48517c478bd9Sstevel@tonic-gate 	 * Reread szc; it could have been decreased before
48527c478bd9Sstevel@tonic-gate * group_page_trylock() was done.
48537c478bd9Sstevel@tonic-gate */
48547c478bd9Sstevel@tonic-gate szc = targ->p_szc;
48557c478bd9Sstevel@tonic-gate ASSERT(szc < mmu_page_sizes);
48567c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_reloc[szc]);
48577c478bd9Sstevel@tonic-gate ASSERT(pfn == PFN_BASE(pfn, szc));
48587c478bd9Sstevel@tonic-gate
48597c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(targ->p_szc);
48607c478bd9Sstevel@tonic-gate
48617c478bd9Sstevel@tonic-gate if (repl == NULL) {
48627c478bd9Sstevel@tonic-gate dofree = npgs; /* Size of target page in MMU pages */
48637c478bd9Sstevel@tonic-gate if (!page_create_wait(dofree, 0)) {
48647c478bd9Sstevel@tonic-gate if (grouplock != 0) {
48657c478bd9Sstevel@tonic-gate group_page_unlock(targ);
48667c478bd9Sstevel@tonic-gate }
48677c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
48687c478bd9Sstevel@tonic-gate return (ENOMEM);
48697c478bd9Sstevel@tonic-gate }
48707c478bd9Sstevel@tonic-gate
48717c478bd9Sstevel@tonic-gate /*
48727c478bd9Sstevel@tonic-gate * seg kmem pages require that the target and replacement
48737c478bd9Sstevel@tonic-gate * page be the same pagesize.
48747c478bd9Sstevel@tonic-gate */
4875ad23a2dbSjohansen flags = (VN_ISKAS(targ->p_vnode)) ? PGR_SAMESZC : 0;
48767c478bd9Sstevel@tonic-gate repl = page_get_replacement_page(targ, lgrp, flags);
48777c478bd9Sstevel@tonic-gate if (repl == NULL) {
48787c478bd9Sstevel@tonic-gate if (grouplock != 0) {
48797c478bd9Sstevel@tonic-gate group_page_unlock(targ);
48807c478bd9Sstevel@tonic-gate }
48817c478bd9Sstevel@tonic-gate page_create_putback(dofree);
48827c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocnomem[szc]);
48837c478bd9Sstevel@tonic-gate return (ENOMEM);
48847c478bd9Sstevel@tonic-gate }
48857c478bd9Sstevel@tonic-gate }
48867c478bd9Sstevel@tonic-gate #ifdef DEBUG
48877c478bd9Sstevel@tonic-gate else {
48887c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(repl));
48897c478bd9Sstevel@tonic-gate }
48907c478bd9Sstevel@tonic-gate #endif /* DEBUG */
48917c478bd9Sstevel@tonic-gate
48927c478bd9Sstevel@tonic-gate #if defined(__sparc)
48937c478bd9Sstevel@tonic-gate /*
48947c478bd9Sstevel@tonic-gate 	 * Let hat_page_relocate() complete the relocation if it's a kernel page
48957c478bd9Sstevel@tonic-gate */
4896ad23a2dbSjohansen if (VN_ISKAS(targ->p_vnode)) {
48977c478bd9Sstevel@tonic-gate *replacement = repl;
48987c478bd9Sstevel@tonic-gate if (hat_page_relocate(target, replacement, nrelocp) != 0) {
48997c478bd9Sstevel@tonic-gate if (grouplock != 0) {
49007c478bd9Sstevel@tonic-gate group_page_unlock(targ);
49017c478bd9Sstevel@tonic-gate }
49027c478bd9Sstevel@tonic-gate if (dofree) {
49037c478bd9Sstevel@tonic-gate *replacement = NULL;
49047c478bd9Sstevel@tonic-gate page_free_replacement_page(repl);
49057c478bd9Sstevel@tonic-gate page_create_putback(dofree);
49067c478bd9Sstevel@tonic-gate }
49077c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_krelocfail[szc]);
49087c478bd9Sstevel@tonic-gate return (EAGAIN);
49097c478bd9Sstevel@tonic-gate }
49107c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
49117c478bd9Sstevel@tonic-gate return (0);
49127c478bd9Sstevel@tonic-gate }
49137c478bd9Sstevel@tonic-gate #else
49147c478bd9Sstevel@tonic-gate #if defined(lint)
49157c478bd9Sstevel@tonic-gate dofree = dofree;
49167c478bd9Sstevel@tonic-gate #endif
49177c478bd9Sstevel@tonic-gate #endif
49187c478bd9Sstevel@tonic-gate
49197c478bd9Sstevel@tonic-gate first_repl = repl;
49207c478bd9Sstevel@tonic-gate
49217c478bd9Sstevel@tonic-gate for (i = 0; i < npgs; i++) {
49227c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(targ));
492307b65a64Saguzovsk ASSERT(targ->p_slckcnt == 0);
492407b65a64Saguzovsk ASSERT(repl->p_slckcnt == 0);
49257c478bd9Sstevel@tonic-gate
49267c478bd9Sstevel@tonic-gate (void) hat_pageunload(targ, HAT_FORCE_PGUNLOAD);
49277c478bd9Sstevel@tonic-gate
49287c478bd9Sstevel@tonic-gate ASSERT(hat_page_getshare(targ) == 0);
49297c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(targ));
49307c478bd9Sstevel@tonic-gate ASSERT(targ->p_pagenum == (pfn + i));
49317c478bd9Sstevel@tonic-gate ASSERT(repl_contig == 0 ||
49327c478bd9Sstevel@tonic-gate repl->p_pagenum == (repl_pfn + i));
49337c478bd9Sstevel@tonic-gate
49347c478bd9Sstevel@tonic-gate /*
49357c478bd9Sstevel@tonic-gate * Copy the page contents and attributes then
49367c478bd9Sstevel@tonic-gate * relocate the page in the page hash.
49377c478bd9Sstevel@tonic-gate */
49388b464eb8Smec if (ppcopy(targ, repl) == 0) {
49398b464eb8Smec targ = *target;
49408b464eb8Smec repl = first_repl;
49418b464eb8Smec VM_STAT_ADD(vmm_vmstats.ppr_copyfail);
49428b464eb8Smec if (grouplock != 0) {
49438b464eb8Smec group_page_unlock(targ);
49448b464eb8Smec }
49458b464eb8Smec if (dofree) {
49468b464eb8Smec *replacement = NULL;
49478b464eb8Smec page_free_replacement_page(repl);
49488b464eb8Smec page_create_putback(dofree);
49498b464eb8Smec }
49508b464eb8Smec return (EIO);
49518b464eb8Smec }
49528b464eb8Smec
49538b464eb8Smec targ++;
49548b464eb8Smec if (repl_contig != 0) {
49558b464eb8Smec repl++;
49568b464eb8Smec } else {
49578b464eb8Smec repl = repl->p_next;
49588b464eb8Smec }
49598b464eb8Smec }
49608b464eb8Smec
49618b464eb8Smec repl = first_repl;
49628b464eb8Smec targ = *target;
49638b464eb8Smec
49648b464eb8Smec for (i = 0; i < npgs; i++) {
49657c478bd9Sstevel@tonic-gate ppattr = hat_page_getattr(targ, (P_MOD | P_REF | P_RO));
49669d0d62adSJason Beloro page_clr_all_props(repl);
49677c478bd9Sstevel@tonic-gate page_set_props(repl, ppattr);
49687c478bd9Sstevel@tonic-gate page_relocate_hash(repl, targ);
49697c478bd9Sstevel@tonic-gate
49707c478bd9Sstevel@tonic-gate ASSERT(hat_page_getshare(targ) == 0);
49717c478bd9Sstevel@tonic-gate ASSERT(hat_page_getshare(repl) == 0);
49727c478bd9Sstevel@tonic-gate /*
49737c478bd9Sstevel@tonic-gate * Now clear the props on targ, after the
49747c478bd9Sstevel@tonic-gate * page_relocate_hash(), they no longer
49757c478bd9Sstevel@tonic-gate * have any meaning.
49767c478bd9Sstevel@tonic-gate */
49779d0d62adSJason Beloro page_clr_all_props(targ);
49787c478bd9Sstevel@tonic-gate ASSERT(targ->p_next == targ);
49797c478bd9Sstevel@tonic-gate ASSERT(targ->p_prev == targ);
49807c478bd9Sstevel@tonic-gate page_list_concat(&pl, &targ);
49817c478bd9Sstevel@tonic-gate
49827c478bd9Sstevel@tonic-gate targ++;
49837c478bd9Sstevel@tonic-gate if (repl_contig != 0) {
49847c478bd9Sstevel@tonic-gate repl++;
49857c478bd9Sstevel@tonic-gate } else {
49867c478bd9Sstevel@tonic-gate repl = repl->p_next;
49877c478bd9Sstevel@tonic-gate }
49887c478bd9Sstevel@tonic-gate }
49897c478bd9Sstevel@tonic-gate /* assert that we have come full circle with repl */
49907c478bd9Sstevel@tonic-gate ASSERT(repl_contig == 1 || first_repl == repl);
49917c478bd9Sstevel@tonic-gate
49927c478bd9Sstevel@tonic-gate *target = pl;
49937c478bd9Sstevel@tonic-gate if (*replacement == NULL) {
49947c478bd9Sstevel@tonic-gate ASSERT(first_repl == repl);
49957c478bd9Sstevel@tonic-gate *replacement = repl;
49967c478bd9Sstevel@tonic-gate }
49977c478bd9Sstevel@tonic-gate VM_STAT_ADD(vmm_vmstats.ppr_relocok[szc]);
49987c478bd9Sstevel@tonic-gate *nrelocp = npgs;
49997c478bd9Sstevel@tonic-gate return (0);
50007c478bd9Sstevel@tonic-gate }
50017c478bd9Sstevel@tonic-gate /*
50027c478bd9Sstevel@tonic-gate * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
50037c478bd9Sstevel@tonic-gate */
50047c478bd9Sstevel@tonic-gate int
50057c478bd9Sstevel@tonic-gate page_relocate(
50067c478bd9Sstevel@tonic-gate page_t **target,
50077c478bd9Sstevel@tonic-gate page_t **replacement,
50087c478bd9Sstevel@tonic-gate int grouplock,
50097c478bd9Sstevel@tonic-gate int freetarget,
50107c478bd9Sstevel@tonic-gate spgcnt_t *nrelocp,
50117c478bd9Sstevel@tonic-gate lgrp_t *lgrp)
50127c478bd9Sstevel@tonic-gate {
50137c478bd9Sstevel@tonic-gate spgcnt_t ret;
50147c478bd9Sstevel@tonic-gate
50157c478bd9Sstevel@tonic-gate /* do_page_relocate returns 0 on success or errno value */
50167c478bd9Sstevel@tonic-gate ret = do_page_relocate(target, replacement, grouplock, nrelocp, lgrp);
50177c478bd9Sstevel@tonic-gate
50187c478bd9Sstevel@tonic-gate if (ret != 0 || freetarget == 0) {
50197c478bd9Sstevel@tonic-gate return (ret);
50207c478bd9Sstevel@tonic-gate }
50217c478bd9Sstevel@tonic-gate if (*nrelocp == 1) {
50227c478bd9Sstevel@tonic-gate ASSERT(*target != NULL);
50237c478bd9Sstevel@tonic-gate page_free(*target, 1);
50247c478bd9Sstevel@tonic-gate } else {
50257c478bd9Sstevel@tonic-gate page_t *tpp = *target;
50267c478bd9Sstevel@tonic-gate uint_t szc = tpp->p_szc;
50277c478bd9Sstevel@tonic-gate pgcnt_t npgs = page_get_pagecnt(szc);
50287c478bd9Sstevel@tonic-gate ASSERT(npgs > 1);
50297c478bd9Sstevel@tonic-gate ASSERT(szc != 0);
50307c478bd9Sstevel@tonic-gate do {
50317c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp));
50327c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp));
50337c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == szc);
50347c478bd9Sstevel@tonic-gate PP_SETFREE(tpp);
50357c478bd9Sstevel@tonic-gate PP_SETAGED(tpp);
50367c478bd9Sstevel@tonic-gate npgs--;
50377c478bd9Sstevel@tonic-gate } while ((tpp = tpp->p_next) != *target);
50387c478bd9Sstevel@tonic-gate ASSERT(npgs == 0);
50397c478bd9Sstevel@tonic-gate page_list_add_pages(*target, 0);
50407c478bd9Sstevel@tonic-gate npgs = page_get_pagecnt(szc);
50417c478bd9Sstevel@tonic-gate page_create_putback(npgs);
50427c478bd9Sstevel@tonic-gate }
50437c478bd9Sstevel@tonic-gate return (ret);
50447c478bd9Sstevel@tonic-gate }
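
/*
 * Illustrative sketch, not part of the original source: a minimal caller
 * that relocates one SE_EXCL-locked PAGESIZE page, lets page_relocate()
 * choose the replacement and free the target, and then drops the
 * replacement lock it is left holding.
 *
 *	spgcnt_t nreloc;
 *	page_t *repl = NULL;
 *
 *	if (page_relocate(&pp, &repl, 0, 1, &nreloc, NULL) == 0) {
 *		ASSERT(nreloc == 1);
 *		page_unlock(repl);	... or keep using the new page ...
 *	}
 */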
50457c478bd9Sstevel@tonic-gate
50467c478bd9Sstevel@tonic-gate /*
50477c478bd9Sstevel@tonic-gate * it is up to the caller to deal with pcf accounting.
50487c478bd9Sstevel@tonic-gate */
50497c478bd9Sstevel@tonic-gate void
50507c478bd9Sstevel@tonic-gate page_free_replacement_page(page_t *pplist)
50517c478bd9Sstevel@tonic-gate {
50527c478bd9Sstevel@tonic-gate page_t *pp;
50537c478bd9Sstevel@tonic-gate
50547c478bd9Sstevel@tonic-gate while (pplist != NULL) {
50557c478bd9Sstevel@tonic-gate /*
50567c478bd9Sstevel@tonic-gate 		 * pplist is a linked list of replacement pages.
50577c478bd9Sstevel@tonic-gate */
50587c478bd9Sstevel@tonic-gate pp = pplist;
50597c478bd9Sstevel@tonic-gate if (pp->p_szc == 0) {
50607c478bd9Sstevel@tonic-gate page_sub(&pplist, pp);
50619d0d62adSJason Beloro page_clr_all_props(pp);
50627c478bd9Sstevel@tonic-gate PP_SETFREE(pp);
50637c478bd9Sstevel@tonic-gate PP_SETAGED(pp);
50647c478bd9Sstevel@tonic-gate page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
50657c478bd9Sstevel@tonic-gate page_unlock(pp);
50667c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_replacement_page[0]);
50677c478bd9Sstevel@tonic-gate } else {
50687c478bd9Sstevel@tonic-gate spgcnt_t curnpgs = page_get_pagecnt(pp->p_szc);
50697c478bd9Sstevel@tonic-gate page_t *tpp;
50707c478bd9Sstevel@tonic-gate page_list_break(&pp, &pplist, curnpgs);
50717c478bd9Sstevel@tonic-gate tpp = pp;
50727c478bd9Sstevel@tonic-gate do {
50737c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp));
50747c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp));
50759d0d62adSJason Beloro page_clr_all_props(tpp);
50767c478bd9Sstevel@tonic-gate PP_SETFREE(tpp);
50777c478bd9Sstevel@tonic-gate PP_SETAGED(tpp);
50787c478bd9Sstevel@tonic-gate } while ((tpp = tpp->p_next) != pp);
50797c478bd9Sstevel@tonic-gate page_list_add_pages(pp, 0);
50807c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_free_replacement_page[1]);
50817c478bd9Sstevel@tonic-gate }
50827c478bd9Sstevel@tonic-gate }
50837c478bd9Sstevel@tonic-gate }
50847c478bd9Sstevel@tonic-gate
50857c478bd9Sstevel@tonic-gate /*
50867c478bd9Sstevel@tonic-gate * Relocate target to non-relocatable replacement page.
50877c478bd9Sstevel@tonic-gate */
50887c478bd9Sstevel@tonic-gate int
50897c478bd9Sstevel@tonic-gate page_relocate_cage(page_t **target, page_t **replacement)
50907c478bd9Sstevel@tonic-gate {
50917c478bd9Sstevel@tonic-gate page_t *tpp, *rpp;
50927c478bd9Sstevel@tonic-gate spgcnt_t pgcnt, npgs;
50937c478bd9Sstevel@tonic-gate int result;
50947c478bd9Sstevel@tonic-gate
50957c478bd9Sstevel@tonic-gate tpp = *target;
50967c478bd9Sstevel@tonic-gate
50977c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp));
50987c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == 0);
50997c478bd9Sstevel@tonic-gate
51007c478bd9Sstevel@tonic-gate pgcnt = btop(page_get_pagesize(tpp->p_szc));
51017c478bd9Sstevel@tonic-gate
51027c478bd9Sstevel@tonic-gate do {
51037c478bd9Sstevel@tonic-gate (void) page_create_wait(pgcnt, PG_WAIT | PG_NORELOC);
51047c478bd9Sstevel@tonic-gate rpp = page_get_replacement_page(tpp, NULL, PGR_NORELOC);
51057c478bd9Sstevel@tonic-gate if (rpp == NULL) {
51067c478bd9Sstevel@tonic-gate page_create_putback(pgcnt);
51077c478bd9Sstevel@tonic-gate kcage_cageout_wakeup();
51087c478bd9Sstevel@tonic-gate }
51097c478bd9Sstevel@tonic-gate } while (rpp == NULL);
51107c478bd9Sstevel@tonic-gate
51117c478bd9Sstevel@tonic-gate ASSERT(PP_ISNORELOC(rpp));
51127c478bd9Sstevel@tonic-gate
51137c478bd9Sstevel@tonic-gate result = page_relocate(&tpp, &rpp, 0, 1, &npgs, NULL);
51147c478bd9Sstevel@tonic-gate
51157c478bd9Sstevel@tonic-gate if (result == 0) {
51167c478bd9Sstevel@tonic-gate *replacement = rpp;
51177c478bd9Sstevel@tonic-gate if (pgcnt != npgs)
51187c478bd9Sstevel@tonic-gate panic("page_relocate_cage: partial relocation");
51197c478bd9Sstevel@tonic-gate }
51207c478bd9Sstevel@tonic-gate
51217c478bd9Sstevel@tonic-gate return (result);
51227c478bd9Sstevel@tonic-gate }
51237c478bd9Sstevel@tonic-gate
51247c478bd9Sstevel@tonic-gate /*
51257c478bd9Sstevel@tonic-gate * Release the page lock on a page, place on cachelist
51267c478bd9Sstevel@tonic-gate * tail if no longer mapped. Caller can let us know if
51277c478bd9Sstevel@tonic-gate * the page is known to be clean.
51287c478bd9Sstevel@tonic-gate */
51297c478bd9Sstevel@tonic-gate int
51307c478bd9Sstevel@tonic-gate page_release(page_t *pp, int checkmod)
51317c478bd9Sstevel@tonic-gate {
51327c478bd9Sstevel@tonic-gate int status;
51337c478bd9Sstevel@tonic-gate
51347c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(pp) && !PP_ISFREE(pp) &&
51356e4dd838Smec (pp->p_vnode != NULL));
51367c478bd9Sstevel@tonic-gate
51377c478bd9Sstevel@tonic-gate if (!hat_page_is_mapped(pp) && !IS_SWAPVP(pp->p_vnode) &&
51387c478bd9Sstevel@tonic-gate ((PAGE_SHARED(pp) && page_tryupgrade(pp)) || PAGE_EXCL(pp)) &&
51397c478bd9Sstevel@tonic-gate pp->p_lckcnt == 0 && pp->p_cowcnt == 0 &&
51407c478bd9Sstevel@tonic-gate !hat_page_is_mapped(pp)) {
51417c478bd9Sstevel@tonic-gate
51427c478bd9Sstevel@tonic-gate /*
51437c478bd9Sstevel@tonic-gate * If page is modified, unlock it
51447c478bd9Sstevel@tonic-gate *
51457c478bd9Sstevel@tonic-gate * (p_nrm & P_MOD) bit has the latest stuff because:
51467c478bd9Sstevel@tonic-gate * (1) We found that this page doesn't have any mappings
51477c478bd9Sstevel@tonic-gate * _after_ holding SE_EXCL and
51487c478bd9Sstevel@tonic-gate * (2) We didn't drop SE_EXCL lock after the check in (1)
51497c478bd9Sstevel@tonic-gate */
51507c478bd9Sstevel@tonic-gate if (checkmod && hat_ismod(pp)) {
51517c478bd9Sstevel@tonic-gate page_unlock(pp);
51527c478bd9Sstevel@tonic-gate status = PGREL_MOD;
51537c478bd9Sstevel@tonic-gate } else {
51547c478bd9Sstevel@tonic-gate /*LINTED: constant in conditional context*/
51557c478bd9Sstevel@tonic-gate VN_DISPOSE(pp, B_FREE, 0, kcred);
51567c478bd9Sstevel@tonic-gate status = PGREL_CLEAN;
51577c478bd9Sstevel@tonic-gate }
51587c478bd9Sstevel@tonic-gate } else {
51597c478bd9Sstevel@tonic-gate page_unlock(pp);
51607c478bd9Sstevel@tonic-gate status = PGREL_NOTREL;
51617c478bd9Sstevel@tonic-gate }
51627c478bd9Sstevel@tonic-gate return (status);
51637c478bd9Sstevel@tonic-gate }
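
/*
 * Illustrative sketch, not part of the original source: a caller that is
 * finished with a locked page can let page_release() handle the clean case
 * and only act further when the page is still dirty.  Capturing the vnode
 * and offset (and holding the vnode) before the lock is dropped is part of
 * this hypothetical example.
 *
 *	u_offset_t off = pp->p_offset;
 *	vnode_t *vp = pp->p_vnode;
 *
 *	VN_HOLD(vp);
 *	if (page_release(pp, 1) == PGREL_MOD) {
 *		... still dirty; the lock was dropped, so push it by vp/off ...
 *		(void) VOP_PUTPAGE(vp, off, PAGESIZE, B_ASYNC, kcred, NULL);
 *	}
 *	VN_RELE(vp);
 */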
51647c478bd9Sstevel@tonic-gate
5165db874c57Selowe /*
5166db874c57Selowe * Given a constituent page, try to demote the large page on the freelist.
5167db874c57Selowe *
5168db874c57Selowe * Returns nonzero if the page could be demoted successfully. Returns with
5169db874c57Selowe * the constituent page still locked.
5170db874c57Selowe */
5171db874c57Selowe int
5172db874c57Selowe page_try_demote_free_pages(page_t *pp)
5173db874c57Selowe {
5174db874c57Selowe page_t *rootpp = pp;
5175db874c57Selowe pfn_t pfn = page_pptonum(pp);
5176db874c57Selowe spgcnt_t npgs;
5177db874c57Selowe uint_t szc = pp->p_szc;
5178db874c57Selowe
5179db874c57Selowe ASSERT(PP_ISFREE(pp));
5180db874c57Selowe ASSERT(PAGE_EXCL(pp));
5181db874c57Selowe
5182db874c57Selowe /*
5183db874c57Selowe * Adjust rootpp and lock it, if `pp' is not the base
5184db874c57Selowe * constituent page.
5185db874c57Selowe */
5186db874c57Selowe npgs = page_get_pagecnt(pp->p_szc);
5187db874c57Selowe if (npgs == 1) {
5188db874c57Selowe return (0);
5189db874c57Selowe }
5190db874c57Selowe
5191db874c57Selowe if (!IS_P2ALIGNED(pfn, npgs)) {
5192db874c57Selowe pfn = P2ALIGN(pfn, npgs);
5193db874c57Selowe rootpp = page_numtopp_nolock(pfn);
5194db874c57Selowe }
5195db874c57Selowe
5196db874c57Selowe if (pp != rootpp && !page_trylock(rootpp, SE_EXCL)) {
5197db874c57Selowe return (0);
5198db874c57Selowe }
5199db874c57Selowe
5200db874c57Selowe if (rootpp->p_szc != szc) {
5201db874c57Selowe if (pp != rootpp)
5202db874c57Selowe page_unlock(rootpp);
5203db874c57Selowe return (0);
5204db874c57Selowe }
5205db874c57Selowe
5206db874c57Selowe page_demote_free_pages(rootpp);
5207db874c57Selowe
5208db874c57Selowe if (pp != rootpp)
5209db874c57Selowe page_unlock(rootpp);
5210db874c57Selowe
5211db874c57Selowe ASSERT(PP_ISFREE(pp));
5212db874c57Selowe ASSERT(PAGE_EXCL(pp));
5213db874c57Selowe return (1);
5214db874c57Selowe }
5215db874c57Selowe
5216db874c57Selowe /*
5217db874c57Selowe * Given a constituent page, try to demote the large page.
5218db874c57Selowe *
5219db874c57Selowe * Returns nonzero if the page could be demoted successfully. Returns with
5220db874c57Selowe * the constituent page still locked.
5221db874c57Selowe */
52227c478bd9Sstevel@tonic-gate int
52237c478bd9Sstevel@tonic-gate page_try_demote_pages(page_t *pp)
52247c478bd9Sstevel@tonic-gate {
52257c478bd9Sstevel@tonic-gate page_t *tpp, *rootpp = pp;
52267c478bd9Sstevel@tonic-gate pfn_t pfn = page_pptonum(pp);
52277c478bd9Sstevel@tonic-gate spgcnt_t i, npgs;
5228f045d8d6SAmritpal Sandhu uint_t szc = pp->p_szc;
5229d94ffb28Sjmcp vnode_t *vp = pp->p_vnode;
52307c478bd9Sstevel@tonic-gate
5231db874c57Selowe ASSERT(PAGE_EXCL(pp));
52327c478bd9Sstevel@tonic-gate
52337c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);
52347c478bd9Sstevel@tonic-gate
5235db874c57Selowe if (pp->p_szc == 0) {
52367c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
52377c478bd9Sstevel@tonic-gate return (1);
52387c478bd9Sstevel@tonic-gate }
52397c478bd9Sstevel@tonic-gate
5240ad23a2dbSjohansen if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) {
52417c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
5242db874c57Selowe page_demote_vp_pages(pp);
52437c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
52447c478bd9Sstevel@tonic-gate return (1);
52457c478bd9Sstevel@tonic-gate }
52467c478bd9Sstevel@tonic-gate
52477c478bd9Sstevel@tonic-gate /*
5248db874c57Selowe 	 * Adjust rootpp if the page passed in is not the base
52497c478bd9Sstevel@tonic-gate * constituent page.
52507c478bd9Sstevel@tonic-gate */
5251db874c57Selowe npgs = page_get_pagecnt(pp->p_szc);
52527c478bd9Sstevel@tonic-gate ASSERT(npgs > 1);
52537c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, npgs)) {
52547c478bd9Sstevel@tonic-gate pfn = P2ALIGN(pfn, npgs);
52557c478bd9Sstevel@tonic-gate rootpp = page_numtopp_nolock(pfn);
52567c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
52577c478bd9Sstevel@tonic-gate ASSERT(rootpp->p_vnode != NULL);
52587c478bd9Sstevel@tonic-gate ASSERT(rootpp->p_szc == szc);
52597c478bd9Sstevel@tonic-gate }
52607c478bd9Sstevel@tonic-gate
52617c478bd9Sstevel@tonic-gate /*
52627c478bd9Sstevel@tonic-gate * We can't demote kernel pages since we can't hat_unload()
52637c478bd9Sstevel@tonic-gate * the mappings.
52647c478bd9Sstevel@tonic-gate */
5265ad23a2dbSjohansen if (VN_ISKAS(rootpp->p_vnode))
52667c478bd9Sstevel@tonic-gate return (0);
52677c478bd9Sstevel@tonic-gate
52687c478bd9Sstevel@tonic-gate /*
52697c478bd9Sstevel@tonic-gate * Attempt to lock all constituent pages except the page passed
52707c478bd9Sstevel@tonic-gate * in since it's already locked.
52717c478bd9Sstevel@tonic-gate */
5272affbd3ccSkchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52737c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(tpp));
52747c478bd9Sstevel@tonic-gate ASSERT(tpp->p_vnode != NULL);
52757c478bd9Sstevel@tonic-gate
52767c478bd9Sstevel@tonic-gate if (tpp != pp && !page_trylock(tpp, SE_EXCL))
52777c478bd9Sstevel@tonic-gate break;
52787c478bd9Sstevel@tonic-gate ASSERT(tpp->p_szc == rootpp->p_szc);
52797c478bd9Sstevel@tonic-gate ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
52807c478bd9Sstevel@tonic-gate }
52817c478bd9Sstevel@tonic-gate
52827c478bd9Sstevel@tonic-gate /*
5283db874c57Selowe * If we failed to lock them all then unlock what we have
5284db874c57Selowe * locked so far and bail.
52857c478bd9Sstevel@tonic-gate */
52867c478bd9Sstevel@tonic-gate if (i < npgs) {
52877c478bd9Sstevel@tonic-gate tpp = rootpp;
52887c478bd9Sstevel@tonic-gate while (i-- > 0) {
52897c478bd9Sstevel@tonic-gate if (tpp != pp)
52907c478bd9Sstevel@tonic-gate page_unlock(tpp);
5291affbd3ccSkchow tpp++;
52927c478bd9Sstevel@tonic-gate }
52937c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
52947c478bd9Sstevel@tonic-gate return (0);
52957c478bd9Sstevel@tonic-gate }
52967c478bd9Sstevel@tonic-gate
5297affbd3ccSkchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
52987c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(tpp));
529907b65a64Saguzovsk ASSERT(tpp->p_slckcnt == 0);
5300db874c57Selowe (void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
53017c478bd9Sstevel@tonic-gate tpp->p_szc = 0;
53027c478bd9Sstevel@tonic-gate }
53037c478bd9Sstevel@tonic-gate
53047c478bd9Sstevel@tonic-gate /*
53057c478bd9Sstevel@tonic-gate * Unlock all pages except the page passed in.
53067c478bd9Sstevel@tonic-gate */
5307affbd3ccSkchow for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
53087c478bd9Sstevel@tonic-gate ASSERT(!hat_page_is_mapped(tpp));
53097c478bd9Sstevel@tonic-gate if (tpp != pp)
53107c478bd9Sstevel@tonic-gate page_unlock(tpp);
53117c478bd9Sstevel@tonic-gate }
5312db874c57Selowe
53137c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
53147c478bd9Sstevel@tonic-gate return (1);
53157c478bd9Sstevel@tonic-gate }
53167c478bd9Sstevel@tonic-gate
53177c478bd9Sstevel@tonic-gate /*
53187c478bd9Sstevel@tonic-gate * Called by page_free() and page_destroy() to demote the page size code
53197c478bd9Sstevel@tonic-gate  * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non-zero
53207c478bd9Sstevel@tonic-gate  * p_szc on the free list, nor can we just clear p_szc of a single page_t
53217c478bd9Sstevel@tonic-gate * within a large page since it will break other code that relies on p_szc
53227c478bd9Sstevel@tonic-gate * being the same for all page_t's of a large page). Anonymous pages should
53237c478bd9Sstevel@tonic-gate * never end up here because anon_map_getpages() cannot deal with p_szc
53247c478bd9Sstevel@tonic-gate * changes after a single constituent page is locked. While anonymous or
53257c478bd9Sstevel@tonic-gate  * changes after a single constituent page is locked. While anonymous and
53267c478bd9Sstevel@tonic-gate  * kernel large pages are demoted or freed an entire large page at a time,
53277c478bd9Sstevel@tonic-gate  * with all constituent pages locked EXCL, for file system pages we
53287c478bd9Sstevel@tonic-gate  * have to be able to demote a large page (i.e. decrease all constituent pages'
53297c478bd9Sstevel@tonic-gate  * p_szc) with only an EXCL lock on one of the constituent pages. The reason
53307c478bd9Sstevel@tonic-gate * time is that those operation originate at address space level and concern
53317c478bd9Sstevel@tonic-gate * the entire large page region with actual demotion only done when pages are
53327c478bd9Sstevel@tonic-gate * not shared with any other processes (therefore we can always get EXCL lock
53337c478bd9Sstevel@tonic-gate * on all anonymous constituent pages after clearing segment page
53347c478bd9Sstevel@tonic-gate * cache). However file system pages can be truncated or invalidated at a
53357c478bd9Sstevel@tonic-gate * PAGESIZE level from the file system side and end up in page_free() or
53367c478bd9Sstevel@tonic-gate * page_destroy() (we also allow only part of the large page to be SOFTLOCKed
5337da6c28aaSamw * and therefore pageout should be able to demote a large page by EXCL locking
53387c478bd9Sstevel@tonic-gate * any constituent page that is not under SOFTLOCK). In those cases we cannot
53397c478bd9Sstevel@tonic-gate * rely on being able to lock EXCL all constituent pages.
53407c478bd9Sstevel@tonic-gate *
53417c478bd9Sstevel@tonic-gate * To prevent szc changes on file system pages one has to lock all constituent
53427c478bd9Sstevel@tonic-gate * pages at least SHARED (or call page_szc_lock()). The only subsystem that
53437c478bd9Sstevel@tonic-gate * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
53447c478bd9Sstevel@tonic-gate * prevent szc changes is the hat layer, which uses its own page level mlist
53457c478bd9Sstevel@tonic-gate * locks. hat assumes that szc doesn't change after the mlist lock for a page
53467c478bd9Sstevel@tonic-gate * is taken. Therefore we need to change szc under hat level locks if we only
53477c478bd9Sstevel@tonic-gate * have an EXCL lock on a single constituent page and hat still references any
53487c478bd9Sstevel@tonic-gate * of the constituent pages. (Note we can't "ignore" the hat layer by simply
53497c478bd9Sstevel@tonic-gate * calling hat_pageunload() on all constituent pages without having EXCL locks
53507c478bd9Sstevel@tonic-gate * on all of the constituent pages.) We use the hat_page_demote() call to
53517c478bd9Sstevel@tonic-gate * safely demote the szc of all constituent pages under hat locks when we only
53527c478bd9Sstevel@tonic-gate * have an EXCL lock on one of the constituent pages.
53537c478bd9Sstevel@tonic-gate *
53547c478bd9Sstevel@tonic-gate * This routine calls page_szc_lock() before calling hat_page_demote() to
53557c478bd9Sstevel@tonic-gate * allow segvn in one special case not to lock all constituent pages SHARED
5356da6c28aaSamw * before calling hat_memload_array(), which relies on p_szc not changing even
53577c478bd9Sstevel@tonic-gate * before the hat level mlist lock is taken. In that case segvn uses
5358da6c28aaSamw * page_szc_lock() to prevent hat_page_demote() from changing p_szc values.
53597c478bd9Sstevel@tonic-gate *
53607c478bd9Sstevel@tonic-gate * Anonymous or kernel page demotion still has to lock all pages exclusively
53617c478bd9Sstevel@tonic-gate * and do hat_pageunload() on all constituent pages before demoting the page;
53627c478bd9Sstevel@tonic-gate * therefore there's no need for anonymous or kernel page demotion to use
53637c478bd9Sstevel@tonic-gate * the hat_page_demote() mechanism.
53647c478bd9Sstevel@tonic-gate *
53657c478bd9Sstevel@tonic-gate * hat_page_demote() removes all large mappings that map pp and then decreases
53667c478bd9Sstevel@tonic-gate * p_szc starting from the last constituent page of the large page. Working
53677c478bd9Sstevel@tonic-gate * from the tail of a large page in decreasing pfn order allows one looking at
53687c478bd9Sstevel@tonic-gate * the root page to know that hat_page_demote() is done for the root's szc
53697c478bd9Sstevel@tonic-gate * area. E.g. if a root page has szc 1, one knows one only has to lock all
53707c478bd9Sstevel@tonic-gate * constituent pages within the szc 1 area to prevent szc changes, because any
53717c478bd9Sstevel@tonic-gate * hat_page_demote() that started on this page when it had szc > 1 is done.
53727c478bd9Sstevel@tonic-gate *
5373da6c28aaSamw * We are guaranteed that all constituent pages of pp's large page belong to
53747c478bd9Sstevel@tonic-gate * the same vnode, with consecutive offsets increasing in the direction of
53757c478bd9Sstevel@tonic-gate * the pfn, i.e. the identity of constituent pages can't change until their
53767c478bd9Sstevel@tonic-gate * p_szc is decreased. Therefore it's safe for hat_page_demote() to remove
53777c478bd9Sstevel@tonic-gate * large mappings to pp even though we don't lock any constituent page except
53787c478bd9Sstevel@tonic-gate * pp (i.e. we won't unload e.g. a kernel-locked page).
53797c478bd9Sstevel@tonic-gate */
53807c478bd9Sstevel@tonic-gate static void
53817c478bd9Sstevel@tonic-gate page_demote_vp_pages(page_t *pp)
53827c478bd9Sstevel@tonic-gate {
53837c478bd9Sstevel@tonic-gate kmutex_t *mtx;
53847c478bd9Sstevel@tonic-gate
53857c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(pp));
53867c478bd9Sstevel@tonic-gate ASSERT(!PP_ISFREE(pp));
53877c478bd9Sstevel@tonic-gate ASSERT(pp->p_vnode != NULL);
53887c478bd9Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(pp->p_vnode));
5389ad23a2dbSjohansen ASSERT(!PP_ISKAS(pp));
53907c478bd9Sstevel@tonic-gate
53917c478bd9Sstevel@tonic-gate VM_STAT_ADD(pagecnt.pc_demote_pages[0]);
53927c478bd9Sstevel@tonic-gate
53937c478bd9Sstevel@tonic-gate mtx = page_szc_lock(pp);
53947c478bd9Sstevel@tonic-gate if (mtx != NULL) {
53957c478bd9Sstevel@tonic-gate hat_page_demote(pp);
53967c478bd9Sstevel@tonic-gate mutex_exit(mtx);
53977c478bd9Sstevel@tonic-gate }
53987c478bd9Sstevel@tonic-gate ASSERT(pp->p_szc == 0);
53997c478bd9Sstevel@tonic-gate }
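/*
 * Illustrative sketch (not part of the original source): per the block
 * comment above, a caller such as page_free() that holds only an EXCL lock
 * on one constituent file system page could demote it roughly as follows
 * before freeing it; the guards mirror the ASSERTs at the top of
 * page_demote_vp_pages().
 *
 *	if (pp->p_szc != 0 && pp->p_vnode != NULL &&
 *	    !IS_SWAPFSVP(pp->p_vnode) && !PP_ISKAS(pp))
 *		page_demote_vp_pages(pp);
 *	ASSERT(pp->p_szc == 0);
 */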
54007c478bd9Sstevel@tonic-gate
54017c478bd9Sstevel@tonic-gate /*
54027c478bd9Sstevel@tonic-gate * Mark any existing pages for migration in the given range
54037c478bd9Sstevel@tonic-gate */
54047c478bd9Sstevel@tonic-gate void
54057c478bd9Sstevel@tonic-gate page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
54067c478bd9Sstevel@tonic-gate struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
54077c478bd9Sstevel@tonic-gate u_offset_t vnoff, int rflag)
54087c478bd9Sstevel@tonic-gate {
54097c478bd9Sstevel@tonic-gate struct anon *ap;
54107c478bd9Sstevel@tonic-gate vnode_t *curvp;
54117c478bd9Sstevel@tonic-gate lgrp_t *from;
54127c478bd9Sstevel@tonic-gate pgcnt_t nlocked;
54137c478bd9Sstevel@tonic-gate u_offset_t off;
54147c478bd9Sstevel@tonic-gate pfn_t pfn;
54157c478bd9Sstevel@tonic-gate size_t pgsz;
54167c478bd9Sstevel@tonic-gate size_t segpgsz;
54177c478bd9Sstevel@tonic-gate pgcnt_t pages;
54187c478bd9Sstevel@tonic-gate uint_t pszc;
54195c16be9bSDonghai Qiao page_t *pp0, *pp;
54207c478bd9Sstevel@tonic-gate caddr_t va;
54217c478bd9Sstevel@tonic-gate ulong_t an_idx;
54227c478bd9Sstevel@tonic-gate anon_sync_obj_t cookie;
54237c478bd9Sstevel@tonic-gate
5424dc32d872SJosef 'Jeff' Sipek ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
54257c478bd9Sstevel@tonic-gate
54267c478bd9Sstevel@tonic-gate /*
54277c478bd9Sstevel@tonic-gate * Don't do anything if we don't need to do lgroup optimizations
54287c478bd9Sstevel@tonic-gate * on this system
54297c478bd9Sstevel@tonic-gate */
54307c478bd9Sstevel@tonic-gate if (!lgrp_optimizations())
54317c478bd9Sstevel@tonic-gate return;
54327c478bd9Sstevel@tonic-gate
54337c478bd9Sstevel@tonic-gate /*
54347c478bd9Sstevel@tonic-gate * Align address and length to (potentially large) page boundary
54357c478bd9Sstevel@tonic-gate */
54367c478bd9Sstevel@tonic-gate segpgsz = page_get_pagesize(seg->s_szc);
54377c478bd9Sstevel@tonic-gate addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
54387c478bd9Sstevel@tonic-gate if (rflag)
54397c478bd9Sstevel@tonic-gate len = P2ROUNDUP(len, segpgsz);
54407c478bd9Sstevel@tonic-gate
54417c478bd9Sstevel@tonic-gate /*
54427c478bd9Sstevel@tonic-gate * Do one (large) page at a time
54437c478bd9Sstevel@tonic-gate */
54447c478bd9Sstevel@tonic-gate va = addr;
54457c478bd9Sstevel@tonic-gate while (va < addr + len) {
54467c478bd9Sstevel@tonic-gate /*
54477c478bd9Sstevel@tonic-gate * Look up the (root) page for the vnode and offset corresponding to
54487c478bd9Sstevel@tonic-gate * this virtual address.
54497c478bd9Sstevel@tonic-gate * Try anonmap first since there may be copy-on-write
54507c478bd9Sstevel@tonic-gate * pages, but initialize vnode pointer and offset using
54517c478bd9Sstevel@tonic-gate * vnode arguments just in case there isn't an amp.
54527c478bd9Sstevel@tonic-gate */
54537c478bd9Sstevel@tonic-gate curvp = vp;
54547c478bd9Sstevel@tonic-gate off = vnoff + va - seg->s_base;
54557c478bd9Sstevel@tonic-gate if (amp) {
54567c478bd9Sstevel@tonic-gate ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
54577c478bd9Sstevel@tonic-gate an_idx = anon_index + seg_page(seg, va);
54587c478bd9Sstevel@tonic-gate anon_array_enter(amp, an_idx, &cookie);
54597c478bd9Sstevel@tonic-gate ap = anon_get_ptr(amp->ahp, an_idx);
54607c478bd9Sstevel@tonic-gate if (ap)
54617c478bd9Sstevel@tonic-gate swap_xlate(ap, &curvp, &off);
54627c478bd9Sstevel@tonic-gate anon_array_exit(&cookie);
54637c478bd9Sstevel@tonic-gate ANON_LOCK_EXIT(&->a_rwlock);
54647c478bd9Sstevel@tonic-gate }
54657c478bd9Sstevel@tonic-gate
54667c478bd9Sstevel@tonic-gate pp = NULL;
54677c478bd9Sstevel@tonic-gate if (curvp)
54687c478bd9Sstevel@tonic-gate pp = page_lookup(curvp, off, SE_SHARED);
54697c478bd9Sstevel@tonic-gate
54707c478bd9Sstevel@tonic-gate /*
54717c478bd9Sstevel@tonic-gate * If there isn't a page at this virtual address,
54727c478bd9Sstevel@tonic-gate * skip to next page
54737c478bd9Sstevel@tonic-gate */
54747c478bd9Sstevel@tonic-gate if (pp == NULL) {
54757c478bd9Sstevel@tonic-gate va += PAGESIZE;
54767c478bd9Sstevel@tonic-gate continue;
54777c478bd9Sstevel@tonic-gate }
54787c478bd9Sstevel@tonic-gate
54797c478bd9Sstevel@tonic-gate /*
54807c478bd9Sstevel@tonic-gate * Figure out which lgroup this page is in for kstats
54817c478bd9Sstevel@tonic-gate */
54827c478bd9Sstevel@tonic-gate pfn = page_pptonum(pp);
54837c478bd9Sstevel@tonic-gate from = lgrp_pfn_to_lgrp(pfn);
54847c478bd9Sstevel@tonic-gate
54857c478bd9Sstevel@tonic-gate /*
54867c478bd9Sstevel@tonic-gate * Get the page size; round up and skip to the next page boundary
54877c478bd9Sstevel@tonic-gate * if the address is unaligned
54887c478bd9Sstevel@tonic-gate */
54897c478bd9Sstevel@tonic-gate pszc = pp->p_szc;
54907c478bd9Sstevel@tonic-gate pgsz = page_get_pagesize(pszc);
54917c478bd9Sstevel@tonic-gate pages = btop(pgsz);
54927c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(va, pgsz) ||
54937c478bd9Sstevel@tonic-gate !IS_P2ALIGNED(pfn, pages) ||
54947c478bd9Sstevel@tonic-gate pgsz > segpgsz) {
54957c478bd9Sstevel@tonic-gate pgsz = MIN(pgsz, segpgsz);
54967c478bd9Sstevel@tonic-gate page_unlock(pp);
54975c16be9bSDonghai Qiao pages = btop(P2END((uintptr_t)va, pgsz) -
54987c478bd9Sstevel@tonic-gate (uintptr_t)va);
54997c478bd9Sstevel@tonic-gate va = (caddr_t)P2END((uintptr_t)va, pgsz);
55005c16be9bSDonghai Qiao lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages);
55017c478bd9Sstevel@tonic-gate continue;
55027c478bd9Sstevel@tonic-gate }
55037c478bd9Sstevel@tonic-gate
55047c478bd9Sstevel@tonic-gate /*
55057c478bd9Sstevel@tonic-gate * Upgrade to exclusive lock on page
55067c478bd9Sstevel@tonic-gate */
55077c478bd9Sstevel@tonic-gate if (!page_tryupgrade(pp)) {
55087c478bd9Sstevel@tonic-gate page_unlock(pp);
55097c478bd9Sstevel@tonic-gate va += pgsz;
55107c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
55117c478bd9Sstevel@tonic-gate btop(pgsz));
55127c478bd9Sstevel@tonic-gate continue;
55137c478bd9Sstevel@tonic-gate }
55147c478bd9Sstevel@tonic-gate
55155c16be9bSDonghai Qiao pp0 = pp++;
55167c478bd9Sstevel@tonic-gate nlocked = 1;
55177c478bd9Sstevel@tonic-gate
55187c478bd9Sstevel@tonic-gate /*
55197c478bd9Sstevel@tonic-gate * Lock constituent pages if this is a large page
55207c478bd9Sstevel@tonic-gate */
55217c478bd9Sstevel@tonic-gate if (pages > 1) {
55227c478bd9Sstevel@tonic-gate /*
55237c478bd9Sstevel@tonic-gate * Lock all constituents except root page, since it
55247c478bd9Sstevel@tonic-gate * should be locked already.
55257c478bd9Sstevel@tonic-gate */
55265c16be9bSDonghai Qiao for (; nlocked < pages; nlocked++) {
55277c478bd9Sstevel@tonic-gate if (!page_trylock(pp, SE_EXCL)) {
55287c478bd9Sstevel@tonic-gate break;
55297c478bd9Sstevel@tonic-gate }
55307c478bd9Sstevel@tonic-gate if (PP_ISFREE(pp) ||
55317c478bd9Sstevel@tonic-gate pp->p_szc != pszc) {
55327c478bd9Sstevel@tonic-gate /*
55337c478bd9Sstevel@tonic-gate * hat_page_demote() raced in with us.
55347c478bd9Sstevel@tonic-gate */
55357c478bd9Sstevel@tonic-gate ASSERT(!IS_SWAPFSVP(curvp));
55367c478bd9Sstevel@tonic-gate page_unlock(pp);
55377c478bd9Sstevel@tonic-gate break;
55387c478bd9Sstevel@tonic-gate }
55395c16be9bSDonghai Qiao pp++;
55407c478bd9Sstevel@tonic-gate }
55417c478bd9Sstevel@tonic-gate }
55427c478bd9Sstevel@tonic-gate
55437c478bd9Sstevel@tonic-gate /*
55447c478bd9Sstevel@tonic-gate * If all constituent pages couldn't be locked,
55457c478bd9Sstevel@tonic-gate * unlock pages locked so far and skip to next page.
55467c478bd9Sstevel@tonic-gate */
55475c16be9bSDonghai Qiao if (nlocked < pages) {
55485c16be9bSDonghai Qiao while (pp0 < pp) {
55495c16be9bSDonghai Qiao page_unlock(pp0++);
55505c16be9bSDonghai Qiao }
55517c478bd9Sstevel@tonic-gate va += pgsz;
55527c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
55537c478bd9Sstevel@tonic-gate btop(pgsz));
55547c478bd9Sstevel@tonic-gate continue;
55557c478bd9Sstevel@tonic-gate }
55567c478bd9Sstevel@tonic-gate
55577c478bd9Sstevel@tonic-gate /*
55587c478bd9Sstevel@tonic-gate * hat_page_demote() can no longer happen
55597c478bd9Sstevel@tonic-gate * since the last cons page had the right p_szc after
55607c478bd9Sstevel@tonic-gate * all cons pages were locked. All cons pages
55617c478bd9Sstevel@tonic-gate * should now have the same p_szc.
55627c478bd9Sstevel@tonic-gate */
55637c478bd9Sstevel@tonic-gate
55647c478bd9Sstevel@tonic-gate /*
55657c478bd9Sstevel@tonic-gate * All constituent pages locked successfully, so mark
55667c478bd9Sstevel@tonic-gate * large page for migration and unload the mappings of
55677c478bd9Sstevel@tonic-gate * constituent pages, so a fault will occur on any part of the
55687c478bd9Sstevel@tonic-gate * large page
55697c478bd9Sstevel@tonic-gate */
55705c16be9bSDonghai Qiao PP_SETMIGRATE(pp0);
55715c16be9bSDonghai Qiao while (pp0 < pp) {
55725c16be9bSDonghai Qiao (void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD);
55735c16be9bSDonghai Qiao ASSERT(hat_page_getshare(pp0) == 0);
55745c16be9bSDonghai Qiao page_unlock(pp0++);
55757c478bd9Sstevel@tonic-gate }
55767c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);
55777c478bd9Sstevel@tonic-gate
55787c478bd9Sstevel@tonic-gate va += pgsz;
55797c478bd9Sstevel@tonic-gate }
55807c478bd9Sstevel@tonic-gate }
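/*
 * Illustrative sketch (not part of the original source): a segment driver
 * with an lgroup migration policy would typically mark a range first and
 * migrate later, once it holds the pages it faulted in; "svd" stands in for
 * segvn-style per-segment private data and is an assumption of this sketch.
 *
 *	page_mark_migrate(seg, addr, len, amp, svd->anon_index,
 *	    vp, svd->offset, 1);
 *	...fault in the range and collect SHARED-locked pages into ppa[]...
 *	page_migrate(seg, addr, ppa, npages);
 */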
55817c478bd9Sstevel@tonic-gate
55827c478bd9Sstevel@tonic-gate /*
55837c478bd9Sstevel@tonic-gate * Migrate any pages that have been marked for migration in the given range
55847c478bd9Sstevel@tonic-gate */
55857c478bd9Sstevel@tonic-gate void
55867c478bd9Sstevel@tonic-gate page_migrate(
55877c478bd9Sstevel@tonic-gate struct seg *seg,
55887c478bd9Sstevel@tonic-gate caddr_t addr,
55897c478bd9Sstevel@tonic-gate page_t **ppa,
55907c478bd9Sstevel@tonic-gate pgcnt_t npages)
55917c478bd9Sstevel@tonic-gate {
55927c478bd9Sstevel@tonic-gate lgrp_t *from;
55937c478bd9Sstevel@tonic-gate lgrp_t *to;
55947c478bd9Sstevel@tonic-gate page_t *newpp;
55957c478bd9Sstevel@tonic-gate page_t *pp;
55967c478bd9Sstevel@tonic-gate pfn_t pfn;
55977c478bd9Sstevel@tonic-gate size_t pgsz;
55987c478bd9Sstevel@tonic-gate spgcnt_t page_cnt;
55997c478bd9Sstevel@tonic-gate spgcnt_t i;
56007c478bd9Sstevel@tonic-gate uint_t pszc;
56017c478bd9Sstevel@tonic-gate
5602dc32d872SJosef 'Jeff' Sipek ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
56037c478bd9Sstevel@tonic-gate
56047c478bd9Sstevel@tonic-gate while (npages > 0) {
56057c478bd9Sstevel@tonic-gate pp = *ppa;
56067c478bd9Sstevel@tonic-gate pszc = pp->p_szc;
56077c478bd9Sstevel@tonic-gate pgsz = page_get_pagesize(pszc);
56087c478bd9Sstevel@tonic-gate page_cnt = btop(pgsz);
56097c478bd9Sstevel@tonic-gate
56107c478bd9Sstevel@tonic-gate /*
56117c478bd9Sstevel@tonic-gate * Check to see whether this page is marked for migration
56127c478bd9Sstevel@tonic-gate *
56137c478bd9Sstevel@tonic-gate * Assume that root page of large page is marked for
56147c478bd9Sstevel@tonic-gate * migration and none of the other constituent pages
56157c478bd9Sstevel@tonic-gate * are marked. This really simplifies clearing the
56167c478bd9Sstevel@tonic-gate * migrate bit by not having to clear it from each
56177c478bd9Sstevel@tonic-gate * constituent page.
56187c478bd9Sstevel@tonic-gate *
56197c478bd9Sstevel@tonic-gate * note we don't want to relocate an entire large page if
56207c478bd9Sstevel@tonic-gate * someone is only using one subpage.
56217c478bd9Sstevel@tonic-gate */
56227c478bd9Sstevel@tonic-gate if (npages < page_cnt)
56237c478bd9Sstevel@tonic-gate break;
56247c478bd9Sstevel@tonic-gate
56257c478bd9Sstevel@tonic-gate /*
56267c478bd9Sstevel@tonic-gate * Is it marked for migration?
56277c478bd9Sstevel@tonic-gate */
56287c478bd9Sstevel@tonic-gate if (!PP_ISMIGRATE(pp))
56297c478bd9Sstevel@tonic-gate goto next;
56307c478bd9Sstevel@tonic-gate
56317c478bd9Sstevel@tonic-gate /*
56327c478bd9Sstevel@tonic-gate * Determine lgroups that page is being migrated between
56337c478bd9Sstevel@tonic-gate */
56347c478bd9Sstevel@tonic-gate pfn = page_pptonum(pp);
56357c478bd9Sstevel@tonic-gate if (!IS_P2ALIGNED(pfn, page_cnt)) {
56367c478bd9Sstevel@tonic-gate break;
56377c478bd9Sstevel@tonic-gate }
56387c478bd9Sstevel@tonic-gate from = lgrp_pfn_to_lgrp(pfn);
56397c478bd9Sstevel@tonic-gate to = lgrp_mem_choose(seg, addr, pgsz);
56407c478bd9Sstevel@tonic-gate
56417c478bd9Sstevel@tonic-gate /*
56427c478bd9Sstevel@tonic-gate * Need to get exclusive locks to migrate
56437c478bd9Sstevel@tonic-gate */
56447c478bd9Sstevel@tonic-gate for (i = 0; i < page_cnt; i++) {
56457c478bd9Sstevel@tonic-gate ASSERT(PAGE_LOCKED(ppa[i]));
56467c478bd9Sstevel@tonic-gate if (page_pptonum(ppa[i]) != pfn + i ||
56477c478bd9Sstevel@tonic-gate ppa[i]->p_szc != pszc) {
56487c478bd9Sstevel@tonic-gate break;
56497c478bd9Sstevel@tonic-gate }
56507c478bd9Sstevel@tonic-gate if (!page_tryupgrade(ppa[i])) {
56517c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id,
56527c478bd9Sstevel@tonic-gate LGRP_PM_FAIL_LOCK_PGS,
56537c478bd9Sstevel@tonic-gate page_cnt);
56547c478bd9Sstevel@tonic-gate break;
56557c478bd9Sstevel@tonic-gate }
56566bc16138Sjj
56576bc16138Sjj /*
56586bc16138Sjj * Check to see whether we are trying to migrate
56596bc16138Sjj * page to lgroup where it is allocated already.
56606bc16138Sjj * If so, clear the migrate bit and skip to next
56616bc16138Sjj * page.
56626bc16138Sjj */
56636bc16138Sjj if (i == 0 && to == from) {
56646bc16138Sjj PP_CLRMIGRATE(ppa[0]);
56656bc16138Sjj page_downgrade(ppa[0]);
56666bc16138Sjj goto next;
56676bc16138Sjj }
56687c478bd9Sstevel@tonic-gate }
56696bc16138Sjj
56706bc16138Sjj /*
56716bc16138Sjj * If all constituent pages couldn't be locked,
56726bc16138Sjj * unlock pages locked so far and skip to next page.
56736bc16138Sjj */
56747c478bd9Sstevel@tonic-gate if (i != page_cnt) {
56757c478bd9Sstevel@tonic-gate while (--i != -1) {
56767c478bd9Sstevel@tonic-gate page_downgrade(ppa[i]);
56777c478bd9Sstevel@tonic-gate }
56787c478bd9Sstevel@tonic-gate goto next;
56797c478bd9Sstevel@tonic-gate }
56807c478bd9Sstevel@tonic-gate
56817c478bd9Sstevel@tonic-gate (void) page_create_wait(page_cnt, PG_WAIT);
56827c478bd9Sstevel@tonic-gate newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
56837c478bd9Sstevel@tonic-gate if (newpp == NULL) {
56847c478bd9Sstevel@tonic-gate page_create_putback(page_cnt);
56857c478bd9Sstevel@tonic-gate for (i = 0; i < page_cnt; i++) {
56867c478bd9Sstevel@tonic-gate page_downgrade(ppa[i]);
56877c478bd9Sstevel@tonic-gate }
56887c478bd9Sstevel@tonic-gate lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
56897c478bd9Sstevel@tonic-gate page_cnt);
56907c478bd9Sstevel@tonic-gate goto next;
56917c478bd9Sstevel@tonic-gate }
56927c478bd9Sstevel@tonic-gate ASSERT(newpp->p_szc == pszc);
56937c478bd9Sstevel@tonic-gate /*
56947c478bd9Sstevel@tonic-gate * Clear migrate bit and relocate page
56957c478bd9Sstevel@tonic-gate */
56967c478bd9Sstevel@tonic-gate PP_CLRMIGRATE(pp);
56977c478bd9Sstevel@tonic-gate if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
56987c478bd9Sstevel@tonic-gate panic("page_migrate: page_relocate failed");
56997c478bd9Sstevel@tonic-gate }
57007c478bd9Sstevel@tonic-gate ASSERT(page_cnt * PAGESIZE == pgsz);
57017c478bd9Sstevel@tonic-gate
57027c478bd9Sstevel@tonic-gate /*
57037c478bd9Sstevel@tonic-gate * Keep stats for number of pages migrated from and to
57047c478bd9Sstevel@tonic-gate * each lgroup
57057c478bd9Sstevel@tonic-gate */
57067c478bd9Sstevel@tonic-gate lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
57077c478bd9Sstevel@tonic-gate lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);
57087c478bd9Sstevel@tonic-gate /*
57097c478bd9Sstevel@tonic-gate * update the page_t array we were passed in and
57107c478bd9Sstevel@tonic-gate * unlink constituent pages of a large page.
57117c478bd9Sstevel@tonic-gate */
57127c478bd9Sstevel@tonic-gate for (i = 0; i < page_cnt; ++i, ++pp) {
57137c478bd9Sstevel@tonic-gate ASSERT(PAGE_EXCL(newpp));
57147c478bd9Sstevel@tonic-gate ASSERT(newpp->p_szc == pszc);
57157c478bd9Sstevel@tonic-gate ppa[i] = newpp;
57167c478bd9Sstevel@tonic-gate pp = newpp;
57177c478bd9Sstevel@tonic-gate page_sub(&newpp, pp);
57187c478bd9Sstevel@tonic-gate page_downgrade(pp);
57197c478bd9Sstevel@tonic-gate }
57207c478bd9Sstevel@tonic-gate ASSERT(newpp == NULL);
57217c478bd9Sstevel@tonic-gate next:
57227c478bd9Sstevel@tonic-gate addr += pgsz;
57237c478bd9Sstevel@tonic-gate ppa += page_cnt;
57247c478bd9Sstevel@tonic-gate npages -= page_cnt;
57257c478bd9Sstevel@tonic-gate }
57267c478bd9Sstevel@tonic-gate }
57277c478bd9Sstevel@tonic-gate
57289424a8dfSDan Kimmel uint_t page_reclaim_maxcnt = 60; /* max total iterations */
57299424a8dfSDan Kimmel uint_t page_reclaim_nofree_maxcnt = 3; /* max iterations without progress */
57303cff2f43Sstans /*
57313cff2f43Sstans * Reclaim/reserve availrmem for npages.
57323cff2f43Sstans * If there is not enough memory, start reaping the seg and kmem caches.
57333cff2f43Sstans * Start the pageout scanner (via page_needfree()).
57343cff2f43Sstans * Exit after at most ~page_reclaim_maxcnt seconds (sooner if no progress is
57353cff2f43Sstans * being made) regardless of how much memory has been released.
57363cff2f43Sstans * Note: there is no guarantee that any availrmem will be freed, as this
57373cff2f43Sstans * memory typically is locked (kernel heap) or reserved for swap. Also, due
57383cff2f43Sstans * to memory fragmentation, the kmem allocator may not be able to free any
57393cff2f43Sstans * memory (a single user-allocated buffer can prevent freeing a slab or a page).
57403cff2f43Sstans */
57413cff2f43Sstans int
57423cff2f43Sstans page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust)
57433cff2f43Sstans {
57443cff2f43Sstans int i = 0;
57459424a8dfSDan Kimmel int i_nofree = 0;
57463cff2f43Sstans int ret = 0;
57473cff2f43Sstans pgcnt_t deficit;
57489424a8dfSDan Kimmel pgcnt_t old_availrmem = 0;
57493cff2f43Sstans
57503cff2f43Sstans mutex_enter(&freemem_lock);
57519424a8dfSDan Kimmel while (availrmem < tune.t_minarmem + npages + epages &&
57529424a8dfSDan Kimmel i++ < page_reclaim_maxcnt) {
57539424a8dfSDan Kimmel /* ensure we made some progress in the last few iterations */
57549424a8dfSDan Kimmel if (old_availrmem < availrmem) {
57559424a8dfSDan Kimmel old_availrmem = availrmem;
57569424a8dfSDan Kimmel i_nofree = 0;
57579424a8dfSDan Kimmel } else if (i_nofree++ >= page_reclaim_nofree_maxcnt) {
57589424a8dfSDan Kimmel break;
57599424a8dfSDan Kimmel }
57609424a8dfSDan Kimmel
57613cff2f43Sstans deficit = tune.t_minarmem + npages + epages - availrmem;
57623cff2f43Sstans mutex_exit(&freemem_lock);
57633cff2f43Sstans page_needfree(deficit);
57643cff2f43Sstans kmem_reap();
57653cff2f43Sstans delay(hz);
57663cff2f43Sstans page_needfree(-(spgcnt_t)deficit);
57673cff2f43Sstans mutex_enter(&freemem_lock);
57683cff2f43Sstans }
57693cff2f43Sstans
57703cff2f43Sstans if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) {
57713cff2f43Sstans availrmem -= npages;
57723cff2f43Sstans ret = 1;
57733cff2f43Sstans }
57743cff2f43Sstans
57753cff2f43Sstans mutex_exit(&freemem_lock);
57763cff2f43Sstans
57773cff2f43Sstans return (ret);
57783cff2f43Sstans }
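/*
 * Illustrative sketch (not part of the original source): a caller that must
 * reserve npages of availrmem (with a cushion of epages) before locking
 * memory down might use page_reclaim_mem() roughly as follows; on success
 * with adjust != 0 the npages have already been deducted from availrmem, so
 * the reservation is later returned by adding it back under freemem_lock.
 *
 *	if (!page_reclaim_mem(npages, epages, 1))
 *		return (ENOMEM);
 *	...
 *	mutex_enter(&freemem_lock);
 *	availrmem += npages;
 *	mutex_exit(&freemem_lock);
 */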
57797c478bd9Sstevel@tonic-gate
57807c478bd9Sstevel@tonic-gate /*
57817c478bd9Sstevel@tonic-gate * Search the memory segments to locate the desired page. Within a
57827c478bd9Sstevel@tonic-gate * segment, pages increase linearly with one page structure per
57837c478bd9Sstevel@tonic-gate * physical page frame (size PAGESIZE). The search begins
57847c478bd9Sstevel@tonic-gate * with the segment that was accessed last, to take advantage of locality.
57857c478bd9Sstevel@tonic-gate * If the hint misses, we start from the beginning of the sorted memseg list.
57867c478bd9Sstevel@tonic-gate */
57877c478bd9Sstevel@tonic-gate
57887c478bd9Sstevel@tonic-gate
57897c478bd9Sstevel@tonic-gate /*
57907c478bd9Sstevel@tonic-gate * Some data structures for pfn to pp lookup.
57917c478bd9Sstevel@tonic-gate */
57927c478bd9Sstevel@tonic-gate ulong_t mhash_per_slot;
57937c478bd9Sstevel@tonic-gate struct memseg *memseg_hash[N_MEM_SLOTS];
57947c478bd9Sstevel@tonic-gate
57957c478bd9Sstevel@tonic-gate page_t *
57967c478bd9Sstevel@tonic-gate page_numtopp_nolock(pfn_t pfnum)
57977c478bd9Sstevel@tonic-gate {
57987c478bd9Sstevel@tonic-gate struct memseg *seg;
57997c478bd9Sstevel@tonic-gate page_t *pp;
58002af6eb52SMichael Corcoran vm_cpu_data_t *vc;
58017c478bd9Sstevel@tonic-gate
58022af6eb52SMichael Corcoran /*
58032af6eb52SMichael Corcoran * We need to disable kernel preemption while referencing the
58042af6eb52SMichael Corcoran * cpu_vm_data field in order to prevent us from being switched to
58052af6eb52SMichael Corcoran * another cpu and trying to reference it after it has been freed.
58062af6eb52SMichael Corcoran * This will keep us on cpu and prevent it from being removed while
58072af6eb52SMichael Corcoran * we are still on it.
58089853d9e8SJason Beloro *
58099853d9e8SJason Beloro * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
58109853d9e8SJason Beloro * which is being reused by DR, which will flush those references
58119853d9e8SJason Beloro * before modifying the reused memseg. See memseg_cpu_vm_flush().
58122af6eb52SMichael Corcoran */
58132af6eb52SMichael Corcoran kpreempt_disable();
58142af6eb52SMichael Corcoran vc = CPU->cpu_vm_data;
5815affbd3ccSkchow ASSERT(vc != NULL);
58167c478bd9Sstevel@tonic-gate
58177c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nsearch);
58187c478bd9Sstevel@tonic-gate
58197c478bd9Sstevel@tonic-gate /* Try last winner first */
5820affbd3ccSkchow if (((seg = vc->vc_pnum_memseg) != NULL) &&
58216e4dd838Smec (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
58227c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nlastwon);
58237c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base);
58242af6eb52SMichael Corcoran if (pp->p_pagenum == pfnum) {
58252af6eb52SMichael Corcoran kpreempt_enable();
58267c478bd9Sstevel@tonic-gate return ((page_t *)pp);
58272af6eb52SMichael Corcoran }
58287c478bd9Sstevel@tonic-gate }
58297c478bd9Sstevel@tonic-gate
58307c478bd9Sstevel@tonic-gate /* Else Try hash */
58317c478bd9Sstevel@tonic-gate if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
58326e4dd838Smec (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
58337c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nhashwon);
5834affbd3ccSkchow vc->vc_pnum_memseg = seg;
58357c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base);
58362af6eb52SMichael Corcoran if (pp->p_pagenum == pfnum) {
58372af6eb52SMichael Corcoran kpreempt_enable();
58387c478bd9Sstevel@tonic-gate return ((page_t *)pp);
58392af6eb52SMichael Corcoran }
58407c478bd9Sstevel@tonic-gate }
58417c478bd9Sstevel@tonic-gate
58427c478bd9Sstevel@tonic-gate /* Else Brute force */
58437c478bd9Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) {
58447c478bd9Sstevel@tonic-gate if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
5845affbd3ccSkchow vc->vc_pnum_memseg = seg;
58467c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base);
58479853d9e8SJason Beloro if (pp->p_pagenum == pfnum) {
58489853d9e8SJason Beloro kpreempt_enable();
58499853d9e8SJason Beloro return ((page_t *)pp);
58509853d9e8SJason Beloro }
58517c478bd9Sstevel@tonic-gate }
58527c478bd9Sstevel@tonic-gate }
5853affbd3ccSkchow vc->vc_pnum_memseg = NULL;
58542af6eb52SMichael Corcoran kpreempt_enable();
58557c478bd9Sstevel@tonic-gate MEMSEG_STAT_INCR(nnotfound);
58567c478bd9Sstevel@tonic-gate return ((page_t *)NULL);
58577c478bd9Sstevel@tonic-gate
58587c478bd9Sstevel@tonic-gate }
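/*
 * Illustrative sketch (not part of the original source): the usual pfn to
 * page_t round trip, showing the invariant the lookup above verifies with
 * its p_pagenum check.
 *
 *	page_t *pp = page_numtopp_nolock(pfn);
 *	if (pp != NULL)
 *		ASSERT(page_pptonum(pp) == pfn);
 */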
58597c478bd9Sstevel@tonic-gate
58607c478bd9Sstevel@tonic-gate struct memseg *
58617c478bd9Sstevel@tonic-gate page_numtomemseg_nolock(pfn_t pfnum)
58627c478bd9Sstevel@tonic-gate {
58637c478bd9Sstevel@tonic-gate struct memseg *seg;
58647c478bd9Sstevel@tonic-gate page_t *pp;
58657c478bd9Sstevel@tonic-gate
58669853d9e8SJason Beloro /*
58679853d9e8SJason Beloro * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
58689853d9e8SJason Beloro * which is being reused by DR, which will flush those references
58699853d9e8SJason Beloro * before modifying the reused memseg. See memseg_cpu_vm_flush().
58709853d9e8SJason Beloro */
58719853d9e8SJason Beloro kpreempt_disable();
58727c478bd9Sstevel@tonic-gate /* Try hash */
58737c478bd9Sstevel@tonic-gate if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
58746e4dd838Smec (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
58757c478bd9Sstevel@tonic-gate pp = seg->pages + (pfnum - seg->pages_base);
58769853d9e8SJason Beloro if (pp->p_pagenum == pfnum) {
58779853d9e8SJason Beloro kpreempt_enable();
58787c478bd9Sstevel@tonic-gate return (seg);
58799853d9e8SJason Beloro }
58807c478bd9Sstevel@tonic-gate }
58817c478bd9Sstevel@tonic-gate
58827c478bd9Sstevel@tonic-gate /* Else Brute force */
58837c478bd9Sstevel@tonic-gate for (seg = memsegs; seg != NULL; seg = seg->next) {
58847c478bd9Sstevel@tonic-gate if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
58859853d9e8SJason Beloro pp = seg->pages + (pfnum - seg->pages_base);
58869853d9e8SJason Beloro if (pp->p_pagenum == pfnum) {
58879853d9e8SJason Beloro kpreempt_enable();
58889853d9e8SJason Beloro return (seg);
58899853d9e8SJason Beloro }
58907c478bd9Sstevel@tonic-gate }
58917c478bd9Sstevel@tonic-gate }
58929853d9e8SJason Beloro kpreempt_enable();
58937c478bd9Sstevel@tonic-gate return ((struct memseg *)NULL);
58947c478bd9Sstevel@tonic-gate }
58957c478bd9Sstevel@tonic-gate
58967c478bd9Sstevel@tonic-gate /*
58977c478bd9Sstevel@tonic-gate * Given a page and a count return the page struct that is
58987c478bd9Sstevel@tonic-gate * n structs away from the current one in the global page
58997c478bd9Sstevel@tonic-gate * list.
59007c478bd9Sstevel@tonic-gate *
59017c478bd9Sstevel@tonic-gate * This function wraps to the first page upon
59027c478bd9Sstevel@tonic-gate * reaching the end of the memseg list.
59037c478bd9Sstevel@tonic-gate */
59047c478bd9Sstevel@tonic-gate page_t *
59057c478bd9Sstevel@tonic-gate page_nextn(page_t *pp, ulong_t n)
59067c478bd9Sstevel@tonic-gate {
59077c478bd9Sstevel@tonic-gate struct memseg *seg;
59087c478bd9Sstevel@tonic-gate page_t *ppn;
59092af6eb52SMichael Corcoran vm_cpu_data_t *vc;
59102af6eb52SMichael Corcoran
59112af6eb52SMichael Corcoran /*
59122af6eb52SMichael Corcoran * We need to disable kernel preemption while referencing the
59132af6eb52SMichael Corcoran * cpu_vm_data field in order to prevent us from being switched to
59142af6eb52SMichael Corcoran * another cpu and trying to reference it after it has been freed.
59152af6eb52SMichael Corcoran * This will keep us on cpu and prevent it from being removed while
59162af6eb52SMichael Corcoran * we are still on it.
59179853d9e8SJason Beloro *
59189853d9e8SJason Beloro * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
59199853d9e8SJason Beloro * which is being reused by DR, which will flush those references
59209853d9e8SJason Beloro * before modifying the reused memseg. See memseg_cpu_vm_flush().
59212af6eb52SMichael Corcoran */
59222af6eb52SMichael Corcoran kpreempt_disable();
59232af6eb52SMichael Corcoran vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
59247c478bd9Sstevel@tonic-gate
5925affbd3ccSkchow ASSERT(vc != NULL);
5926affbd3ccSkchow
5927affbd3ccSkchow if (((seg = vc->vc_pnext_memseg) == NULL) ||
59287c478bd9Sstevel@tonic-gate (seg->pages_base == seg->pages_end) ||
59297c478bd9Sstevel@tonic-gate !(pp >= seg->pages && pp < seg->epages)) {
59307c478bd9Sstevel@tonic-gate
59317c478bd9Sstevel@tonic-gate for (seg = memsegs; seg; seg = seg->next) {
59327c478bd9Sstevel@tonic-gate if (pp >= seg->pages && pp < seg->epages)
59337c478bd9Sstevel@tonic-gate break;
59347c478bd9Sstevel@tonic-gate }
59357c478bd9Sstevel@tonic-gate
59367c478bd9Sstevel@tonic-gate if (seg == NULL) {
59377c478bd9Sstevel@tonic-gate /* Memory delete got in, return something valid. */
59387c478bd9Sstevel@tonic-gate /* TODO: fix me. */
59397c478bd9Sstevel@tonic-gate seg = memsegs;
59407c478bd9Sstevel@tonic-gate pp = seg->pages;
59417c478bd9Sstevel@tonic-gate }
59427c478bd9Sstevel@tonic-gate }
59437c478bd9Sstevel@tonic-gate
59447c478bd9Sstevel@tonic-gate /* check for wraparound - possible if n is large */
59457c478bd9Sstevel@tonic-gate while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
59467c478bd9Sstevel@tonic-gate n -= seg->epages - pp;
59477c478bd9Sstevel@tonic-gate seg = seg->next;
59487c478bd9Sstevel@tonic-gate if (seg == NULL)
59497c478bd9Sstevel@tonic-gate seg = memsegs;
59507c478bd9Sstevel@tonic-gate pp = seg->pages;
59517c478bd9Sstevel@tonic-gate }
5952affbd3ccSkchow vc->vc_pnext_memseg = seg;
59532af6eb52SMichael Corcoran kpreempt_enable();
59547c478bd9Sstevel@tonic-gate return (ppn);
59557c478bd9Sstevel@tonic-gate }
59567c478bd9Sstevel@tonic-gate
59577c478bd9Sstevel@tonic-gate /*
59587c478bd9Sstevel@tonic-gate * Initialize for a loop using page_next_scan_large().
59597c478bd9Sstevel@tonic-gate */
59607c478bd9Sstevel@tonic-gate page_t *
59617c478bd9Sstevel@tonic-gate page_next_scan_init(void **cookie)
59627c478bd9Sstevel@tonic-gate {
59637c478bd9Sstevel@tonic-gate ASSERT(cookie != NULL);
59647c478bd9Sstevel@tonic-gate *cookie = (void *)memsegs;
59657c478bd9Sstevel@tonic-gate return ((page_t *)memsegs->pages);
59667c478bd9Sstevel@tonic-gate }
59677c478bd9Sstevel@tonic-gate
59687c478bd9Sstevel@tonic-gate /*
59697c478bd9Sstevel@tonic-gate * Return the next page in a scan of page_t's, assuming we want
59707c478bd9Sstevel@tonic-gate * to skip over sub-pages within larger page sizes.
59717c478bd9Sstevel@tonic-gate *
59727c478bd9Sstevel@tonic-gate * The cookie is used to keep track of the current memseg.
59737c478bd9Sstevel@tonic-gate */
59747c478bd9Sstevel@tonic-gate page_t *
59757c478bd9Sstevel@tonic-gate page_next_scan_large(
59767c478bd9Sstevel@tonic-gate page_t *pp,
59777c478bd9Sstevel@tonic-gate ulong_t *n,
59787c478bd9Sstevel@tonic-gate void **cookie)
59797c478bd9Sstevel@tonic-gate {
59807c478bd9Sstevel@tonic-gate struct memseg *seg = (struct memseg *)*cookie;
59817c478bd9Sstevel@tonic-gate page_t *new_pp;
59827c478bd9Sstevel@tonic-gate ulong_t cnt;
59837c478bd9Sstevel@tonic-gate pfn_t pfn;
59847c478bd9Sstevel@tonic-gate
59857c478bd9Sstevel@tonic-gate
59867c478bd9Sstevel@tonic-gate /*
59877c478bd9Sstevel@tonic-gate * get the count of page_t's to skip based on the page size
59887c478bd9Sstevel@tonic-gate */
59897c478bd9Sstevel@tonic-gate ASSERT(pp != NULL);
59907c478bd9Sstevel@tonic-gate if (pp->p_szc == 0) {
59917c478bd9Sstevel@tonic-gate cnt = 1;
59927c478bd9Sstevel@tonic-gate } else {
59937c478bd9Sstevel@tonic-gate pfn = page_pptonum(pp);
59947c478bd9Sstevel@tonic-gate cnt = page_get_pagecnt(pp->p_szc);
59957c478bd9Sstevel@tonic-gate cnt -= pfn & (cnt - 1);
59967c478bd9Sstevel@tonic-gate }
59977c478bd9Sstevel@tonic-gate *n += cnt;
59987c478bd9Sstevel@tonic-gate new_pp = pp + cnt;
59997c478bd9Sstevel@tonic-gate
60007c478bd9Sstevel@tonic-gate /*
60017c478bd9Sstevel@tonic-gate * Catch if we went past the end of the current memory segment. If so,
60027c478bd9Sstevel@tonic-gate * just move to the next segment with pages.
60037c478bd9Sstevel@tonic-gate */
60049853d9e8SJason Beloro if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) {
60057c478bd9Sstevel@tonic-gate do {
60067c478bd9Sstevel@tonic-gate seg = seg->next;
60077c478bd9Sstevel@tonic-gate if (seg == NULL)
60087c478bd9Sstevel@tonic-gate seg = memsegs;
60099853d9e8SJason Beloro } while (seg->pages_base == seg->pages_end);
60107c478bd9Sstevel@tonic-gate new_pp = seg->pages;
60117c478bd9Sstevel@tonic-gate *cookie = (void *)seg;
60127c478bd9Sstevel@tonic-gate }
60137c478bd9Sstevel@tonic-gate
60147c478bd9Sstevel@tonic-gate return (new_pp);
60157c478bd9Sstevel@tonic-gate }
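/*
 * Illustrative sketch (not part of the original source): a scanner that only
 * wants to look at one page_t per large page can drive the two routines
 * above roughly like this; "limit" is a hypothetical bound on how many
 * page_t's (including skipped constituents) to cover.
 *
 *	void *cookie;
 *	ulong_t count = 0;
 *	page_t *pp = page_next_scan_init(&cookie);
 *	while (count < limit) {
 *		...examine pp...
 *		pp = page_next_scan_large(pp, &count, &cookie);
 *	}
 */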
60167c478bd9Sstevel@tonic-gate
60177c478bd9Sstevel@tonic-gate
60187c478bd9Sstevel@tonic-gate /*
60197c478bd9Sstevel@tonic-gate * Returns next page in list. Note: this function wraps
60207c478bd9Sstevel@tonic-gate * to the first page in the list upon reaching the end
60217c478bd9Sstevel@tonic-gate * of the list. Callers should be aware of this fact.
60227c478bd9Sstevel@tonic-gate */
60237c478bd9Sstevel@tonic-gate
60247c478bd9Sstevel@tonic-gate /* We should change this to be a #define */
60257c478bd9Sstevel@tonic-gate
60267c478bd9Sstevel@tonic-gate page_t *
60277c478bd9Sstevel@tonic-gate page_next(page_t *pp)
60287c478bd9Sstevel@tonic-gate {
60297c478bd9Sstevel@tonic-gate return (page_nextn(pp, 1));
60307c478bd9Sstevel@tonic-gate }
60317c478bd9Sstevel@tonic-gate
60327c478bd9Sstevel@tonic-gate page_t *
60337c478bd9Sstevel@tonic-gate page_first()
60347c478bd9Sstevel@tonic-gate {
60357c478bd9Sstevel@tonic-gate return ((page_t *)memsegs->pages);
60367c478bd9Sstevel@tonic-gate }
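/*
 * Illustrative sketch (not part of the original source): because page_next()
 * wraps back to the first page, a walk over every page must be bounded by a
 * count the caller maintains; total_pages is used here as that bound and is
 * an assumption of the sketch.
 *
 *	page_t *pp = page_first();
 *	pgcnt_t i;
 *	for (i = 0; i < total_pages; i++) {
 *		...examine pp...
 *		pp = page_next(pp);
 *	}
 */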
60377c478bd9Sstevel@tonic-gate
60387c478bd9Sstevel@tonic-gate
60397c478bd9Sstevel@tonic-gate /*
60407c478bd9Sstevel@tonic-gate * This routine is called at boot with the initial memory configuration
60417c478bd9Sstevel@tonic-gate * and when memory is added or removed.
60427c478bd9Sstevel@tonic-gate */
60437c478bd9Sstevel@tonic-gate void
60447c478bd9Sstevel@tonic-gate build_pfn_hash()
60457c478bd9Sstevel@tonic-gate {
60467c478bd9Sstevel@tonic-gate pfn_t cur;
60477c478bd9Sstevel@tonic-gate pgcnt_t index;
60487c478bd9Sstevel@tonic-gate struct memseg *pseg;
60497c478bd9Sstevel@tonic-gate int i;
60507c478bd9Sstevel@tonic-gate
60517c478bd9Sstevel@tonic-gate /*
60527c478bd9Sstevel@tonic-gate * Clear memseg_hash array.
60537c478bd9Sstevel@tonic-gate * Since memory add/delete is designed to operate concurrently
60547c478bd9Sstevel@tonic-gate * with normal operation, the hash rebuild must be able to run
60557c478bd9Sstevel@tonic-gate * concurrently with page_numtopp_nolock(). To support this
60567c478bd9Sstevel@tonic-gate * functionality, assignments to memseg_hash array members must
60577c478bd9Sstevel@tonic-gate * be done atomically.
60587c478bd9Sstevel@tonic-gate *
60597c478bd9Sstevel@tonic-gate * NOTE: bzero() does not currently guarantee this for kernel
60607c478bd9Sstevel@tonic-gate * threads, and cannot be used here.
60617c478bd9Sstevel@tonic-gate */
60627c478bd9Sstevel@tonic-gate for (i = 0; i < N_MEM_SLOTS; i++)
60637c478bd9Sstevel@tonic-gate memseg_hash[i] = NULL;
60647c478bd9Sstevel@tonic-gate
60657c478bd9Sstevel@tonic-gate hat_kpm_mseghash_clear(N_MEM_SLOTS);
60667c478bd9Sstevel@tonic-gate
60677c478bd9Sstevel@tonic-gate /*
60687c478bd9Sstevel@tonic-gate * Physmax is the last valid pfn.
60697c478bd9Sstevel@tonic-gate */
60707c478bd9Sstevel@tonic-gate mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
60717c478bd9Sstevel@tonic-gate for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
60727c478bd9Sstevel@tonic-gate index = MEMSEG_PFN_HASH(pseg->pages_base);
60737c478bd9Sstevel@tonic-gate cur = pseg->pages_base;
60747c478bd9Sstevel@tonic-gate do {
60757c478bd9Sstevel@tonic-gate if (index >= N_MEM_SLOTS)
60767c478bd9Sstevel@tonic-gate index = MEMSEG_PFN_HASH(cur);
60777c478bd9Sstevel@tonic-gate
60787c478bd9Sstevel@tonic-gate if (memseg_hash[index] == NULL ||
60797c478bd9Sstevel@tonic-gate memseg_hash[index]->pages_base > pseg->pages_base) {
60807c478bd9Sstevel@tonic-gate memseg_hash[index] = pseg;
60817c478bd9Sstevel@tonic-gate hat_kpm_mseghash_update(index, pseg);
60827c478bd9Sstevel@tonic-gate }
60837c478bd9Sstevel@tonic-gate cur += mhash_per_slot;
60847c478bd9Sstevel@tonic-gate index++;
60857c478bd9Sstevel@tonic-gate } while (cur < pseg->pages_end);
60867c478bd9Sstevel@tonic-gate }
60877c478bd9Sstevel@tonic-gate }
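/*
 * Illustrative arithmetic (not part of the original source): mhash_per_slot
 * is the number of pfns each hash slot nominally covers. For example, with
 * physmax + 1 = 2^22 frames (16 GB of 4K pages) and a hypothetical
 * MEM_HASH_SHIFT of 9, mhash_per_slot would be 2^13 frames, i.e. each slot
 * spans 32 MB worth of pfns and the inner loop above steps through a memseg
 * in 32 MB strides.
 */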
60887c478bd9Sstevel@tonic-gate
60897c478bd9Sstevel@tonic-gate /*
60907c478bd9Sstevel@tonic-gate * Return the pagenum for the pp
60917c478bd9Sstevel@tonic-gate */
60927c478bd9Sstevel@tonic-gate pfn_t
60937c478bd9Sstevel@tonic-gate page_pptonum(page_t *pp)
60947c478bd9Sstevel@tonic-gate {
60957c478bd9Sstevel@tonic-gate return (pp->p_pagenum);
60967c478bd9Sstevel@tonic-gate }
60977c478bd9Sstevel@tonic-gate
60987c478bd9Sstevel@tonic-gate /*
60997c478bd9Sstevel@tonic-gate * interface to the referenced and modified etc bits
61007c478bd9Sstevel@tonic-gate * in the PSM part of the page struct
61017c478bd9Sstevel@tonic-gate * when no locking is desired.
61027c478bd9Sstevel@tonic-gate */
61037c478bd9Sstevel@tonic-gate void
61047c478bd9Sstevel@tonic-gate page_set_props(page_t *pp, uint_t flags)
61057c478bd9Sstevel@tonic-gate {
61067c478bd9Sstevel@tonic-gate ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
61077c478bd9Sstevel@tonic-gate pp->p_nrm |= (uchar_t)flags;
61087c478bd9Sstevel@tonic-gate }
61097c478bd9Sstevel@tonic-gate
61107c478bd9Sstevel@tonic-gate void
61119d0d62adSJason Beloro page_clr_all_props(page_t *pp)
61127c478bd9Sstevel@tonic-gate {
61137c478bd9Sstevel@tonic-gate pp->p_nrm = 0;
61147c478bd9Sstevel@tonic-gate }
61157c478bd9Sstevel@tonic-gate
61167c478bd9Sstevel@tonic-gate /*
6117db874c57Selowe * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
61187c478bd9Sstevel@tonic-gate */
61197c478bd9Sstevel@tonic-gate int
6120db874c57Selowe page_clear_lck_cow(page_t *pp, int adjust)
61217c478bd9Sstevel@tonic-gate {
6122db874c57Selowe int f_amount;
61237c478bd9Sstevel@tonic-gate
6124db874c57Selowe ASSERT(PAGE_EXCL(pp));
61257c478bd9Sstevel@tonic-gate
61267c478bd9Sstevel@tonic-gate /*
6127db874c57Selowe * The page_struct_lock need not be acquired here since
6128db874c57Selowe * we require the caller hold the page exclusively locked.
61297c478bd9Sstevel@tonic-gate */
6130db874c57Selowe f_amount = 0;
6131db874c57Selowe if (pp->p_lckcnt) {
6132db874c57Selowe f_amount = 1;
6133db874c57Selowe pp->p_lckcnt = 0;
6134db874c57Selowe }
6135db874c57Selowe if (pp->p_cowcnt) {
6136db874c57Selowe f_amount += pp->p_cowcnt;
6137db874c57Selowe pp->p_cowcnt = 0;
6138db874c57Selowe }
61397c478bd9Sstevel@tonic-gate
6140db874c57Selowe if (adjust && f_amount) {
6141db874c57Selowe mutex_enter(&freemem_lock);
6142db874c57Selowe availrmem += f_amount;
6143db874c57Selowe mutex_exit(&freemem_lock);
61447c478bd9Sstevel@tonic-gate }
61457c478bd9Sstevel@tonic-gate
6146db874c57Selowe return (f_amount);
61477c478bd9Sstevel@tonic-gate }
61487c478bd9Sstevel@tonic-gate
61497c478bd9Sstevel@tonic-gate /*
6150db874c57Selowe * The following function is called from free_vp_pages()
6151db874c57Selowe * for an inexact estimate of a newly freed page...
61527c478bd9Sstevel@tonic-gate */
6153db874c57Selowe ulong_t
6154db874c57Selowe page_share_cnt(page_t *pp)
6155db874c57Selowe {
6156db874c57Selowe return (hat_page_getshare(pp));
6157db874c57Selowe }
6158db874c57Selowe
61597c478bd9Sstevel@tonic-gate int
61607c478bd9Sstevel@tonic-gate page_isshared(page_t *pp)
61617c478bd9Sstevel@tonic-gate {
616205d3dc4bSpaulsan return (hat_page_checkshare(pp, 1));
61637c478bd9Sstevel@tonic-gate }
61647c478bd9Sstevel@tonic-gate
61657c478bd9Sstevel@tonic-gate int
61667c478bd9Sstevel@tonic-gate page_isfree(page_t *pp)
61677c478bd9Sstevel@tonic-gate {
61687c478bd9Sstevel@tonic-gate return (PP_ISFREE(pp));
61697c478bd9Sstevel@tonic-gate }
61707c478bd9Sstevel@tonic-gate
61717c478bd9Sstevel@tonic-gate int
61727c478bd9Sstevel@tonic-gate page_isref(page_t *pp)
61737c478bd9Sstevel@tonic-gate {
61747c478bd9Sstevel@tonic-gate return (hat_page_getattr(pp, P_REF));
61757c478bd9Sstevel@tonic-gate }
61767c478bd9Sstevel@tonic-gate
61777c478bd9Sstevel@tonic-gate int
61787c478bd9Sstevel@tonic-gate page_ismod(page_t *pp)
61797c478bd9Sstevel@tonic-gate {
61807c478bd9Sstevel@tonic-gate return (hat_page_getattr(pp, P_MOD));
61817c478bd9Sstevel@tonic-gate }
61828b464eb8Smec
61838b464eb8Smec /*
61848b464eb8Smec * The following code all currently relates to the page capture logic:
61858b464eb8Smec *
61868b464eb8Smec * This logic is used for cases where there is a desire to claim a certain
61878b464eb8Smec * physical page in the system for the caller. As it may not be possible
61888b464eb8Smec * to capture the page immediately, the p_toxic bits are used in the page
61898b464eb8Smec * structure to indicate that someone wants to capture this page. When the
61908b464eb8Smec * page gets unlocked, the toxic flag will be noted and an attempt to capture
61918b464eb8Smec * the page will be made. If it is successful, the original caller's callback
61928b464eb8Smec * will be called with the page to do with it what they please.
61938b464eb8Smec *
61948b464eb8Smec * There is also an async thread which occasionally wakes up to attempt to
61958b464eb8Smec * capture pages which have the capture bit set. All of the pages which
61968b464eb8Smec * need to be captured asynchronously have been inserted into the
61978b464eb8Smec * page_capture_hash and thus this thread walks that hash list. Items in the
61988b464eb8Smec * hash have an expiration time so this thread handles that as well by removing
61998b464eb8Smec * the item from the hash if it has expired.
62008b464eb8Smec *
62018b464eb8Smec * Some important things to note are:
62028b464eb8Smec * - if the PR_CAPTURE bit is set on a page, then the page is in the
62038b464eb8Smec * page_capture_hash. The page_capture_hash_head.pchh_mutex is needed
62048b464eb8Smec * to set and clear this bit, and only while that lock is held can an entry
62058b464eb8Smec * be added to or removed from the hash.
62068b464eb8Smec * - the PR_CAPTURE bit can only be set and cleared while holding the
62078b464eb8Smec * page_capture_hash_head.pchh_mutex
62088b464eb8Smec * - the t_flag field of the thread struct is used with the T_CAPTURING
62098b464eb8Smec * flag to prevent recursion while dealing with large pages.
62108b464eb8Smec * - pages which need to be retired never expire on the page_capture_hash.
62118b464eb8Smec */
62128b464eb8Smec
62138b464eb8Smec static void page_capture_thread(void);
62148b464eb8Smec static kthread_t *pc_thread_id;
62158b464eb8Smec kcondvar_t pc_cv;
62168b464eb8Smec static kmutex_t pc_thread_mutex;
62178b464eb8Smec static clock_t pc_thread_shortwait;
62188b464eb8Smec static clock_t pc_thread_longwait;
6219a98e9dbfSaguzovsk static int pc_thread_retry;
62208b464eb8Smec
62218b464eb8Smec struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];
62228b464eb8Smec
62238b464eb8Smec /* Note that this is a circular linked list */
62248b464eb8Smec typedef struct page_capture_hash_bucket {
62258b464eb8Smec page_t *pp;
622611494be0SStan Studzinski uchar_t szc;
622711494be0SStan Studzinski uchar_t pri;
62288b464eb8Smec uint_t flags;
62298b464eb8Smec clock_t expires; /* lbolt at which this request expires. */
62308b464eb8Smec void *datap; /* Cached data passed in for callback */
62318b464eb8Smec struct page_capture_hash_bucket *next;
62328b464eb8Smec struct page_capture_hash_bucket *prev;
62338b464eb8Smec } page_capture_hash_bucket_t;
62348b464eb8Smec
623511494be0SStan Studzinski #define PC_PRI_HI 0 /* capture now */
623611494be0SStan Studzinski #define PC_PRI_LO 1 /* capture later */
623711494be0SStan Studzinski #define PC_NUM_PRI 2
623811494be0SStan Studzinski
623911494be0SStan Studzinski #define PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)
624011494be0SStan Studzinski
624111494be0SStan Studzinski
62428b464eb8Smec /*
62438b464eb8Smec * Each hash bucket will have its own mutex and two lists which are:
62448b464eb8Smec * active (0): represents requests which have not been processed by
62458b464eb8Smec * the page_capture async thread yet.
62468b464eb8Smec * walked (1): represents requests which have been processed by the
62478b464eb8Smec * page_capture async thread within its given walk of this bucket.
62488b464eb8Smec *
62498b464eb8Smec * These are all needed so that we can synchronize all async page_capture
62508b464eb8Smec * events. When the async thread moves to a new bucket, it will append the
62518b464eb8Smec * walked list to the active list and walk each item one at a time, moving it
62528b464eb8Smec * from the active list to the walked list. Thus if there is an async request
62538b464eb8Smec * outstanding for a given page, it will always be in one of the two lists.
62548b464eb8Smec * New requests will always be added to the active list.
62558b464eb8Smec * If we were not able to capture a page before the request expired, we'd free
62568b464eb8Smec * up the request structure which would indicate to page_capture that there is
62578b464eb8Smec * no longer a need for the given page, and clear the PR_CAPTURE flag if
62588b464eb8Smec * possible.
62598b464eb8Smec */
62608b464eb8Smec typedef struct page_capture_hash_head {
62618b464eb8Smec kmutex_t pchh_mutex;
626211494be0SStan Studzinski uint_t num_pages[PC_NUM_PRI];
62638b464eb8Smec page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
62648b464eb8Smec } page_capture_hash_head_t;
62658b464eb8Smec
62668b464eb8Smec #ifdef DEBUG
62678b464eb8Smec #define NUM_PAGE_CAPTURE_BUCKETS 4
62688b464eb8Smec #else
62698b464eb8Smec #define NUM_PAGE_CAPTURE_BUCKETS 64
62708b464eb8Smec #endif
62718b464eb8Smec
62728b464eb8Smec page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];
62738b464eb8Smec
62748b464eb8Smec /* for now use a very simple hash based upon the size of a page struct */
62758b464eb8Smec #define PAGE_CAPTURE_HASH(pp) \
62768b464eb8Smec ((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))
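/*
 * Illustrative note (not part of the original source): dropping the low
 * 7 address bits approximates dividing by the size of a page_t, so page_t's
 * that are 128 bytes apart hash to neighboring buckets. E.g. (non-DEBUG,
 * 64 buckets) addresses 0x...1000 and 0x...1080 map to buckets 32 and 33,
 * while 0x...1000 and 0x...3000 (64 * 128 bytes apart) collide in bucket 32.
 */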
62778b464eb8Smec
62788b464eb8Smec extern pgcnt_t swapfs_minfree;
62798b464eb8Smec
62808b464eb8Smec int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);
62818b464eb8Smec
62828b464eb8Smec /*
62838b464eb8Smec * a callback function is required for page capture requests.
62848b464eb8Smec */
62858b464eb8Smec void
62868b464eb8Smec page_capture_register_callback(uint_t index, clock_t duration,
62878b464eb8Smec int (*cb_func)(page_t *, void *, uint_t))
62888b464eb8Smec {
62898b464eb8Smec ASSERT(pc_cb[index].cb_active == 0);
62908b464eb8Smec ASSERT(cb_func != NULL);
62918b464eb8Smec rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
62928b464eb8Smec pc_cb[index].duration = duration;
62938b464eb8Smec pc_cb[index].cb_func = cb_func;
62948b464eb8Smec pc_cb[index].cb_active = 1;
62958b464eb8Smec rw_exit(&pc_cb[index].cb_rwlock);
62968b464eb8Smec }
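/*
 * Illustrative sketch (not part of the original source): a consumer that owns
 * callback slot "index" registers once (duration is in lbolt ticks, or -1 for
 * requests that never expire) and then asks for individual pages; the flag
 * bit for a request is the one matching the slot, as page_capture_add_hash()
 * below expects. my_callback and my_data are hypothetical.
 *
 *	page_capture_register_callback(index, duration, my_callback);
 *	...
 *	ret = page_trycapture(pp, 0, 1 << index, my_data);
 *	...
 *	page_capture_unregister_callback(index);
 */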
62978b464eb8Smec
62988b464eb8Smec void
62998b464eb8Smec page_capture_unregister_callback(uint_t index)
63008b464eb8Smec {
63018b464eb8Smec int i, j;
63028b464eb8Smec struct page_capture_hash_bucket *bp1;
63038b464eb8Smec struct page_capture_hash_bucket *bp2;
63048b464eb8Smec struct page_capture_hash_bucket *head = NULL;
63058b464eb8Smec uint_t flags = (1 << index);
63068b464eb8Smec
63078b464eb8Smec rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
63088b464eb8Smec ASSERT(pc_cb[index].cb_active == 1);
63098b464eb8Smec pc_cb[index].duration = 0; /* Paranoia */
63108b464eb8Smec pc_cb[index].cb_func = NULL; /* Paranoia */
63118b464eb8Smec pc_cb[index].cb_active = 0;
63128b464eb8Smec rw_exit(&pc_cb[index].cb_rwlock);
63138b464eb8Smec
63148b464eb8Smec /*
63158b464eb8Smec * Just move all the entries to a private list which we can walk
63168b464eb8Smec * through without the need to hold any locks.
63178b464eb8Smec * No more requests can get added to the hash lists for this consumer
63188b464eb8Smec * as the cb_active field for the callback has been cleared.
63198b464eb8Smec */
63208b464eb8Smec for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
63218b464eb8Smec mutex_enter(&page_capture_hash[i].pchh_mutex);
63228b464eb8Smec for (j = 0; j < 2; j++) {
63238b464eb8Smec bp1 = page_capture_hash[i].lists[j].next;
63248b464eb8Smec /* walk through all but first (sentinel) element */
63258b464eb8Smec while (bp1 != &page_capture_hash[i].lists[j]) {
63268b464eb8Smec bp2 = bp1;
63278b464eb8Smec if (bp2->flags & flags) {
63288b464eb8Smec bp1 = bp2->next;
63298b464eb8Smec bp1->prev = bp2->prev;
63308b464eb8Smec bp2->prev->next = bp1;
63318b464eb8Smec bp2->next = head;
63328b464eb8Smec head = bp2;
63338b464eb8Smec /*
63348b464eb8Smec * Clear the PR_CAPTURE bit as we
63358b464eb8Smec * hold appropriate locks here.
63368b464eb8Smec */
63378b464eb8Smec page_clrtoxic(head->pp, PR_CAPTURE);
633811494be0SStan Studzinski page_capture_hash[i].
633911494be0SStan Studzinski num_pages[bp2->pri]--;
63408b464eb8Smec continue;
63418b464eb8Smec }
63428b464eb8Smec bp1 = bp1->next;
63438b464eb8Smec }
63448b464eb8Smec }
63458b464eb8Smec mutex_exit(&page_capture_hash[i].pchh_mutex);
63468b464eb8Smec }
63478b464eb8Smec
63488b464eb8Smec while (head != NULL) {
63498b464eb8Smec bp1 = head;
63508b464eb8Smec head = head->next;
63518b464eb8Smec kmem_free(bp1, sizeof (*bp1));
63528b464eb8Smec }
63538b464eb8Smec }
63548b464eb8Smec
63558b464eb8Smec
63568b464eb8Smec /*
63578b464eb8Smec * Find pp in the active list and move it to the walked list if it
63588b464eb8Smec * exists.
63598b464eb8Smec * Note that most often pp should be at the front of the active list
63608b464eb8Smec * as it is currently used and thus there is no other sort of optimization
63618b464eb8Smec * being done here as this is a linked list data structure.
63628b464eb8Smec * Returns 1 on successful move or 0 if page could not be found.
63638b464eb8Smec */
63648b464eb8Smec static int
63658b464eb8Smec page_capture_move_to_walked(page_t *pp)
63668b464eb8Smec {
63678b464eb8Smec page_capture_hash_bucket_t *bp;
63688b464eb8Smec int index;
63698b464eb8Smec
63708b464eb8Smec index = PAGE_CAPTURE_HASH(pp);
63718b464eb8Smec
63728b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex);
63738b464eb8Smec bp = page_capture_hash[index].lists[0].next;
63748b464eb8Smec while (bp != &page_capture_hash[index].lists[0]) {
63758b464eb8Smec if (bp->pp == pp) {
63768b464eb8Smec /* Remove from old list */
63778b464eb8Smec bp->next->prev = bp->prev;
63788b464eb8Smec bp->prev->next = bp->next;
63798b464eb8Smec
63808b464eb8Smec /* Add to new list */
63818b464eb8Smec bp->next = page_capture_hash[index].lists[1].next;
63828b464eb8Smec bp->prev = &page_capture_hash[index].lists[1];
63838b464eb8Smec page_capture_hash[index].lists[1].next = bp;
63848b464eb8Smec bp->next->prev = bp;
63858b464eb8Smec
638611494be0SStan Studzinski /*
638711494be0SStan Studzinski * There is a small probability of a page on a free
638811494be0SStan Studzinski * list being retired while being allocated
638911494be0SStan Studzinski * and before P_RAF is set on it. The page may
639011494be0SStan Studzinski * end up marked as a high priority request instead
639111494be0SStan Studzinski * of a low priority request.
639211494be0SStan Studzinski * If a P_RAF page is not marked as a low priority
639311494be0SStan Studzinski * request, change it to a low priority request.
639411494be0SStan Studzinski */
639511494be0SStan Studzinski page_capture_hash[index].num_pages[bp->pri]--;
639611494be0SStan Studzinski bp->pri = PAGE_CAPTURE_PRIO(pp);
639711494be0SStan Studzinski page_capture_hash[index].num_pages[bp->pri]++;
639811494be0SStan Studzinski mutex_exit(&page_capture_hash[index].pchh_mutex);
63998b464eb8Smec return (1);
64008b464eb8Smec }
64018b464eb8Smec bp = bp->next;
64028b464eb8Smec }
64038b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex);
64048b464eb8Smec return (0);
64058b464eb8Smec }
64068b464eb8Smec
64078b464eb8Smec /*
64088b464eb8Smec * Add a new entry to the page capture hash. The only case where a new
64098b464eb8Smec * entry is not added is when the page capture consumer is no longer registered.
64108b464eb8Smec * In this case, we'll silently not add the page to the hash. We know that
64118b464eb8Smec * page retire will always be registered for the case where we are currently
64128b464eb8Smec * unretiring a page and thus there are no conflicts.
64138b464eb8Smec */
64148b464eb8Smec static void
64158b464eb8Smec page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
64168b464eb8Smec {
64178b464eb8Smec page_capture_hash_bucket_t *bp1;
64188b464eb8Smec page_capture_hash_bucket_t *bp2;
64198b464eb8Smec int index;
64208b464eb8Smec int cb_index;
64218b464eb8Smec int i;
642211494be0SStan Studzinski uchar_t pri;
64238b464eb8Smec #ifdef DEBUG
64248b464eb8Smec page_capture_hash_bucket_t *tp1;
64258b464eb8Smec int l;
64268b464eb8Smec #endif
64278b464eb8Smec
64288b464eb8Smec ASSERT(!(flags & CAPTURE_ASYNC));
64298b464eb8Smec
64308b464eb8Smec bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP);
64318b464eb8Smec
64328b464eb8Smec bp1->pp = pp;
64338b464eb8Smec bp1->szc = szc;
64348b464eb8Smec bp1->flags = flags;
64358b464eb8Smec bp1->datap = datap;
64368b464eb8Smec
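	/*
	 * Each capture callback is identified by a bit in the flags word;
	 * find the index of the first callback bit that is set.
	 */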
64378b464eb8Smec for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
64388b464eb8Smec if ((flags >> cb_index) & 1) {
64398b464eb8Smec break;
64408b464eb8Smec }
64418b464eb8Smec }
64428b464eb8Smec
64438b464eb8Smec ASSERT(cb_index != PC_NUM_CALLBACKS);
64448b464eb8Smec
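	/*
	 * A registered duration of -1 means the request never expires;
	 * otherwise the expiration is an absolute lbolt deadline computed
	 * from the callback's configured duration.
	 */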
64458b464eb8Smec rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
64468b464eb8Smec if (pc_cb[cb_index].cb_active) {
64478b464eb8Smec if (pc_cb[cb_index].duration == -1) {
64488b464eb8Smec bp1->expires = (clock_t)-1;
64498b464eb8Smec } else {
6450d3d50737SRafael Vanoni bp1->expires = ddi_get_lbolt() +
6451d3d50737SRafael Vanoni pc_cb[cb_index].duration;
64528b464eb8Smec }
64538b464eb8Smec } else {
64548b464eb8Smec /* There's no callback registered so don't add to the hash */
64558b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock);
64568b464eb8Smec kmem_free(bp1, sizeof (*bp1));
64578b464eb8Smec return;
64588b464eb8Smec }
64598b464eb8Smec
64608b464eb8Smec index = PAGE_CAPTURE_HASH(pp);
64618b464eb8Smec
64628b464eb8Smec /*
64638b464eb8Smec * The capture flag may only be modified while holding this mutex;
64648b464eb8Smec * this prevents multiple entries being added for the same page.
64658b464eb8Smec */
64668b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex);
64678b464eb8Smec
64688b464eb8Smec /*
64698b464eb8Smec * if not already on the hash, set capture bit and add to the hash
64708b464eb8Smec */
64718b464eb8Smec if (!(pp->p_toxic & PR_CAPTURE)) {
64728b464eb8Smec #ifdef DEBUG
64738b464eb8Smec /* Check for duplicate entries */
64748b464eb8Smec for (l = 0; l < 2; l++) {
64758b464eb8Smec tp1 = page_capture_hash[index].lists[l].next;
64768b464eb8Smec while (tp1 != &page_capture_hash[index].lists[l]) {
64778b464eb8Smec if (tp1->pp == pp) {
64788b464eb8Smec panic("page pp 0x%p already on hash "
64798793b36bSNick Todd "at 0x%p\n",
64808793b36bSNick Todd (void *)pp, (void *)tp1);
64818b464eb8Smec }
64828b464eb8Smec tp1 = tp1->next;
64838b464eb8Smec }
64848b464eb8Smec }
64858b464eb8Smec
64868b464eb8Smec #endif
64878b464eb8Smec page_settoxic(pp, PR_CAPTURE);
648811494be0SStan Studzinski pri = PAGE_CAPTURE_PRIO(pp);
648911494be0SStan Studzinski bp1->pri = pri;
64908b464eb8Smec bp1->next = page_capture_hash[index].lists[0].next;
64918b464eb8Smec bp1->prev = &page_capture_hash[index].lists[0];
64928b464eb8Smec bp1->next->prev = bp1;
64938b464eb8Smec page_capture_hash[index].lists[0].next = bp1;
649411494be0SStan Studzinski page_capture_hash[index].num_pages[pri]++;
6495cee1d74bSjfrank if (flags & CAPTURE_RETIRE) {
6496704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States page_retire_incr_pend_count(datap);
6497cee1d74bSjfrank }
64988b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex);
64998b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock);
65008b464eb8Smec cv_signal(&pc_cv);
65018b464eb8Smec return;
65028b464eb8Smec }
65038b464eb8Smec
65048b464eb8Smec /*
65058b464eb8Smec * A page retire request will replace any other request.
65068b464eb8Smec * A second physmem request for a different process than the one
65078b464eb8Smec * currently registered will be dropped, as there is no way to hold
65088b464eb8Smec * the private data for both calls.
65098b464eb8Smec * In the future, once there are more callers, this will have to be
65108b464eb8Smec * worked out better, as there needs to be private storage for at
65118b464eb8Smec * least each type of caller (perhaps make datap an array of void
65128b464eb8Smec * pointers indexed by the caller's index).
65138b464eb8Smec */
65148b464eb8Smec
65158b464eb8Smec /* walk hash list to update expire time */
65168b464eb8Smec for (i = 0; i < 2; i++) {
65178b464eb8Smec bp2 = page_capture_hash[index].lists[i].next;
65188b464eb8Smec while (bp2 != &page_capture_hash[index].lists[i]) {
65198b464eb8Smec if (bp2->pp == pp) {
65208b464eb8Smec if (flags & CAPTURE_RETIRE) {
65218b464eb8Smec if (!(bp2->flags & CAPTURE_RETIRE)) {
6522704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States page_retire_incr_pend_count(
6523704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States datap);
65248b464eb8Smec bp2->flags = flags;
65258b464eb8Smec bp2->expires = bp1->expires;
65268b464eb8Smec bp2->datap = datap;
65278b464eb8Smec }
65288b464eb8Smec } else {
65298b464eb8Smec ASSERT(flags & CAPTURE_PHYSMEM);
65308b464eb8Smec if (!(bp2->flags & CAPTURE_RETIRE) &&
65318b464eb8Smec (datap == bp2->datap)) {
65328b464eb8Smec bp2->expires = bp1->expires;
65338b464eb8Smec }
65348b464eb8Smec }
65358b464eb8Smec mutex_exit(&page_capture_hash[index].
65368b464eb8Smec pchh_mutex);
65378b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock);
65388b464eb8Smec kmem_free(bp1, sizeof (*bp1));
65398b464eb8Smec return;
65408b464eb8Smec }
65418b464eb8Smec bp2 = bp2->next;
65428b464eb8Smec }
65438b464eb8Smec }
65448b464eb8Smec
65458b464eb8Smec /*
65468b464eb8Smec * The PR_CAPTURE flag is protected by the page_capture_hash mutexes,
65478b464eb8Smec * so it is either set or clear and cannot change while we hold the
65488b464eb8Smec * mutex above.
65498b464eb8Smec */
65508793b36bSNick Todd panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n",
65518793b36bSNick Todd (void *)pp);
65528b464eb8Smec }
65538b464eb8Smec
65548b464eb8Smec /*
65558b464eb8Smec * We have a page in our hands, so let's try to make it ours by turning
65568b464eb8Smec * it into a clean page, as if it had just come off the freelists.
65578b464eb8Smec *
65588b464eb8Smec * Returns 0 on success, with the page still EXCL locked.
65598b464eb8Smec * On failure, the page is unlocked and EAGAIN is returned.
65608b464eb8Smec */
65618b464eb8Smec static int
65628b464eb8Smec page_capture_clean_page(page_t *pp)
65638b464eb8Smec {
65648b464eb8Smec page_t *newpp;
65658b464eb8Smec int skip_unlock = 0;
65668b464eb8Smec spgcnt_t count;
65678b464eb8Smec page_t *tpp;
65688b464eb8Smec int ret = 0;
65698b464eb8Smec int extra;
65708b464eb8Smec
65718b464eb8Smec ASSERT(PAGE_EXCL(pp));
65728b464eb8Smec ASSERT(!PP_RETIRED(pp));
65738b464eb8Smec ASSERT(curthread->t_flag & T_CAPTURING);
65748b464eb8Smec
65758b464eb8Smec if (PP_ISFREE(pp)) {
65766e4dd838Smec if (!page_reclaim(pp, NULL)) {
65778b464eb8Smec skip_unlock = 1;
65788b464eb8Smec ret = EAGAIN;
65798b464eb8Smec goto cleanup;
65808b464eb8Smec }
65816e4dd838Smec ASSERT(pp->p_szc == 0);
65828b464eb8Smec if (pp->p_vnode != NULL) {
65838b464eb8Smec /*
65848b464eb8Smec * Since this page came from the
65858b464eb8Smec * cachelist, we must destroy the
65868b464eb8Smec * old vnode association.
65878b464eb8Smec */
65888b464eb8Smec page_hashout(pp, NULL);
65898b464eb8Smec }
65908b464eb8Smec goto cleanup;
65918b464eb8Smec }
65928b464eb8Smec
65938b464eb8Smec /*
65948b464eb8Smec * If we know page_relocate will fail, skip it.
65958b464eb8Smec * It could still fail due to a UE on another page, but we
65968b464eb8Smec * can't do anything about that.
65978b464eb8Smec */
65988b464eb8Smec if (pp->p_toxic & PR_UE) {
65998b464eb8Smec goto skip_relocate;
66008b464eb8Smec }
66018b464eb8Smec
66028b464eb8Smec /*
66038b464eb8Smec * It's possible for a page to have no vnode, as fsflush comes
66048b464eb8Smec * through and cleans up such pages. It's ugly, but that's how it is.
66058b464eb8Smec */
66068b464eb8Smec if (pp->p_vnode == NULL) {
66078b464eb8Smec goto skip_relocate;
66088b464eb8Smec }
66098b464eb8Smec
66108b464eb8Smec /*
66118b464eb8Smec * The page was not free, so let's try to relocate it.
66128b464eb8Smec * page_relocate only works with root pages, so if this is not a root
66138b464eb8Smec * page, we need to demote it to try and relocate it.
66148b464eb8Smec * Unfortunately this is the best we can do right now.
66158b464eb8Smec */
66168b464eb8Smec newpp = NULL;
66178b464eb8Smec if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
66188b464eb8Smec if (page_try_demote_pages(pp) == 0) {
66198b464eb8Smec ret = EAGAIN;
66208b464eb8Smec goto cleanup;
66218b464eb8Smec }
66228b464eb8Smec }
66238b464eb8Smec ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
66248b464eb8Smec if (ret == 0) {
66258b464eb8Smec page_t *npp;
66268b464eb8Smec /* unlock the new page(s) */
66278b464eb8Smec while (count-- > 0) {
66288b464eb8Smec ASSERT(newpp != NULL);
66298b464eb8Smec npp = newpp;
66308b464eb8Smec page_sub(&newpp, npp);
66318b464eb8Smec page_unlock(npp);
66328b464eb8Smec }
66338b464eb8Smec ASSERT(newpp == NULL);
66348b464eb8Smec /*
66358b464eb8Smec * Check to see if the page we have is too large.
66368b464eb8Smec * If so, demote it, freeing up the extra pages.
66378b464eb8Smec */
66388b464eb8Smec if (pp->p_szc > 0) {
66398b464eb8Smec /* For now demote extra pages to szc == 0 */
66408b464eb8Smec extra = page_get_pagecnt(pp->p_szc) - 1;
66418b464eb8Smec while (extra > 0) {
66428b464eb8Smec tpp = pp->p_next;
66438b464eb8Smec page_sub(&pp, tpp);
66448b464eb8Smec tpp->p_szc = 0;
66458b464eb8Smec page_free(tpp, 1);
66468b464eb8Smec extra--;
66478b464eb8Smec }
66488b464eb8Smec /* Make sure to set our page to szc 0 as well */
66498b464eb8Smec ASSERT(pp->p_next == pp && pp->p_prev == pp);
66508b464eb8Smec pp->p_szc = 0;
66518b464eb8Smec }
66528b464eb8Smec goto cleanup;
66538b464eb8Smec } else if (ret == EIO) {
66548b464eb8Smec ret = EAGAIN;
66558b464eb8Smec goto cleanup;
66568b464eb8Smec } else {
66578b464eb8Smec /*
66588b464eb8Smec * Reset the return value: we failed to relocate the page,
66598b464eb8Smec * but that does not mean the steps that follow cannot still
66608b464eb8Smec * succeed.
66618b464eb8Smec */
66628b464eb8Smec ret = 0;
66638b464eb8Smec }
66648b464eb8Smec
66658b464eb8Smec skip_relocate:
66668b464eb8Smec
66678b464eb8Smec if (pp->p_szc > 0) {
66688b464eb8Smec if (page_try_demote_pages(pp) == 0) {
66698b464eb8Smec ret = EAGAIN;
66708b464eb8Smec goto cleanup;
66718b464eb8Smec }
66728b464eb8Smec }
66738b464eb8Smec
66748b464eb8Smec ASSERT(pp->p_szc == 0);
66758b464eb8Smec
66768b464eb8Smec if (hat_ismod(pp)) {
66778b464eb8Smec ret = EAGAIN;
66788b464eb8Smec goto cleanup;
66798b464eb8Smec }
6680ad23a2dbSjohansen if (PP_ISKAS(pp)) {
66818b464eb8Smec ret = EAGAIN;
66828b464eb8Smec goto cleanup;
66838b464eb8Smec }
66848b464eb8Smec if (pp->p_lckcnt || pp->p_cowcnt) {
66858b464eb8Smec ret = EAGAIN;
66868b464eb8Smec goto cleanup;
66878b464eb8Smec }
66888b464eb8Smec
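	/*
	 * Forcibly unload any remaining mappings so the page cannot pick
	 * up new modifications while we finish cleaning it; the second
	 * hat_ismod() check below catches a modification that raced in
	 * before the mappings were torn down.
	 */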
66898b464eb8Smec (void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
66908b464eb8Smec ASSERT(!hat_page_is_mapped(pp));
66918b464eb8Smec
66928b464eb8Smec if (hat_ismod(pp)) {
66938b464eb8Smec /*
66948b464eb8Smec * This is a semi-odd case: the page is now modified but not
66958b464eb8Smec * mapped, as we just unloaded the mappings above.
66968b464eb8Smec */
66978b464eb8Smec ret = EAGAIN;
66988b464eb8Smec goto cleanup;
66998b464eb8Smec }
67008b464eb8Smec if (pp->p_vnode != NULL) {
67018b464eb8Smec page_hashout(pp, NULL);
67028b464eb8Smec }
67038b464eb8Smec
67048b464eb8Smec /*
67058b464eb8Smec * At this point, the page should be in a clean state and
67068b464eb8Smec * we can do whatever we want with it.
67078b464eb8Smec */
67088b464eb8Smec
67098b464eb8Smec cleanup:
67108b464eb8Smec if (ret != 0) {
67118b464eb8Smec if (!skip_unlock) {
67128b464eb8Smec page_unlock(pp);
67138b464eb8Smec }
67148b464eb8Smec } else {
67158b464eb8Smec ASSERT(pp->p_szc == 0);
67168b464eb8Smec ASSERT(PAGE_EXCL(pp));
67178b464eb8Smec
67188b464eb8Smec pp->p_next = pp;
67198b464eb8Smec pp->p_prev = pp;
67208b464eb8Smec }
67218b464eb8Smec return (ret);
67228b464eb8Smec }
67238b464eb8Smec
67248b464eb8Smec /*
67258b464eb8Smec * Various callers of page_trycapture() can have different restrictions upon
67268b464eb8Smec * what memory they have access to.
67278b464eb8Smec * Returns 0 on success, with the following error codes on failure:
67288b464eb8Smec * EPERM - The requested page is long term locked, and thus repeated
67298b464eb8Smec * requests to capture this page will likely fail.
67308b464eb8Smec * ENOMEM - There was not enough free memory in the system to safely
67318b464eb8Smec * map the requested page.
67328b464eb8Smec * ENOENT - The requested page was inside the kernel cage, and the
67338b464eb8Smec * PHYSMEM_CAGE flag was not set.
67348b464eb8Smec */
67358b464eb8Smec int
67368b464eb8Smec page_capture_pre_checks(page_t *pp, uint_t flags)
67378b464eb8Smec {
67388b464eb8Smec ASSERT(pp != NULL);
67398b464eb8Smec
67408b464eb8Smec #if defined(__sparc)
6741af4c679fSSean McEnroe if (pp->p_vnode == &promvp) {
67428b464eb8Smec return (EPERM);
67438b464eb8Smec }
67448b464eb8Smec
6745a98e9dbfSaguzovsk if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) &&
6746a98e9dbfSaguzovsk (flags & CAPTURE_PHYSMEM)) {
67478b464eb8Smec return (ENOENT);
67488b464eb8Smec }
67498b464eb8Smec
67508b464eb8Smec if (PP_ISNORELOCKERNEL(pp)) {
67518b464eb8Smec return (EPERM);
67528b464eb8Smec }
67538b464eb8Smec #else
6754ad23a2dbSjohansen if (PP_ISKAS(pp)) {
67558b464eb8Smec return (EPERM);
67568b464eb8Smec }
67578b464eb8Smec #endif /* __sparc */
67588b464eb8Smec
6759a98e9dbfSaguzovsk /* only physmem currently has the restrictions checked below */
6760a98e9dbfSaguzovsk if (!(flags & CAPTURE_PHYSMEM)) {
6761a98e9dbfSaguzovsk return (0);
6762a98e9dbfSaguzovsk }
6763a98e9dbfSaguzovsk
67648b464eb8Smec if (availrmem < swapfs_minfree) {
67658b464eb8Smec /*
67668b464eb8Smec * We won't try to capture this page as we are
67678b464eb8Smec * running low on memory.
67688b464eb8Smec */
67698b464eb8Smec return (ENOMEM);
67708b464eb8Smec }
67718b464eb8Smec return (0);
67728b464eb8Smec }
67738b464eb8Smec
67748b464eb8Smec /*
67758b464eb8Smec * Once we have a page in our mitts, go ahead and complete the capture
67768b464eb8Smec * operation.
67778b464eb8Smec * Returns 1 on failure where the page is no longer needed.
67788b464eb8Smec * Returns 0 on success.
67798b464eb8Smec * Returns -1 if there was a transient failure.
67808b464eb8Smec * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
67818b464eb8Smec */
67828b464eb8Smec int
67838b464eb8Smec page_capture_take_action(page_t *pp, uint_t flags, void *datap)
67848b464eb8Smec {
67858b464eb8Smec int cb_index;
67868b464eb8Smec int ret = 0;
67878b464eb8Smec page_capture_hash_bucket_t *bp1;
67888b464eb8Smec page_capture_hash_bucket_t *bp2;
67898b464eb8Smec int index;
67908b464eb8Smec int found = 0;
67918b464eb8Smec int i;
67928b464eb8Smec
67938b464eb8Smec ASSERT(PAGE_EXCL(pp));
67948b464eb8Smec ASSERT(curthread->t_flag & T_CAPTURING);
67958b464eb8Smec
67968b464eb8Smec for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
67978b464eb8Smec if ((flags >> cb_index) & 1) {
67988b464eb8Smec break;
67998b464eb8Smec }
68008b464eb8Smec }
68018b464eb8Smec ASSERT(cb_index < PC_NUM_CALLBACKS);
68028b464eb8Smec
68038b464eb8Smec /*
68048b464eb8Smec * Remove the entry from the page_capture hash, but don't free it yet
68058b464eb8Smec * as we may need to put it back.
68068b464eb8Smec * Since we own the page at this point in time, we should find it
68078b464eb8Smec * in the hash if this is an ASYNC call. If we don't, it's likely
68088b464eb8Smec * that the page_capture_async() thread decided that this request
68098b464eb8Smec * had expired, in which case we just continue on.
68108b464eb8Smec */
68118b464eb8Smec if (flags & CAPTURE_ASYNC) {
68128b464eb8Smec
68138b464eb8Smec index = PAGE_CAPTURE_HASH(pp);
68148b464eb8Smec
68158b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex);
68168b464eb8Smec for (i = 0; i < 2 && !found; i++) {
68178b464eb8Smec bp1 = page_capture_hash[index].lists[i].next;
68188b464eb8Smec while (bp1 != &page_capture_hash[index].lists[i]) {
68198b464eb8Smec if (bp1->pp == pp) {
68208b464eb8Smec bp1->next->prev = bp1->prev;
68218b464eb8Smec bp1->prev->next = bp1->next;
682211494be0SStan Studzinski page_capture_hash[index].
682311494be0SStan Studzinski num_pages[bp1->pri]--;
68248b464eb8Smec page_clrtoxic(pp, PR_CAPTURE);
68258b464eb8Smec found = 1;
68268b464eb8Smec break;
68278b464eb8Smec }
68288b464eb8Smec bp1 = bp1->next;
68298b464eb8Smec }
68308b464eb8Smec }
68318b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex);
68328b464eb8Smec }
68338b464eb8Smec
68348b464eb8Smec /* Synchronize with the unregister func. */
68358b464eb8Smec rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
68368b464eb8Smec if (!pc_cb[cb_index].cb_active) {
68378b464eb8Smec page_free(pp, 1);
68388b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock);
68398b464eb8Smec if (found) {
68408b464eb8Smec kmem_free(bp1, sizeof (*bp1));
68418b464eb8Smec }
68428b464eb8Smec return (1);
68438b464eb8Smec }
68448b464eb8Smec
68458b464eb8Smec /*
68468b464eb8Smec * We need to remove the entry from the page capture hash and turn off
68478b464eb8Smec * the PR_CAPTURE bit before calling the callback. We'll need to cache
68488b464eb8Smec * the entry here, and then, based upon the return value, clean up
68498b464eb8Smec * appropriately or re-add it to the hash, making sure that someone else
68508b464eb8Smec * hasn't already done so.
68518b464eb8Smec * It should be rare for the callback to fail and thus it's ok for
68528b464eb8Smec * the failure path to be a bit complicated as the success path is
68538b464eb8Smec * cleaner and the locking rules are easier to follow.
68548b464eb8Smec */
68558b464eb8Smec
68568b464eb8Smec ret = pc_cb[cb_index].cb_func(pp, datap, flags);
68578b464eb8Smec
68588b464eb8Smec rw_exit(&pc_cb[cb_index].cb_rwlock);
68598b464eb8Smec
68608b464eb8Smec /*
68618b464eb8Smec * If this was an ASYNC request, we need to clean up the hash if the
68628b464eb8Smec * callback was successful or if the request was no longer valid.
68638b464eb8Smec * For non-ASYNC requests, we return failure to map and the caller
68648b464eb8Smec * will take care of adding the request to the hash.
68658b464eb8Smec * Note also that the callback itself is responsible for the page
68668b464eb8Smec * at this point in time in terms of locking ... The most common
68678b464eb8Smec * case for the failure path should just be a page_free.
68688b464eb8Smec */
68698b464eb8Smec if (ret >= 0) {
68708b464eb8Smec if (found) {
6871cee1d74bSjfrank if (bp1->flags & CAPTURE_RETIRE) {
6872704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States page_retire_decr_pend_count(datap);
6873cee1d74bSjfrank }
68748b464eb8Smec kmem_free(bp1, sizeof (*bp1));
68758b464eb8Smec }
68768b464eb8Smec return (ret);
68778b464eb8Smec }
68788b464eb8Smec if (!found) {
68798b464eb8Smec return (ret);
68808b464eb8Smec }
68818b464eb8Smec
68828b464eb8Smec ASSERT(flags & CAPTURE_ASYNC);
68838b464eb8Smec
68848b464eb8Smec /*
68858b464eb8Smec * Check for expiration time first as we can just free it up if it's
68868b464eb8Smec * expired.
68878b464eb8Smec */
6888d3d50737SRafael Vanoni if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
68898b464eb8Smec kmem_free(bp1, sizeof (*bp1));
68908b464eb8Smec return (ret);
68918b464eb8Smec }
68928b464eb8Smec
68938b464eb8Smec /*
68948b464eb8Smec * The callback failed and there used to be an entry in the hash for
68958b464eb8Smec * this page, so we need to add it back to the hash.
68968b464eb8Smec */
68978b464eb8Smec mutex_enter(&page_capture_hash[index].pchh_mutex);
68988b464eb8Smec if (!(pp->p_toxic & PR_CAPTURE)) {
68998b464eb8Smec /* just add bp1 back to head of walked list */
69008b464eb8Smec page_settoxic(pp, PR_CAPTURE);
69018b464eb8Smec bp1->next = page_capture_hash[index].lists[1].next;
69028b464eb8Smec bp1->prev = &page_capture_hash[index].lists[1];
69038b464eb8Smec bp1->next->prev = bp1;
690411494be0SStan Studzinski bp1->pri = PAGE_CAPTURE_PRIO(pp);
69058b464eb8Smec page_capture_hash[index].lists[1].next = bp1;
690611494be0SStan Studzinski page_capture_hash[index].num_pages[bp1->pri]++;
69078b464eb8Smec mutex_exit(&page_capture_hash[index].pchh_mutex);
69088b464eb8Smec return (ret);
69098b464eb8Smec }
69108b464eb8Smec
69118b464eb8Smec /*
69128b464eb8Smec * Otherwise a new capture request was added to the list.
69138b464eb8Smec * We need to make sure that our original data is represented if
69148b464eb8Smec * appropriate.
69158b464eb8Smec */
69168b464eb8Smec for (i = 0; i < 2; i++) {
69178b464eb8Smec bp2 = page_capture_hash[index].lists[i].next;
69188b464eb8Smec while (bp2 != &page_capture_hash[index].lists[i]) {
69198b464eb8Smec if (bp2->pp == pp) {
69208b464eb8Smec if (bp1->flags & CAPTURE_RETIRE) {
69218b464eb8Smec if (!(bp2->flags & CAPTURE_RETIRE)) {
69228b464eb8Smec bp2->szc = bp1->szc;
69238b464eb8Smec bp2->flags = bp1->flags;
69248b464eb8Smec bp2->expires = bp1->expires;
69258b464eb8Smec bp2->datap = bp1->datap;
69268b464eb8Smec }
69278b464eb8Smec } else {
69288b464eb8Smec ASSERT(bp1->flags & CAPTURE_PHYSMEM);
69298b464eb8Smec if (!(bp2->flags & CAPTURE_RETIRE)) {
69308b464eb8Smec bp2->szc = bp1->szc;
69318b464eb8Smec bp2->flags = bp1->flags;
69328b464eb8Smec bp2->expires = bp1->expires;
69338b464eb8Smec bp2->datap = bp1->datap;
69348b464eb8Smec }
69358b464eb8Smec }
693611494be0SStan Studzinski page_capture_hash[index].num_pages[bp2->pri]--;
693711494be0SStan Studzinski bp2->pri = PAGE_CAPTURE_PRIO(pp);
693811494be0SStan Studzinski page_capture_hash[index].num_pages[bp2->pri]++;
69398b464eb8Smec mutex_exit(&page_capture_hash[index].
69408b464eb8Smec pchh_mutex);
69418b464eb8Smec kmem_free(bp1, sizeof (*bp1));
69428b464eb8Smec return (ret);
69438b464eb8Smec }
69448b464eb8Smec bp2 = bp2->next;
69458b464eb8Smec }
69468b464eb8Smec }
69478793b36bSNick Todd panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
69488b464eb8Smec /*NOTREACHED*/
69498b464eb8Smec }
69508b464eb8Smec
69518b464eb8Smec /*
69528b464eb8Smec * Try to capture the given page for the caller specified in the flags
69538b464eb8Smec * parameter. The page will either be captured and handed over to the
69548b464eb8Smec * appropriate callback, or will be queued up in the page capture hash
69558b464eb8Smec * to be captured asynchronously.
69568b464eb8Smec * If the current request is due to an async capture, the page must be
69578b464eb8Smec * exclusively locked before calling this function.
69588b464eb8Smec * Currently szc must be 0 but in the future this should be expandable to
69598b464eb8Smec * other page sizes.
69608b464eb8Smec * Returns 0 on success, with the following error codes on failure:
69618b464eb8Smec * EPERM - The requested page is long term locked, and thus repeated
69628b464eb8Smec * requests to capture this page will likely fail.
69638b464eb8Smec * ENOMEM - There was not enough free memory in the system to safely
69648b464eb8Smec * map the requested page.
69658b464eb8Smec * ENOENT - The requested page was inside the kernel cage, and the
69668b464eb8Smec * CAPTURE_GET_CAGE flag was not set.
69678b464eb8Smec * EAGAIN - The requested page could not be captured at this point in
69688b464eb8Smec * time but future requests will likely work.
69698b464eb8Smec * EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag
69708b464eb8Smec * was not set.
69718b464eb8Smec */
69728b464eb8Smec int
69738b464eb8Smec page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
69748b464eb8Smec {
69758b464eb8Smec int ret;
69768b464eb8Smec int cb_index;
69778b464eb8Smec
69788b464eb8Smec if (flags & CAPTURE_ASYNC) {
69798b464eb8Smec ASSERT(PAGE_EXCL(pp));
69808b464eb8Smec goto async;
69818b464eb8Smec }
69828b464eb8Smec
69838b464eb8Smec /* Make sure there's enough availrmem ... */
69848b464eb8Smec ret = page_capture_pre_checks(pp, flags);
69858b464eb8Smec if (ret != 0) {
69868b464eb8Smec return (ret);
69878b464eb8Smec }
69888b464eb8Smec
69898b464eb8Smec if (!page_trylock(pp, SE_EXCL)) {
69908b464eb8Smec for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
69918b464eb8Smec if ((flags >> cb_index) & 1) {
69928b464eb8Smec break;
69938b464eb8Smec }
69948b464eb8Smec }
69958b464eb8Smec ASSERT(cb_index < PC_NUM_CALLBACKS);
69968b464eb8Smec ret = EAGAIN;
69978b464eb8Smec /* Special case for retired pages */
69988b464eb8Smec if (PP_RETIRED(pp)) {
69998b464eb8Smec if (flags & CAPTURE_GET_RETIRED) {
70008b464eb8Smec if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
70018b464eb8Smec /*
70028b464eb8Smec * Need to set capture bit and add to
70038b464eb8Smec * hash so that the page will be
70048b464eb8Smec * retired when freed.
70058b464eb8Smec */
70068b464eb8Smec page_capture_add_hash(pp, szc,
70078b464eb8Smec CAPTURE_RETIRE, NULL);
70088b464eb8Smec ret = 0;
70098b464eb8Smec goto own_page;
70108b464eb8Smec }
70118b464eb8Smec } else {
70128b464eb8Smec return (EBUSY);
70138b464eb8Smec }
70148b464eb8Smec }
70158b464eb8Smec page_capture_add_hash(pp, szc, flags, datap);
70168b464eb8Smec return (ret);
70178b464eb8Smec }
70188b464eb8Smec
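	/*
	 * We arrive at the label below either via the goto above (an async
	 * request, where the caller already holds the page EXCL locked) or
	 * by falling through after a successful page_trylock() above.
	 */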
70198b464eb8Smec async:
70208b464eb8Smec ASSERT(PAGE_EXCL(pp));
70218b464eb8Smec
70228b464eb8Smec	/* For physmem async requests, check that availrmem is sane */
70238b464eb8Smec if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
70248b464eb8Smec (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
70258b464eb8Smec (availrmem < swapfs_minfree)) {
70268b464eb8Smec page_unlock(pp);
70278b464eb8Smec return (ENOMEM);
70288b464eb8Smec }
70298b464eb8Smec
70308b464eb8Smec ret = page_capture_clean_page(pp);
70318b464eb8Smec
70328b464eb8Smec if (ret != 0) {
70338b464eb8Smec	/* We failed to get the page, so let's add it to the hash */
70348b464eb8Smec if (!(flags & CAPTURE_ASYNC)) {
70358b464eb8Smec page_capture_add_hash(pp, szc, flags, datap);
70368b464eb8Smec }
70378b464eb8Smec return (ret);
70388b464eb8Smec }
70398b464eb8Smec
70408b464eb8Smec own_page:
70418b464eb8Smec ASSERT(PAGE_EXCL(pp));
70428b464eb8Smec ASSERT(pp->p_szc == 0);
70438b464eb8Smec
70448b464eb8Smec /* Call the callback */
70458b464eb8Smec ret = page_capture_take_action(pp, flags, datap);
70468b464eb8Smec
70478b464eb8Smec if (ret == 0) {
70488b464eb8Smec return (0);
70498b464eb8Smec }
70508b464eb8Smec
70518b464eb8Smec /*
70528b464eb8Smec * Note that in the failure cases from page_capture_take_action, the
70538b464eb8Smec * EXCL lock will have already been dropped.
70548b464eb8Smec */
70558b464eb8Smec if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
70568b464eb8Smec page_capture_add_hash(pp, szc, flags, datap);
70578b464eb8Smec }
70588b464eb8Smec return (EAGAIN);
70598b464eb8Smec }
70608b464eb8Smec
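/*
 * Wrapper around page_itrycapture() that marks the current thread as
 * T_CAPTURING for the duration of the call; the capture paths above
 * assert that this flag is set.
 *
 * Illustrative sketch only (the exact flags a real consumer passes may
 * differ): a caller wanting to retire a page might attempt
 *
 *	ret = page_trycapture(pp, 0, CAPTURE_RETIRE, NULL);
 *
 * where ret == 0 means the page was handed to the retire callback, and a
 * non-zero errno (e.g. EAGAIN) means the request may instead have been
 * queued in the capture hash for the async thread to retry.
 */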
70618b464eb8Smec int
70628b464eb8Smec page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
70638b464eb8Smec {
70648b464eb8Smec int ret;
70658b464eb8Smec
70668b464eb8Smec curthread->t_flag |= T_CAPTURING;
70678b464eb8Smec ret = page_itrycapture(pp, szc, flags, datap);
70688b464eb8Smec	curthread->t_flag &= ~T_CAPTURING; /* safe to clear as we know it's set */
70698b464eb8Smec return (ret);
70708b464eb8Smec }
70718b464eb8Smec
70728b464eb8Smec /*
70738b464eb8Smec * When unlocking a page which has the PR_CAPTURE bit set, this routine
70748b464eb8Smec * gets called to try and capture the page.
70758b464eb8Smec */
70768b464eb8Smec void
70778b464eb8Smec page_unlock_capture(page_t *pp)
70788b464eb8Smec {
70798b464eb8Smec page_capture_hash_bucket_t *bp;
70808b464eb8Smec int index;
70818b464eb8Smec int i;
70828b464eb8Smec uint_t szc;
70838b464eb8Smec uint_t flags = 0;
70848b464eb8Smec void *datap;
70858b464eb8Smec kmutex_t *mp;
70868b464eb8Smec extern vnode_t retired_pages;
70878b464eb8Smec
70888b464eb8Smec /*
70898b464eb8Smec * We need to protect against a possible deadlock here, where we own
70908b464eb8Smec * the vnode page hash mutex and want to acquire it again: there are
70918b464eb8Smec * places in the code where we unlock a page while holding that
70928b464eb8Smec * mutex, which can lead to the page being captured and eventually
70938b464eb8Smec * ending up here. As we may be hashing out the old page and hashing
70948b464eb8Smec * into the retire vnode, we need to make sure we don't own either.
70958b464eb8Smec * Other callbacks that do hash operations must likewise ensure,
70968b464eb8Smec * before they hash in to a vnode, that they do not currently own the
70978b464eb8Smec * vphm mutex; otherwise there will be a panic.
70988b464eb8Smec */
70998b464eb8Smec if (mutex_owned(page_vnode_mutex(&retired_pages))) {
710022addef7Smec page_unlock_nocapture(pp);
71018b464eb8Smec return;
71028b464eb8Smec }
71038b464eb8Smec if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) {
710422addef7Smec page_unlock_nocapture(pp);
71058b464eb8Smec return;
71068b464eb8Smec }
71078b464eb8Smec
71088b464eb8Smec index = PAGE_CAPTURE_HASH(pp);
71098b464eb8Smec
71108b464eb8Smec mp = &page_capture_hash[index].pchh_mutex;
71118b464eb8Smec mutex_enter(mp);
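	/*
	 * Search both hash lists for the pending request so we can recover
	 * its szc, flags and datap, then retry the capture with
	 * CAPTURE_ASYNC set, since we already hold the page EXCL locked.
	 */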
71128b464eb8Smec for (i = 0; i < 2; i++) {
71138b464eb8Smec bp = page_capture_hash[index].lists[i].next;
71148b464eb8Smec while (bp != &page_capture_hash[index].lists[i]) {
71158b464eb8Smec if (bp->pp == pp) {
71168b464eb8Smec szc = bp->szc;
71178b464eb8Smec flags = bp->flags | CAPTURE_ASYNC;
71188b464eb8Smec datap = bp->datap;
71198b464eb8Smec mutex_exit(mp);
71208b464eb8Smec (void) page_trycapture(pp, szc, flags, datap);
71218b464eb8Smec return;
71228b464eb8Smec }
71238b464eb8Smec bp = bp->next;
71248b464eb8Smec }
71258b464eb8Smec }
71268b464eb8Smec
71278b464eb8Smec /* Failed to find page in hash so clear flags and unlock it. */
71288b464eb8Smec page_clrtoxic(pp, PR_CAPTURE);
71298b464eb8Smec page_unlock(pp);
71308b464eb8Smec
71318b464eb8Smec mutex_exit(mp);
71328b464eb8Smec }
71338b464eb8Smec
71348b464eb8Smec void
71358b464eb8Smec page_capture_init()
71368b464eb8Smec {
71378b464eb8Smec int i;
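	/*
	 * Initialize both lists in every hash bucket as empty circular
	 * lists (the list heads point back at themselves).
	 */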
71388b464eb8Smec for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
71398b464eb8Smec page_capture_hash[i].lists[0].next =
71408b464eb8Smec &page_capture_hash[i].lists[0];
71418b464eb8Smec page_capture_hash[i].lists[0].prev =
71428b464eb8Smec &page_capture_hash[i].lists[0];
71438b464eb8Smec page_capture_hash[i].lists[1].next =
71448b464eb8Smec &page_capture_hash[i].lists[1];
71458b464eb8Smec page_capture_hash[i].lists[1].prev =
71468b464eb8Smec &page_capture_hash[i].lists[1];
71478b464eb8Smec }
71488b464eb8Smec
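	/*
	 * Wait intervals used by page_capture_thread (and as a delay in
	 * page_capture_handle_outstanding): the short wait applies while
	 * high priority requests are outstanding, the long wait otherwise.
	 */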
71498b464eb8Smec pc_thread_shortwait = 23 * hz;
71508b464eb8Smec pc_thread_longwait = 1201 * hz;
7151a98e9dbfSaguzovsk pc_thread_retry = 3;
71528b464eb8Smec mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
71538b464eb8Smec cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
71548b464eb8Smec pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
71558b464eb8Smec TS_RUN, minclsyspri);
71568b464eb8Smec }
71578b464eb8Smec
71588b464eb8Smec /*
71598b464eb8Smec * It is necessary to scrub any failing pages prior to reboot in order to
71608b464eb8Smec * prevent a latent error trap from occurring on the next boot.
71618b464eb8Smec */
71628b464eb8Smec void
71638b464eb8Smec page_retire_mdboot()
71648b464eb8Smec {
71658b464eb8Smec page_t *pp;
71668b464eb8Smec int i, j;
71678b464eb8Smec page_capture_hash_bucket_t *bp;
716811494be0SStan Studzinski uchar_t pri;
71698b464eb8Smec
71708b464eb8Smec /* walk lists looking for pages to scrub */
71718b464eb8Smec for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
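		/*
		 * The per-priority page counts let us skip buckets that
		 * have no outstanding requests without taking the bucket
		 * mutex.
		 */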
717211494be0SStan Studzinski for (pri = 0; pri < PC_NUM_PRI; pri++) {
717311494be0SStan Studzinski if (page_capture_hash[i].num_pages[pri] != 0) {
717411494be0SStan Studzinski break;
717511494be0SStan Studzinski }
717611494be0SStan Studzinski }
717711494be0SStan Studzinski if (pri == PC_NUM_PRI)
71788b464eb8Smec continue;
71798b464eb8Smec
71808b464eb8Smec mutex_enter(&page_capture_hash[i].pchh_mutex);
71818b464eb8Smec
71828b464eb8Smec for (j = 0; j < 2; j++) {
71838b464eb8Smec bp = page_capture_hash[i].lists[j].next;
71848b464eb8Smec while (bp != &page_capture_hash[i].lists[j]) {
71858b464eb8Smec pp = bp->pp;
7186954021b7SJustin Frank if (PP_TOXIC(pp)) {
7187954021b7SJustin Frank if (page_trylock(pp, SE_EXCL)) {
7188954021b7SJustin Frank PP_CLRFREE(pp);
7189954021b7SJustin Frank pagescrub(pp, 0, PAGESIZE);
7190954021b7SJustin Frank page_unlock(pp);
7191954021b7SJustin Frank }
71928b464eb8Smec }
71938b464eb8Smec bp = bp->next;
71948b464eb8Smec }
71958b464eb8Smec }
71968b464eb8Smec mutex_exit(&page_capture_hash[i].pchh_mutex);
71978b464eb8Smec }
71988b464eb8Smec }
71998b464eb8Smec
72008b464eb8Smec /*
72018b464eb8Smec * Walk the page_capture_hash, trying to capture pages and also clean up
72028b464eb8Smec * old entries that have expired.
72038b464eb8Smec */
72048b464eb8Smec void
72058b464eb8Smec page_capture_async()
72068b464eb8Smec {
72078b464eb8Smec page_t *pp;
72088b464eb8Smec int i;
72098b464eb8Smec int ret;
72108b464eb8Smec page_capture_hash_bucket_t *bp1, *bp2;
72118b464eb8Smec uint_t szc;
72128b464eb8Smec uint_t flags;
72138b464eb8Smec void *datap;
721411494be0SStan Studzinski uchar_t pri;
72158b464eb8Smec
72168b464eb8Smec /* If there are outstanding pages to be captured, get to work */
72178b464eb8Smec for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
721811494be0SStan Studzinski for (pri = 0; pri < PC_NUM_PRI; pri++) {
721911494be0SStan Studzinski if (page_capture_hash[i].num_pages[pri] != 0)
722011494be0SStan Studzinski break;
722111494be0SStan Studzinski }
722211494be0SStan Studzinski if (pri == PC_NUM_PRI)
72238b464eb8Smec continue;
722411494be0SStan Studzinski
72258b464eb8Smec /* Append list 1 to list 0 and then walk through list 0 */
72268b464eb8Smec mutex_enter(&page_capture_hash[i].pchh_mutex);
72278b464eb8Smec bp1 = &page_capture_hash[i].lists[1];
72288b464eb8Smec bp2 = bp1->next;
72298b464eb8Smec if (bp1 != bp2) {
72308b464eb8Smec bp1->prev->next = page_capture_hash[i].lists[0].next;
72318b464eb8Smec bp2->prev = &page_capture_hash[i].lists[0];
72328b464eb8Smec page_capture_hash[i].lists[0].next->prev = bp1->prev;
72338b464eb8Smec page_capture_hash[i].lists[0].next = bp2;
72348b464eb8Smec bp1->next = bp1;
72358b464eb8Smec bp1->prev = bp1;
72368b464eb8Smec }
72378b464eb8Smec
72388b464eb8Smec /* list[1] will be empty now */
72398b464eb8Smec
72408b464eb8Smec bp1 = page_capture_hash[i].lists[0].next;
72418b464eb8Smec while (bp1 != &page_capture_hash[i].lists[0]) {
72428b464eb8Smec /* Check expiration time */
7243d3d50737SRafael Vanoni if ((ddi_get_lbolt() > bp1->expires &&
7244d3d50737SRafael Vanoni bp1->expires != -1) ||
72458b464eb8Smec page_deleted(bp1->pp)) {
72468b464eb8Smec page_capture_hash[i].lists[0].next = bp1->next;
72478b464eb8Smec bp1->next->prev =
72488b464eb8Smec &page_capture_hash[i].lists[0];
724911494be0SStan Studzinski page_capture_hash[i].num_pages[bp1->pri]--;
72508b464eb8Smec
72518b464eb8Smec /*
72528b464eb8Smec * We can safely remove the PR_CAPTURE bit
72538b464eb8Smec * without holding the EXCL lock on the page
72548b464eb8Smec * as the PR_CAPTURE bit requires that the
72558b464eb8Smec * page_capture_hash[].pchh_mutex be held
72568b464eb8Smec * to modify it.
72578b464eb8Smec */
72588b464eb8Smec page_clrtoxic(bp1->pp, PR_CAPTURE);
72598b464eb8Smec mutex_exit(&page_capture_hash[i].pchh_mutex);
72608b464eb8Smec kmem_free(bp1, sizeof (*bp1));
72618b464eb8Smec mutex_enter(&page_capture_hash[i].pchh_mutex);
72628b464eb8Smec bp1 = page_capture_hash[i].lists[0].next;
72638b464eb8Smec continue;
72648b464eb8Smec }
72658b464eb8Smec pp = bp1->pp;
72668b464eb8Smec szc = bp1->szc;
72678b464eb8Smec flags = bp1->flags;
72688b464eb8Smec datap = bp1->datap;
72698b464eb8Smec mutex_exit(&page_capture_hash[i].pchh_mutex);
72708b464eb8Smec if (page_trylock(pp, SE_EXCL)) {
72718b464eb8Smec ret = page_trycapture(pp, szc,
72728b464eb8Smec flags | CAPTURE_ASYNC, datap);
72738b464eb8Smec } else {
72748b464eb8Smec ret = 1; /* move to walked hash */
72758b464eb8Smec }
72768b464eb8Smec
72778b464eb8Smec if (ret != 0) {
72788b464eb8Smec /* Move to walked hash */
72798b464eb8Smec (void) page_capture_move_to_walked(pp);
72808b464eb8Smec }
72818b464eb8Smec mutex_enter(&page_capture_hash[i].pchh_mutex);
72828b464eb8Smec bp1 = page_capture_hash[i].lists[0].next;
72838b464eb8Smec }
72848b464eb8Smec
72858b464eb8Smec mutex_exit(&page_capture_hash[i].pchh_mutex);
72868b464eb8Smec }
72878b464eb8Smec }
72888b464eb8Smec
7289cee1d74bSjfrank /*
7290cee1d74bSjfrank * This function is called by the page_capture_thread, and is needed
7291cee1d74bSjfrank * in order to initiate aio cleanup, so that pages used in aio
7292cee1d74bSjfrank * will be unlocked and subsequently retired by page_capture_thread.
7293cee1d74bSjfrank */
7294cee1d74bSjfrank static int
7295cee1d74bSjfrank do_aio_cleanup(void)
7296cee1d74bSjfrank {
7297cee1d74bSjfrank proc_t *procp;
7298cee1d74bSjfrank int (*aio_cleanup_dr_delete_memory)(proc_t *);
7299cee1d74bSjfrank int cleaned = 0;
7300cee1d74bSjfrank
7301cee1d74bSjfrank if (modload("sys", "kaio") == -1) {
7302cee1d74bSjfrank cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
7303cee1d74bSjfrank return (0);
7304cee1d74bSjfrank }
7305cee1d74bSjfrank /*
7306cee1d74bSjfrank * We use the aio_cleanup_dr_delete_memory function to
7307cee1d74bSjfrank * initiate the actual clean up; this function will wake
7308cee1d74bSjfrank * up the per-process aio_cleanup_thread.
7309cee1d74bSjfrank */
7310cee1d74bSjfrank aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
7311cee1d74bSjfrank modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
7312cee1d74bSjfrank if (aio_cleanup_dr_delete_memory == NULL) {
7313cee1d74bSjfrank cmn_err(CE_WARN,
7314cee1d74bSjfrank "aio_cleanup_dr_delete_memory not found in kaio");
7315cee1d74bSjfrank return (0);
7316cee1d74bSjfrank }
7317cee1d74bSjfrank mutex_enter(&pidlock);
7318cee1d74bSjfrank for (procp = practive; (procp != NULL); procp = procp->p_next) {
7319cee1d74bSjfrank mutex_enter(&procp->p_lock);
7320cee1d74bSjfrank if (procp->p_aio != NULL) {
7321cee1d74bSjfrank /* cleanup proc's outstanding kaio */
7322cee1d74bSjfrank cleaned += (*aio_cleanup_dr_delete_memory)(procp);
7323cee1d74bSjfrank }
7324cee1d74bSjfrank mutex_exit(&procp->p_lock);
7325cee1d74bSjfrank }
7326cee1d74bSjfrank mutex_exit(&pidlock);
7327cee1d74bSjfrank return (cleaned);
7328cee1d74bSjfrank }
7329cee1d74bSjfrank
7330cee1d74bSjfrank /*
7331cee1d74bSjfrank * helper function for page_capture_thread
7332cee1d74bSjfrank */
7333cee1d74bSjfrank static void
7334cee1d74bSjfrank page_capture_handle_outstanding(void)
7335cee1d74bSjfrank {
7336cee1d74bSjfrank int ntry;
7337cee1d74bSjfrank
7338704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States	/* Reap pages before attempting to capture pages */
7339704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States kmem_reap();
7340704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States
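	/*
	 * When the total pending retire count exceeds the kas pending count
	 * (i.e. some of the pending pages are presumably not kernel pages)
	 * and the HAT supports dynamic ISM unmap, purging seg_pcache and
	 * kicking aio cleanup below may release the locks that are holding
	 * those pages.
	 */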
7341704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
7342704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
7343cee1d74bSjfrank /*
7344c2d79585SVijay Balakrishna, SG-RPE * Note: Purging is done only for platforms that support
7345c2d79585SVijay Balakrishna, SG-RPE * ISM hat_pageunload() - mainly SPARC. On x86/x64
7346c2d79585SVijay Balakrishna, SG-RPE * platforms, ISM pages are SE_SHARED locked until destroyed.
7347cee1d74bSjfrank */
7348a98e9dbfSaguzovsk
7349a98e9dbfSaguzovsk /* disable and purge seg_pcache */
7350a98e9dbfSaguzovsk (void) seg_p_disable();
7351a98e9dbfSaguzovsk for (ntry = 0; ntry < pc_thread_retry; ntry++) {
7352a98e9dbfSaguzovsk if (!page_retire_pend_count())
7353a98e9dbfSaguzovsk break;
7354a98e9dbfSaguzovsk if (do_aio_cleanup()) {
7355a98e9dbfSaguzovsk /*
7356a98e9dbfSaguzovsk * allow the apps' cleanup threads
7357a98e9dbfSaguzovsk * to run
7358a98e9dbfSaguzovsk */
7359a98e9dbfSaguzovsk delay(pc_thread_shortwait);
7360cee1d74bSjfrank }
7361cee1d74bSjfrank page_capture_async();
7362cee1d74bSjfrank }
7363a98e9dbfSaguzovsk /* reenable seg_pcache */
7364a98e9dbfSaguzovsk seg_p_enable();
7365704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States
7366704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States	/* completed what can be done; return */
7367704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States return;
7368cee1d74bSjfrank }
7369704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States
7370704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States /*
7371704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
7372704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States * and then attempt to capture.
7373704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States */
7374704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States seg_preap();
7375704b9682SChristopher Baumbauer - Sun Microsystems - San Diego United States page_capture_async();
7376cee1d74bSjfrank }
7377cee1d74bSjfrank
73788b464eb8Smec /*
73798b464eb8Smec * The page_capture_thread loops forever, looking to see if there are
73808b464eb8Smec * pages still waiting to be captured.
73818b464eb8Smec */
73828b464eb8Smec static void
73838b464eb8Smec page_capture_thread(void)
73848b464eb8Smec {
73858b464eb8Smec callb_cpr_t c;
73868b464eb8Smec int i;
738711494be0SStan Studzinski int high_pri_pages;
738811494be0SStan Studzinski int low_pri_pages;
738911494be0SStan Studzinski clock_t timeout;
73908b464eb8Smec
73918b464eb8Smec CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture");
73928b464eb8Smec
73938b464eb8Smec mutex_enter(&pc_thread_mutex);
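	/*
	 * Each pass tallies the outstanding requests by priority across
	 * all buckets.  High priority work triggers the heavier
	 * page_capture_handle_outstanding() path and the short poll
	 * interval; if only low priority work is pending we just run
	 * page_capture_async() and keep the long interval.  We then wait
	 * out the chosen timeout, or until page_capture_add_hash()
	 * signals pc_cv.
	 */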
73948b464eb8Smec for (;;) {
739511494be0SStan Studzinski high_pri_pages = 0;
739611494be0SStan Studzinski low_pri_pages = 0;
739711494be0SStan Studzinski for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
739811494be0SStan Studzinski high_pri_pages +=
739911494be0SStan Studzinski page_capture_hash[i].num_pages[PC_PRI_HI];
740011494be0SStan Studzinski low_pri_pages +=
740111494be0SStan Studzinski page_capture_hash[i].num_pages[PC_PRI_LO];
740211494be0SStan Studzinski }
740311494be0SStan Studzinski
740411494be0SStan Studzinski timeout = pc_thread_longwait;
740511494be0SStan Studzinski if (high_pri_pages != 0) {
740611494be0SStan Studzinski timeout = pc_thread_shortwait;
7407cee1d74bSjfrank page_capture_handle_outstanding();
740811494be0SStan Studzinski } else if (low_pri_pages != 0) {
740911494be0SStan Studzinski page_capture_async();
74108b464eb8Smec }
741111494be0SStan Studzinski CALLB_CPR_SAFE_BEGIN(&c);
741211494be0SStan Studzinski (void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
741311494be0SStan Studzinski timeout, TR_CLOCK_TICK);
741411494be0SStan Studzinski CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
74158b464eb8Smec }
74168b464eb8Smec /*NOTREACHED*/
74178b464eb8Smec }
741806fb6a36Sdv /*
741906fb6a36Sdv * Attempt to locate a bucket that has enough pages to satisfy the request.
742006fb6a36Sdv * The initial check is done without the lock to avoid unneeded contention.
742106fb6a36Sdv * The function returns 1 if enough pages were found, else 0 if it could not
742206fb6a36Sdv * find enough pages in a bucket.
742306fb6a36Sdv */
742406fb6a36Sdv static int
742506fb6a36Sdv pcf_decrement_bucket(pgcnt_t npages)
742606fb6a36Sdv {
742706fb6a36Sdv struct pcf *p;
742806fb6a36Sdv struct pcf *q;
742906fb6a36Sdv int i;
743006fb6a36Sdv
743106fb6a36Sdv p = &pcf[PCF_INDEX()];
743206fb6a36Sdv q = &pcf[pcf_fanout];
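	/*
	 * Start at the bucket chosen by PCF_INDEX() and scan at most
	 * pcf_fanout buckets, wrapping around at the end of the array;
	 * the count is re-checked under pcf_lock before it is decremented.
	 */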
743306fb6a36Sdv for (i = 0; i < pcf_fanout; i++) {
743406fb6a36Sdv if (p->pcf_count > npages) {
743506fb6a36Sdv /*
743606fb6a36Sdv * a good one to try.
743706fb6a36Sdv */
743806fb6a36Sdv mutex_enter(&p->pcf_lock);
743906fb6a36Sdv if (p->pcf_count > npages) {
744006fb6a36Sdv p->pcf_count -= (uint_t)npages;
744106fb6a36Sdv /*
744206fb6a36Sdv * freemem is not protected by any lock.
744306fb6a36Sdv * Thus, we cannot have any assertion
744406fb6a36Sdv * containing freemem here.
744506fb6a36Sdv */
744606fb6a36Sdv freemem -= npages;
744706fb6a36Sdv mutex_exit(&p->pcf_lock);
744806fb6a36Sdv return (1);
744906fb6a36Sdv }
745006fb6a36Sdv mutex_exit(&p->pcf_lock);
745106fb6a36Sdv }
745206fb6a36Sdv p++;
745306fb6a36Sdv if (p >= q) {
745406fb6a36Sdv p = pcf;
745506fb6a36Sdv }
745606fb6a36Sdv }
745706fb6a36Sdv return (0);
745806fb6a36Sdv }
745906fb6a36Sdv
746006fb6a36Sdv /*
746106fb6a36Sdv * Arguments:
746206fb6a36Sdv * pcftotal_ret: If the value is not NULL and we have walked all the
746306fb6a36Sdv * buckets but did not find enough pages then it will
746406fb6a36Sdv * be set to the total number of pages in all the pcf
746506fb6a36Sdv * buckets.
746606fb6a36Sdv * npages: Is the number of pages we have been requested to
746706fb6a36Sdv * find.
746806fb6a36Sdv * unlock: If set to 0 we will leave the buckets locked if the
746906fb6a36Sdv * requested number of pages is not found.
747006fb6a36Sdv *
747106fb6a36Sdv * Go and try to satisfy the page request from any number of buckets.
747206fb6a36Sdv * This can be a very expensive operation as we have to lock the buckets
747306fb6a36Sdv * we are checking (and keep them locked), starting at bucket 0.
747406fb6a36Sdv *
747506fb6a36Sdv * The function returns 1 if enough pages were found, else 0 if it could not
747606fb6a36Sdv * find enough pages in the buckets.
747706fb6a36Sdv *
747806fb6a36Sdv */
747906fb6a36Sdv static int
748006fb6a36Sdv pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock)
748106fb6a36Sdv {
748206fb6a36Sdv struct pcf *p;
748306fb6a36Sdv pgcnt_t pcftotal;
748406fb6a36Sdv int i;
748506fb6a36Sdv
748606fb6a36Sdv p = pcf;
748706fb6a36Sdv /* try to collect pages from several pcf bins */
748806fb6a36Sdv for (pcftotal = 0, i = 0; i < pcf_fanout; i++) {
748906fb6a36Sdv mutex_enter(&p->pcf_lock);
749006fb6a36Sdv pcftotal += p->pcf_count;
749106fb6a36Sdv if (pcftotal >= npages) {
749206fb6a36Sdv /*
749306fb6a36Sdv * Wow! There are enough pages lying around
749406fb6a36Sdv * to satisfy the request. Do the accounting,
749506fb6a36Sdv * drop the locks we acquired, and go back.
749606fb6a36Sdv *
749706fb6a36Sdv * freemem is not protected by any lock. So,
749806fb6a36Sdv * we cannot have any assertion containing
749906fb6a36Sdv * freemem.
750006fb6a36Sdv */
750106fb6a36Sdv freemem -= npages;
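			/*
			 * Walk back over the buckets locked so far, taking
			 * pages from each and dropping its lock as we go.
			 */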
750206fb6a36Sdv while (p >= pcf) {
750306fb6a36Sdv if (p->pcf_count <= npages) {
750406fb6a36Sdv npages -= p->pcf_count;
750506fb6a36Sdv p->pcf_count = 0;
750606fb6a36Sdv } else {
750706fb6a36Sdv p->pcf_count -= (uint_t)npages;
750806fb6a36Sdv npages = 0;
750906fb6a36Sdv }
751006fb6a36Sdv mutex_exit(&p->pcf_lock);
751106fb6a36Sdv p--;
751206fb6a36Sdv }
751306fb6a36Sdv ASSERT(npages == 0);
751406fb6a36Sdv return (1);
751506fb6a36Sdv }
751606fb6a36Sdv p++;
751706fb6a36Sdv }
751806fb6a36Sdv if (unlock) {
751906fb6a36Sdv /* failed to collect pages - release the locks */
752006fb6a36Sdv while (--p >= pcf) {
752106fb6a36Sdv mutex_exit(&p->pcf_lock);
752206fb6a36Sdv }
752306fb6a36Sdv }
752406fb6a36Sdv if (pcftotal_ret != NULL)
752506fb6a36Sdv *pcftotal_ret = pcftotal;
752606fb6a36Sdv return (0);
752706fb6a36Sdv }
7528