xref: /illumos-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * VM - Hardware Address Translation management for i386 and amd64
31  *
32  * Implementation of the interfaces described in <common/vm/hat.h>
33  *
34  * Nearly all the details of how the hardware is managed should not be
35  * visible outside this layer except for misc. machine specific functions
36  * that work in conjunction with this code.
37  *
38  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
39  */
40 
41 #include <sys/machparam.h>
42 #include <sys/machsystm.h>
43 #include <sys/mman.h>
44 #include <sys/types.h>
45 #include <sys/systm.h>
46 #include <sys/cpuvar.h>
47 #include <sys/thread.h>
48 #include <sys/proc.h>
49 #include <sys/cpu.h>
50 #include <sys/kmem.h>
51 #include <sys/disp.h>
52 #include <sys/shm.h>
53 #include <sys/sysmacros.h>
54 #include <sys/machparam.h>
55 #include <sys/vmem.h>
56 #include <sys/vmsystm.h>
57 #include <sys/promif.h>
58 #include <sys/var.h>
59 #include <sys/x86_archext.h>
60 #include <sys/atomic.h>
61 #include <sys/bitmap.h>
62 
63 #include <vm/seg_kmem.h>
64 #include <vm/hat_i86.h>
65 #include <vm/as.h>
66 #include <vm/seg.h>
67 #include <vm/page.h>
68 #include <vm/seg_kp.h>
69 #include <vm/seg_kpm.h>
70 #include <vm/vm_dep.h>
71 
72 #include <sys/cmn_err.h>
73 
74 
75 /*
76  * Basic parameters for hat operation.
77  */
78 struct hat_mmu_info mmu;
79 uint_t force_pae_off = 0;	/* for testing, change with kernel debugger */
80 uint_t force_pae_on = 0;	/* for testing, change with kernel debugger */
81 
82 /*
83  * The page that is the kernel's top level pagetable.
84  *
85  * For 32 bit VLP support, the kernel hat will use the 1st 4 entries
86  * on this 4K page for its top level page table. The remaining groups of
87  * 4 entries are used for per processor copies of user VLP pagetables for
88  * running threads.  See hat_switch() and reload_pae32() for details.
89  *
90  * vlp_page[0] - 0th level==2 PTE for kernel HAT (will be zero)
91  * vlp_page[1] - 1st level==2 PTE for kernel HAT (will be zero)
92  * vlp_page[2] - 2nd level==2 PTE for kernel HAT (zero for small memory)
93  * vlp_page[3] - 3rd level==2 PTE for kernel
94  *
95  * vlp_page[4] - 0th level==2 PTE for user thread on cpu 0
96  * vlp_page[5] - 1st level==2 PTE for user thread on cpu 0
97  * vlp_page[6] - 2nd level==2 PTE for user thread on cpu 0
98  * vlp_page[7] - probably copy of kernel PTE
99  *
100  * vlp_page[8]  - 0th level==2 PTE for user thread on cpu 1
101  * vlp_page[9]  - 1st level==2 PTE for user thread on cpu 1
102  * vlp_page[10] - 2nd level==2 PTE for user thread on cpu 1
103  * vlp_page[11] - probably copy of kernel PTE
104  * ...
105  *
106  * when / where the kernel PTE's are (entry 2 or 3 or none) depends
107  * on kernelbase.
108  */
109 static x86pte_t *vlp_page;
110 
111 /*
112  * forward declaration of internal utility routines
113  */
114 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
115 	x86pte_t new);
116 
117 /*
118  * The kernel address space exists in all HATs. To implement this the
119  * kernel reserves a fixed number of entries in every topmost level page
120  * table. The values are setup in hat_init() and then copied to every hat
121  * created by hat_alloc(). This means that kernelbase must be:
122  *
123  *	  4Meg aligned for 32 bit kernels
124  *	512Gig aligned for x86_64 64 bit kernel
125  *
126  * The PAE 32 bit hat is handled as a special case. Otherwise requiring 1Gig
127  * alignment would use too much VA for the kernel.
128  *
129  */
130 static uint_t	khat_start;	/* index of 1st entry in kernel's top ptable */
131 static uint_t	khat_entries;	/* number of entries in kernel's top ptable */
132 
133 #if defined(__i386)
134 
135 static htable_t	*khat_pae32_htable = NULL;
136 static uint_t	khat_pae32_start;
137 static uint_t	khat_pae32_entries;
138 
139 #endif
140 
141 /*
142  * Locks, etc. to control use of the hat reserves when recursively
143  * allocating pagetables for the hat data structures.
144  */
145 static kmutex_t hat_reserves_lock;
146 static kcondvar_t hat_reserves_cv;
147 kthread_t *hat_reserves_thread;
148 uint_t use_boot_reserve = 1;	/* cleared after early boot process */
149 uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
150 
151 /*
152  * A cpuset for all cpus. This is used for kernel address cross calls, since
153  * the kernel addresses apply to all cpus.
154  */
155 cpuset_t khat_cpuset;
156 
157 /*
158  * management stuff for hat structures
159  */
160 kmutex_t	hat_list_lock;
161 kcondvar_t	hat_list_cv;
162 kmem_cache_t	*hat_cache;
163 kmem_cache_t	*hat_hash_cache;
164 kmem_cache_t	*vlp_hash_cache;
165 
166 /*
167  * Simple statistics
168  */
169 struct hatstats hatstat;
170 
171 /*
172  * macros to detect addresses in use by kernel only during boot
173  */
174 #if defined(__amd64)
175 
176 #define	BOOT_VA(va) ((va) < kernelbase ||			\
177 	((va) >= BOOT_DOUBLEMAP_BASE &&				\
178 	(va) < BOOT_DOUBLEMAP_BASE + BOOT_DOUBLEMAP_SIZE))
179 
180 #elif defined(__i386)
181 
182 #define	BOOT_VA(va) ((va) < kernelbase)
183 
184 #endif	/* __i386 */
185 
186 /*
187  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
188  */
189 extern void atomic_orb(uchar_t *addr, uchar_t val);
190 extern void atomic_andb(uchar_t *addr, uchar_t val);
191 
192 #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
193 #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
194 #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
195 #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
196 
197 #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
198 #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
199 #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
200 #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
201 
202 #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
203 #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
204 #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
205 #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
206 #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
207 
208 /*
209  * some useful tracing macros
210  */
211 
212 int hattrace = 0;
213 #ifdef DEBUG
214 
215 #define	HATIN(r, h, a, l)	\
216 	if (hattrace) prom_printf("->%s hat=%p, adr=%p, len=%lx\n", #r, h, a, l)
217 
218 #define	HATOUT(r, h, a)		\
219 	if (hattrace) prom_printf("<-%s hat=%p, adr=%p\n", #r, h, a)
220 #else
221 
222 #define	HATIN(r, h, a, l)
223 #define	HATOUT(r, h, a)
224 
225 #endif
226 
227 
228 /*
229  * kmem cache constructor for struct hat
230  */
231 /*ARGSUSED*/
232 static int
233 hati_constructor(void *buf, void *handle, int kmflags)
234 {
235 	hat_t	*hat = buf;
236 
237 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
238 	bzero(hat->hat_pages_mapped,
239 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
240 	hat->hat_stats = 0;
241 	hat->hat_flags = 0;
242 	mutex_init(&hat->hat_switch_mutex, NULL, MUTEX_DRIVER,
243 	    (void *)ipltospl(DISP_LEVEL));
244 	CPUSET_ZERO(hat->hat_cpus);
245 	hat->hat_htable = NULL;
246 	hat->hat_ht_hash = NULL;
247 	return (0);
248 }
249 
250 /*
251  * Allocate a hat structure for as. We also create the top level
252  * htable and initialize it to contain the kernel hat entries.
253  */
254 hat_t *
255 hat_alloc(struct as *as)
256 {
257 	hat_t		*hat;
258 	htable_t	*ht;	/* top level htable */
259 	uint_t		use_vlp;
260 
261 	/*
262 	 * Once we start creating user process HATs we can enable
263 	 * the htable_steal() code.
264 	 */
265 	if (can_steal_post_boot == 0)
266 		can_steal_post_boot = 1;
267 
268 	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
269 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
270 	hat->hat_as = as;
271 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
272 	ASSERT(hat->hat_flags == 0);
273 
274 	/*
275 	 * a 32 bit process uses a VLP style hat when using PAE
276 	 */
277 #if defined(__amd64)
278 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
279 #elif defined(__i386)
280 	use_vlp = mmu.pae_hat;
281 #endif
282 	if (use_vlp) {
283 		hat->hat_flags = HAT_VLP;
284 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
285 	}
286 
287 	/*
288 	 * Allocate the htable hash
289 	 */
290 	if ((hat->hat_flags & HAT_VLP)) {
291 		hat->hat_num_hash = mmu.vlp_hash_cnt;
292 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
293 	} else {
294 		hat->hat_num_hash = mmu.hash_cnt;
295 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
296 	}
297 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
298 
299 	/*
300 	 * Initialize Kernel HAT entries at the top of the top level page
301 	 * table for the new hat.
302 	 *
303 	 * Note that we don't call htable_release() for the top level, that
304 	 * happens when the hat is destroyed in hat_free_end()
305 	 */
306 	hat->hat_htable = NULL;
307 	hat->hat_ht_cached = NULL;
308 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
309 	if (!(hat->hat_flags & HAT_VLP))
310 		x86pte_copy(kas.a_hat->hat_htable, ht, khat_start,
311 		    khat_entries);
312 #if defined(__i386)
313 	else if (khat_entries > 0)
314 		bcopy(vlp_page + khat_start, hat->hat_vlp_ptes + khat_start,
315 		    khat_entries * sizeof (x86pte_t));
316 #endif
317 	hat->hat_htable = ht;
318 
319 #if defined(__i386)
320 	/*
321 	 * PAE32 HAT alignment is less restrictive than the others to keep
322 	 * the kernel from using too much VA. Because of this we may need
323 	 * one layer further down when kernelbase isn't 1Gig aligned.
324 	 * See hat_free_end() for the htable_release() that goes with this
325 	 * htable_create()
326 	 */
327 	if (khat_pae32_htable != NULL) {
328 		ht = htable_create(hat, kernelbase,
329 		    khat_pae32_htable->ht_level, NULL);
330 		x86pte_copy(khat_pae32_htable, ht, khat_pae32_start,
331 		    khat_pae32_entries);
332 		ht->ht_valid_cnt = khat_pae32_entries;
333 	}
334 #endif
335 
336 	/*
337 	 * Put it in the global list of all hats (used by stealing, etc.)
338 	 */
339 	mutex_enter(&hat_list_lock);
340 	if (kas.a_hat->hat_next != NULL) {
341 		hat->hat_next = kas.a_hat->hat_next;
342 		hat->hat_prev = kas.a_hat->hat_next->hat_prev;
343 		kas.a_hat->hat_next->hat_prev->hat_next = hat;
344 		kas.a_hat->hat_next->hat_prev = hat;
345 	} else {
346 		hat->hat_next = hat;
347 		hat->hat_prev = hat;
348 	}
349 	kas.a_hat->hat_next = hat;
350 	mutex_exit(&hat_list_lock);
351 
352 
353 	return (hat);
354 }
355 
356 /*
357  * process has finished executing but as has not been cleaned up yet.
358  */
359 /*ARGSUSED*/
360 void
361 hat_free_start(hat_t *hat)
362 {
363 	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
364 	mutex_enter(&hat_list_lock);
365 	hat->hat_flags |= HAT_FREEING;
366 	mutex_exit(&hat_list_lock);
367 }
368 
369 /*
370  * An address space is being destroyed, so we destroy the associated hat.
371  */
372 void
373 hat_free_end(hat_t *hat)
374 {
375 	int i;
376 	kmem_cache_t *cache;
377 
378 #ifdef DEBUG
379 	for (i = 0; i <= mmu.max_page_level; i++)
380 		ASSERT(hat->hat_pages_mapped[i] == 0);
381 #endif
382 	ASSERT(hat->hat_flags & HAT_FREEING);
383 
384 	/*
385 	 * must not be running on the given hat
386 	 */
387 	ASSERT(CPU->cpu_current_hat != hat);
388 
389 	/*
390 	 * If the hat is currently a stealing victim, wait for the stealing
391 	 * to finish.  Once we've removed it from the list, nobody can
392 	 * find these htables anymore.
393 	 */
394 	mutex_enter(&hat_list_lock);
395 	while (hat->hat_flags & HAT_VICTIM)
396 		cv_wait(&hat_list_cv, &hat_list_lock);
397 	hat->hat_next->hat_prev = hat->hat_prev;
398 	hat->hat_prev->hat_next = hat->hat_next;
399 	if (kas.a_hat->hat_next == hat) {
400 		kas.a_hat->hat_next = hat->hat_next;
401 		if (kas.a_hat->hat_next == hat)
402 			kas.a_hat->hat_next = NULL;
403 	}
404 	mutex_exit(&hat_list_lock);
405 
406 	/*
407 	 * Make a pass through the htables freeing them all up.
408 	 */
409 	htable_purge_hat(hat);
410 
411 	/*
412 	 * Decide which kmem cache the hash table came from, then free it.
413 	 */
414 	if (hat->hat_flags & HAT_VLP)
415 		cache = vlp_hash_cache;
416 	else
417 		cache = hat_hash_cache;
418 	kmem_cache_free(cache, hat->hat_ht_hash);
419 	hat->hat_ht_hash = NULL;
420 
421 	hat->hat_flags = 0;
422 	kmem_cache_free(hat_cache, hat);
423 }
424 
425 /*
426  * round kernelbase down to a supported value to use for _userlimit
427  *
428  * userlimit must be aligned down to an entry in the top level htable.
429  * The one exception is for 32 bit HAT's running PAE.
430  */
431 uintptr_t
432 hat_kernelbase(uintptr_t va)
433 {
434 #if defined(__i386)
435 	va &= LEVEL_MASK(1);
436 #endif
437 	if (IN_VA_HOLE(va))
438 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
439 	return (va);
440 }
441 
442 /*
443  * Initialize hat data structures based on processor MMU information.
444  */
445 void
446 mmu_init(void)
447 {
448 	uint_t max_htables;
449 	uint_t pa_bits;
450 	uint_t va_bits;
451 	int i;
452 
453 	/*
454 	 * if CPU enabled the page table global bit, use it for the kernel
455 	 * This is bit 7 in CR4 (PGE - Page Global Enable)
456 	 */
457 	if ((x86_feature & X86_PGE) != 0 && (getcr4() & 0x80) != 0)
458 		mmu.pt_global = PT_GLOBAL;
459 
460 	/*
461 	 * We use PAE except when we aren't on an AMD64 and this is
462 	 * a 32 bit kernel with all physical addresses less than 4 Gig.
463 	 */
464 	mmu.pae_hat = 1;
465 	if (x86_feature & X86_NX) {
466 		mmu.pt_nx = PT_NX;
467 	} else {
468 		mmu.pt_nx = 0;
469 #if defined(__i386)
470 		if (!PFN_ABOVE4G(physmax))
471 			mmu.pae_hat = 0;
472 #endif
473 	}
474 
475 #if defined(__i386)
476 	/*
477 	 * Setting one of these two lets you force testing of the different
478 	 * hat modes for 32 bit, regardless of the hardware setup.
479 	 */
480 	if (force_pae_on) {
481 		mmu.pae_hat = 1;
482 	} else if (force_pae_off) {
483 		mmu.pae_hat = 0;
484 		mmu.pt_nx = 0;
485 	}
486 #endif
487 
488 	/*
489 	 * Use CPU info to set various MMU parameters
490 	 */
491 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
492 
493 	if (va_bits < sizeof (void *) * NBBY) {
494 		mmu.hole_start = (1ul << (va_bits - 1));
495 		mmu.hole_end = 0ul - mmu.hole_start - 1;
496 	} else {
497 		mmu.hole_end = 0;
498 		mmu.hole_start = mmu.hole_end - 1;
499 	}
500 #if defined(OPTERON_ERRATUM_121)
501 	/*
502 	 * If erratum 121 has already been detected at this time, hole_start
503 	 * contains the value to be subtracted from mmu.hole_start.
504 	 */
505 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
506 	hole_start = mmu.hole_start - hole_start;
507 #else
508 	hole_start = mmu.hole_start;
509 #endif
510 	hole_end = mmu.hole_end;
511 
512 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
513 	if (mmu.pae_hat == 0 && pa_bits > 32)
514 		mmu.highest_pfn = PFN_4G - 1;
515 
516 	if (mmu.pae_hat) {
517 		mmu.pte_size = 8;	/* 8 byte PTEs */
518 		mmu.pte_size_shift = 3;
519 	} else {
520 		mmu.pte_size = 4;	/* 4 byte PTEs */
521 		mmu.pte_size_shift = 2;
522 	}
523 
524 	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
525 		panic("Processor does not support PAE");
526 
527 	if ((x86_feature & X86_CX8) == 0)
528 		panic("Processor does not support cmpxchg8b instruction");
529 
530 	/*
531 	 * Initialize parameters based on the 64 or 32 bit kernels and
532 	 * for the 32 bit kernel decide if we should use PAE.
533 	 */
534 	if (x86_feature & X86_LARGEPAGE)
535 		mmu.max_page_level = 1;
536 	else
537 		mmu.max_page_level = 0;
538 	mmu_page_sizes = mmu.max_page_level + 1;
539 	mmu_exported_page_sizes = mmu_page_sizes;
540 
541 #if defined(__amd64)
542 
543 	mmu.num_level = 4;
544 	mmu.max_level = 3;
545 	mmu.ptes_per_table = 512;
546 	mmu.top_level_count = 512;
547 
548 	mmu.level_shift[0] = 12;
549 	mmu.level_shift[1] = 21;
550 	mmu.level_shift[2] = 30;
551 	mmu.level_shift[3] = 39;
552 
553 #elif defined(__i386)
554 
555 	if (mmu.pae_hat) {
556 		mmu.num_level = 3;
557 		mmu.max_level = 2;
558 		mmu.ptes_per_table = 512;
559 		mmu.top_level_count = 4;
560 
561 		mmu.level_shift[0] = 12;
562 		mmu.level_shift[1] = 21;
563 		mmu.level_shift[2] = 30;
564 
565 	} else {
566 		mmu.num_level = 2;
567 		mmu.max_level = 1;
568 		mmu.ptes_per_table = 1024;
569 		mmu.top_level_count = 1024;
570 
571 		mmu.level_shift[0] = 12;
572 		mmu.level_shift[1] = 22;
573 	}
574 
575 #endif	/* __i386 */
576 
577 	for (i = 0; i < mmu.num_level; ++i) {
578 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
579 		mmu.level_offset[i] = mmu.level_size[i] - 1;
580 		mmu.level_mask[i] = ~mmu.level_offset[i];
581 	}
582 
583 	mmu.pte_bits[0] = PT_VALID;
584 	for (i = 1; i <= mmu.max_page_level; ++i)
585 		mmu.pte_bits[i] = PT_VALID | PT_PAGESIZE;
586 
587 	/*
588 	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
589 	 */
590 	for (i = 1; i < mmu.num_level; ++i)
591 		mmu.ptp_bits[i] = PT_PTPBITS;
592 #if defined(__i386)
593 	mmu.ptp_bits[2] = PT_VALID;
594 #endif
595 
596 	/*
597 	 * Compute how many hash table entries to have per process for htables.
598 	 * We start with 1 page's worth of entries.
599 	 *
600 	 * If physical memory is small, reduce the amount need to cover it.
601 	 */
602 	max_htables = physmax / mmu.ptes_per_table;
603 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
604 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
605 		mmu.hash_cnt >>= 1;
606 	mmu.vlp_hash_cnt = mmu.hash_cnt;
607 
608 #if defined(__amd64)
609 	/*
610 	 * If running in 64 bits and physical memory is large,
611 	 * increase the size of the cache to cover all of memory for
612 	 * a 64 bit process.
613 	 */
614 #define	HASH_MAX_LENGTH 4
615 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
616 		mmu.hash_cnt <<= 1;
617 #endif
618 
619 	/*
620 	 * This code knows that there are only 2 pagesizes.
621 	 * We ignore 4MB (non-PAE) for now. The value is only used
622 	 * for optimizing demaps across large ranges.
623 	 * These return zero if no information is known.
624 	 */
625 	mmu.tlb_entries[0] = cpuid_get_dtlb_nent(NULL, MMU_PAGESIZE);
626 	mmu.tlb_entries[1] = cpuid_get_dtlb_nent(NULL, 2 * 1024 * 1024);
627 }
628 
629 
630 /*
631  * initialize hat data structures
632  */
633 void
634 hat_init()
635 {
636 #if defined(__i386)
637 	/*
638 	 * _userlimit must be aligned correctly
639 	 */
640 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
641 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
642 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
643 		halt("hat_init(): Unable to continue");
644 	}
645 #endif
646 
647 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
648 
649 	/*
650 	 * initialize kmem caches
651 	 */
652 	htable_init();
653 	hment_init();
654 
655 	hat_cache = kmem_cache_create("hat_t",
656 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
657 	    NULL, 0, 0);
658 
659 	hat_hash_cache = kmem_cache_create("HatHash",
660 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
661 	    NULL, 0, 0);
662 
663 	/*
664 	 * VLP hats can use a smaller hash table size on large memroy machines
665 	 */
666 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
667 		vlp_hash_cache = hat_hash_cache;
668 	} else {
669 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
670 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
671 		    NULL, 0, 0);
672 	}
673 
674 	/*
675 	 * Set up the kernel's hat
676 	 */
677 	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
678 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
679 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
680 	kas.a_hat->hat_as = &kas;
681 	kas.a_hat->hat_flags = 0;
682 	AS_LOCK_EXIT(&kas, &kas.a_lock);
683 
684 	CPUSET_ZERO(khat_cpuset);
685 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
686 
687 	/*
688 	 * The kernel hat's next pointer serves as the head of the hat list .
689 	 */
690 	kas.a_hat->hat_next = NULL;
691 
692 	/*
693 	 * Allocate an htable hash bucket for the kernel
694 	 * XX64 - tune for 64 bit procs
695 	 */
696 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
697 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
698 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
699 
700 	/*
701 	 * zero out the top level and cached htable pointers
702 	 */
703 	kas.a_hat->hat_ht_cached = NULL;
704 	kas.a_hat->hat_htable = NULL;
705 }
706 
707 /*
708  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
709  *
710  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
711  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
712  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
713  */
714 /*ARGSUSED*/
715 static void
716 hat_vlp_setup(struct cpu *cpu)
717 {
718 #if defined(__amd64)
719 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
720 	pfn_t pfn;
721 
722 	/*
723 	 * allocate the level==2 page table for the bottom most
724 	 * 512Gig of address space (this is where 32 bit apps live)
725 	 */
726 	ASSERT(hci != NULL);
727 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
728 
729 	/*
730 	 * Allocate a top level pagetable and copy the kernel's
731 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
732 	 */
733 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
734 	hci->hci_vlp_pfn =
735 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
736 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
737 	bcopy(vlp_page + khat_start, hci->hci_vlp_l3ptes + khat_start,
738 	    khat_entries * sizeof (x86pte_t));
739 
740 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
741 	ASSERT(pfn != PFN_INVALID);
742 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
743 #endif /* __amd64 */
744 }
745 
746 /*
747  * Finish filling in the kernel hat.
748  * Pre fill in all top level kernel page table entries for the kernel's
749  * part of the address range.  From this point on we can't use any new
750  * kernel large pages if they need PTE's at max_level
751  */
752 void
753 hat_init_finish(void)
754 {
755 	htable_t	*top = kas.a_hat->hat_htable;
756 	htable_t	*ht;
757 	uint_t		e;
758 	x86pte_t	pte;
759 	uintptr_t	va = kernelbase;
760 
761 
762 #if defined(__i386)
763 	ASSERT((va & LEVEL_MASK(1)) == va);
764 
765 	/*
766 	 * Deal with kernelbase not 1Gig aligned for 32 bit PAE hats.
767 	 */
768 	if (!mmu.pae_hat || (va & LEVEL_OFFSET(mmu.max_level)) == 0) {
769 		khat_pae32_htable = NULL;
770 	} else {
771 		ASSERT(mmu.max_level == 2);
772 		ASSERT((va & LEVEL_OFFSET(mmu.max_level - 1)) == 0);
773 		khat_pae32_htable =
774 		    htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
775 		khat_pae32_start = htable_va2entry(va, khat_pae32_htable);
776 		khat_pae32_entries = mmu.ptes_per_table - khat_pae32_start;
777 		for (e = khat_pae32_start; e < mmu.ptes_per_table;
778 		    ++e, va += LEVEL_SIZE(mmu.max_level - 1)) {
779 			pte = x86pte_get(khat_pae32_htable, e);
780 			if (PTE_ISVALID(pte))
781 				continue;
782 			ht = htable_create(kas.a_hat, va, mmu.max_level - 2,
783 			    NULL);
784 			ASSERT(ht != NULL);
785 		}
786 	}
787 #endif
788 
789 	/*
790 	 * The kernel hat will need fixed values in the highest level
791 	 * ptable for copying to all other hat's. This implies
792 	 * alignment restrictions on _userlimit.
793 	 *
794 	 * Note we don't htable_release() these htables. This keeps them
795 	 * from ever being stolen or free'd.
796 	 *
797 	 * top_level_count is used instead of ptes_per_table, since
798 	 * on 32-bit PAE we only have 4 usable entries at the top level ptable.
799 	 */
800 	if (va == 0)
801 		khat_start = mmu.top_level_count;
802 	else
803 		khat_start = htable_va2entry(va, kas.a_hat->hat_htable);
804 	khat_entries = mmu.top_level_count - khat_start;
805 	for (e = khat_start; e < mmu.top_level_count;
806 	    ++e, va += LEVEL_SIZE(mmu.max_level)) {
807 		pte = x86pte_get(top, e);
808 		if (PTE_ISVALID(pte))
809 			continue;
810 		ht = htable_create(kas.a_hat, va, mmu.max_level - 1, NULL);
811 		ASSERT(ht != NULL);
812 	}
813 
814 	/*
815 	 * We are now effectively running on the kernel hat.
816 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
817 	 * reserve for all HAT allocations.  From here on, the reserves are
818 	 * only used when mapping in memory for the hat's own allocations.
819 	 */
820 	use_boot_reserve = 0;
821 	htable_adjust_reserve();
822 
823 	/*
824 	 * 32 bit kernels use only 4 of the 512 entries in its top level
825 	 * pagetable. We'll use the remainder for the "per CPU" page tables
826 	 * for VLP processes.
827 	 *
828 	 * We map the top level kernel pagetable into the kernel's AS to make
829 	 * it easy to use bcopy for kernel entry PTEs.
830 	 *
831 	 * We were guaranteed to get a physical address < 4Gig, since the 32 bit
832 	 * boot loader uses non-PAE page tables.
833 	 */
834 	if (mmu.pae_hat) {
835 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
836 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
837 		    kas.a_hat->hat_htable->ht_pfn,
838 		    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
839 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
840 	}
841 	hat_vlp_setup(CPU);
842 }
843 
844 /*
845  * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
846  * are 32 bit, so for safety we must use cas64() to install these.
847  */
848 #ifdef __i386
849 static void
850 reload_pae32(hat_t *hat, cpu_t *cpu)
851 {
852 	x86pte_t *src;
853 	x86pte_t *dest;
854 	x86pte_t pte;
855 	int i;
856 
857 	/*
858 	 * Load the 4 entries of the level 2 page table into this
859 	 * cpu's range of the vlp_page and point cr3 at them.
860 	 */
861 	ASSERT(mmu.pae_hat);
862 	src = hat->hat_vlp_ptes;
863 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
864 	for (i = 0; i < VLP_NUM_PTES; ++i) {
865 		for (;;) {
866 			pte = dest[i];
867 			if (pte == src[i])
868 				break;
869 			if (cas64(dest + i, pte, src[i]) != src[i])
870 				break;
871 		}
872 	}
873 }
874 #endif
875 
876 /*
877  * Switch to a new active hat, maintaining bit masks to track active CPUs.
878  */
879 void
880 hat_switch(hat_t *hat)
881 {
882 	uintptr_t	newcr3;
883 	cpu_t		*cpu = CPU;
884 	hat_t		*old = cpu->cpu_current_hat;
885 
886 	/*
887 	 * set up this information first, so we don't miss any cross calls
888 	 */
889 	if (old != NULL) {
890 		if (old == hat)
891 			return;
892 		if (old != kas.a_hat)
893 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
894 	}
895 
896 	/*
897 	 * Wait for any in flight pagetable invalidates on this hat to finish.
898 	 * This is a spin lock at DISP_LEVEL
899 	 */
900 	if (hat != kas.a_hat) {
901 		mutex_enter(&hat->hat_switch_mutex);
902 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
903 		mutex_exit(&hat->hat_switch_mutex);
904 	}
905 	cpu->cpu_current_hat = hat;
906 
907 	/*
908 	 * now go ahead and load cr3
909 	 */
910 	if (hat->hat_flags & HAT_VLP) {
911 #if defined(__amd64)
912 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
913 
914 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
915 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
916 #elif defined(__i386)
917 		reload_pae32(hat, cpu);
918 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
919 		    (cpu->cpu_id + 1) * VLP_SIZE;
920 #endif
921 	} else {
922 		newcr3 = MAKECR3(hat->hat_htable->ht_pfn);
923 	}
924 	setcr3(newcr3);
925 	ASSERT(cpu == CPU);
926 }
927 
928 /*
929  * Utility to return a valid x86pte_t from protections, pfn, and level number
930  */
931 static x86pte_t
932 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
933 {
934 	x86pte_t	pte;
935 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
936 
937 	pte = MAKEPTE(pfn, level);
938 
939 	if (attr & PROT_WRITE)
940 		PTE_SET(pte, PT_WRITABLE);
941 
942 	if (attr & PROT_USER)
943 		PTE_SET(pte, PT_USER);
944 
945 	if (!(attr & PROT_EXEC))
946 		PTE_SET(pte, mmu.pt_nx);
947 
948 	/*
949 	 * set the software bits used track ref/mod sync's and hments
950 	 */
951 	if (attr & HAT_NOSYNC)
952 		PTE_SET(pte, PT_NOSYNC);
953 	if (flags & HAT_LOAD_NOCONSIST)
954 		PTE_SET(pte, PT_NOCONSIST | PT_NOSYNC);
955 
956 	/*
957 	 * Set the caching attributes in the PTE. The combination
958 	 * of attributes are poorly defined, so we pay attention
959 	 * to them in the given order.
960 	 *
961 	 * The test for HAT_STRICTORDER is different because it's defined
962 	 * as "0" - which was a stupid thing to do, but is too late to change!
963 	 */
964 	if (cache_attr == HAT_STRICTORDER) {
965 		PTE_SET(pte, PT_NOCACHE);
966 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
967 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
968 		/* nothing to set */;
969 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
970 		PTE_SET(pte, PT_NOCACHE);
971 		if (x86_feature & X86_PAT)
972 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
973 		else
974 			PTE_SET(pte, PT_WRITETHRU);
975 	} else {
976 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
977 	}
978 
979 	return (pte);
980 }
981 
982 /*
983  * Duplicate address translations of the parent to the child.
984  * This function really isn't used anymore.
985  */
986 /*ARGSUSED*/
987 int
988 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
989 {
990 	ASSERT((uintptr_t)addr < kernelbase);
991 	ASSERT(new != kas.a_hat);
992 	ASSERT(old != kas.a_hat);
993 	return (0);
994 }
995 
996 /*
997  * Allocate any hat resources required for a process being swapped in.
998  */
999 /*ARGSUSED*/
1000 void
1001 hat_swapin(hat_t *hat)
1002 {
1003 	/* do nothing - we let everything fault back in */
1004 }
1005 
1006 /*
1007  * Unload all translations associated with an address space of a process
1008  * that is being swapped out.
1009  */
1010 void
1011 hat_swapout(hat_t *hat)
1012 {
1013 	uintptr_t	vaddr = (uintptr_t)0;
1014 	uintptr_t	eaddr = _userlimit;
1015 	htable_t	*ht = NULL;
1016 	level_t		l;
1017 
1018 	/*
1019 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1020 	 * seg_spt and shared pagetables can't be swapped out.
1021 	 * Take a look at segspt_shmswapout() - it's a big no-op.
1022 	 *
1023 	 * Instead we'll walk through all the address space and unload
1024 	 * any mappings which we are sure are not shared, not locked.
1025 	 */
1026 	ASSERT(IS_PAGEALIGNED(vaddr));
1027 	ASSERT(IS_PAGEALIGNED(eaddr));
1028 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1029 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1030 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1031 
1032 	while (vaddr < eaddr) {
1033 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1034 		if (ht == NULL)
1035 			break;
1036 
1037 		ASSERT(!IN_VA_HOLE(vaddr));
1038 
1039 		/*
1040 		 * If the page table is shared skip its entire range.
1041 		 * This code knows that only level 0 page tables are shared
1042 		 */
1043 		l = ht->ht_level;
1044 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
1045 			ASSERT(l == 0);
1046 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1047 			htable_release(ht);
1048 			ht = NULL;
1049 			continue;
1050 		}
1051 
1052 		/*
1053 		 * If the page table has no locked entries, unload this one.
1054 		 */
1055 		if (ht->ht_lock_cnt == 0)
1056 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1057 			    HAT_UNLOAD_UNMAP);
1058 
1059 		/*
1060 		 * If we have a level 0 page table with locked entries,
1061 		 * skip the entire page table, otherwise skip just one entry.
1062 		 */
1063 		if (ht->ht_lock_cnt > 0 && l == 0)
1064 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1065 		else
1066 			vaddr += LEVEL_SIZE(l);
1067 	}
1068 	if (ht)
1069 		htable_release(ht);
1070 
1071 	/*
1072 	 * We're in swapout because the system is low on memory, so
1073 	 * go back and flush all the htables off the cached list.
1074 	 */
1075 	htable_purge_hat(hat);
1076 }
1077 
1078 /*
1079  * returns number of bytes that have valid mappings in hat.
1080  */
1081 size_t
1082 hat_get_mapped_size(hat_t *hat)
1083 {
1084 	size_t total = 0;
1085 	int l;
1086 
1087 	for (l = 0; l <= mmu.max_page_level; l++)
1088 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1089 
1090 	return (total);
1091 }
1092 
1093 /*
1094  * enable/disable collection of stats for hat.
1095  */
1096 int
1097 hat_stats_enable(hat_t *hat)
1098 {
1099 	atomic_add_32(&hat->hat_stats, 1);
1100 	return (1);
1101 }
1102 
1103 void
1104 hat_stats_disable(hat_t *hat)
1105 {
1106 	atomic_add_32(&hat->hat_stats, -1);
1107 }
1108 
1109 /*
1110  * Utility to sync the ref/mod bits from a page table entry to the page_t
1111  * We must be holding the mapping list lock when this is called.
1112  */
1113 static void
1114 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1115 {
1116 	uint_t	rm = 0;
1117 	pgcnt_t	pgcnt;
1118 
1119 	if (PTE_GET(pte, PT_NOSYNC))
1120 		return;
1121 
1122 	if (PTE_GET(pte, PT_REF))
1123 		rm |= P_REF;
1124 
1125 	if (PTE_GET(pte, PT_MOD))
1126 		rm |= P_MOD;
1127 
1128 	if (rm == 0)
1129 		return;
1130 
1131 	/*
1132 	 * sync to all constituent pages of a large page
1133 	 */
1134 	ASSERT(x86_hm_held(pp));
1135 	pgcnt = page_get_pagecnt(level);
1136 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1137 	for (; pgcnt > 0; --pgcnt) {
1138 		/*
1139 		 * hat_page_demote() can't decrease
1140 		 * pszc below this mapping size
1141 		 * since this large mapping existed after we
1142 		 * took mlist lock.
1143 		 */
1144 		ASSERT(pp->p_szc >= level);
1145 		hat_page_setattr(pp, rm);
1146 		++pp;
1147 	}
1148 }
1149 
1150 /*
1151  * This the set of PTE bits for PFN, permissions and caching
1152  * that require a TLB flush (hat_demap) if changed on a HAT_LOAD_REMAP
1153  */
1154 #define	PT_REMAP_BITS							\
1155 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
1156 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE)
1157 
1158 /*
1159  * Do the low-level work to get a mapping entered into a HAT's pagetables
1160  * and in the mapping list of the associated page_t.
1161  */
1162 static void
1163 hati_pte_map(
1164 	htable_t	*ht,
1165 	uint_t		entry,
1166 	page_t		*pp,
1167 	x86pte_t	pte,
1168 	int		flags,
1169 	void		*pte_ptr)
1170 {
1171 	hat_t		*hat = ht->ht_hat;
1172 	x86pte_t	old_pte;
1173 	level_t		l = ht->ht_level;
1174 	hment_t		*hm;
1175 	uint_t		is_consist;
1176 
1177 	/*
1178 	 * Is this a consistant (ie. need mapping list lock) mapping?
1179 	 */
1180 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1181 
1182 	/*
1183 	 * Track locked mapping count in the htable.  Do this first,
1184 	 * as we track locking even if there already is a mapping present.
1185 	 */
1186 	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
1187 		HTABLE_LOCK_INC(ht);
1188 
1189 	/*
1190 	 * Acquire the page's mapping list lock and get an hment to use.
1191 	 * Note that hment_prepare() might return NULL.
1192 	 */
1193 	if (is_consist) {
1194 		x86_hm_enter(pp);
1195 		hm = hment_prepare(ht, entry, pp);
1196 	}
1197 
1198 	/*
1199 	 * Set the new pte, retrieving the old one at the same time.
1200 	 */
1201 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1202 
1203 	/*
1204 	 * If the mapping didn't change there is nothing more to do.
1205 	 */
1206 	if (PTE_EQUIV(pte, old_pte)) {
1207 		if (is_consist) {
1208 			x86_hm_exit(pp);
1209 			if (hm != NULL)
1210 				hment_free(hm);
1211 		}
1212 		return;
1213 	}
1214 
1215 	/*
1216 	 * Install a new mapping in the page's mapping list
1217 	 */
1218 	if (!PTE_ISVALID(old_pte)) {
1219 		if (is_consist) {
1220 			hment_assign(ht, entry, pp, hm);
1221 			x86_hm_exit(pp);
1222 		} else {
1223 			ASSERT(flags & HAT_LOAD_NOCONSIST);
1224 		}
1225 		HTABLE_INC(ht->ht_valid_cnt);
1226 		PGCNT_INC(hat, l);
1227 		return;
1228 	}
1229 
1230 	/*
1231 	 * Remap's are more complicated:
1232 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1233 	 *    We also require that NOCONSIST be specified.
1234 	 *  - Otherwise only permission or caching bits may change.
1235 	 */
1236 	if (!PTE_ISPAGE(old_pte, l))
1237 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1238 
1239 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1240 		ASSERT(flags & HAT_LOAD_REMAP);
1241 		ASSERT(flags & HAT_LOAD_NOCONSIST);
1242 		ASSERT(PTE_GET(old_pte, PT_NOCONSIST));
1243 		ASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1244 		    pf_is_memory(PTE2PFN(pte, l)));
1245 		ASSERT(!is_consist);
1246 	}
1247 
1248 	/*
1249 	 * We only let remaps change the bits for PFNs, permissions
1250 	 * or caching type.
1251 	 */
1252 	ASSERT(PTE_GET(old_pte, ~(PT_REMAP_BITS | PT_REF | PT_MOD)) ==
1253 	    PTE_GET(pte, ~PT_REMAP_BITS));
1254 
1255 	/*
1256 	 * A remap requires invalidating the TLBs, since remapping the
1257 	 * same PFN requires NOCONSIST, we don't have to sync R/M bits.
1258 	 */
1259 	hat_demap(hat, htable_e2va(ht, entry));
1260 
1261 	/*
1262 	 * We don't create any mapping list entries on a remap, so release
1263 	 * any allocated hment after we drop the mapping list lock.
1264 	 */
1265 	if (is_consist) {
1266 		x86_hm_exit(pp);
1267 		if (hm != NULL)
1268 			hment_free(hm);
1269 	}
1270 }
1271 
1272 /*
1273  * The t_hatdepth field is an 8-bit counter.  We use the lower seven bits
1274  * to track exactly how deep we are in the memload->kmem_alloc recursion.
1275  * If the depth is greater than 1, that indicates that we are performing a
1276  * hat operation to satisfy another hat operation.  To prevent infinite
1277  * recursion, we switch over to using pre-allocated "reserves" of htables
1278  * and hments.
1279  *
1280  * The uppermost bit is used to indicate that we are transitioning away
1281  * from being the reserves thread.  See hati_reserves_exit() for the
1282  * details.
1283  */
1284 #define	EXITING_FLAG		(1 << 7)
1285 #define	DEPTH_MASK		(~EXITING_FLAG)
1286 #define	HAT_DEPTH(t)		((t)->t_hatdepth & DEPTH_MASK)
1287 #define	EXITING_RESERVES(t)	((t)->t_hatdepth & EXITING_FLAG)
1288 
1289 /*
1290  * Access to reserves for HAT_NO_KALLOC is single threaded.
1291  * If someone else is in the reserves, we'll politely wait for them
1292  * to finish. This keeps normal hat_memload()s from eating up
1293  * the mappings needed to replenish the reserve.
1294  */
1295 static void
1296 hati_reserves_enter(uint_t kmem_for_hat)
1297 {
1298 	/*
1299 	 * 64 is an arbitrary number to catch serious problems.  I'm not
1300 	 * sure what the absolute maximum depth is, but it should be
1301 	 * substantially less than this.
1302 	 */
1303 	ASSERT(HAT_DEPTH(curthread) < 64);
1304 
1305 	/*
1306 	 * If we are doing a memload to satisfy a kmem operation, we enter
1307 	 * the reserves immediately; we don't wait to recurse to a second
1308 	 * level of memload.
1309 	 */
1310 	ASSERT(kmem_for_hat < 2);
1311 	curthread->t_hatdepth += (1 + kmem_for_hat);
1312 
1313 	if (hat_reserves_thread == curthread || use_boot_reserve)
1314 		return;
1315 
1316 	if (HAT_DEPTH(curthread) > 1 || hat_reserves_thread != NULL) {
1317 		mutex_enter(&hat_reserves_lock);
1318 		while (hat_reserves_thread != NULL)
1319 			cv_wait(&hat_reserves_cv, &hat_reserves_lock);
1320 
1321 		if (HAT_DEPTH(curthread) > 1)
1322 			hat_reserves_thread = curthread;
1323 
1324 		mutex_exit(&hat_reserves_lock);
1325 	}
1326 }
1327 
1328 /*
1329  * If we are the reserves_thread and we've finally finished with all our
1330  * memloads (ie. no longer doing hat slabs), we can release our use of the
1331  * reserve.
1332  */
1333 static void
1334 hati_reserves_exit(uint_t kmem_for_hat)
1335 {
1336 	ASSERT(kmem_for_hat < 2);
1337 	curthread->t_hatdepth -= (1 + kmem_for_hat);
1338 
1339 	/*
1340 	 * Simple case: either we are not the reserves thread, or we are
1341 	 * the reserves thread and we are nested deeply enough that we
1342 	 * should still be the reserves thread.
1343 	 *
1344 	 * Note: we may not become the reserves thread after we recursively
1345 	 * enter our second HAT routine, but we don't stop being the
1346 	 * reserves thread until we exit the toplevel HAT routine.  This is
1347 	 * to work around vmem's inability to determine when an allocation
1348 	 * should be satisfied from the hat_memload arena, which can lead
1349 	 * to an infinite loop of memload->vmem_populate->memload->.
1350 	 */
1351 	if (curthread != hat_reserves_thread || HAT_DEPTH(curthread) > 0 ||
1352 	    use_boot_reserve)
1353 		return;
1354 
1355 	mutex_enter(&hat_reserves_lock);
1356 	ASSERT(hat_reserves_thread == curthread);
1357 	hat_reserves_thread = NULL;
1358 	cv_broadcast(&hat_reserves_cv);
1359 	mutex_exit(&hat_reserves_lock);
1360 
1361 	/*
1362 	 * As we leave the reserves, we want to be sure the reserve lists
1363 	 * aren't overstocked.  Freeing excess reserves requires that we
1364 	 * call kmem_free(), which may require additional allocations,
1365 	 * causing us to re-enter the reserves.  To avoid infinite
1366 	 * recursion, we only try to adjust reserves at the very top level.
1367 	 */
1368 	if (!kmem_for_hat && !EXITING_RESERVES(curthread)) {
1369 		curthread->t_hatdepth |= EXITING_FLAG;
1370 		htable_adjust_reserve();
1371 		hment_adjust_reserve();
1372 		curthread->t_hatdepth &= (~EXITING_FLAG);
1373 	}
1374 
1375 	/*
1376 	 * just in case something went wrong in doing adjust reserves
1377 	 */
1378 	ASSERT(hat_reserves_thread != curthread);
1379 }
1380 
1381 /*
1382  * Internal routine to load a single page table entry.
1383  */
1384 static void
1385 hati_load_common(
1386 	hat_t		*hat,
1387 	uintptr_t	va,
1388 	page_t		*pp,
1389 	uint_t		attr,
1390 	uint_t		flags,
1391 	level_t		level,
1392 	pfn_t		pfn)
1393 {
1394 	htable_t	*ht;
1395 	uint_t		entry;
1396 	x86pte_t	pte;
1397 	uint_t		kmem_for_hat = (flags & HAT_NO_KALLOC) ? 1 : 0;
1398 
1399 	ASSERT(hat == kas.a_hat ||
1400 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1401 
1402 	if (flags & HAT_LOAD_SHARE)
1403 		hat->hat_flags |= HAT_SHARED;
1404 
1405 	/*
1406 	 * Find the page table that maps this page if it already exists.
1407 	 */
1408 	ht = htable_lookup(hat, va, level);
1409 
1410 	/*
1411 	 * All threads go through hati_reserves_enter() to at least wait
1412 	 * for any existing reserves user to finish. This helps reduce
1413 	 * pressure on the reserves. In addition, if this thread needs
1414 	 * to become the new reserve user it will.
1415 	 */
1416 	hati_reserves_enter(kmem_for_hat);
1417 
1418 	ASSERT(HAT_DEPTH(curthread) == 1 || va >= kernelbase);
1419 
1420 	/*
1421 	 * Kernel memloads for HAT data should never use hments!
1422 	 * If it did that would seriously complicate the reserves system, since
1423 	 * hment_alloc() would need to know about HAT_NO_KALLOC.
1424 	 *
1425 	 * We also must have HAT_LOAD_NOCONSIST if page_t is NULL.
1426 	 */
1427 	if (HAT_DEPTH(curthread) > 1 || pp == NULL)
1428 		flags |= HAT_LOAD_NOCONSIST;
1429 
1430 	if (ht == NULL) {
1431 		ht = htable_create(hat, va, level, NULL);
1432 		ASSERT(ht != NULL);
1433 	}
1434 	entry = htable_va2entry(va, ht);
1435 
1436 	/*
1437 	 * a bunch of paranoid error checking
1438 	 */
1439 	ASSERT(ht->ht_busy > 0);
1440 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1441 		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
1442 	ASSERT(ht->ht_level == level);
1443 
1444 	/*
1445 	 * construct the new PTE
1446 	 */
1447 	if (hat == kas.a_hat)
1448 		attr &= ~PROT_USER;
1449 	pte = hati_mkpte(pfn, attr, level, flags);
1450 	if (hat == kas.a_hat && va >= kernelbase)
1451 		PTE_SET(pte, mmu.pt_global);
1452 
1453 	/*
1454 	 * establish the mapping
1455 	 */
1456 	hati_pte_map(ht, entry, pp, pte, flags, NULL);
1457 
1458 	/*
1459 	 * release the htable and any reserves
1460 	 */
1461 	htable_release(ht);
1462 	hati_reserves_exit(kmem_for_hat);
1463 }
1464 
1465 /*
1466  * special case of hat_memload to deal with some kernel addrs for performance
1467  */
1468 static void
1469 hat_kmap_load(
1470 	caddr_t		addr,
1471 	page_t		*pp,
1472 	uint_t		attr,
1473 	uint_t		flags)
1474 {
1475 	uintptr_t	va = (uintptr_t)addr;
1476 	x86pte_t	pte;
1477 	pfn_t		pfn = page_pptonum(pp);
1478 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
1479 	htable_t	*ht;
1480 	uint_t		entry;
1481 	void		*pte_ptr;
1482 
1483 	/*
1484 	 * construct the requested PTE
1485 	 */
1486 	attr &= ~PROT_USER;
1487 	attr |= HAT_STORECACHING_OK;
1488 	pte = hati_mkpte(pfn, attr, 0, flags);
1489 	PTE_SET(pte, mmu.pt_global);
1490 
1491 	/*
1492 	 * Figure out the pte_ptr and htable and use common code to finish up
1493 	 */
1494 	if (mmu.pae_hat)
1495 		pte_ptr = mmu.kmap_ptes + pg_off;
1496 	else
1497 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1498 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1499 	    LEVEL_SHIFT(1)];
1500 	entry = htable_va2entry(va, ht);
1501 	hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1502 }
1503 
1504 /*
1505  * hat_memload() - load a translation to the given page struct
1506  *
1507  * Flags for hat_memload/hat_devload/hat_*attr.
1508  *
1509  * 	HAT_LOAD	Default flags to load a translation to the page.
1510  *
1511  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
1512  *			and hat_devload().
1513  *
1514  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1515  *			sets PT_NOCONSIST (soft bit)
1516  *
1517  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
1518  *			that map some user pages (not kas) is shared by more
1519  *			than one process (eg. ISM).
1520  *
1521  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
1522  *
1523  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
1524  *			point, it's setting up mapping to allocate internal
1525  *			hat layer data structures.  This flag forces hat layer
1526  *			to tap its reserves in order to prevent infinite
1527  *			recursion.
1528  *
1529  * The following is a protection attribute (like PROT_READ, etc.)
1530  *
1531  *	HAT_NOSYNC	set PT_NOSYNC (soft bit) - this mapping's ref/mod bits
1532  *			are never cleared.
1533  *
1534  * Installing new valid PTE's and creation of the mapping list
1535  * entry are controlled under the same lock. It's derived from the
1536  * page_t being mapped.
1537  */
1538 static uint_t supported_memload_flags =
1539 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1540 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1541 
1542 void
1543 hat_memload(
1544 	hat_t		*hat,
1545 	caddr_t		addr,
1546 	page_t		*pp,
1547 	uint_t		attr,
1548 	uint_t		flags)
1549 {
1550 	uintptr_t	va = (uintptr_t)addr;
1551 	level_t		level = 0;
1552 	pfn_t		pfn = page_pptonum(pp);
1553 
1554 	HATIN(hat_memload, hat, addr, (size_t)MMU_PAGESIZE);
1555 	ASSERT(IS_PAGEALIGNED(va));
1556 	ASSERT(hat == kas.a_hat || va <= kernelbase);
1557 	ASSERT(hat == kas.a_hat ||
1558 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1559 	ASSERT((flags & supported_memload_flags) == flags);
1560 
1561 	ASSERT(!IN_VA_HOLE(va));
1562 	ASSERT(!PP_ISFREE(pp));
1563 
1564 	/*
1565 	 * kernel address special case for performance.
1566 	 */
1567 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1568 		ASSERT(hat == kas.a_hat);
1569 		hat_kmap_load(addr, pp, attr, flags);
1570 		return;
1571 	}
1572 
1573 	/*
1574 	 * This is used for memory with normal caching enabled, so
1575 	 * always set HAT_STORECACHING_OK.
1576 	 */
1577 	attr |= HAT_STORECACHING_OK;
1578 	hati_load_common(hat, va, pp, attr, flags, level, pfn);
1579 	HATOUT(hat_memload, hat, addr);
1580 }
1581 
1582 /*
1583  * Load the given array of page structs using large pages when possible
1584  */
1585 void
1586 hat_memload_array(
1587 	hat_t		*hat,
1588 	caddr_t		addr,
1589 	size_t		len,
1590 	page_t		**pages,
1591 	uint_t		attr,
1592 	uint_t		flags)
1593 {
1594 	uintptr_t	va = (uintptr_t)addr;
1595 	uintptr_t	eaddr = va + len;
1596 	level_t		level;
1597 	size_t		pgsize;
1598 	pgcnt_t		pgindx = 0;
1599 	pfn_t		pfn;
1600 	pgcnt_t		i;
1601 
1602 	HATIN(hat_memload_array, hat, addr, len);
1603 	ASSERT(IS_PAGEALIGNED(va));
1604 	ASSERT(hat == kas.a_hat || va + len <= kernelbase);
1605 	ASSERT(hat == kas.a_hat ||
1606 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1607 	ASSERT((flags & supported_memload_flags) == flags);
1608 
1609 	/*
1610 	 * memload is used for memory with full caching enabled, so
1611 	 * set HAT_STORECACHING_OK.
1612 	 */
1613 	attr |= HAT_STORECACHING_OK;
1614 
1615 	/*
1616 	 * handle all pages using largest possible pagesize
1617 	 */
1618 	while (va < eaddr) {
1619 		/*
1620 		 * decide what level mapping to use (ie. pagesize)
1621 		 */
1622 		pfn = page_pptonum(pages[pgindx]);
1623 		for (level = mmu.max_page_level; ; --level) {
1624 			pgsize = LEVEL_SIZE(level);
1625 			if (level == 0)
1626 				break;
1627 			if (!IS_P2ALIGNED(va, pgsize) ||
1628 			    (eaddr - va) < pgsize ||
1629 			    !IS_P2ALIGNED(pfn << MMU_PAGESHIFT, pgsize))
1630 				continue;
1631 
1632 			/*
1633 			 * To use a large mapping of this size, all the
1634 			 * pages we are passed must be sequential subpages
1635 			 * of the large page.
1636 			 * hat_page_demote() can't change p_szc because
1637 			 * all pages are locked.
1638 			 */
1639 			if (pages[pgindx]->p_szc >= level) {
1640 				for (i = 0; i < mmu_btop(pgsize); ++i) {
1641 					if (pfn + i !=
1642 					    page_pptonum(pages[pgindx + i]))
1643 						break;
1644 					ASSERT(pages[pgindx + i]->p_szc >=
1645 					    level);
1646 					ASSERT(pages[pgindx] + i ==
1647 					    pages[pgindx + i]);
1648 				}
1649 				if (i == mmu_btop(pgsize))
1650 					break;
1651 			}
1652 		}
1653 
1654 		/*
1655 		 * Shared page tables for DISM might have a pre-existing
1656 		 * level 0 page table that wasn't unlinked from all the
1657 		 * sharing hats. If we hit this for a large page, back off
1658 		 * to using level 0 pages.
1659 		 *
1660 		 * This can't be made better (ie. use large pages) until we
1661 		 * track all the htable's sharing and rewrite hat_pageunload().
1662 		 * Note that would cost a pointer in htable_t for a rare case.
1663 		 *
1664 		 * Since the 32 bit kernel caches empty page tables, check
1665 		 * the kernel too.
1666 		 */
1667 		if ((hat == kas.a_hat || (hat->hat_flags & HAT_SHARED)) &&
1668 		    level > 0) {
1669 			htable_t *lower;
1670 
1671 			lower = htable_getpte(hat, va, NULL, NULL, level - 1);
1672 			if (lower != NULL) {
1673 				level = 0;
1674 				pgsize = LEVEL_SIZE(0);
1675 				htable_release(lower);
1676 			}
1677 		}
1678 
1679 		/*
1680 		 * load this page mapping
1681 		 */
1682 		ASSERT(!IN_VA_HOLE(va));
1683 		hati_load_common(hat, va, pages[pgindx], attr, flags,
1684 		    level, pfn);
1685 
1686 		/*
1687 		 * move to next page
1688 		 */
1689 		va += pgsize;
1690 		pgindx += mmu_btop(pgsize);
1691 	}
1692 	HATOUT(hat_memload_array, hat, addr);
1693 }
1694 
1695 /*
1696  * void hat_devload(hat, addr, len, pf, attr, flags)
1697  *	load/lock the given page frame number
1698  *
1699  * Advisory ordering attributes. Apply only to device mappings.
1700  *
1701  * HAT_STRICTORDER: the CPU must issue the references in order, as the
1702  *	programmer specified.  This is the default.
1703  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1704  *	of reordering; store or load with store or load).
1705  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1706  *	to consecutive locations (for example, turn two consecutive byte
1707  *	stores into one halfword store), and it may batch individual loads
1708  *	(for example, turn two consecutive byte loads into one halfword load).
1709  *	This also implies re-ordering.
1710  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1711  *	until another store occurs.  The default is to fetch new data
1712  *	on every load.  This also implies merging.
1713  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1714  *	the device (perhaps with other data) at a later time.  The default is
1715  *	to push the data right away.  This also implies load caching.
1716  *
1717  * Equivalent of hat_memload(), but can be used for device memory where
1718  * there are no page_t's and we support additional flags (write merging, etc).
1719  * Note that we can have large page mappings with this interface.
1720  */
1721 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1722 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1723 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1724 
1725 void
1726 hat_devload(
1727 	hat_t		*hat,
1728 	caddr_t		addr,
1729 	size_t		len,
1730 	pfn_t		pfn,
1731 	uint_t		attr,
1732 	int		flags)
1733 {
1734 	uintptr_t	va = ALIGN2PAGE(addr);
1735 	uintptr_t	eva = va + len;
1736 	level_t		level;
1737 	size_t		pgsize;
1738 	page_t		*pp;
1739 	int		f;	/* per PTE copy of flags  - maybe modified */
1740 	uint_t		a;	/* per PTE copy of attr */
1741 
1742 	HATIN(hat_devload, hat, addr, len);
1743 	ASSERT(IS_PAGEALIGNED(va));
1744 	ASSERT(hat == kas.a_hat || eva <= kernelbase);
1745 	ASSERT(hat == kas.a_hat ||
1746 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1747 	ASSERT((flags & supported_devload_flags) == flags);
1748 
1749 	/*
1750 	 * handle all pages
1751 	 */
1752 	while (va < eva) {
1753 
1754 		/*
1755 		 * decide what level mapping to use (ie. pagesize)
1756 		 */
1757 		for (level = mmu.max_page_level; ; --level) {
1758 			pgsize = LEVEL_SIZE(level);
1759 			if (level == 0)
1760 				break;
1761 			if (IS_P2ALIGNED(va, pgsize) &&
1762 			    (eva - va) >= pgsize &&
1763 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize)))
1764 				break;
1765 		}
1766 
1767 		/*
1768 		 * Some kernel addresses have permanently existing page tables,
1769 		 * so be sure to use a compatible pagesize.
1770 		 */
1771 		if (hat == kas.a_hat && level > 0) {
1772 			htable_t *lower;
1773 
1774 			lower = htable_getpte(hat, va, NULL, NULL, level - 1);
1775 			if (lower != NULL) {
1776 				level = 0;
1777 				pgsize = LEVEL_SIZE(0);
1778 				htable_release(lower);
1779 			}
1780 		}
1781 
1782 		/*
1783 		 * If it is memory get page_t and allow caching (this happens
1784 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1785 		 * to override that. If we don't have a page_t, make sure
1786 		 * NOCONSIST is set.
1787 		 */
1788 		a = attr;
1789 		f = flags;
1790 		if (pf_is_memory(pfn)) {
1791 			if (!(a & HAT_PLAT_NOCACHE))
1792 				a |= HAT_STORECACHING_OK;
1793 
1794 			if (f & HAT_LOAD_NOCONSIST)
1795 				pp = NULL;
1796 			else
1797 				pp = page_numtopp_nolock(pfn);
1798 		} else {
1799 			pp = NULL;
1800 			f |= HAT_LOAD_NOCONSIST;
1801 		}
1802 
1803 		/*
1804 		 * load this page mapping
1805 		 */
1806 		ASSERT(!IN_VA_HOLE(va));
1807 		hati_load_common(hat, va, pp, a, f, level, pfn);
1808 
1809 		/*
1810 		 * move to next page
1811 		 */
1812 		va += pgsize;
1813 		pfn += mmu_btop(pgsize);
1814 	}
1815 	HATOUT(hat_devload, hat, addr);
1816 }
1817 
1818 /*
1819  * void hat_unlock(hat, addr, len)
1820  *	unlock the mappings to a given range of addresses
1821  *
1822  * Locks are tracked by ht_lock_cnt in the htable.
1823  */
1824 void
1825 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1826 {
1827 	uintptr_t	vaddr = (uintptr_t)addr;
1828 	uintptr_t	eaddr = vaddr + len;
1829 	htable_t	*ht = NULL;
1830 
1831 	/*
1832 	 * kernel entries are always locked, we don't track lock counts
1833 	 */
1834 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase);
1835 	ASSERT(IS_PAGEALIGNED(vaddr));
1836 	ASSERT(IS_PAGEALIGNED(eaddr));
1837 	if (hat == kas.a_hat)
1838 		return;
1839 	if (eaddr > _userlimit)
1840 		panic("hat_unlock() address out of range - above _userlimit");
1841 
1842 	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1843 	while (vaddr < eaddr) {
1844 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1845 		if (ht == NULL)
1846 			break;
1847 
1848 		ASSERT(!IN_VA_HOLE(vaddr));
1849 
1850 		if (ht->ht_lock_cnt < 1)
1851 			panic("hat_unlock(): lock_cnt < 1, "
1852 			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
1853 		HTABLE_LOCK_DEC(ht);
1854 
1855 		vaddr += LEVEL_SIZE(ht->ht_level);
1856 	}
1857 	if (ht)
1858 		htable_release(ht);
1859 }
1860 
1861 /*
1862  * Cross call service routine to demap a virtual page on
1863  * the current CPU or flush all mappings in TLB.
1864  */
1865 /*ARGSUSED*/
1866 static int
1867 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1868 {
1869 	hat_t	*hat = (hat_t *)a1;
1870 	caddr_t	addr = (caddr_t)a2;
1871 
1872 	/*
1873 	 * If the target hat isn't the kernel and this CPU isn't operating
1874 	 * in the target hat, we can ignore the cross call.
1875 	 */
1876 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1877 		return (0);
1878 
1879 	/*
1880 	 * For a normal address, we just flush one page mapping
1881 	 */
1882 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1883 		mmu_tlbflush_entry((caddr_t)addr);
1884 		return (0);
1885 	}
1886 
1887 	/*
1888 	 * Otherwise we reload cr3 to effect a complete TLB flush.
1889 	 *
1890 	 * A reload of cr3 on a VLP process also means we must also recopy in
1891 	 * the pte values from the struct hat
1892 	 */
1893 	if (hat->hat_flags & HAT_VLP) {
1894 #if defined(__amd64)
1895 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1896 
1897 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1898 #elif defined(__i386)
1899 		reload_pae32(hat, CPU);
1900 #endif
1901 	}
1902 	reload_cr3();
1903 	return (0);
1904 }
1905 
1906 /*
1907  * Internal routine to do cross calls to invalidate a range of pages on
1908  * all CPUs using a given hat.
1909  */
1910 void
1911 hat_demap(hat_t *hat, uintptr_t va)
1912 {
1913 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
1914 	cpuset_t	justme;
1915 
1916 	/*
1917 	 * If the hat is being destroyed, there are no more users, so
1918 	 * demap need not do anything.
1919 	 */
1920 	if (hat->hat_flags & HAT_FREEING)
1921 		return;
1922 
1923 	/*
1924 	 * If demapping from a shared pagetable, we best demap the
1925 	 * entire set of user TLBs, since we don't know what addresses
1926 	 * these were shared at.
1927 	 */
1928 	if (hat->hat_flags & HAT_SHARED) {
1929 		hat = kas.a_hat;
1930 		va = DEMAP_ALL_ADDR;
1931 	}
1932 
1933 	/*
1934 	 * if not running with multiple CPUs, don't use cross calls
1935 	 */
1936 	if (panicstr || !flushes_require_xcalls) {
1937 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
1938 		return;
1939 	}
1940 
1941 
1942 	/*
1943 	 * All CPUs must see kernel hat changes.
1944 	 */
1945 	if (hat == kas.a_hat) {
1946 		kpreempt_disable();
1947 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
1948 		    X_CALL_HIPRI, khat_cpuset, hati_demap_func);
1949 		kpreempt_enable();
1950 		return;
1951 	}
1952 
1953 	/*
1954 	 * Otherwise we notify CPUs currently running in this HAT
1955 	 */
1956 	hat_enter(hat);
1957 	kpreempt_disable();
1958 	CPUSET_ONLY(justme, CPU->cpu_id);
1959 	if (CPUSET_ISEQUAL(hat->hat_cpus, justme))
1960 		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
1961 	else
1962 		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL,
1963 		    X_CALL_HIPRI, hat->hat_cpus, hati_demap_func);
1964 	kpreempt_enable();
1965 	hat_exit(hat);
1966 }
1967 
1968 /*
1969  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
1970  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
1971  * handle releasing of the htables.
1972  */
1973 void
1974 hat_pte_unmap(
1975 	htable_t	*ht,
1976 	uint_t		entry,
1977 	uint_t		flags,
1978 	x86pte_t	old_pte,
1979 	void		*pte_ptr)
1980 {
1981 	hat_t		*hat = ht->ht_hat;
1982 	hment_t		*hm = NULL;
1983 	page_t		*pp = NULL;
1984 	level_t		l = ht->ht_level;
1985 	pfn_t		pfn;
1986 
1987 	/*
1988 	 * We always track the locking counts, even if nothing is unmapped
1989 	 */
1990 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
1991 		ASSERT(ht->ht_lock_cnt > 0);
1992 		HTABLE_LOCK_DEC(ht);
1993 	}
1994 
1995 	/*
1996 	 * Figure out which page's mapping list lock to acquire using the PFN
1997 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
1998 	 * If another thread, probably a hat_pageunload, has asynchronously
1999 	 * unmapped/remapped this address we'll loop here.
2000 	 */
2001 	ASSERT(ht->ht_busy > 0);
2002 	while (PTE_ISVALID(old_pte)) {
2003 		pfn = PTE2PFN(old_pte, l);
2004 		if (PTE_GET(old_pte, PT_NOCONSIST)) {
2005 			pp = NULL;
2006 		} else {
2007 			pp = page_numtopp_nolock(pfn);
2008 			ASSERT(pp != NULL);
2009 			x86_hm_enter(pp);
2010 		}
2011 		old_pte = x86pte_invalidate_pfn(ht, entry, pfn, pte_ptr);
2012 
2013 		/*
2014 		 * If the page hadn't changed we've unmapped it and can proceed
2015 		 */
2016 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2017 			break;
2018 
2019 		/*
2020 		 * Otherwise, we'll have to retry with the current old_pte.
2021 		 * Drop the hment lock, since the pfn may have changed.
2022 		 */
2023 		if (pp != NULL) {
2024 			x86_hm_exit(pp);
2025 			pp = NULL;
2026 		} else {
2027 			ASSERT(PTE_GET(old_pte, PT_NOCONSIST));
2028 		}
2029 	}
2030 
2031 	/*
2032 	 * If the old mapping wasn't valid, there's nothing more to do
2033 	 */
2034 	if (!PTE_ISVALID(old_pte)) {
2035 		if (pp != NULL)
2036 			x86_hm_exit(pp);
2037 		return;
2038 	}
2039 
2040 	/*
2041 	 * Take care of syncing any MOD/REF bits and removing the hment.
2042 	 */
2043 	if (pp != NULL) {
2044 		if (!(flags & HAT_UNLOAD_NOSYNC))
2045 			hati_sync_pte_to_page(pp, old_pte, l);
2046 		hm = hment_remove(pp, ht, entry);
2047 		x86_hm_exit(pp);
2048 		if (hm != NULL)
2049 			hment_free(hm);
2050 	}
2051 
2052 	/*
2053 	 * Handle book keeping in the htable and hat
2054 	 */
2055 	ASSERT(ht->ht_valid_cnt > 0);
2056 	HTABLE_DEC(ht->ht_valid_cnt);
2057 	PGCNT_DEC(hat, l);
2058 }
2059 
2060 /*
2061  * very cheap unload implementation to special case some kernel addresses
2062  */
2063 static void
2064 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2065 {
2066 	uintptr_t	va = (uintptr_t)addr;
2067 	uintptr_t	eva = va + len;
2068 	pgcnt_t		pg_off;
2069 	htable_t	*ht;
2070 	uint_t		entry;
2071 	void		*pte_ptr;
2072 	x86pte_t	old_pte;
2073 
2074 	for (; va < eva; va += MMU_PAGESIZE) {
2075 		/*
2076 		 * Get the PTE
2077 		 */
2078 		pg_off = mmu_btop(va - mmu.kmap_addr);
2079 		if (mmu.pae_hat) {
2080 			pte_ptr = mmu.kmap_ptes + pg_off;
2081 			old_pte = *(x86pte_t *)pte_ptr;
2082 		} else {
2083 			pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
2084 			old_pte = *(x86pte32_t *)pte_ptr;
2085 		}
2086 
2087 		/*
2088 		 * get the htable / entry
2089 		 */
2090 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2091 		    >> LEVEL_SHIFT(1)];
2092 		entry = htable_va2entry(va, ht);
2093 
2094 		/*
2095 		 * use mostly common code to unmap it.
2096 		 */
2097 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr);
2098 	}
2099 }
2100 
2101 
2102 /*
2103  * unload a range of virtual address space (no callback)
2104  */
2105 void
2106 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2107 {
2108 	uintptr_t va = (uintptr_t)addr;
2109 	ASSERT(hat == kas.a_hat || va + len <= kernelbase);
2110 
2111 	/*
2112 	 * special case for performance.
2113 	 */
2114 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2115 		ASSERT(hat == kas.a_hat);
2116 		hat_kmap_unload(addr, len, flags);
2117 		return;
2118 	}
2119 	hat_unload_callback(hat, addr, len, flags, NULL);
2120 }
2121 
2122 /*
2123  * Do the callbacks for ranges being unloaded.
2124  */
2125 typedef struct range_info {
2126 	uintptr_t	rng_va;
2127 	ulong_t		rng_cnt;
2128 	level_t		rng_level;
2129 } range_info_t;
2130 
2131 static void
2132 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2133 {
2134 	/*
2135 	 * do callbacks to upper level VM system
2136 	 */
2137 	while (cb != NULL && cnt > 0) {
2138 		--cnt;
2139 		cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2140 		cb->hcb_end_addr = cb->hcb_start_addr;
2141 		cb->hcb_end_addr +=
2142 		    range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level);
2143 		cb->hcb_function(cb);
2144 	}
2145 }
2146 
2147 /*
2148  * Unload a given range of addresses (has optional callback)
2149  *
2150  * Flags:
2151  * define	HAT_UNLOAD		0x00
2152  * define	HAT_UNLOAD_NOSYNC	0x02
2153  * define	HAT_UNLOAD_UNLOCK	0x04
2154  * define	HAT_UNLOAD_OTHER	0x08 - not used
2155  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
2156  */
2157 #define	MAX_UNLOAD_CNT (8)
2158 void
2159 hat_unload_callback(
2160 	hat_t		*hat,
2161 	caddr_t		addr,
2162 	size_t		len,
2163 	uint_t		flags,
2164 	hat_callback_t	*cb)
2165 {
2166 	uintptr_t	vaddr = (uintptr_t)addr;
2167 	uintptr_t	eaddr = vaddr + len;
2168 	htable_t	*ht = NULL;
2169 	uint_t		entry;
2170 	uintptr_t	last_va = (uintptr_t)-1L;
2171 	range_info_t	r[MAX_UNLOAD_CNT];
2172 	uint_t		r_cnt = 0;
2173 	x86pte_t	old_pte;
2174 
2175 	HATIN(hat_unload_callback, hat, addr, len);
2176 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase);
2177 	ASSERT(IS_PAGEALIGNED(vaddr));
2178 	ASSERT(IS_PAGEALIGNED(eaddr));
2179 
2180 	while (vaddr < eaddr) {
2181 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2182 		if (ht == NULL)
2183 			break;
2184 
2185 		ASSERT(!IN_VA_HOLE(vaddr));
2186 
2187 		if (vaddr < (uintptr_t)addr)
2188 			panic("hat_unload_callback(): unmap inside large page");
2189 
2190 		/*
2191 		 * We'll do the call backs for contiguous ranges
2192 		 */
2193 		if (vaddr != last_va ||
2194 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2195 			if (r_cnt == MAX_UNLOAD_CNT) {
2196 				handle_ranges(cb, r_cnt, r);
2197 				r_cnt = 0;
2198 			}
2199 			r[r_cnt].rng_va = vaddr;
2200 			r[r_cnt].rng_cnt = 0;
2201 			r[r_cnt].rng_level = ht->ht_level;
2202 			++r_cnt;
2203 		}
2204 
2205 		/*
2206 		 * Unload one mapping from the page tables.
2207 		 */
2208 		entry = htable_va2entry(vaddr, ht);
2209 		hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2210 
2211 		ASSERT(ht->ht_level <= mmu.max_page_level);
2212 		last_va = vaddr;
2213 		vaddr += LEVEL_SIZE(ht->ht_level);
2214 		++r[r_cnt - 1].rng_cnt;
2215 	}
2216 	if (ht)
2217 		htable_release(ht);
2218 
2219 	/*
2220 	 * handle last range for callbacks
2221 	 */
2222 	if (r_cnt > 0)
2223 		handle_ranges(cb, r_cnt, r);
2224 
2225 	HATOUT(hat_unload_callback, hat, addr);
2226 }
2227 
2228 /*
2229  * synchronize mapping with software data structures
2230  *
2231  * This interface is currently only used by the working set monitor
2232  * driver.
2233  */
2234 /*ARGSUSED*/
2235 void
2236 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2237 {
2238 	uintptr_t	vaddr = (uintptr_t)addr;
2239 	uintptr_t	eaddr = vaddr + len;
2240 	htable_t	*ht = NULL;
2241 	uint_t		entry;
2242 	x86pte_t	pte;
2243 	x86pte_t	save_pte;
2244 	x86pte_t	new;
2245 	page_t		*pp;
2246 
2247 	ASSERT(!IN_VA_HOLE(vaddr));
2248 	ASSERT(IS_PAGEALIGNED(vaddr));
2249 	ASSERT(IS_PAGEALIGNED(eaddr));
2250 	ASSERT(hat == kas.a_hat || eaddr <= kernelbase);
2251 
2252 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2253 try_again:
2254 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
2255 		if (ht == NULL)
2256 			break;
2257 		entry = htable_va2entry(vaddr, ht);
2258 
2259 		if (PTE_GET(pte, PT_NOSYNC) ||
2260 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
2261 			continue;
2262 
2263 		/*
2264 		 * We need to acquire the mapping list lock to protect
2265 		 * against hat_pageunload(), hat_unload(), etc.
2266 		 */
2267 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2268 		if (pp == NULL)
2269 			break;
2270 		x86_hm_enter(pp);
2271 		save_pte = pte;
2272 		pte = x86pte_get(ht, entry);
2273 		if (pte != save_pte) {
2274 			x86_hm_exit(pp);
2275 			goto try_again;
2276 		}
2277 		if (PTE_GET(pte, PT_NOSYNC) ||
2278 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2279 			x86_hm_exit(pp);
2280 			continue;
2281 		}
2282 
2283 		/*
2284 		 * Need to clear ref or mod bits. We may compete with
2285 		 * hardware updating the R/M bits and have to try again.
2286 		 */
2287 		if (flags == HAT_SYNC_ZERORM) {
2288 			new = pte;
2289 			PTE_CLR(new, PT_REF | PT_MOD);
2290 			pte = hati_update_pte(ht, entry, pte, new);
2291 			if (pte != 0) {
2292 				x86_hm_exit(pp);
2293 				goto try_again;
2294 			}
2295 		} else {
2296 			/*
2297 			 * sync the PTE to the page_t
2298 			 */
2299 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2300 		}
2301 		x86_hm_exit(pp);
2302 	}
2303 	if (ht)
2304 		htable_release(ht);
2305 }
2306 
2307 /*
2308  * void	hat_map(hat, addr, len, flags)
2309  */
2310 /*ARGSUSED*/
2311 void
2312 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2313 {
2314 	/* does nothing */
2315 }
2316 
2317 /*
2318  * uint_t hat_getattr(hat, addr, *attr)
2319  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
2320  *	mapping and *attr is valid, nonzero if there was no mapping and
2321  *	*attr is not valid.
2322  */
2323 uint_t
2324 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2325 {
2326 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2327 	htable_t	*ht = NULL;
2328 	x86pte_t	pte;
2329 
2330 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
2331 
2332 	if (IN_VA_HOLE(vaddr))
2333 		return ((uint_t)-1);
2334 
2335 	ht = htable_getpte(hat, vaddr, NULL, &pte, MAX_PAGE_LEVEL);
2336 	if (ht == NULL)
2337 		return ((uint_t)-1);
2338 
2339 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2340 		htable_release(ht);
2341 		return ((uint_t)-1);
2342 	}
2343 
2344 	*attr = PROT_READ;
2345 	if (PTE_GET(pte, PT_WRITABLE))
2346 		*attr |= PROT_WRITE;
2347 	if (PTE_GET(pte, PT_USER))
2348 		*attr |= PROT_USER;
2349 	if (!PTE_GET(pte, mmu.pt_nx))
2350 		*attr |= PROT_EXEC;
2351 	if (PTE_GET(pte, PT_NOSYNC))
2352 		*attr |= HAT_NOSYNC;
2353 	htable_release(ht);
2354 	return (0);
2355 }
2356 
2357 /*
2358  * hat_updateattr() applies the given attribute change to an existing mapping
2359  */
2360 #define	HAT_LOAD_ATTR		1
2361 #define	HAT_SET_ATTR		2
2362 #define	HAT_CLR_ATTR		3
2363 
2364 static void
2365 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2366 {
2367 	uintptr_t	vaddr = (uintptr_t)addr;
2368 	uintptr_t	eaddr = (uintptr_t)addr + len;
2369 	htable_t	*ht = NULL;
2370 	uint_t		entry;
2371 	x86pte_t	oldpte, newpte;
2372 	page_t		*pp;
2373 
2374 	ASSERT(IS_PAGEALIGNED(vaddr));
2375 	ASSERT(IS_PAGEALIGNED(eaddr));
2376 	ASSERT(hat == kas.a_hat ||
2377 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2378 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2379 try_again:
2380 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2381 		if (ht == NULL)
2382 			break;
2383 		if (PTE_GET(oldpte, PT_NOCONSIST))
2384 			continue;
2385 
2386 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2387 		if (pp == NULL)
2388 			continue;
2389 		x86_hm_enter(pp);
2390 
2391 		newpte = oldpte;
2392 		/*
2393 		 * We found a page table entry in the desired range,
2394 		 * figure out the new attributes.
2395 		 */
2396 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2397 			if ((attr & PROT_WRITE) &&
2398 			    !PTE_GET(oldpte, PT_WRITABLE))
2399 				newpte |= PT_WRITABLE;
2400 
2401 			if ((attr & HAT_NOSYNC) && !PTE_GET(oldpte, PT_NOSYNC))
2402 				newpte |= PT_NOSYNC;
2403 
2404 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2405 				newpte &= ~mmu.pt_nx;
2406 		}
2407 
2408 		if (what == HAT_LOAD_ATTR) {
2409 			if (!(attr & PROT_WRITE) &&
2410 			    PTE_GET(oldpte, PT_WRITABLE))
2411 				newpte &= ~PT_WRITABLE;
2412 
2413 			if (!(attr & HAT_NOSYNC) && PTE_GET(oldpte, PT_NOSYNC))
2414 				newpte &= ~PT_NOSYNC;
2415 
2416 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2417 				newpte |= mmu.pt_nx;
2418 		}
2419 
2420 		if (what == HAT_CLR_ATTR) {
2421 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2422 				newpte &= ~PT_WRITABLE;
2423 
2424 			if ((attr & HAT_NOSYNC) && PTE_GET(oldpte, PT_NOSYNC))
2425 				newpte &= ~PT_NOSYNC;
2426 
2427 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2428 				newpte |= mmu.pt_nx;
2429 		}
2430 
2431 		/*
2432 		 * what about PROT_READ or others? this code only handles:
2433 		 * EXEC, WRITE, NOSYNC
2434 		 */
2435 
2436 		/*
2437 		 * If new PTE really changed, update the table.
2438 		 */
2439 		if (newpte != oldpte) {
2440 			entry = htable_va2entry(vaddr, ht);
2441 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2442 			if (oldpte != 0) {
2443 				x86_hm_exit(pp);
2444 				goto try_again;
2445 			}
2446 		}
2447 		x86_hm_exit(pp);
2448 	}
2449 	if (ht)
2450 		htable_release(ht);
2451 }
2452 
2453 /*
2454  * Various wrappers for hat_updateattr()
2455  */
2456 void
2457 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2458 {
2459 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
2460 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2461 }
2462 
2463 void
2464 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2465 {
2466 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
2467 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2468 }
2469 
2470 void
2471 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2472 {
2473 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
2474 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2475 }
2476 
2477 void
2478 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2479 {
2480 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= kernelbase);
2481 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2482 }
2483 
2484 /*ARGSUSED*/
2485 void
2486 hat_chgattr_pagedir(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2487 {
2488 	panic("hat_chgattr_pgdir() not supported - used by 80387 emulation");
2489 }
2490 
2491 /*
2492  * size_t hat_getpagesize(hat, addr)
2493  *	returns pagesize in bytes for <hat, addr>. returns -1 of there is
2494  *	no mapping. This is an advisory call.
2495  */
2496 ssize_t
2497 hat_getpagesize(hat_t *hat, caddr_t addr)
2498 {
2499 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2500 	htable_t	*ht;
2501 	size_t		pagesize;
2502 
2503 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
2504 	if (IN_VA_HOLE(vaddr))
2505 		return (-1);
2506 	ht = htable_getpage(hat, vaddr, NULL);
2507 	if (ht == NULL)
2508 		return (-1);
2509 	pagesize = LEVEL_SIZE(ht->ht_level);
2510 	htable_release(ht);
2511 	return (pagesize);
2512 }
2513 
2514 
2515 
2516 /*
2517  * pfn_t hat_getpfnum(hat, addr)
2518  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2519  */
2520 pfn_t
2521 hat_getpfnum(hat_t *hat, caddr_t addr)
2522 {
2523 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2524 	htable_t	*ht;
2525 	uint_t		entry;
2526 	pfn_t		pfn = PFN_INVALID;
2527 
2528 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
2529 	if (khat_running == 0)
2530 		panic("hat_getpfnum(): called too early\n");
2531 
2532 	if (IN_VA_HOLE(vaddr))
2533 		return (PFN_INVALID);
2534 
2535 	/*
2536 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2537 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2538 	 * this up.
2539 	 */
2540 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2541 		x86pte_t pte;
2542 		pgcnt_t pg_off;
2543 
2544 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2545 		if (mmu.pae_hat) {
2546 #ifdef __i386
2547 			volatile x86pte_t *p = mmu.kmap_ptes + pg_off;
2548 
2549 			do {
2550 				pte = *p;
2551 			} while (pte != *p);
2552 #else
2553 			pte = mmu.kmap_ptes[pg_off];
2554 #endif
2555 		} else {
2556 			pte = ((x86pte32_t *)mmu.kmap_ptes)[pg_off];
2557 		}
2558 		if (!PTE_ISVALID(pte))
2559 			return (PFN_INVALID);
2560 		/*LINTED [use of constant 0 causes a silly lint warning] */
2561 		return (PTE2PFN(pte, 0));
2562 	}
2563 
2564 	ht = htable_getpage(hat, vaddr, &entry);
2565 	if (ht == NULL)
2566 		return (PFN_INVALID);
2567 	ASSERT(vaddr >= ht->ht_vaddr);
2568 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2569 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2570 	if (ht->ht_level > 0)
2571 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2572 	htable_release(ht);
2573 	return (pfn);
2574 }
2575 
2576 /*
2577  * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
2578  * Use hat_getpfnum(kas.a_hat, ...) instead.
2579  *
2580  * We'd like to return PFN_INVALID if the mappings have underlying page_t's
2581  * but can't right now due to the fact that some software has grown to use
2582  * this interface incorrectly. So for now when the interface is misused,
2583  * return a warning to the user that in the future it won't work in the
2584  * way they're abusing it, and carry on.
2585  *
2586  * Note that hat_getkpfnum() is never supported on amd64.
2587  */
2588 #if !defined(__amd64)
2589 pfn_t
2590 hat_getkpfnum(caddr_t addr)
2591 {
2592 	pfn_t	pfn;
2593 	int badcaller = 0;
2594 
2595 
2596 	if (khat_running == 0)
2597 		panic("hat_getkpfnum(): called too early\n");
2598 	if ((uintptr_t)addr < kernelbase)
2599 		return (PFN_INVALID);
2600 
2601 
2602 	if (segkpm && IS_KPM_ADDR(addr)) {
2603 		badcaller = 1;
2604 		pfn = hat_kpm_va2pfn(addr);
2605 	} else {
2606 		pfn = hat_getpfnum(kas.a_hat, addr);
2607 		badcaller = pf_is_memory(pfn);
2608 	}
2609 
2610 	if (badcaller)
2611 		hat_getkpfnum_badcall(caller());
2612 	return (pfn);
2613 }
2614 #endif /* __amd64 */
2615 
2616 /*
2617  * int hat_probe(hat, addr)
2618  *	return 0 if no valid mapping is present.  Faster version
2619  *	of hat_getattr in certain architectures.
2620  */
2621 int
2622 hat_probe(hat_t *hat, caddr_t addr)
2623 {
2624 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2625 	uint_t		entry;
2626 	htable_t	*ht;
2627 	pgcnt_t		pg_off;
2628 
2629 	ASSERT(hat == kas.a_hat || vaddr < kernelbase);
2630 	ASSERT(hat == kas.a_hat ||
2631 	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2632 	if (IN_VA_HOLE(vaddr))
2633 		return (0);
2634 
2635 	/*
2636 	 * Most common use of hat_probe is from segmap. We special case it
2637 	 * for performance.
2638 	 */
2639 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2640 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2641 		if (mmu.pae_hat)
2642 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2643 		else
2644 			return (PTE_ISVALID(
2645 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2646 	}
2647 
2648 	ht = htable_getpage(hat, vaddr, &entry);
2649 	if (ht == NULL)
2650 		return (0);
2651 	htable_release(ht);
2652 	return (1);
2653 }
2654 
2655 /*
2656  * Simple implementation of ISM. hat_share() is just like hat_memload_array(),
2657  * except that we use the ism_hat's existing mappings to determine the pages
2658  * and protections to use for this hat. In case we find a properly aligned
2659  * and sized pagetable of 4K mappings, we will attempt to share the pagetable
2660  * itself.
2661  */
2662 /*ARGSUSED*/
2663 int
2664 hat_share(
2665 	hat_t		*hat,
2666 	caddr_t		addr,
2667 	hat_t		*ism_hat,
2668 	caddr_t		src_addr,
2669 	size_t		len,	/* almost useless value, see below.. */
2670 	uint_t		ismszc)
2671 {
2672 	uintptr_t	vaddr_start = (uintptr_t)addr;
2673 	uintptr_t	vaddr;
2674 	uintptr_t	pt_vaddr;
2675 	uintptr_t	eaddr = vaddr_start + len;
2676 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
2677 	uintptr_t	ism_addr = ism_addr_start;
2678 	uintptr_t	e_ism_addr = ism_addr + len;
2679 	htable_t	*ism_ht = NULL;
2680 	htable_t	*ht;
2681 	x86pte_t	pte;
2682 	page_t		*pp;
2683 	pfn_t		pfn;
2684 	level_t		l;
2685 	pgcnt_t		pgcnt;
2686 	uint_t		prot;
2687 	uint_t		valid_cnt;
2688 
2689 	/*
2690 	 * We might be asked to share an empty DISM hat by as_dup()
2691 	 */
2692 	ASSERT(hat != kas.a_hat);
2693 	ASSERT(eaddr <= kernelbase);
2694 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
2695 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
2696 		return (0);
2697 	}
2698 
2699 	/*
2700 	 * The SPT segment driver often passes us a size larger than there are
2701 	 * valid mappings. That's because it rounds the segment size up to a
2702 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
2703 	 */
2704 	HATIN(hat_share, hat, addr, len);
2705 	ASSERT(IS_PAGEALIGNED(vaddr_start));
2706 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
2707 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
2708 	while (ism_addr < e_ism_addr) {
2709 		/*
2710 		 * use htable_walk to get the next valid ISM mapping
2711 		 */
2712 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2713 		if (ism_ht == NULL)
2714 			break;
2715 
2716 		/*
2717 		 * Find the largest page size we can use, based on the
2718 		 * ISM mapping size, our address alignment and the remaining
2719 		 * map length.
2720 		 */
2721 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
2722 		for (l = ism_ht->ht_level; l > 0; --l) {
2723 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
2724 			    (vaddr & LEVEL_OFFSET(l)) == 0)
2725 				break;
2726 		}
2727 
2728 		/*
2729 		 * attempt to share the pagetable
2730 		 *
2731 		 * - only 4K pagetables are shared (ie. level == 0)
2732 		 * - the hat_share() length must cover the whole pagetable
2733 		 * - the shared address must align at level 1
2734 		 * - a shared PTE for this address already exists OR
2735 		 * - no page table for this address exists yet
2736 		 */
2737 		pt_vaddr =
2738 		    vaddr_start + (ism_ht->ht_vaddr - ism_addr_start);
2739 		if (ism_ht->ht_level == 0 &&
2740 		    ism_ht->ht_vaddr + LEVEL_SIZE(1) <= e_ism_addr &&
2741 		    (pt_vaddr & LEVEL_OFFSET(1)) == 0) {
2742 
2743 			ht = htable_lookup(hat, pt_vaddr, 0);
2744 			if (ht == NULL)
2745 				ht = htable_create(hat, pt_vaddr, 0, ism_ht);
2746 
2747 			if (ht->ht_level > 0 ||
2748 			    !(ht->ht_flags & HTABLE_SHARED_PFN)) {
2749 
2750 				htable_release(ht);
2751 
2752 			} else {
2753 
2754 				/*
2755 				 * share the page table
2756 				 */
2757 				ASSERT(ht->ht_level == 0);
2758 				ASSERT(ht->ht_shares == ism_ht);
2759 				valid_cnt = ism_ht->ht_valid_cnt;
2760 				atomic_add_long(&hat->hat_pages_mapped[0],
2761 				    valid_cnt - ht->ht_valid_cnt);
2762 				ht->ht_valid_cnt = valid_cnt;
2763 				htable_release(ht);
2764 				ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(1);
2765 				htable_release(ism_ht);
2766 				ism_ht = NULL;
2767 				continue;
2768 			}
2769 		}
2770 
2771 		/*
2772 		 * Unable to share the page table. Instead we will
2773 		 * create new mappings from the values in the ISM mappings.
2774 		 *
2775 		 * The ISM mapping might be larger than the share area,
2776 		 * be careful to trunctate it if needed.
2777 		 */
2778 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
2779 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
2780 		} else {
2781 			pgcnt = mmu_btop(eaddr - vaddr);
2782 			l = 0;
2783 		}
2784 
2785 		pfn = PTE2PFN(pte, ism_ht->ht_level);
2786 		ASSERT(pfn != PFN_INVALID);
2787 		while (pgcnt > 0) {
2788 			/*
2789 			 * Make a new pte for the PFN for this level.
2790 			 * Copy protections for the pte from the ISM pte.
2791 			 */
2792 			pp = page_numtopp_nolock(pfn);
2793 			ASSERT(pp != NULL);
2794 
2795 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
2796 			if (PTE_GET(pte, PT_WRITABLE))
2797 				prot |= PROT_WRITE;
2798 			if (!PTE_GET(pte, PT_NX))
2799 				prot |= PROT_EXEC;
2800 
2801 			/*
2802 			 * XX64 -- can shm ever be written to swap?
2803 			 * if not we could use HAT_NOSYNC here.
2804 			 */
2805 			hati_load_common(hat, vaddr, pp, prot,
2806 			    HAT_LOAD, l, pfn);
2807 
2808 			vaddr += LEVEL_SIZE(l);
2809 			ism_addr += LEVEL_SIZE(l);
2810 			pfn += mmu_btop(LEVEL_SIZE(l));
2811 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
2812 		}
2813 	}
2814 	if (ism_ht != NULL)
2815 		htable_release(ism_ht);
2816 
2817 	HATOUT(hat_share, hat, addr);
2818 	return (0);
2819 }
2820 
2821 
2822 /*
2823  * hat_unshare() is similar to hat_unload_callback(), but
2824  * we have to look for empty shared pagetables. Note that
2825  * hat_unshare() is always invoked against an entire segment.
2826  */
2827 /*ARGSUSED*/
2828 void
2829 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
2830 {
2831 	uintptr_t	vaddr = (uintptr_t)addr;
2832 	uintptr_t	eaddr = vaddr + len;
2833 	htable_t	*ht = NULL;
2834 	uint_t		need_demaps = 0;
2835 
2836 	ASSERT(hat != kas.a_hat);
2837 	ASSERT(eaddr <= kernelbase);
2838 	HATIN(hat_unshare, hat, addr, len);
2839 	ASSERT(IS_PAGEALIGNED(vaddr));
2840 	ASSERT(IS_PAGEALIGNED(eaddr));
2841 
2842 	/*
2843 	 * First go through and remove any shared pagetables.
2844 	 *
2845 	 * Note that it's ok to delay the demap until the entire range is
2846 	 * finished, because if hat_pageunload() were to unload a shared
2847 	 * pagetable page, its hat_demap() will do a global user TLB invalidate.
2848 	 */
2849 	while (vaddr < eaddr) {
2850 		ASSERT(!IN_VA_HOLE(vaddr));
2851 		/*
2852 		 * find the pagetable that would map the current address
2853 		 */
2854 		ht = htable_lookup(hat, vaddr, 0);
2855 		if (ht != NULL) {
2856 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
2857 				/*
2858 				 * clear mapped pages count, set valid_cnt to 0
2859 				 * and let htable_release() finish the job
2860 				 */
2861 				atomic_add_long(&hat->hat_pages_mapped[0],
2862 				    -ht->ht_valid_cnt);
2863 				ht->ht_valid_cnt = 0;
2864 				need_demaps = 1;
2865 			}
2866 			htable_release(ht);
2867 		}
2868 		vaddr = (vaddr & LEVEL_MASK(1)) + LEVEL_SIZE(1);
2869 	}
2870 
2871 	/*
2872 	 * flush the TLBs - since we're probably dealing with MANY mappings
2873 	 * we do just one CR3 reload.
2874 	 */
2875 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
2876 		hat_demap(hat, DEMAP_ALL_ADDR);
2877 
2878 	/*
2879 	 * Now go back and clean up any unaligned mappings that
2880 	 * couldn't share pagetables.
2881 	 */
2882 	hat_unload(hat, addr, len, HAT_UNLOAD_UNMAP);
2883 
2884 	HATOUT(hat_unshare, hat, addr);
2885 }
2886 
2887 
2888 /*
2889  * hat_reserve() does nothing
2890  */
2891 /*ARGSUSED*/
2892 void
2893 hat_reserve(struct as *as, caddr_t addr, size_t len)
2894 {
2895 }
2896 
2897 
2898 /*
2899  * Called when all mappings to a page should have write permission removed.
2900  * Mostly stolem from hat_pagesync()
2901  */
2902 static void
2903 hati_page_clrwrt(struct page *pp)
2904 {
2905 	hment_t		*hm = NULL;
2906 	htable_t	*ht;
2907 	uint_t		entry;
2908 	x86pte_t	old;
2909 	x86pte_t	new;
2910 	uint_t		pszc = 0;
2911 
2912 next_size:
2913 	/*
2914 	 * walk thru the mapping list clearing write permission
2915 	 */
2916 	x86_hm_enter(pp);
2917 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
2918 		if (ht->ht_level < pszc)
2919 			continue;
2920 		old = x86pte_get(ht, entry);
2921 
2922 		for (;;) {
2923 			/*
2924 			 * Is this mapping of interest?
2925 			 */
2926 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
2927 			    PTE_GET(old, PT_WRITABLE) == 0)
2928 				break;
2929 
2930 			/*
2931 			 * Clear ref/mod writable bits. This requires cross
2932 			 * calls to ensure any executing TLBs see cleared bits.
2933 			 */
2934 			new = old;
2935 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
2936 			old = hati_update_pte(ht, entry, old, new);
2937 			if (old != 0)
2938 				continue;
2939 
2940 			break;
2941 		}
2942 	}
2943 	x86_hm_exit(pp);
2944 	while (pszc < pp->p_szc) {
2945 		page_t *tpp;
2946 		pszc++;
2947 		tpp = PP_GROUPLEADER(pp, pszc);
2948 		if (pp != tpp) {
2949 			pp = tpp;
2950 			goto next_size;
2951 		}
2952 	}
2953 }
2954 
2955 /*
2956  * void hat_page_setattr(pp, flag)
2957  * void hat_page_clrattr(pp, flag)
2958  *	used to set/clr ref/mod bits.
2959  */
2960 void
2961 hat_page_setattr(struct page *pp, uint_t flag)
2962 {
2963 	vnode_t		*vp = pp->p_vnode;
2964 	kmutex_t	*vphm = NULL;
2965 	page_t		**listp;
2966 
2967 	if (PP_GETRM(pp, flag) == flag)
2968 		return;
2969 
2970 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
2971 		vphm = page_vnode_mutex(vp);
2972 		mutex_enter(vphm);
2973 	}
2974 
2975 	PP_SETRM(pp, flag);
2976 
2977 	if (vphm != NULL) {
2978 
2979 		/*
2980 		 * Some File Systems examine v_pages for NULL w/o
2981 		 * grabbing the vphm mutex. Must not let it become NULL when
2982 		 * pp is the only page on the list.
2983 		 */
2984 		if (pp->p_vpnext != pp) {
2985 			page_vpsub(&vp->v_pages, pp);
2986 			if (vp->v_pages != NULL)
2987 				listp = &vp->v_pages->p_vpprev->p_vpnext;
2988 			else
2989 				listp = &vp->v_pages;
2990 			page_vpadd(listp, pp);
2991 		}
2992 		mutex_exit(vphm);
2993 	}
2994 }
2995 
2996 void
2997 hat_page_clrattr(struct page *pp, uint_t flag)
2998 {
2999 	vnode_t		*vp = pp->p_vnode;
3000 	kmutex_t	*vphm = NULL;
3001 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3002 
3003 	/*
3004 	 * for vnode with a sorted v_pages list, we need to change
3005 	 * the attributes and the v_pages list together under page_vnode_mutex.
3006 	 */
3007 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3008 		vphm = page_vnode_mutex(vp);
3009 		mutex_enter(vphm);
3010 	}
3011 
3012 	PP_CLRRM(pp, flag);
3013 
3014 	if (vphm != NULL) {
3015 
3016 		/*
3017 		 * Some File Systems examine v_pages for NULL w/o
3018 		 * grabbing the vphm mutex. Must not let it become NULL when
3019 		 * pp is the only page on the list.
3020 		 */
3021 		if (pp->p_vpnext != pp) {
3022 			page_vpsub(&vp->v_pages, pp);
3023 			page_vpadd(&vp->v_pages, pp);
3024 		}
3025 		mutex_exit(vphm);
3026 
3027 		/*
3028 		 * VMODSORT works by removing write permissions and getting
3029 		 * a fault when a page is made dirty. At this point
3030 		 * we need to remove write permission from all mappings
3031 		 * to this page.
3032 		 */
3033 		hati_page_clrwrt(pp);
3034 	}
3035 }
3036 
3037 /*
3038  *	If flag is specified, returns 0 if attribute is disabled
3039  *	and non zero if enabled.  If flag specifes multiple attributs
3040  *	then returns 0 if ALL atriibutes are disabled.  This is an advisory
3041  *	call.
3042  */
3043 uint_t
3044 hat_page_getattr(struct page *pp, uint_t flag)
3045 {
3046 	return (PP_GETRM(pp, flag));
3047 }
3048 
3049 
3050 /*
3051  * common code used by hat_pageunload() and hment_steal()
3052  */
3053 hment_t *
3054 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3055 {
3056 	x86pte_t old_pte;
3057 	pfn_t pfn = pp->p_pagenum;
3058 	hment_t *hm;
3059 
3060 	/*
3061 	 * We need to acquire a hold on the htable in order to
3062 	 * do the invalidate. We know the htable must exist, since
3063 	 * unmap's don't release the htable until after removing any
3064 	 * hment. Having x86_hm_enter() keeps that from proceeding.
3065 	 */
3066 	htable_acquire(ht);
3067 
3068 	/*
3069 	 * Invalidate the PTE and remove the hment.
3070 	 */
3071 	old_pte = x86pte_invalidate_pfn(ht, entry, pfn, NULL);
3072 	ASSERT(PTE2PFN(old_pte, ht->ht_level) == pfn);
3073 
3074 	/*
3075 	 * Clean up all the htable information for this mapping
3076 	 */
3077 	ASSERT(ht->ht_valid_cnt > 0);
3078 	HTABLE_DEC(ht->ht_valid_cnt);
3079 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
3080 
3081 	/*
3082 	 * sync ref/mod bits to the page_t
3083 	 */
3084 	if (PTE_GET(old_pte, PT_NOSYNC) == 0)
3085 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3086 
3087 	/*
3088 	 * Remove the mapping list entry for this page.
3089 	 */
3090 	hm = hment_remove(pp, ht, entry);
3091 
3092 	/*
3093 	 * drop the mapping list lock so that we might free the
3094 	 * hment and htable.
3095 	 */
3096 	x86_hm_exit(pp);
3097 	htable_release(ht);
3098 	return (hm);
3099 }
3100 
3101 /*
3102  * Unload all translations to a page. If the page is a subpage of a large
3103  * page, the large page mappings are also removed.
3104  *
3105  * The forceflags are unused.
3106  */
3107 
3108 /*ARGSUSED*/
3109 static int
3110 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3111 {
3112 	page_t		*cur_pp = pp;
3113 	hment_t		*hm;
3114 	hment_t		*prev;
3115 	htable_t	*ht;
3116 	uint_t		entry;
3117 	level_t		level;
3118 
3119 	/*
3120 	 * The loop with next_size handles pages with multiple pagesize mappings
3121 	 */
3122 next_size:
3123 	for (;;) {
3124 
3125 		/*
3126 		 * Get a mapping list entry
3127 		 */
3128 		x86_hm_enter(cur_pp);
3129 		for (prev = NULL; ; prev = hm) {
3130 			hm = hment_walk(cur_pp, &ht, &entry, prev);
3131 			if (hm == NULL) {
3132 				x86_hm_exit(cur_pp);
3133 
3134 				/*
3135 				 * If not part of a larger page, we're done.
3136 				 */
3137 				if (cur_pp->p_szc <= pg_szcd)
3138 					return (0);
3139 
3140 				/*
3141 				 * Else check the next larger page size.
3142 				 * hat_page_demote() may decrease p_szc
3143 				 * but that's ok we'll just take an extra
3144 				 * trip discover there're no larger mappings
3145 				 * and return.
3146 				 */
3147 				++pg_szcd;
3148 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3149 				goto next_size;
3150 			}
3151 
3152 			/*
3153 			 * If this mapping size matches, remove it.
3154 			 */
3155 			level = ht->ht_level;
3156 			if (level == pg_szcd)
3157 				break;
3158 		}
3159 
3160 		/*
3161 		 * Remove the mapping list entry for this page.
3162 		 * Note this does the x86_hm_exit() for us.
3163 		 */
3164 		hm = hati_page_unmap(cur_pp, ht, entry);
3165 		if (hm != NULL)
3166 			hment_free(hm);
3167 	}
3168 }
3169 
3170 int
3171 hat_pageunload(struct page *pp, uint_t forceflag)
3172 {
3173 	ASSERT(PAGE_EXCL(pp));
3174 	return (hati_pageunload(pp, 0, forceflag));
3175 }
3176 
3177 /*
3178  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3179  * page level that included pp.
3180  *
3181  * pp must be locked EXCL. Even though no other constituent pages are locked
3182  * it's legal to unload large mappings to pp because all constituent pages of
3183  * large locked mappings have to be locked SHARED.  therefore if we have EXCL
3184  * lock on one of constituent pages none of the large mappings to pp are
3185  * locked.
3186  *
3187  * Change (always decrease) p_szc field starting from the last constituent
3188  * page and ending with root constituent page so that root's pszc always shows
3189  * the area where hat_page_demote() may be active.
3190  *
3191  * This mechanism is only used for file system pages where it's not always
3192  * possible to get EXCL locks on all constituent pages to demote the size code
3193  * (as is done for anonymous or kernel large pages).
3194  */
3195 void
3196 hat_page_demote(page_t *pp)
3197 {
3198 	uint_t		pszc;
3199 	uint_t		rszc;
3200 	uint_t		szc;
3201 	page_t		*rootpp;
3202 	page_t		*firstpp;
3203 	page_t		*lastpp;
3204 	pgcnt_t		pgcnt;
3205 
3206 	ASSERT(PAGE_EXCL(pp));
3207 	ASSERT(!PP_ISFREE(pp));
3208 	ASSERT(page_szc_lock_assert(pp));
3209 
3210 	if (pp->p_szc == 0)
3211 		return;
3212 
3213 	rootpp = PP_GROUPLEADER(pp, 1);
3214 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3215 
3216 	/*
3217 	 * all large mappings to pp are gone
3218 	 * and no new can be setup since pp is locked exclusively.
3219 	 *
3220 	 * Lock the root to make sure there's only one hat_page_demote()
3221 	 * outstanding within the area of this root's pszc.
3222 	 *
3223 	 * Second potential hat_page_demote() is already eliminated by upper
3224 	 * VM layer via page_szc_lock() but we don't rely on it and use our
3225 	 * own locking (so that upper layer locking can be changed without
3226 	 * assumptions that hat depends on upper layer VM to prevent multiple
3227 	 * hat_page_demote() to be issued simultaneously to the same large
3228 	 * page).
3229 	 */
3230 again:
3231 	pszc = pp->p_szc;
3232 	if (pszc == 0)
3233 		return;
3234 	rootpp = PP_GROUPLEADER(pp, pszc);
3235 	x86_hm_enter(rootpp);
3236 	/*
3237 	 * If root's p_szc is different from pszc we raced with another
3238 	 * hat_page_demote().  Drop the lock and try to find the root again.
3239 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
3240 	 * not done yet.  Take and release mlist lock of root's root to wait
3241 	 * for previous hat_page_demote() to complete.
3242 	 */
3243 	if ((rszc = rootpp->p_szc) != pszc) {
3244 		x86_hm_exit(rootpp);
3245 		if (rszc > pszc) {
3246 			/* p_szc of a locked non free page can't increase */
3247 			ASSERT(pp != rootpp);
3248 
3249 			rootpp = PP_GROUPLEADER(rootpp, rszc);
3250 			x86_hm_enter(rootpp);
3251 			x86_hm_exit(rootpp);
3252 		}
3253 		goto again;
3254 	}
3255 	ASSERT(pp->p_szc == pszc);
3256 
3257 	/*
3258 	 * Decrement by 1 p_szc of every constituent page of a region that
3259 	 * covered pp. For example if original szc is 3 it gets changed to 2
3260 	 * everywhere except in region 2 that covered pp. Region 2 that
3261 	 * covered pp gets demoted to 1 everywhere except in region 1 that
3262 	 * covered pp. The region 1 that covered pp is demoted to region
3263 	 * 0. It's done this way because from region 3 we removed level 3
3264 	 * mappings, from region 2 that covered pp we removed level 2 mappings
3265 	 * and from region 1 that covered pp we removed level 1 mappings.  All
3266 	 * changes are done from from high pfn's to low pfn's so that roots
3267 	 * are changed last allowing one to know the largest region where
3268 	 * hat_page_demote() is stil active by only looking at the root page.
3269 	 *
3270 	 * This algorithm is implemented in 2 while loops. First loop changes
3271 	 * p_szc of pages to the right of pp's level 1 region and second
3272 	 * loop changes p_szc of pages of level 1 region that covers pp
3273 	 * and all pages to the left of level 1 region that covers pp.
3274 	 * In the first loop p_szc keeps dropping with every iteration
3275 	 * and in the second loop it keeps increasing with every iteration.
3276 	 *
3277 	 * First loop description: Demote pages to the right of pp outside of
3278 	 * level 1 region that covers pp.  In every iteration of the while
3279 	 * loop below find the last page of szc region and the first page of
3280 	 * (szc - 1) region that is immediately to the right of (szc - 1)
3281 	 * region that covers pp.  From last such page to first such page
3282 	 * change every page's szc to szc - 1. Decrement szc and continue
3283 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3284 	 * of szc region skip to the next iteration.
3285 	 */
3286 	szc = pszc;
3287 	while (szc > 1) {
3288 		lastpp = PP_GROUPLEADER(pp, szc);
3289 		pgcnt = page_get_pagecnt(szc);
3290 		lastpp += pgcnt - 1;
3291 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
3292 		pgcnt = page_get_pagecnt(szc - 1);
3293 		if (lastpp - firstpp < pgcnt) {
3294 			szc--;
3295 			continue;
3296 		}
3297 		firstpp += pgcnt;
3298 		while (lastpp != firstpp) {
3299 			ASSERT(lastpp->p_szc == pszc);
3300 			lastpp->p_szc = szc - 1;
3301 			lastpp--;
3302 		}
3303 		firstpp->p_szc = szc - 1;
3304 		szc--;
3305 	}
3306 
3307 	/*
3308 	 * Second loop description:
3309 	 * First iteration changes p_szc to 0 of every
3310 	 * page of level 1 region that covers pp.
3311 	 * Subsequent iterations find last page of szc region
3312 	 * immediately to the left of szc region that covered pp
3313 	 * and first page of (szc + 1) region that covers pp.
3314 	 * From last to first page change p_szc of every page to szc.
3315 	 * Increment szc and continue looping until szc is pszc.
3316 	 * If pp belongs to the fist szc region of (szc + 1) region
3317 	 * skip to the next iteration.
3318 	 *
3319 	 */
3320 	szc = 0;
3321 	while (szc < pszc) {
3322 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
3323 		if (szc == 0) {
3324 			pgcnt = page_get_pagecnt(1);
3325 			lastpp = firstpp + (pgcnt - 1);
3326 		} else {
3327 			lastpp = PP_GROUPLEADER(pp, szc);
3328 			if (firstpp == lastpp) {
3329 				szc++;
3330 				continue;
3331 			}
3332 			lastpp--;
3333 			pgcnt = page_get_pagecnt(szc);
3334 		}
3335 		while (lastpp != firstpp) {
3336 			ASSERT(lastpp->p_szc == pszc);
3337 			lastpp->p_szc = szc;
3338 			lastpp--;
3339 		}
3340 		firstpp->p_szc = szc;
3341 		if (firstpp == rootpp)
3342 			break;
3343 		szc++;
3344 	}
3345 	x86_hm_exit(rootpp);
3346 }
3347 
3348 /*
3349  * get hw stats from hardware into page struct and reset hw stats
3350  * returns attributes of page
3351  * Flags for hat_pagesync, hat_getstat, hat_sync
3352  *
3353  * define	HAT_SYNC_ZERORM		0x01
3354  *
3355  * Additional flags for hat_pagesync
3356  *
3357  * define	HAT_SYNC_STOPON_REF	0x02
3358  * define	HAT_SYNC_STOPON_MOD	0x04
3359  * define	HAT_SYNC_STOPON_RM	0x06
3360  * define	HAT_SYNC_STOPON_SHARED	0x08
3361  */
3362 uint_t
3363 hat_pagesync(struct page *pp, uint_t flags)
3364 {
3365 	hment_t		*hm = NULL;
3366 	htable_t	*ht;
3367 	uint_t		entry;
3368 	x86pte_t	old, save_old;
3369 	x86pte_t	new;
3370 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
3371 	extern ulong_t	po_share;
3372 	page_t		*save_pp = pp;
3373 	uint_t		pszc = 0;
3374 
3375 	ASSERT(PAGE_LOCKED(pp) || panicstr);
3376 
3377 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3378 		return (pp->p_nrm & nrmbits);
3379 
3380 	if ((flags & HAT_SYNC_ZERORM) == 0) {
3381 
3382 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3383 			return (pp->p_nrm & nrmbits);
3384 
3385 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3386 			return (pp->p_nrm & nrmbits);
3387 
3388 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3389 		    hat_page_getshare(pp) > po_share) {
3390 			if (PP_ISRO(pp))
3391 				PP_SETREF(pp);
3392 			return (pp->p_nrm & nrmbits);
3393 		}
3394 	}
3395 
3396 next_size:
3397 	/*
3398 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3399 	 */
3400 	x86_hm_enter(pp);
3401 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3402 		if (ht->ht_level < pszc)
3403 			continue;
3404 		old = x86pte_get(ht, entry);
3405 try_again:
3406 
3407 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3408 
3409 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3410 			continue;
3411 
3412 		save_old = old;
3413 		if ((flags & HAT_SYNC_ZERORM) != 0) {
3414 
3415 			/*
3416 			 * Need to clear ref or mod bits. Need to demap
3417 			 * to make sure any executing TLBs see cleared bits.
3418 			 */
3419 			new = old;
3420 			PTE_CLR(new, PT_REF | PT_MOD);
3421 			old = hati_update_pte(ht, entry, old, new);
3422 			if (old != 0)
3423 				goto try_again;
3424 
3425 			old = save_old;
3426 		}
3427 
3428 		/*
3429 		 * Sync the PTE
3430 		 */
3431 		if (!(flags & HAT_SYNC_ZERORM) && PTE_GET(old, PT_NOSYNC) == 0)
3432 			hati_sync_pte_to_page(pp, old, ht->ht_level);
3433 
3434 		/*
3435 		 * can stop short if we found a ref'd or mod'd page
3436 		 */
3437 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3438 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3439 			x86_hm_exit(pp);
3440 			return (save_pp->p_nrm & nrmbits);
3441 		}
3442 	}
3443 	x86_hm_exit(pp);
3444 	while (pszc < pp->p_szc) {
3445 		page_t *tpp;
3446 		pszc++;
3447 		tpp = PP_GROUPLEADER(pp, pszc);
3448 		if (pp != tpp) {
3449 			pp = tpp;
3450 			goto next_size;
3451 		}
3452 	}
3453 	return (save_pp->p_nrm & nrmbits);
3454 }
3455 
3456 /*
3457  * returns approx number of mappings to this pp.  A return of 0 implies
3458  * there are no mappings to the page.
3459  */
3460 ulong_t
3461 hat_page_getshare(page_t *pp)
3462 {
3463 	uint_t cnt;
3464 	cnt = hment_mapcnt(pp);
3465 	return (cnt);
3466 }
3467 
3468 /*
3469  * hat_softlock isn't supported anymore
3470  */
3471 /*ARGSUSED*/
3472 faultcode_t
3473 hat_softlock(
3474 	hat_t *hat,
3475 	caddr_t addr,
3476 	size_t *len,
3477 	struct page **page_array,
3478 	uint_t flags)
3479 {
3480 	return (FC_NOSUPPORT);
3481 }
3482 
3483 
3484 
3485 /*
3486  * Routine to expose supported HAT features to platform independent code.
3487  */
3488 /*ARGSUSED*/
3489 int
3490 hat_supported(enum hat_features feature, void *arg)
3491 {
3492 	switch (feature) {
3493 
3494 	case HAT_SHARED_PT:	/* this is really ISM */
3495 		return (1);
3496 
3497 	case HAT_DYNAMIC_ISM_UNMAP:
3498 		return (0);
3499 
3500 	case HAT_VMODSORT:
3501 		return (1);
3502 
3503 	default:
3504 		panic("hat_supported() - unknown feature");
3505 	}
3506 	return (0);
3507 }
3508 
3509 /*
3510  * Called when a thread is exiting and has been switched to the kernel AS
3511  */
3512 void
3513 hat_thread_exit(kthread_t *thd)
3514 {
3515 	ASSERT(thd->t_procp->p_as == &kas);
3516 	hat_switch(thd->t_procp->p_as->a_hat);
3517 }
3518 
3519 /*
3520  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3521  */
3522 /*ARGSUSED*/
3523 void
3524 hat_setup(hat_t *hat, int flags)
3525 {
3526 	kpreempt_disable();
3527 
3528 	hat_switch(hat);
3529 
3530 	kpreempt_enable();
3531 }
3532 
3533 /*
3534  * Prepare for a CPU private mapping for the given address.
3535  *
3536  * The address can only be used from a single CPU and can be remapped
3537  * using hat_mempte_remap().  Return the address of the PTE.
3538  *
3539  * We do the htable_create() if necessary and increment the valid count so
3540  * the htable can't disappear.  We also hat_devload() the page table into
3541  * kernel so that the PTE is quickly accessed.
3542  */
3543 void *
3544 hat_mempte_kern_setup(caddr_t addr, void *pt)
3545 {
3546 	uintptr_t	va = (uintptr_t)addr;
3547 	htable_t	*ht;
3548 	uint_t		entry;
3549 	x86pte_t	oldpte;
3550 	caddr_t		p = (caddr_t)pt;
3551 
3552 	ASSERT(IS_PAGEALIGNED(va));
3553 	ASSERT(!IN_VA_HOLE(va));
3554 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3555 	if (ht == NULL) {
3556 		/*
3557 		 * Note that we don't need a hat_reserves_exit() check
3558 		 * for this htable_create(), since that'll be done by the
3559 		 * hat_devload() just below.
3560 		 */
3561 		ht = htable_create(kas.a_hat, va, 0, NULL);
3562 		entry = htable_va2entry(va, ht);
3563 		ASSERT(ht->ht_level == 0);
3564 		oldpte = x86pte_get(ht, entry);
3565 	}
3566 	if (PTE_ISVALID(oldpte))
3567 		panic("hat_mempte_setup(): address already mapped"
3568 		    "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
3569 
3570 	/*
3571 	 * increment ht_valid_cnt so that the pagetable can't disappear
3572 	 */
3573 	HTABLE_INC(ht->ht_valid_cnt);
3574 
3575 	/*
3576 	 * now we need to map the page holding the pagetable for va into
3577 	 * the kernel's address space.
3578 	 */
3579 	hat_devload(kas.a_hat, p, MMU_PAGESIZE, ht->ht_pfn,
3580 	    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
3581 	    HAT_LOAD | HAT_LOAD_NOCONSIST);
3582 
3583 	/*
3584 	 * return the PTE address to the caller.
3585 	 */
3586 	htable_release(ht);
3587 	p += entry << mmu.pte_size_shift;
3588 	return ((void *)p);
3589 }
3590 
3591 /*
3592  * Prepare for a CPU private mapping for the given address.
3593  */
3594 void *
3595 hat_mempte_setup(caddr_t addr)
3596 {
3597 	x86pte_t	*p;
3598 
3599 	p = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
3600 	return (hat_mempte_kern_setup(addr, p));
3601 }
3602 
3603 /*
3604  * Release a CPU private mapping for the given address.
3605  * We decrement the htable valid count so it might be destroyed.
3606  */
3607 void
3608 hat_mempte_release(caddr_t addr, void *pteptr)
3609 {
3610 	htable_t	*ht;
3611 	uintptr_t	va = ALIGN2PAGE(pteptr);
3612 
3613 	/*
3614 	 * first invalidate any left over mapping and decrement the
3615 	 * htable's mapping count
3616 	 */
3617 	if (mmu.pae_hat)
3618 		*(x86pte_t *)pteptr = 0;
3619 	else
3620 		*(x86pte32_t *)pteptr = 0;
3621 	mmu_tlbflush_entry(addr);
3622 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3623 	if (ht == NULL)
3624 		panic("hat_mempte_release(): invalid address");
3625 	ASSERT(ht->ht_level == 0);
3626 	HTABLE_DEC(ht->ht_valid_cnt);
3627 	htable_release(ht);
3628 
3629 	/*
3630 	 * now blow away the kernel mapping to the page table page
3631 	 * XX64 -- see comment in hat_mempte_setup()
3632 	 */
3633 	hat_unload_callback(kas.a_hat, (caddr_t)va, MMU_PAGESIZE,
3634 	    HAT_UNLOAD, NULL);
3635 }
3636 
3637 /*
3638  * Apply a temporary CPU private mapping to a page. We flush the TLB only
3639  * on this CPU, so this ought to have been called with preemption disabled.
3640  */
3641 void
3642 hat_mempte_remap(
3643 	pfn_t pfn,
3644 	caddr_t addr,
3645 	void *pteptr,
3646 	uint_t attr,
3647 	uint_t flags)
3648 {
3649 	uintptr_t	va = (uintptr_t)addr;
3650 	x86pte_t	pte;
3651 
3652 	/*
3653 	 * Remap the given PTE to the new page's PFN. Invalidate only
3654 	 * on this CPU.
3655 	 */
3656 #ifdef DEBUG
3657 	htable_t	*ht;
3658 	uint_t		entry;
3659 
3660 	ASSERT(IS_PAGEALIGNED(va));
3661 	ASSERT(!IN_VA_HOLE(va));
3662 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3663 	ASSERT(ht != NULL);
3664 	ASSERT(ht->ht_level == 0);
3665 	ASSERT(ht->ht_valid_cnt > 0);
3666 	htable_release(ht);
3667 #endif
3668 	pte = hati_mkpte(pfn, attr, 0, flags);
3669 	if (mmu.pae_hat)
3670 		*(x86pte_t *)pteptr = pte;
3671 	else
3672 		*(x86pte32_t *)pteptr = (x86pte32_t)pte;
3673 	mmu_tlbflush_entry(addr);
3674 }
3675 
3676 
3677 
3678 /*
3679  * Hat locking functions
3680  * XXX - these two functions are currently being used by hatstats
3681  * 	they can be removed by using a per-as mutex for hatstats.
3682  */
3683 void
3684 hat_enter(hat_t *hat)
3685 {
3686 	mutex_enter(&hat->hat_mutex);
3687 }
3688 
3689 void
3690 hat_exit(hat_t *hat)
3691 {
3692 	mutex_exit(&hat->hat_mutex);
3693 }
3694 
3695 
3696 /*
3697  * Used by hat_kern_setup() to create initial kernel HAT mappings from
3698  * the boot loader's mappings.
3699  *
3700  * - size is either PAGESIZE or some multiple of a level one pagesize
3701  * - there may not be page_t's for every pfn. (ie. the nucleus pages)
3702  * - pfn's are continguous for the given va range (va to va + size * cnt)
3703  */
3704 void
3705 hati_kern_setup_load(
3706 	uintptr_t va,	/* starting va of range to map */
3707 	size_t size,	/* either PAGESIZE or multiple of large page size */
3708 	pfn_t pfn,	/* starting PFN */
3709 	pgcnt_t cnt,	/* number of mappings, (cnt * size) == total size */
3710 	uint_t prot)	/* protections (PROT_READ, PROT_WRITE, PROT_EXEC) */
3711 {
3712 	level_t level = (size == MMU_PAGESIZE ? 0 : 1);
3713 	size_t bytes = size * cnt;
3714 	size_t pgsize = LEVEL_SIZE(level);
3715 	page_t *pp;
3716 	uint_t flags = HAT_LOAD;
3717 
3718 	/*
3719 	 * We're only going to throw away mappings below kernelbase or in
3720 	 * boot's special double-mapping region, so set noconsist to avoid
3721 	 * using hments
3722 	 */
3723 	if (BOOT_VA(va))
3724 		flags |= HAT_LOAD_NOCONSIST;
3725 
3726 	prot |= HAT_STORECACHING_OK;
3727 	while (bytes != 0) {
3728 		ASSERT(bytes >= pgsize);
3729 
3730 		pp = NULL;
3731 		if (pf_is_memory(pfn) && !BOOT_VA(va) && level == 0)
3732 			pp = page_numtopp_nolock(pfn);
3733 
3734 		hati_load_common(kas.a_hat, va, pp, prot, flags, level, pfn);
3735 
3736 		va += pgsize;
3737 		pfn += mmu_btop(pgsize);
3738 		bytes -= pgsize;
3739 	}
3740 }
3741 
3742 /*
3743  * HAT part of cpu intialization.
3744  */
3745 void
3746 hat_cpu_online(struct cpu *cpup)
3747 {
3748 	if (cpup != CPU) {
3749 		x86pte_cpu_init(cpup, NULL);
3750 		hat_vlp_setup(cpup);
3751 	}
3752 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
3753 }
3754 
3755 /*
3756  * Function called after all CPUs are brought online.
3757  * Used to remove low address boot mappings.
3758  */
3759 void
3760 clear_boot_mappings(uintptr_t low, uintptr_t high)
3761 {
3762 	uintptr_t vaddr = low;
3763 	htable_t *ht = NULL;
3764 	level_t level;
3765 	uint_t entry;
3766 	x86pte_t pte;
3767 
3768 	/*
3769 	 * On 1st CPU we can unload the prom mappings, basically we blow away
3770 	 * all virtual mappings under kernelbase.
3771 	 */
3772 	while (vaddr < high) {
3773 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
3774 		if (ht == NULL)
3775 			break;
3776 
3777 		level = ht->ht_level;
3778 		entry = htable_va2entry(vaddr, ht);
3779 		ASSERT(level <= mmu.max_page_level);
3780 		ASSERT(PTE_ISPAGE(pte, level));
3781 
3782 		/*
3783 		 * Unload the mapping from the page tables.
3784 		 */
3785 		(void) x86pte_set(ht, entry, 0, NULL);
3786 		ASSERT(ht->ht_valid_cnt > 0);
3787 		HTABLE_DEC(ht->ht_valid_cnt);
3788 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
3789 
3790 		vaddr += LEVEL_SIZE(ht->ht_level);
3791 	}
3792 	if (ht)
3793 		htable_release(ht);
3794 
3795 	/*
3796 	 * cross call for a complete invalidate.
3797 	 */
3798 	hat_demap(kas.a_hat, DEMAP_ALL_ADDR);
3799 }
3800 
3801 /*
3802  * Initialize a special area in the kernel that always holds some PTEs for
3803  * faster performance. This always holds segmap's PTEs.
3804  * In the 32 bit kernel this maps the kernel heap too.
3805  */
3806 void
3807 hat_kmap_init(uintptr_t base, size_t len)
3808 {
3809 	uintptr_t map_addr;	/* base rounded down to large page size */
3810 	uintptr_t map_eaddr;	/* base + len rounded up */
3811 	size_t map_len;
3812 	caddr_t ptes;		/* mapping area in kernel as for ptes */
3813 	size_t window_size;	/* size of mapping area for ptes */
3814 	ulong_t htable_cnt;	/* # of page tables to cover map_len */
3815 	ulong_t i;
3816 	htable_t *ht;
3817 
3818 	/*
3819 	 * we have to map in an area that matches an entire page table
3820 	 */
3821 	map_addr = base & LEVEL_MASK(1);
3822 	map_eaddr = (base + len + LEVEL_SIZE(1) - 1) & LEVEL_MASK(1);
3823 	map_len = map_eaddr - map_addr;
3824 	window_size = mmu_btop(map_len) * mmu.pte_size;
3825 	htable_cnt = mmu_btop(map_len) / mmu.ptes_per_table;
3826 
3827 	/*
3828 	 * allocate vmem for the kmap_ptes
3829 	 */
3830 	ptes = vmem_xalloc(heap_arena, window_size, MMU_PAGESIZE, 0,
3831 	    0, NULL, NULL, VM_SLEEP);
3832 	mmu.kmap_htables =
3833 	    kmem_alloc(htable_cnt * sizeof (htable_t *), KM_SLEEP);
3834 
3835 	/*
3836 	 * Map the page tables that cover kmap into the allocated range.
3837 	 * Note we don't ever htable_release() the kmap page tables - they
3838 	 * can't ever be stolen, freed, etc.
3839 	 */
3840 	for (i = 0; i < htable_cnt; ++i) {
3841 		ht = htable_create(kas.a_hat, map_addr + i * LEVEL_SIZE(1),
3842 		    0, NULL);
3843 		mmu.kmap_htables[i] = ht;
3844 
3845 		hat_devload(kas.a_hat, ptes + i * MMU_PAGESIZE, MMU_PAGESIZE,
3846 		    ht->ht_pfn,
3847 		    PROT_READ | PROT_WRITE | HAT_NOSYNC | HAT_UNORDERED_OK,
3848 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
3849 
3850 	}
3851 
3852 	/*
3853 	 * set information in mmu to activate handling of kmap
3854 	 */
3855 	mmu.kmap_addr = base;
3856 	mmu.kmap_eaddr = base + len;
3857 	mmu.kmap_ptes =
3858 	    (x86pte_t *)(ptes + mmu.pte_size * mmu_btop(base - map_addr));
3859 }
3860 
3861 /*
3862  * Atomically update a new translation for a single page.  If the
3863  * currently installed PTE doesn't match the value we expect to find,
3864  * it's not updated and we return the PTE we found.
3865  *
3866  * If activating nosync or NOWRITE and the page was modified we need to sync
3867  * with the page_t. Also sync with page_t if clearing ref/mod bits.
3868  */
3869 static x86pte_t
3870 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
3871 {
3872 	page_t		*pp;
3873 	uint_t		rm = 0;
3874 	x86pte_t	replaced;
3875 
3876 	if (!PTE_GET(expected, PT_NOSYNC | PT_NOCONSIST) &&
3877 	    PTE_GET(expected, PT_MOD | PT_REF) &&
3878 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
3879 		!PTE_GET(new, PT_MOD | PT_REF))) {
3880 
3881 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
3882 		ASSERT(pp != NULL);
3883 		if (PTE_GET(expected, PT_MOD))
3884 			rm |= P_MOD;
3885 		if (PTE_GET(expected, PT_REF))
3886 			rm |= P_REF;
3887 		PTE_CLR(new, PT_MOD | PT_REF);
3888 	}
3889 
3890 	replaced = x86pte_update(ht, entry, expected, new);
3891 	if (replaced != expected)
3892 		return (replaced);
3893 
3894 	if (rm) {
3895 		/*
3896 		 * sync to all constituent pages of a large page
3897 		 */
3898 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
3899 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
3900 		while (pgcnt-- > 0) {
3901 			/*
3902 			 * hat_page_demote() can't decrease
3903 			 * pszc below this mapping size
3904 			 * since large mapping existed after we
3905 			 * took mlist lock.
3906 			 */
3907 			ASSERT(pp->p_szc >= ht->ht_level);
3908 			hat_page_setattr(pp, rm);
3909 			++pp;
3910 		}
3911 	}
3912 
3913 	return (0);
3914 }
3915 
3916 /*
3917  * Kernel Physical Mapping (kpm) facility
3918  *
3919  * Most of the routines needed to support segkpm are almost no-ops on the
3920  * x86 platform.  We map in the entire segment when it is created and leave
3921  * it mapped in, so there is no additional work required to set up and tear
3922  * down individual mappings.  All of these routines were created to support
3923  * SPARC platforms that have to avoid aliasing in their virtually indexed
3924  * caches.
3925  *
3926  * Most of the routines have sanity checks in them (e.g. verifying that the
3927  * passed-in page is locked).  We don't actually care about most of these
3928  * checks on x86, but we leave them in place to identify problems in the
3929  * upper levels.
3930  */
3931 
3932 /*
3933  * Map in a locked page and return the vaddr.
3934  */
3935 /*ARGSUSED*/
3936 caddr_t
3937 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
3938 {
3939 	caddr_t		vaddr;
3940 
3941 #ifdef DEBUG
3942 	if (kpm_enable == 0) {
3943 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
3944 		return ((caddr_t)NULL);
3945 	}
3946 
3947 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
3948 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
3949 		return ((caddr_t)NULL);
3950 	}
3951 #endif
3952 
3953 	vaddr = hat_kpm_page2va(pp, 1);
3954 
3955 	return (vaddr);
3956 }
3957 
3958 /*
3959  * Mapout a locked page.
3960  */
3961 /*ARGSUSED*/
3962 void
3963 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
3964 {
3965 #ifdef DEBUG
3966 	if (kpm_enable == 0) {
3967 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
3968 		return;
3969 	}
3970 
3971 	if (IS_KPM_ADDR(vaddr) == 0) {
3972 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
3973 		return;
3974 	}
3975 
3976 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
3977 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
3978 		return;
3979 	}
3980 #endif
3981 }
3982 
3983 /*
3984  * Return the kpm virtual address for a specific pfn
3985  */
3986 caddr_t
3987 hat_kpm_pfn2va(pfn_t pfn)
3988 {
3989 	uintptr_t vaddr;
3990 
3991 	ASSERT(kpm_enable);
3992 
3993 	vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
3994 
3995 	return ((caddr_t)vaddr);
3996 }
3997 
3998 /*
3999  * Return the kpm virtual address for the page at pp.
4000  */
4001 /*ARGSUSED*/
4002 caddr_t
4003 hat_kpm_page2va(struct page *pp, int checkswap)
4004 {
4005 	return (hat_kpm_pfn2va(pp->p_pagenum));
4006 }
4007 
4008 /*
4009  * Return the page frame number for the kpm virtual address vaddr.
4010  */
4011 pfn_t
4012 hat_kpm_va2pfn(caddr_t vaddr)
4013 {
4014 	pfn_t		pfn;
4015 
4016 	ASSERT(IS_KPM_ADDR(vaddr));
4017 
4018 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
4019 
4020 	return (pfn);
4021 }
4022 
4023 
4024 /*
4025  * Return the page for the kpm virtual address vaddr.
4026  */
4027 page_t *
4028 hat_kpm_vaddr2page(caddr_t vaddr)
4029 {
4030 	pfn_t		pfn;
4031 
4032 	ASSERT(IS_KPM_ADDR(vaddr));
4033 
4034 	pfn = hat_kpm_va2pfn(vaddr);
4035 
4036 	return (page_numtopp_nolock(pfn));
4037 }
4038 
4039 /*
4040  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4041  * KPM page.  This should never happen on x86
4042  */
4043 int
4044 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4045 {
4046 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p", hat, vaddr);
4047 
4048 	return (0);
4049 }
4050 
4051 /*ARGSUSED*/
4052 void
4053 hat_kpm_mseghash_clear(int nentries)
4054 {}
4055 
4056 /*ARGSUSED*/
4057 void
4058 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4059 {}
4060