xref: /illumos-gate/usr/src/uts/i86pc/vm/hat_i86.c (revision c2e5ad711a2ab2bdc091fb89f679218c8cb3405b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 /*
25  * Copyright (c) 2010, Intel Corporation.
26  * All rights reserved.
27  */
28 /*
29  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
30  * Copyright 2018 Joyent, Inc.  All rights reserved.
31  * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
32  */
33 
34 /*
35  * VM - Hardware Address Translation management for i386 and amd64
36  *
37  * Implementation of the interfaces described in <common/vm/hat.h>
38  *
39  * Nearly all the details of how the hardware is managed should not be
40  * visible outside this layer except for misc. machine specific functions
41  * that work in conjunction with this code.
42  *
43  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
44  */
45 
46 #include <sys/machparam.h>
47 #include <sys/machsystm.h>
48 #include <sys/mman.h>
49 #include <sys/types.h>
50 #include <sys/systm.h>
51 #include <sys/cpuvar.h>
52 #include <sys/thread.h>
53 #include <sys/proc.h>
54 #include <sys/cpu.h>
55 #include <sys/kmem.h>
56 #include <sys/disp.h>
57 #include <sys/shm.h>
58 #include <sys/sysmacros.h>
59 #include <sys/machparam.h>
60 #include <sys/vmem.h>
61 #include <sys/vmsystm.h>
62 #include <sys/promif.h>
63 #include <sys/var.h>
64 #include <sys/x86_archext.h>
65 #include <sys/atomic.h>
66 #include <sys/bitmap.h>
67 #include <sys/controlregs.h>
68 #include <sys/bootconf.h>
69 #include <sys/bootsvcs.h>
70 #include <sys/bootinfo.h>
71 #include <sys/archsystm.h>
72 
73 #include <vm/seg_kmem.h>
74 #include <vm/hat_i86.h>
75 #include <vm/as.h>
76 #include <vm/seg.h>
77 #include <vm/page.h>
78 #include <vm/seg_kp.h>
79 #include <vm/seg_kpm.h>
80 #include <vm/vm_dep.h>
81 #ifdef __xpv
82 #include <sys/hypervisor.h>
83 #endif
84 #include <vm/kboot_mmu.h>
85 #include <vm/seg_spt.h>
86 
87 #include <sys/cmn_err.h>
88 
89 /*
90  * Basic parameters for hat operation.
91  */
92 struct hat_mmu_info mmu;
93 
94 /*
95  * The page that is the kernel's top level pagetable.
96  *
97  * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
98  * on this 4K page for its top level page table. The remaining groups of
99  * 4 entries are used for per processor copies of user VLP pagetables for
100  * running threads.  See hat_switch() and reload_pae32() for details.
101  *
102  * vlp_page[0..3] - level==2 PTEs for kernel HAT
103  * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
104  * vlp_page[8..11]  - level==2 PTE for user thread on cpu 1
105  * etc...
106  */
107 static x86pte_t *vlp_page;
108 
109 /*
110  * forward declaration of internal utility routines
111  */
112 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
113 	x86pte_t new);
114 
115 /*
116  * The kernel address space exists in all HATs. To implement this the
117  * kernel reserves a fixed number of entries in the topmost level(s) of page
118  * tables. The values are setup during startup and then copied to every user
119  * hat created by hat_alloc(). This means that kernelbase must be:
120  *
121  *	  4Meg aligned for 32 bit kernels
122  *	512Gig aligned for x86_64 64 bit kernel
123  *
124  * The hat_kernel_range_ts describe what needs to be copied from kernel hat
125  * to each user hat.
126  */
127 typedef struct hat_kernel_range {
128 	level_t		hkr_level;
129 	uintptr_t	hkr_start_va;
130 	uintptr_t	hkr_end_va;	/* zero means to end of memory */
131 } hat_kernel_range_t;
132 #define	NUM_KERNEL_RANGE 2
133 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
134 static int num_kernel_ranges;
135 
136 uint_t use_boot_reserve = 1;	/* cleared after early boot process */
137 uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */
138 
139 /*
140  * enable_1gpg: controls 1g page support for user applications.
141  * By default, 1g pages are exported to user applications. enable_1gpg can
142  * be set to 0 to not export.
143  */
144 int	enable_1gpg = 1;
145 
146 /*
147  * AMD shanghai processors provide better management of 1gb ptes in its tlb.
148  * By default, 1g page support will be disabled for pre-shanghai AMD
149  * processors that don't have optimal tlb support for the 1g page size.
150  * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
151  * processors.
152  */
153 int	chk_optimal_1gtlb = 1;
154 
155 
156 #ifdef DEBUG
157 uint_t	map1gcnt;
158 #endif
159 
160 
161 /*
162  * A cpuset for all cpus. This is used for kernel address cross calls, since
163  * the kernel addresses apply to all cpus.
164  */
165 cpuset_t khat_cpuset;
166 
167 /*
168  * management stuff for hat structures
169  */
170 kmutex_t	hat_list_lock;
171 kcondvar_t	hat_list_cv;
172 kmem_cache_t	*hat_cache;
173 kmem_cache_t	*hat_hash_cache;
174 kmem_cache_t	*vlp_hash_cache;
175 
176 /*
177  * Simple statistics
178  */
179 struct hatstats hatstat;
180 
181 /*
182  * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
183  * correctly.  For such hypervisors we must set PT_USER for kernel
184  * entries ourselves (normally the emulation would set PT_USER for
185  * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
186  * thus set appropriately.  Note that dboot/kbm is OK, as only the full
187  * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
188  * incorrect.
189  */
190 int pt_kern;
191 
192 /*
193  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
194  */
195 extern void atomic_orb(uchar_t *addr, uchar_t val);
196 extern void atomic_andb(uchar_t *addr, uchar_t val);
197 
198 #ifndef __xpv
199 extern pfn_t memseg_get_start(struct memseg *);
200 #endif
201 
202 #define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
203 #define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
204 #define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
205 #define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)
206 
207 #define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
208 #define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
209 #define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
210 #define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)
211 
212 #define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
213 #define	PP_CLRMOD(pp)   	PP_CLRRM(pp, P_MOD)
214 #define	PP_CLRREF(pp)   	PP_CLRRM(pp, P_REF)
215 #define	PP_CLRRO(pp)    	PP_CLRRM(pp, P_RO)
216 #define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)
217 
218 /*
219  * kmem cache constructor for struct hat
220  */
221 /*ARGSUSED*/
222 static int
223 hati_constructor(void *buf, void *handle, int kmflags)
224 {
225 	hat_t	*hat = buf;
226 
227 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
228 	bzero(hat->hat_pages_mapped,
229 	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
230 	hat->hat_ism_pgcnt = 0;
231 	hat->hat_stats = 0;
232 	hat->hat_flags = 0;
233 	CPUSET_ZERO(hat->hat_cpus);
234 	hat->hat_htable = NULL;
235 	hat->hat_ht_hash = NULL;
236 	return (0);
237 }
238 
239 /*
240  * Allocate a hat structure for as. We also create the top level
241  * htable and initialize it to contain the kernel hat entries.
242  */
243 hat_t *
244 hat_alloc(struct as *as)
245 {
246 	hat_t			*hat;
247 	htable_t		*ht;	/* top level htable */
248 	uint_t			use_vlp;
249 	uint_t			r;
250 	hat_kernel_range_t	*rp;
251 	uintptr_t		va;
252 	uintptr_t		eva;
253 	uint_t			start;
254 	uint_t			cnt;
255 	htable_t		*src;
256 
257 	/*
258 	 * Once we start creating user process HATs we can enable
259 	 * the htable_steal() code.
260 	 */
261 	if (can_steal_post_boot == 0)
262 		can_steal_post_boot = 1;
263 
264 	ASSERT(AS_WRITE_HELD(as));
265 	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
266 	hat->hat_as = as;
267 	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
268 	ASSERT(hat->hat_flags == 0);
269 
270 #if defined(__xpv)
271 	/*
272 	 * No VLP stuff on the hypervisor due to the 64-bit split top level
273 	 * page tables.  On 32-bit it's not needed as the hypervisor takes
274 	 * care of copying the top level PTEs to a below 4Gig page.
275 	 */
276 	use_vlp = 0;
277 #else	/* __xpv */
278 	/* 32 bit processes uses a VLP style hat when running with PAE */
279 #if defined(__amd64)
280 	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
281 #elif defined(__i386)
282 	use_vlp = mmu.pae_hat;
283 #endif
284 #endif	/* __xpv */
285 	if (use_vlp) {
286 		hat->hat_flags = HAT_VLP;
287 		bzero(hat->hat_vlp_ptes, VLP_SIZE);
288 	}
289 
290 	/*
291 	 * Allocate the htable hash
292 	 */
293 	if ((hat->hat_flags & HAT_VLP)) {
294 		hat->hat_num_hash = mmu.vlp_hash_cnt;
295 		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
296 	} else {
297 		hat->hat_num_hash = mmu.hash_cnt;
298 		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
299 	}
300 	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
301 
302 	/*
303 	 * Initialize Kernel HAT entries at the top of the top level page
304 	 * tables for the new hat.
305 	 */
306 	hat->hat_htable = NULL;
307 	hat->hat_ht_cached = NULL;
308 	XPV_DISALLOW_MIGRATE();
309 	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
310 	hat->hat_htable = ht;
311 
312 #if defined(__amd64)
313 	if (hat->hat_flags & HAT_VLP)
314 		goto init_done;
315 #endif
316 
317 	for (r = 0; r < num_kernel_ranges; ++r) {
318 		rp = &kernel_ranges[r];
319 		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
320 		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {
321 
322 			if (rp->hkr_level == TOP_LEVEL(hat))
323 				ht = hat->hat_htable;
324 			else
325 				ht = htable_create(hat, va, rp->hkr_level,
326 				    NULL);
327 
328 			start = htable_va2entry(va, ht);
329 			cnt = HTABLE_NUM_PTES(ht) - start;
330 			eva = va +
331 			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
332 			if (rp->hkr_end_va != 0 &&
333 			    (eva > rp->hkr_end_va || eva == 0))
334 				cnt = htable_va2entry(rp->hkr_end_va, ht) -
335 				    start;
336 
337 #if defined(__i386) && !defined(__xpv)
338 			if (ht->ht_flags & HTABLE_VLP) {
339 				bcopy(&vlp_page[start],
340 				    &hat->hat_vlp_ptes[start],
341 				    cnt * sizeof (x86pte_t));
342 				continue;
343 			}
344 #endif
345 			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
346 			ASSERT(src != NULL);
347 			x86pte_copy(src, ht, start, cnt);
348 			htable_release(src);
349 		}
350 	}
351 
352 init_done:
353 
354 #if defined(__xpv)
355 	/*
356 	 * Pin top level page tables after initializing them
357 	 */
358 	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
359 #if defined(__amd64)
360 	xen_pin(hat->hat_user_ptable, mmu.max_level);
361 #endif
362 #endif
363 	XPV_ALLOW_MIGRATE();
364 
365 	/*
366 	 * Put it at the start of the global list of all hats (used by stealing)
367 	 *
368 	 * kas.a_hat is not in the list but is instead used to find the
369 	 * first and last items in the list.
370 	 *
371 	 * - kas.a_hat->hat_next points to the start of the user hats.
372 	 *   The list ends where hat->hat_next == NULL
373 	 *
374 	 * - kas.a_hat->hat_prev points to the last of the user hats.
375 	 *   The list begins where hat->hat_prev == NULL
376 	 */
377 	mutex_enter(&hat_list_lock);
378 	hat->hat_prev = NULL;
379 	hat->hat_next = kas.a_hat->hat_next;
380 	if (hat->hat_next)
381 		hat->hat_next->hat_prev = hat;
382 	else
383 		kas.a_hat->hat_prev = hat;
384 	kas.a_hat->hat_next = hat;
385 	mutex_exit(&hat_list_lock);
386 
387 	return (hat);
388 }
389 
390 /*
391  * process has finished executing but as has not been cleaned up yet.
392  */
393 /*ARGSUSED*/
394 void
395 hat_free_start(hat_t *hat)
396 {
397 	ASSERT(AS_WRITE_HELD(hat->hat_as));
398 
399 	/*
400 	 * If the hat is currently a stealing victim, wait for the stealing
401 	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
402 	 * won't look at its pagetables anymore.
403 	 */
404 	mutex_enter(&hat_list_lock);
405 	while (hat->hat_flags & HAT_VICTIM)
406 		cv_wait(&hat_list_cv, &hat_list_lock);
407 	hat->hat_flags |= HAT_FREEING;
408 	mutex_exit(&hat_list_lock);
409 }
410 
411 /*
412  * An address space is being destroyed, so we destroy the associated hat.
413  */
414 void
415 hat_free_end(hat_t *hat)
416 {
417 	kmem_cache_t *cache;
418 
419 	ASSERT(hat->hat_flags & HAT_FREEING);
420 
421 	/*
422 	 * must not be running on the given hat
423 	 */
424 	ASSERT(CPU->cpu_current_hat != hat);
425 
426 	/*
427 	 * Remove it from the list of HATs
428 	 */
429 	mutex_enter(&hat_list_lock);
430 	if (hat->hat_prev)
431 		hat->hat_prev->hat_next = hat->hat_next;
432 	else
433 		kas.a_hat->hat_next = hat->hat_next;
434 	if (hat->hat_next)
435 		hat->hat_next->hat_prev = hat->hat_prev;
436 	else
437 		kas.a_hat->hat_prev = hat->hat_prev;
438 	mutex_exit(&hat_list_lock);
439 	hat->hat_next = hat->hat_prev = NULL;
440 
441 #if defined(__xpv)
442 	/*
443 	 * On the hypervisor, unpin top level page table(s)
444 	 */
445 	xen_unpin(hat->hat_htable->ht_pfn);
446 #if defined(__amd64)
447 	xen_unpin(hat->hat_user_ptable);
448 #endif
449 #endif
450 
451 	/*
452 	 * Make a pass through the htables freeing them all up.
453 	 */
454 	htable_purge_hat(hat);
455 
456 	/*
457 	 * Decide which kmem cache the hash table came from, then free it.
458 	 */
459 	if (hat->hat_flags & HAT_VLP)
460 		cache = vlp_hash_cache;
461 	else
462 		cache = hat_hash_cache;
463 	kmem_cache_free(cache, hat->hat_ht_hash);
464 	hat->hat_ht_hash = NULL;
465 
466 	hat->hat_flags = 0;
467 	kmem_cache_free(hat_cache, hat);
468 }
469 
470 /*
471  * round kernelbase down to a supported value to use for _userlimit
472  *
473  * userlimit must be aligned down to an entry in the top level htable.
474  * The one exception is for 32 bit HAT's running PAE.
475  */
476 uintptr_t
477 hat_kernelbase(uintptr_t va)
478 {
479 #if defined(__i386)
480 	va &= LEVEL_MASK(1);
481 #endif
482 	if (IN_VA_HOLE(va))
483 		panic("_userlimit %p will fall in VA hole\n", (void *)va);
484 	return (va);
485 }
486 
487 /*
488  *
489  */
490 static void
491 set_max_page_level()
492 {
493 	level_t lvl;
494 
495 	if (!kbm_largepage_support) {
496 		lvl = 0;
497 	} else {
498 		if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
499 			lvl = 2;
500 			if (chk_optimal_1gtlb &&
501 			    cpuid_opteron_erratum(CPU, 6671130)) {
502 				lvl = 1;
503 			}
504 			if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
505 			    LEVEL_SHIFT(0))) {
506 				lvl = 1;
507 			}
508 		} else {
509 			lvl = 1;
510 		}
511 	}
512 	mmu.max_page_level = lvl;
513 
514 	if ((lvl == 2) && (enable_1gpg == 0))
515 		mmu.umax_page_level = 1;
516 	else
517 		mmu.umax_page_level = lvl;
518 }
519 
520 /*
521  * Initialize hat data structures based on processor MMU information.
522  */
523 void
524 mmu_init(void)
525 {
526 	uint_t max_htables;
527 	uint_t pa_bits;
528 	uint_t va_bits;
529 	int i;
530 
531 	/*
532 	 * If CPU enabled the page table global bit, use it for the kernel
533 	 * This is bit 7 in CR4 (PGE - Page Global Enable).
534 	 */
535 	if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
536 	    (getcr4() & CR4_PGE) != 0)
537 		mmu.pt_global = PT_GLOBAL;
538 
539 	/*
540 	 * Detect NX and PAE usage.
541 	 */
542 	mmu.pae_hat = kbm_pae_support;
543 	if (kbm_nx_support)
544 		mmu.pt_nx = PT_NX;
545 	else
546 		mmu.pt_nx = 0;
547 
548 	/*
549 	 * Use CPU info to set various MMU parameters
550 	 */
551 	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
552 
553 	if (va_bits < sizeof (void *) * NBBY) {
554 		mmu.hole_start = (1ul << (va_bits - 1));
555 		mmu.hole_end = 0ul - mmu.hole_start - 1;
556 	} else {
557 		mmu.hole_end = 0;
558 		mmu.hole_start = mmu.hole_end - 1;
559 	}
560 #if defined(OPTERON_ERRATUM_121)
561 	/*
562 	 * If erratum 121 has already been detected at this time, hole_start
563 	 * contains the value to be subtracted from mmu.hole_start.
564 	 */
565 	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
566 	hole_start = mmu.hole_start - hole_start;
567 #else
568 	hole_start = mmu.hole_start;
569 #endif
570 	hole_end = mmu.hole_end;
571 
572 	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
573 	if (mmu.pae_hat == 0 && pa_bits > 32)
574 		mmu.highest_pfn = PFN_4G - 1;
575 
576 	if (mmu.pae_hat) {
577 		mmu.pte_size = 8;	/* 8 byte PTEs */
578 		mmu.pte_size_shift = 3;
579 	} else {
580 		mmu.pte_size = 4;	/* 4 byte PTEs */
581 		mmu.pte_size_shift = 2;
582 	}
583 
584 	if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
585 		panic("Processor does not support PAE");
586 
587 	if (!is_x86_feature(x86_featureset, X86FSET_CX8))
588 		panic("Processor does not support cmpxchg8b instruction");
589 
590 #if defined(__amd64)
591 
592 	mmu.num_level = 4;
593 	mmu.max_level = 3;
594 	mmu.ptes_per_table = 512;
595 	mmu.top_level_count = 512;
596 
597 	mmu.level_shift[0] = 12;
598 	mmu.level_shift[1] = 21;
599 	mmu.level_shift[2] = 30;
600 	mmu.level_shift[3] = 39;
601 
602 #elif defined(__i386)
603 
604 	if (mmu.pae_hat) {
605 		mmu.num_level = 3;
606 		mmu.max_level = 2;
607 		mmu.ptes_per_table = 512;
608 		mmu.top_level_count = 4;
609 
610 		mmu.level_shift[0] = 12;
611 		mmu.level_shift[1] = 21;
612 		mmu.level_shift[2] = 30;
613 
614 	} else {
615 		mmu.num_level = 2;
616 		mmu.max_level = 1;
617 		mmu.ptes_per_table = 1024;
618 		mmu.top_level_count = 1024;
619 
620 		mmu.level_shift[0] = 12;
621 		mmu.level_shift[1] = 22;
622 	}
623 
624 #endif	/* __i386 */
625 
626 	for (i = 0; i < mmu.num_level; ++i) {
627 		mmu.level_size[i] = 1UL << mmu.level_shift[i];
628 		mmu.level_offset[i] = mmu.level_size[i] - 1;
629 		mmu.level_mask[i] = ~mmu.level_offset[i];
630 	}
631 
632 	set_max_page_level();
633 
634 	mmu_page_sizes = mmu.max_page_level + 1;
635 	mmu_exported_page_sizes = mmu.umax_page_level + 1;
636 
637 	/* restrict legacy applications from using pagesizes 1g and above */
638 	mmu_legacy_page_sizes =
639 	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
640 
641 
642 	for (i = 0; i <= mmu.max_page_level; ++i) {
643 		mmu.pte_bits[i] = PT_VALID | pt_kern;
644 		if (i > 0)
645 			mmu.pte_bits[i] |= PT_PAGESIZE;
646 	}
647 
648 	/*
649 	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
650 	 */
651 	for (i = 1; i < mmu.num_level; ++i)
652 		mmu.ptp_bits[i] = PT_PTPBITS;
653 
654 #if defined(__i386)
655 	mmu.ptp_bits[2] = PT_VALID;
656 #endif
657 
658 	/*
659 	 * Compute how many hash table entries to have per process for htables.
660 	 * We start with 1 page's worth of entries.
661 	 *
662 	 * If physical memory is small, reduce the amount need to cover it.
663 	 */
664 	max_htables = physmax / mmu.ptes_per_table;
665 	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
666 	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
667 		mmu.hash_cnt >>= 1;
668 	mmu.vlp_hash_cnt = mmu.hash_cnt;
669 
670 #if defined(__amd64)
671 	/*
672 	 * If running in 64 bits and physical memory is large,
673 	 * increase the size of the cache to cover all of memory for
674 	 * a 64 bit process.
675 	 */
676 #define	HASH_MAX_LENGTH 4
677 	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
678 		mmu.hash_cnt <<= 1;
679 #endif
680 }
681 
682 
683 /*
684  * initialize hat data structures
685  */
686 void
687 hat_init()
688 {
689 #if defined(__i386)
690 	/*
691 	 * _userlimit must be aligned correctly
692 	 */
693 	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
694 		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
695 		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
696 		halt("hat_init(): Unable to continue");
697 	}
698 #endif
699 
700 	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
701 
702 	/*
703 	 * initialize kmem caches
704 	 */
705 	htable_init();
706 	hment_init();
707 
708 	hat_cache = kmem_cache_create("hat_t",
709 	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
710 	    NULL, 0, 0);
711 
712 	hat_hash_cache = kmem_cache_create("HatHash",
713 	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
714 	    NULL, 0, 0);
715 
716 	/*
717 	 * VLP hats can use a smaller hash table size on large memroy machines
718 	 */
719 	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
720 		vlp_hash_cache = hat_hash_cache;
721 	} else {
722 		vlp_hash_cache = kmem_cache_create("HatVlpHash",
723 		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
724 		    NULL, 0, 0);
725 	}
726 
727 	/*
728 	 * Set up the kernel's hat
729 	 */
730 	AS_LOCK_ENTER(&kas, RW_WRITER);
731 	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
732 	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
733 	kas.a_hat->hat_as = &kas;
734 	kas.a_hat->hat_flags = 0;
735 	AS_LOCK_EXIT(&kas);
736 
737 	CPUSET_ZERO(khat_cpuset);
738 	CPUSET_ADD(khat_cpuset, CPU->cpu_id);
739 
740 	/*
741 	 * The kernel hat's next pointer serves as the head of the hat list .
742 	 * The kernel hat's prev pointer tracks the last hat on the list for
743 	 * htable_steal() to use.
744 	 */
745 	kas.a_hat->hat_next = NULL;
746 	kas.a_hat->hat_prev = NULL;
747 
748 	/*
749 	 * Allocate an htable hash bucket for the kernel
750 	 * XX64 - tune for 64 bit procs
751 	 */
752 	kas.a_hat->hat_num_hash = mmu.hash_cnt;
753 	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
754 	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
755 
756 	/*
757 	 * zero out the top level and cached htable pointers
758 	 */
759 	kas.a_hat->hat_ht_cached = NULL;
760 	kas.a_hat->hat_htable = NULL;
761 
762 	/*
763 	 * Pre-allocate hrm_hashtab before enabling the collection of
764 	 * refmod statistics.  Allocating on the fly would mean us
765 	 * running the risk of suffering recursive mutex enters or
766 	 * deadlocks.
767 	 */
768 	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
769 	    KM_SLEEP);
770 }
771 
772 /*
773  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
774  *
775  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
776  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
777  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
778  */
779 /*ARGSUSED*/
780 static void
781 hat_vlp_setup(struct cpu *cpu)
782 {
783 #if defined(__amd64) && !defined(__xpv)
784 	struct hat_cpu_info *hci = cpu->cpu_hat_info;
785 	pfn_t pfn;
786 
787 	/*
788 	 * allocate the level==2 page table for the bottom most
789 	 * 512Gig of address space (this is where 32 bit apps live)
790 	 */
791 	ASSERT(hci != NULL);
792 	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
793 
794 	/*
795 	 * Allocate a top level pagetable and copy the kernel's
796 	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
797 	 */
798 	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
799 	hci->hci_vlp_pfn =
800 	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
801 	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
802 	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
803 
804 	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
805 	ASSERT(pfn != PFN_INVALID);
806 	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
807 #endif /* __amd64 && !__xpv */
808 }
809 
810 /*ARGSUSED*/
811 static void
812 hat_vlp_teardown(cpu_t *cpu)
813 {
814 #if defined(__amd64) && !defined(__xpv)
815 	struct hat_cpu_info *hci;
816 
817 	if ((hci = cpu->cpu_hat_info) == NULL)
818 		return;
819 	if (hci->hci_vlp_l2ptes)
820 		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
821 	if (hci->hci_vlp_l3ptes)
822 		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
823 #endif
824 }
825 
826 #define	NEXT_HKR(r, l, s, e) {			\
827 	kernel_ranges[r].hkr_level = l;		\
828 	kernel_ranges[r].hkr_start_va = s;	\
829 	kernel_ranges[r].hkr_end_va = e;	\
830 	++r;					\
831 }
832 
833 /*
834  * Finish filling in the kernel hat.
835  * Pre fill in all top level kernel page table entries for the kernel's
836  * part of the address range.  From this point on we can't use any new
837  * kernel large pages if they need PTE's at max_level
838  *
839  * create the kmap mappings.
840  */
841 void
842 hat_init_finish(void)
843 {
844 	size_t		size;
845 	uint_t		r = 0;
846 	uintptr_t	va;
847 	hat_kernel_range_t *rp;
848 
849 
850 	/*
851 	 * We are now effectively running on the kernel hat.
852 	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
853 	 * reserve for all HAT allocations.  From here on, the reserves are
854 	 * only used when avoiding recursion in kmem_alloc().
855 	 */
856 	use_boot_reserve = 0;
857 	htable_adjust_reserve();
858 
859 	/*
860 	 * User HATs are initialized with copies of all kernel mappings in
861 	 * higher level page tables. Ensure that those entries exist.
862 	 */
863 #if defined(__amd64)
864 
865 	NEXT_HKR(r, 3, kernelbase, 0);
866 #if defined(__xpv)
867 	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
868 #endif
869 
870 #elif defined(__i386)
871 
872 #if !defined(__xpv)
873 	if (mmu.pae_hat) {
874 		va = kernelbase;
875 		if ((va & LEVEL_MASK(2)) != va) {
876 			va = P2ROUNDUP(va, LEVEL_SIZE(2));
877 			NEXT_HKR(r, 1, kernelbase, va);
878 		}
879 		if (va != 0)
880 			NEXT_HKR(r, 2, va, 0);
881 	} else
882 #endif /* __xpv */
883 		NEXT_HKR(r, 1, kernelbase, 0);
884 
885 #endif /* __i386 */
886 
887 	num_kernel_ranges = r;
888 
889 	/*
890 	 * Create all the kernel pagetables that will have entries
891 	 * shared to user HATs.
892 	 */
893 	for (r = 0; r < num_kernel_ranges; ++r) {
894 		rp = &kernel_ranges[r];
895 		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
896 		    va += LEVEL_SIZE(rp->hkr_level)) {
897 			htable_t *ht;
898 
899 			if (IN_HYPERVISOR_VA(va))
900 				continue;
901 
902 			/* can/must skip if a page mapping already exists */
903 			if (rp->hkr_level <= mmu.max_page_level &&
904 			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
905 			    NULL) {
906 				htable_release(ht);
907 				continue;
908 			}
909 
910 			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
911 			    NULL);
912 		}
913 	}
914 
915 	/*
916 	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
917 	 * page holding the top level pagetable. We use the remainder for
918 	 * the "per CPU" page tables for VLP processes.
919 	 * Map the top level kernel pagetable into the kernel to make
920 	 * it easy to use bcopy access these tables.
921 	 */
922 	if (mmu.pae_hat) {
923 		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
924 		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
925 		    kas.a_hat->hat_htable->ht_pfn,
926 #if !defined(__xpv)
927 		    PROT_WRITE |
928 #endif
929 		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
930 		    HAT_LOAD | HAT_LOAD_NOCONSIST);
931 	}
932 	hat_vlp_setup(CPU);
933 
934 	/*
935 	 * Create kmap (cached mappings of kernel PTEs)
936 	 * for 32 bit we map from segmap_start .. ekernelheap
937 	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
938 	 */
939 #if defined(__i386)
940 	size = (uintptr_t)ekernelheap - segmap_start;
941 #elif defined(__amd64)
942 	size = segmapsize;
943 #endif
944 	hat_kmap_init((uintptr_t)segmap_start, size);
945 }
946 
947 /*
948  * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
949  * are 32 bit, so for safety we must use atomic_cas_64() to install these.
950  */
951 #ifdef __i386
952 static void
953 reload_pae32(hat_t *hat, cpu_t *cpu)
954 {
955 	x86pte_t *src;
956 	x86pte_t *dest;
957 	x86pte_t pte;
958 	int i;
959 
960 	/*
961 	 * Load the 4 entries of the level 2 page table into this
962 	 * cpu's range of the vlp_page and point cr3 at them.
963 	 */
964 	ASSERT(mmu.pae_hat);
965 	src = hat->hat_vlp_ptes;
966 	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
967 	for (i = 0; i < VLP_NUM_PTES; ++i) {
968 		for (;;) {
969 			pte = dest[i];
970 			if (pte == src[i])
971 				break;
972 			if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
973 				break;
974 		}
975 	}
976 }
977 #endif
978 
979 /*
980  * Switch to a new active hat, maintaining bit masks to track active CPUs.
981  *
982  * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
983  * remains a 32-bit value.
984  */
985 void
986 hat_switch(hat_t *hat)
987 {
988 	uint64_t	newcr3;
989 	cpu_t		*cpu = CPU;
990 	hat_t		*old = cpu->cpu_current_hat;
991 
992 	/*
993 	 * set up this information first, so we don't miss any cross calls
994 	 */
995 	if (old != NULL) {
996 		if (old == hat)
997 			return;
998 		if (old != kas.a_hat)
999 			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
1000 	}
1001 
1002 	/*
1003 	 * Add this CPU to the active set for this HAT.
1004 	 */
1005 	if (hat != kas.a_hat) {
1006 		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1007 	}
1008 	cpu->cpu_current_hat = hat;
1009 
1010 	/*
1011 	 * now go ahead and load cr3
1012 	 */
1013 	if (hat->hat_flags & HAT_VLP) {
1014 #if defined(__amd64)
1015 		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1016 
1017 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1018 		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1019 #elif defined(__i386)
1020 		reload_pae32(hat, cpu);
1021 		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1022 		    (cpu->cpu_id + 1) * VLP_SIZE;
1023 #endif
1024 	} else {
1025 		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1026 	}
1027 #ifdef __xpv
1028 	{
1029 		struct mmuext_op t[2];
1030 		uint_t retcnt;
1031 		uint_t opcnt = 1;
1032 
1033 		t[0].cmd = MMUEXT_NEW_BASEPTR;
1034 		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1035 #if defined(__amd64)
1036 		/*
1037 		 * There's an interesting problem here, as to what to
1038 		 * actually specify when switching to the kernel hat.
1039 		 * For now we'll reuse the kernel hat again.
1040 		 */
1041 		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1042 		if (hat == kas.a_hat)
1043 			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1044 		else
1045 			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1046 		++opcnt;
1047 #endif	/* __amd64 */
1048 		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1049 			panic("HYPERVISOR_mmu_update() failed");
1050 		ASSERT(retcnt == opcnt);
1051 
1052 	}
1053 #else
1054 	setcr3(newcr3);
1055 #endif
1056 	ASSERT(cpu == CPU);
1057 }
1058 
1059 /*
1060  * Utility to return a valid x86pte_t from protections, pfn, and level number
1061  */
1062 static x86pte_t
1063 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1064 {
1065 	x86pte_t	pte;
1066 	uint_t		cache_attr = attr & HAT_ORDER_MASK;
1067 
1068 	pte = MAKEPTE(pfn, level);
1069 
1070 	if (attr & PROT_WRITE)
1071 		PTE_SET(pte, PT_WRITABLE);
1072 
1073 	if (attr & PROT_USER)
1074 		PTE_SET(pte, PT_USER);
1075 
1076 	if (!(attr & PROT_EXEC))
1077 		PTE_SET(pte, mmu.pt_nx);
1078 
1079 	/*
1080 	 * Set the software bits used track ref/mod sync's and hments.
1081 	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1082 	 */
1083 	if (flags & HAT_LOAD_NOCONSIST)
1084 		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1085 	else if (attr & HAT_NOSYNC)
1086 		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1087 
1088 	/*
1089 	 * Set the caching attributes in the PTE. The combination
1090 	 * of attributes are poorly defined, so we pay attention
1091 	 * to them in the given order.
1092 	 *
1093 	 * The test for HAT_STRICTORDER is different because it's defined
1094 	 * as "0" - which was a stupid thing to do, but is too late to change!
1095 	 */
1096 	if (cache_attr == HAT_STRICTORDER) {
1097 		PTE_SET(pte, PT_NOCACHE);
1098 	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1099 	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1100 		/* nothing to set */;
1101 	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1102 		PTE_SET(pte, PT_NOCACHE);
1103 		if (is_x86_feature(x86_featureset, X86FSET_PAT))
1104 			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1105 		else
1106 			PTE_SET(pte, PT_WRITETHRU);
1107 	} else {
1108 		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1109 	}
1110 
1111 	return (pte);
1112 }
1113 
1114 /*
1115  * Duplicate address translations of the parent to the child.
1116  * This function really isn't used anymore.
1117  */
1118 /*ARGSUSED*/
1119 int
1120 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1121 {
1122 	ASSERT((uintptr_t)addr < kernelbase);
1123 	ASSERT(new != kas.a_hat);
1124 	ASSERT(old != kas.a_hat);
1125 	return (0);
1126 }
1127 
1128 /*
1129  * Allocate any hat resources required for a process being swapped in.
1130  */
1131 /*ARGSUSED*/
1132 void
1133 hat_swapin(hat_t *hat)
1134 {
1135 	/* do nothing - we let everything fault back in */
1136 }
1137 
1138 /*
1139  * Unload all translations associated with an address space of a process
1140  * that is being swapped out.
1141  */
1142 void
1143 hat_swapout(hat_t *hat)
1144 {
1145 	uintptr_t	vaddr = (uintptr_t)0;
1146 	uintptr_t	eaddr = _userlimit;
1147 	htable_t	*ht = NULL;
1148 	level_t		l;
1149 
1150 	XPV_DISALLOW_MIGRATE();
1151 	/*
1152 	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1153 	 * seg_spt and shared pagetables can't be swapped out.
1154 	 * Take a look at segspt_shmswapout() - it's a big no-op.
1155 	 *
1156 	 * Instead we'll walk through all the address space and unload
1157 	 * any mappings which we are sure are not shared, not locked.
1158 	 */
1159 	ASSERT(IS_PAGEALIGNED(vaddr));
1160 	ASSERT(IS_PAGEALIGNED(eaddr));
1161 	ASSERT(AS_LOCK_HELD(hat->hat_as));
1162 	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1163 		eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1164 
1165 	while (vaddr < eaddr) {
1166 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1167 		if (ht == NULL)
1168 			break;
1169 
1170 		ASSERT(!IN_VA_HOLE(vaddr));
1171 
1172 		/*
1173 		 * If the page table is shared skip its entire range.
1174 		 */
1175 		l = ht->ht_level;
1176 		if (ht->ht_flags & HTABLE_SHARED_PFN) {
1177 			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1178 			htable_release(ht);
1179 			ht = NULL;
1180 			continue;
1181 		}
1182 
1183 		/*
1184 		 * If the page table has no locked entries, unload this one.
1185 		 */
1186 		if (ht->ht_lock_cnt == 0)
1187 			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1188 			    HAT_UNLOAD_UNMAP);
1189 
1190 		/*
1191 		 * If we have a level 0 page table with locked entries,
1192 		 * skip the entire page table, otherwise skip just one entry.
1193 		 */
1194 		if (ht->ht_lock_cnt > 0 && l == 0)
1195 			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1196 		else
1197 			vaddr += LEVEL_SIZE(l);
1198 	}
1199 	if (ht)
1200 		htable_release(ht);
1201 
1202 	/*
1203 	 * We're in swapout because the system is low on memory, so
1204 	 * go back and flush all the htables off the cached list.
1205 	 */
1206 	htable_purge_hat(hat);
1207 	XPV_ALLOW_MIGRATE();
1208 }
1209 
1210 /*
1211  * returns number of bytes that have valid mappings in hat.
1212  */
1213 size_t
1214 hat_get_mapped_size(hat_t *hat)
1215 {
1216 	size_t total = 0;
1217 	int l;
1218 
1219 	for (l = 0; l <= mmu.max_page_level; l++)
1220 		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1221 	total += hat->hat_ism_pgcnt;
1222 
1223 	return (total);
1224 }
1225 
1226 /*
1227  * enable/disable collection of stats for hat.
1228  */
1229 int
1230 hat_stats_enable(hat_t *hat)
1231 {
1232 	atomic_inc_32(&hat->hat_stats);
1233 	return (1);
1234 }
1235 
1236 void
1237 hat_stats_disable(hat_t *hat)
1238 {
1239 	atomic_dec_32(&hat->hat_stats);
1240 }
1241 
1242 /*
1243  * Utility to sync the ref/mod bits from a page table entry to the page_t
1244  * We must be holding the mapping list lock when this is called.
1245  */
1246 static void
1247 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1248 {
1249 	uint_t	rm = 0;
1250 	pgcnt_t	pgcnt;
1251 
1252 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1253 		return;
1254 
1255 	if (PTE_GET(pte, PT_REF))
1256 		rm |= P_REF;
1257 
1258 	if (PTE_GET(pte, PT_MOD))
1259 		rm |= P_MOD;
1260 
1261 	if (rm == 0)
1262 		return;
1263 
1264 	/*
1265 	 * sync to all constituent pages of a large page
1266 	 */
1267 	ASSERT(x86_hm_held(pp));
1268 	pgcnt = page_get_pagecnt(level);
1269 	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1270 	for (; pgcnt > 0; --pgcnt) {
1271 		/*
1272 		 * hat_page_demote() can't decrease
1273 		 * pszc below this mapping size
1274 		 * since this large mapping existed after we
1275 		 * took mlist lock.
1276 		 */
1277 		ASSERT(pp->p_szc >= level);
1278 		hat_page_setattr(pp, rm);
1279 		++pp;
1280 	}
1281 }
1282 
1283 /*
1284  * This the set of PTE bits for PFN, permissions and caching
1285  * that are allowed to change on a HAT_LOAD_REMAP
1286  */
1287 #define	PT_REMAP_BITS							\
1288 	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
1289 	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1290 
1291 #define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
1292 /*
1293  * Do the low-level work to get a mapping entered into a HAT's pagetables
1294  * and in the mapping list of the associated page_t.
1295  */
1296 static int
1297 hati_pte_map(
1298 	htable_t	*ht,
1299 	uint_t		entry,
1300 	page_t		*pp,
1301 	x86pte_t	pte,
1302 	int		flags,
1303 	void		*pte_ptr)
1304 {
1305 	hat_t		*hat = ht->ht_hat;
1306 	x86pte_t	old_pte;
1307 	level_t		l = ht->ht_level;
1308 	hment_t		*hm;
1309 	uint_t		is_consist;
1310 	uint_t		is_locked;
1311 	int		rv = 0;
1312 
1313 	/*
1314 	 * Is this a consistent (ie. need mapping list lock) mapping?
1315 	 */
1316 	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1317 
1318 	/*
1319 	 * Track locked mapping count in the htable.  Do this first,
1320 	 * as we track locking even if there already is a mapping present.
1321 	 */
1322 	is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1323 	if (is_locked)
1324 		HTABLE_LOCK_INC(ht);
1325 
1326 	/*
1327 	 * Acquire the page's mapping list lock and get an hment to use.
1328 	 * Note that hment_prepare() might return NULL.
1329 	 */
1330 	if (is_consist) {
1331 		x86_hm_enter(pp);
1332 		hm = hment_prepare(ht, entry, pp);
1333 	}
1334 
1335 	/*
1336 	 * Set the new pte, retrieving the old one at the same time.
1337 	 */
1338 	old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1339 
1340 	/*
1341 	 * Did we get a large page / page table collision?
1342 	 */
1343 	if (old_pte == LPAGE_ERROR) {
1344 		if (is_locked)
1345 			HTABLE_LOCK_DEC(ht);
1346 		rv = -1;
1347 		goto done;
1348 	}
1349 
1350 	/*
1351 	 * If the mapping didn't change there is nothing more to do.
1352 	 */
1353 	if (PTE_EQUIV(pte, old_pte))
1354 		goto done;
1355 
1356 	/*
1357 	 * Install a new mapping in the page's mapping list
1358 	 */
1359 	if (!PTE_ISVALID(old_pte)) {
1360 		if (is_consist) {
1361 			hment_assign(ht, entry, pp, hm);
1362 			x86_hm_exit(pp);
1363 		} else {
1364 			ASSERT(flags & HAT_LOAD_NOCONSIST);
1365 		}
1366 #if defined(__amd64)
1367 		if (ht->ht_flags & HTABLE_VLP) {
1368 			cpu_t *cpu = CPU;
1369 			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1370 			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1371 		}
1372 #endif
1373 		HTABLE_INC(ht->ht_valid_cnt);
1374 		PGCNT_INC(hat, l);
1375 		return (rv);
1376 	}
1377 
1378 	/*
1379 	 * Remap's are more complicated:
1380 	 *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1381 	 *    We also require that NOCONSIST be specified.
1382 	 *  - Otherwise only permission or caching bits may change.
1383 	 */
1384 	if (!PTE_ISPAGE(old_pte, l))
1385 		panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1386 
1387 	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1388 		REMAPASSERT(flags & HAT_LOAD_REMAP);
1389 		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1390 		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1391 		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1392 		    pf_is_memory(PTE2PFN(pte, l)));
1393 		REMAPASSERT(!is_consist);
1394 	}
1395 
1396 	/*
1397 	 * We only let remaps change the certain bits in the PTE.
1398 	 */
1399 	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1400 		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1401 		    old_pte, pte);
1402 
1403 	/*
1404 	 * We don't create any mapping list entries on a remap, so release
1405 	 * any allocated hment after we drop the mapping list lock.
1406 	 */
1407 done:
1408 	if (is_consist) {
1409 		x86_hm_exit(pp);
1410 		if (hm != NULL)
1411 			hment_free(hm);
1412 	}
1413 	return (rv);
1414 }
1415 
1416 /*
1417  * Internal routine to load a single page table entry. This only fails if
1418  * we attempt to overwrite a page table link with a large page.
1419  */
1420 static int
1421 hati_load_common(
1422 	hat_t		*hat,
1423 	uintptr_t	va,
1424 	page_t		*pp,
1425 	uint_t		attr,
1426 	uint_t		flags,
1427 	level_t		level,
1428 	pfn_t		pfn)
1429 {
1430 	htable_t	*ht;
1431 	uint_t		entry;
1432 	x86pte_t	pte;
1433 	int		rv = 0;
1434 
1435 	/*
1436 	 * The number 16 is arbitrary and here to catch a recursion problem
1437 	 * early before we blow out the kernel stack.
1438 	 */
1439 	++curthread->t_hatdepth;
1440 	ASSERT(curthread->t_hatdepth < 16);
1441 
1442 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1443 
1444 	if (flags & HAT_LOAD_SHARE)
1445 		hat->hat_flags |= HAT_SHARED;
1446 
1447 	/*
1448 	 * Find the page table that maps this page if it already exists.
1449 	 */
1450 	ht = htable_lookup(hat, va, level);
1451 
1452 	/*
1453 	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1454 	 */
1455 	if (pp == NULL)
1456 		flags |= HAT_LOAD_NOCONSIST;
1457 
1458 	if (ht == NULL) {
1459 		ht = htable_create(hat, va, level, NULL);
1460 		ASSERT(ht != NULL);
1461 	}
1462 	entry = htable_va2entry(va, ht);
1463 
1464 	/*
1465 	 * a bunch of paranoid error checking
1466 	 */
1467 	ASSERT(ht->ht_busy > 0);
1468 	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1469 		panic("hati_load_common: bad htable %p, va %p",
1470 		    (void *)ht, (void *)va);
1471 	ASSERT(ht->ht_level == level);
1472 
1473 	/*
1474 	 * construct the new PTE
1475 	 */
1476 	if (hat == kas.a_hat)
1477 		attr &= ~PROT_USER;
1478 	pte = hati_mkpte(pfn, attr, level, flags);
1479 	if (hat == kas.a_hat && va >= kernelbase)
1480 		PTE_SET(pte, mmu.pt_global);
1481 
1482 	/*
1483 	 * establish the mapping
1484 	 */
1485 	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1486 
1487 	/*
1488 	 * release the htable and any reserves
1489 	 */
1490 	htable_release(ht);
1491 	--curthread->t_hatdepth;
1492 	return (rv);
1493 }
1494 
1495 /*
1496  * special case of hat_memload to deal with some kernel addrs for performance
1497  */
1498 static void
1499 hat_kmap_load(
1500 	caddr_t		addr,
1501 	page_t		*pp,
1502 	uint_t		attr,
1503 	uint_t		flags)
1504 {
1505 	uintptr_t	va = (uintptr_t)addr;
1506 	x86pte_t	pte;
1507 	pfn_t		pfn = page_pptonum(pp);
1508 	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
1509 	htable_t	*ht;
1510 	uint_t		entry;
1511 	void		*pte_ptr;
1512 
1513 	/*
1514 	 * construct the requested PTE
1515 	 */
1516 	attr &= ~PROT_USER;
1517 	attr |= HAT_STORECACHING_OK;
1518 	pte = hati_mkpte(pfn, attr, 0, flags);
1519 	PTE_SET(pte, mmu.pt_global);
1520 
1521 	/*
1522 	 * Figure out the pte_ptr and htable and use common code to finish up
1523 	 */
1524 	if (mmu.pae_hat)
1525 		pte_ptr = mmu.kmap_ptes + pg_off;
1526 	else
1527 		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1528 	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1529 	    LEVEL_SHIFT(1)];
1530 	entry = htable_va2entry(va, ht);
1531 	++curthread->t_hatdepth;
1532 	ASSERT(curthread->t_hatdepth < 16);
1533 	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1534 	--curthread->t_hatdepth;
1535 }
1536 
1537 /*
1538  * hat_memload() - load a translation to the given page struct
1539  *
1540  * Flags for hat_memload/hat_devload/hat_*attr.
1541  *
1542  * 	HAT_LOAD	Default flags to load a translation to the page.
1543  *
1544  * 	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
1545  *			and hat_devload().
1546  *
1547  *	HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1548  *			sets PT_NOCONSIST
1549  *
1550  *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
1551  *			that map some user pages (not kas) is shared by more
1552  *			than one process (eg. ISM).
1553  *
1554  *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
1555  *
1556  *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
1557  *			point, it's setting up mapping to allocate internal
1558  *			hat layer data structures.  This flag forces hat layer
1559  *			to tap its reserves in order to prevent infinite
1560  *			recursion.
1561  *
1562  * The following is a protection attribute (like PROT_READ, etc.)
1563  *
1564  *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
1565  *			are never cleared.
1566  *
1567  * Installing new valid PTE's and creation of the mapping list
1568  * entry are controlled under the same lock. It's derived from the
1569  * page_t being mapped.
1570  */
1571 static uint_t supported_memload_flags =
1572 	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1573 	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1574 
1575 void
1576 hat_memload(
1577 	hat_t		*hat,
1578 	caddr_t		addr,
1579 	page_t		*pp,
1580 	uint_t		attr,
1581 	uint_t		flags)
1582 {
1583 	uintptr_t	va = (uintptr_t)addr;
1584 	level_t		level = 0;
1585 	pfn_t		pfn = page_pptonum(pp);
1586 
1587 	XPV_DISALLOW_MIGRATE();
1588 	ASSERT(IS_PAGEALIGNED(va));
1589 	ASSERT(hat == kas.a_hat || va < _userlimit);
1590 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1591 	ASSERT((flags & supported_memload_flags) == flags);
1592 
1593 	ASSERT(!IN_VA_HOLE(va));
1594 	ASSERT(!PP_ISFREE(pp));
1595 
1596 	/*
1597 	 * kernel address special case for performance.
1598 	 */
1599 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1600 		ASSERT(hat == kas.a_hat);
1601 		hat_kmap_load(addr, pp, attr, flags);
1602 		XPV_ALLOW_MIGRATE();
1603 		return;
1604 	}
1605 
1606 	/*
1607 	 * This is used for memory with normal caching enabled, so
1608 	 * always set HAT_STORECACHING_OK.
1609 	 */
1610 	attr |= HAT_STORECACHING_OK;
1611 	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1612 		panic("unexpected hati_load_common() failure");
1613 	XPV_ALLOW_MIGRATE();
1614 }
1615 
1616 /* ARGSUSED */
1617 void
1618 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1619     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1620 {
1621 	hat_memload(hat, addr, pp, attr, flags);
1622 }
1623 
1624 /*
1625  * Load the given array of page structs using large pages when possible
1626  */
1627 void
1628 hat_memload_array(
1629 	hat_t		*hat,
1630 	caddr_t		addr,
1631 	size_t		len,
1632 	page_t		**pages,
1633 	uint_t		attr,
1634 	uint_t		flags)
1635 {
1636 	uintptr_t	va = (uintptr_t)addr;
1637 	uintptr_t	eaddr = va + len;
1638 	level_t		level;
1639 	size_t		pgsize;
1640 	pgcnt_t		pgindx = 0;
1641 	pfn_t		pfn;
1642 	pgcnt_t		i;
1643 
1644 	XPV_DISALLOW_MIGRATE();
1645 	ASSERT(IS_PAGEALIGNED(va));
1646 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1647 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1648 	ASSERT((flags & supported_memload_flags) == flags);
1649 
1650 	/*
1651 	 * memload is used for memory with full caching enabled, so
1652 	 * set HAT_STORECACHING_OK.
1653 	 */
1654 	attr |= HAT_STORECACHING_OK;
1655 
1656 	/*
1657 	 * handle all pages using largest possible pagesize
1658 	 */
1659 	while (va < eaddr) {
1660 		/*
1661 		 * decide what level mapping to use (ie. pagesize)
1662 		 */
1663 		pfn = page_pptonum(pages[pgindx]);
1664 		for (level = mmu.max_page_level; ; --level) {
1665 			pgsize = LEVEL_SIZE(level);
1666 			if (level == 0)
1667 				break;
1668 
1669 			if (!IS_P2ALIGNED(va, pgsize) ||
1670 			    (eaddr - va) < pgsize ||
1671 			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1672 				continue;
1673 
1674 			/*
1675 			 * To use a large mapping of this size, all the
1676 			 * pages we are passed must be sequential subpages
1677 			 * of the large page.
1678 			 * hat_page_demote() can't change p_szc because
1679 			 * all pages are locked.
1680 			 */
1681 			if (pages[pgindx]->p_szc >= level) {
1682 				for (i = 0; i < mmu_btop(pgsize); ++i) {
1683 					if (pfn + i !=
1684 					    page_pptonum(pages[pgindx + i]))
1685 						break;
1686 					ASSERT(pages[pgindx + i]->p_szc >=
1687 					    level);
1688 					ASSERT(pages[pgindx] + i ==
1689 					    pages[pgindx + i]);
1690 				}
1691 				if (i == mmu_btop(pgsize)) {
1692 #ifdef DEBUG
1693 					if (level == 2)
1694 						map1gcnt++;
1695 #endif
1696 					break;
1697 				}
1698 			}
1699 		}
1700 
1701 		/*
1702 		 * Load this page mapping. If the load fails, try a smaller
1703 		 * pagesize.
1704 		 */
1705 		ASSERT(!IN_VA_HOLE(va));
1706 		while (hati_load_common(hat, va, pages[pgindx], attr,
1707 		    flags, level, pfn) != 0) {
1708 			if (level == 0)
1709 				panic("unexpected hati_load_common() failure");
1710 			--level;
1711 			pgsize = LEVEL_SIZE(level);
1712 		}
1713 
1714 		/*
1715 		 * move to next page
1716 		 */
1717 		va += pgsize;
1718 		pgindx += mmu_btop(pgsize);
1719 	}
1720 	XPV_ALLOW_MIGRATE();
1721 }
1722 
1723 /* ARGSUSED */
1724 void
1725 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1726     struct page **pps, uint_t attr, uint_t flags,
1727     hat_region_cookie_t rcookie)
1728 {
1729 	hat_memload_array(hat, addr, len, pps, attr, flags);
1730 }
1731 
1732 /*
1733  * void hat_devload(hat, addr, len, pf, attr, flags)
1734  *	load/lock the given page frame number
1735  *
1736  * Advisory ordering attributes. Apply only to device mappings.
1737  *
1738  * HAT_STRICTORDER: the CPU must issue the references in order, as the
1739  *	programmer specified.  This is the default.
1740  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1741  *	of reordering; store or load with store or load).
1742  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1743  *	to consecutive locations (for example, turn two consecutive byte
1744  *	stores into one halfword store), and it may batch individual loads
1745  *	(for example, turn two consecutive byte loads into one halfword load).
1746  *	This also implies re-ordering.
1747  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1748  *	until another store occurs.  The default is to fetch new data
1749  *	on every load.  This also implies merging.
1750  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1751  *	the device (perhaps with other data) at a later time.  The default is
1752  *	to push the data right away.  This also implies load caching.
1753  *
1754  * Equivalent of hat_memload(), but can be used for device memory where
1755  * there are no page_t's and we support additional flags (write merging, etc).
1756  * Note that we can have large page mappings with this interface.
1757  */
1758 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1759 	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1760 	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1761 
1762 void
1763 hat_devload(
1764 	hat_t		*hat,
1765 	caddr_t		addr,
1766 	size_t		len,
1767 	pfn_t		pfn,
1768 	uint_t		attr,
1769 	int		flags)
1770 {
1771 	uintptr_t	va = ALIGN2PAGE(addr);
1772 	uintptr_t	eva = va + len;
1773 	level_t		level;
1774 	size_t		pgsize;
1775 	page_t		*pp;
1776 	int		f;	/* per PTE copy of flags  - maybe modified */
1777 	uint_t		a;	/* per PTE copy of attr */
1778 
1779 	XPV_DISALLOW_MIGRATE();
1780 	ASSERT(IS_PAGEALIGNED(va));
1781 	ASSERT(hat == kas.a_hat || eva <= _userlimit);
1782 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1783 	ASSERT((flags & supported_devload_flags) == flags);
1784 
1785 	/*
1786 	 * handle all pages
1787 	 */
1788 	while (va < eva) {
1789 
1790 		/*
1791 		 * decide what level mapping to use (ie. pagesize)
1792 		 */
1793 		for (level = mmu.max_page_level; ; --level) {
1794 			pgsize = LEVEL_SIZE(level);
1795 			if (level == 0)
1796 				break;
1797 			if (IS_P2ALIGNED(va, pgsize) &&
1798 			    (eva - va) >= pgsize &&
1799 			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1800 #ifdef DEBUG
1801 				if (level == 2)
1802 					map1gcnt++;
1803 #endif
1804 				break;
1805 			}
1806 		}
1807 
1808 		/*
1809 		 * If this is just memory then allow caching (this happens
1810 		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1811 		 * to override that. If we don't have a page_t then make sure
1812 		 * NOCONSIST is set.
1813 		 */
1814 		a = attr;
1815 		f = flags;
1816 		if (!pf_is_memory(pfn))
1817 			f |= HAT_LOAD_NOCONSIST;
1818 		else if (!(a & HAT_PLAT_NOCACHE))
1819 			a |= HAT_STORECACHING_OK;
1820 
1821 		if (f & HAT_LOAD_NOCONSIST)
1822 			pp = NULL;
1823 		else
1824 			pp = page_numtopp_nolock(pfn);
1825 
1826 		/*
1827 		 * Check to make sure we are really trying to map a valid
1828 		 * memory page. The caller wishing to intentionally map
1829 		 * free memory pages will have passed the HAT_LOAD_NOCONSIST
1830 		 * flag, then pp will be NULL.
1831 		 */
1832 		if (pp != NULL) {
1833 			if (PP_ISFREE(pp)) {
1834 				panic("hat_devload: loading "
1835 				    "a mapping to free page %p", (void *)pp);
1836 			}
1837 
1838 			if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1839 				panic("hat_devload: loading a mapping "
1840 				    "to an unlocked page %p",
1841 				    (void *)pp);
1842 			}
1843 		}
1844 
1845 		/*
1846 		 * load this page mapping
1847 		 */
1848 		ASSERT(!IN_VA_HOLE(va));
1849 		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1850 			if (level == 0)
1851 				panic("unexpected hati_load_common() failure");
1852 			--level;
1853 			pgsize = LEVEL_SIZE(level);
1854 		}
1855 
1856 		/*
1857 		 * move to next page
1858 		 */
1859 		va += pgsize;
1860 		pfn += mmu_btop(pgsize);
1861 	}
1862 	XPV_ALLOW_MIGRATE();
1863 }
1864 
1865 /*
1866  * void hat_unlock(hat, addr, len)
1867  *	unlock the mappings to a given range of addresses
1868  *
1869  * Locks are tracked by ht_lock_cnt in the htable.
1870  */
1871 void
1872 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1873 {
1874 	uintptr_t	vaddr = (uintptr_t)addr;
1875 	uintptr_t	eaddr = vaddr + len;
1876 	htable_t	*ht = NULL;
1877 
1878 	/*
1879 	 * kernel entries are always locked, we don't track lock counts
1880 	 */
1881 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1882 	ASSERT(IS_PAGEALIGNED(vaddr));
1883 	ASSERT(IS_PAGEALIGNED(eaddr));
1884 	if (hat == kas.a_hat)
1885 		return;
1886 	if (eaddr > _userlimit)
1887 		panic("hat_unlock() address out of range - above _userlimit");
1888 
1889 	XPV_DISALLOW_MIGRATE();
1890 	ASSERT(AS_LOCK_HELD(hat->hat_as));
1891 	while (vaddr < eaddr) {
1892 		(void) htable_walk(hat, &ht, &vaddr, eaddr);
1893 		if (ht == NULL)
1894 			break;
1895 
1896 		ASSERT(!IN_VA_HOLE(vaddr));
1897 
1898 		if (ht->ht_lock_cnt < 1)
1899 			panic("hat_unlock(): lock_cnt < 1, "
1900 			    "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1901 		HTABLE_LOCK_DEC(ht);
1902 
1903 		vaddr += LEVEL_SIZE(ht->ht_level);
1904 	}
1905 	if (ht)
1906 		htable_release(ht);
1907 	XPV_ALLOW_MIGRATE();
1908 }
1909 
1910 /* ARGSUSED */
1911 void
1912 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1913     hat_region_cookie_t rcookie)
1914 {
1915 	panic("No shared region support on x86");
1916 }
1917 
1918 /*
1919  * A range of virtual pages for purposes of demapping.
1920  */
1921 typedef struct range_info {
1922 	uintptr_t	rng_va; 	/* address of page */
1923 	ulong_t		rng_cnt; 	/* number of pages in range */
1924 	level_t		rng_level; 	/* page table level */
1925 } range_info_t;
1926 
1927 #if !defined(__xpv)
1928 /*
1929  * Cross call service routine to demap a range of virtual
1930  * pages on the current CPU or flush all mappings in TLB.
1931  */
1932 /*ARGSUSED*/
1933 static int
1934 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1935 {
1936 	hat_t		*hat = (hat_t *)a1;
1937 	range_info_t	*range = (range_info_t *)a2;
1938 	size_t		len = (size_t)a3;
1939 	caddr_t		addr = (caddr_t)range->rng_va;
1940 	size_t		pgsz = LEVEL_SIZE(range->rng_level);
1941 
1942 	/*
1943 	 * If the target hat isn't the kernel and this CPU isn't operating
1944 	 * in the target hat, we can ignore the cross call.
1945 	 */
1946 	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1947 		return (0);
1948 
1949 	/*
1950 	 * For a normal address, we flush a range of contiguous mappings
1951 	 */
1952 	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1953 		for (size_t i = 0; i < len; i += pgsz)
1954 			mmu_tlbflush_entry(addr + i);
1955 		return (0);
1956 	}
1957 
1958 	/*
1959 	 * Otherwise we reload cr3 to effect a complete TLB flush.
1960 	 *
1961 	 * A reload of cr3 on a VLP process also means we must also recopy in
1962 	 * the pte values from the struct hat
1963 	 */
1964 	if (hat->hat_flags & HAT_VLP) {
1965 #if defined(__amd64)
1966 		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1967 
1968 		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1969 #elif defined(__i386)
1970 		reload_pae32(hat, CPU);
1971 #endif
1972 	}
1973 	reload_cr3();
1974 	return (0);
1975 }
1976 
1977 /*
1978  * Flush all TLB entries, including global (ie. kernel) ones.
1979  */
1980 static void
1981 flush_all_tlb_entries(void)
1982 {
1983 	ulong_t cr4 = getcr4();
1984 
1985 	if (cr4 & CR4_PGE) {
1986 		setcr4(cr4 & ~(ulong_t)CR4_PGE);
1987 		setcr4(cr4);
1988 
1989 		/*
1990 		 * 32 bit PAE also needs to always reload_cr3()
1991 		 */
1992 		if (mmu.max_level == 2)
1993 			reload_cr3();
1994 	} else {
1995 		reload_cr3();
1996 	}
1997 }
1998 
1999 #define	TLB_CPU_HALTED	(01ul)
2000 #define	TLB_INVAL_ALL	(02ul)
2001 #define	CAS_TLB_INFO(cpu, old, new)	\
2002 	atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
2003 
2004 /*
2005  * Record that a CPU is going idle
2006  */
2007 void
2008 tlb_going_idle(void)
2009 {
2010 	atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2011 }
2012 
2013 /*
2014  * Service a delayed TLB flush if coming out of being idle.
2015  * It will be called from cpu idle notification with interrupt disabled.
2016  */
2017 void
2018 tlb_service(void)
2019 {
2020 	ulong_t tlb_info;
2021 	ulong_t found;
2022 
2023 	/*
2024 	 * We only have to do something if coming out of being idle.
2025 	 */
2026 	tlb_info = CPU->cpu_m.mcpu_tlb_info;
2027 	if (tlb_info & TLB_CPU_HALTED) {
2028 		ASSERT(CPU->cpu_current_hat == kas.a_hat);
2029 
2030 		/*
2031 		 * Atomic clear and fetch of old state.
2032 		 */
2033 		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2034 			ASSERT(found & TLB_CPU_HALTED);
2035 			tlb_info = found;
2036 			SMT_PAUSE();
2037 		}
2038 		if (tlb_info & TLB_INVAL_ALL)
2039 			flush_all_tlb_entries();
2040 	}
2041 }
2042 #endif /* !__xpv */
2043 
2044 /*
2045  * Internal routine to do cross calls to invalidate a range of pages on
2046  * all CPUs using a given hat.
2047  */
2048 void
2049 hat_tlb_inval_range(hat_t *hat, range_info_t *range)
2050 {
2051 	extern int	flushes_require_xcalls;	/* from mp_startup.c */
2052 	cpuset_t	justme;
2053 	cpuset_t	cpus_to_shootdown;
2054 	uintptr_t	va = range->rng_va;
2055 	size_t		len = range->rng_cnt << LEVEL_SHIFT(range->rng_level);
2056 #ifndef __xpv
2057 	cpuset_t	check_cpus;
2058 	cpu_t		*cpup;
2059 	int		c;
2060 #endif
2061 
2062 	/*
2063 	 * If the hat is being destroyed, there are no more users, so
2064 	 * demap need not do anything.
2065 	 */
2066 	if (hat->hat_flags & HAT_FREEING)
2067 		return;
2068 
2069 	/*
2070 	 * If demapping from a shared pagetable, we best demap the
2071 	 * entire set of user TLBs, since we don't know what addresses
2072 	 * these were shared at.
2073 	 */
2074 	if (hat->hat_flags & HAT_SHARED) {
2075 		hat = kas.a_hat;
2076 		va = DEMAP_ALL_ADDR;
2077 	}
2078 
2079 	/*
2080 	 * if not running with multiple CPUs, don't use cross calls
2081 	 */
2082 	if (panicstr || !flushes_require_xcalls) {
2083 #ifdef __xpv
2084 		if (va == DEMAP_ALL_ADDR) {
2085 			xen_flush_tlb();
2086 		} else {
2087 			for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2088 				xen_flush_va((caddr_t)(va + i));
2089 		}
2090 #else
2091 		(void) hati_demap_func((xc_arg_t)hat,
2092 		    (xc_arg_t)range, (xc_arg_t)len);
2093 #endif
2094 		return;
2095 	}
2096 
2097 
2098 	/*
2099 	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2100 	 * Otherwise it's just CPUs currently executing in this hat.
2101 	 */
2102 	kpreempt_disable();
2103 	CPUSET_ONLY(justme, CPU->cpu_id);
2104 	if (hat == kas.a_hat)
2105 		cpus_to_shootdown = khat_cpuset;
2106 	else
2107 		cpus_to_shootdown = hat->hat_cpus;
2108 
2109 #ifndef __xpv
2110 	/*
2111 	 * If any CPUs in the set are idle, just request a delayed flush
2112 	 * and avoid waking them up.
2113 	 */
2114 	check_cpus = cpus_to_shootdown;
2115 	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2116 		ulong_t tlb_info;
2117 
2118 		if (!CPU_IN_SET(check_cpus, c))
2119 			continue;
2120 		CPUSET_DEL(check_cpus, c);
2121 		cpup = cpu[c];
2122 		if (cpup == NULL)
2123 			continue;
2124 
2125 		tlb_info = cpup->cpu_m.mcpu_tlb_info;
2126 		while (tlb_info == TLB_CPU_HALTED) {
2127 			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2128 			    TLB_CPU_HALTED | TLB_INVAL_ALL);
2129 			SMT_PAUSE();
2130 			tlb_info = cpup->cpu_m.mcpu_tlb_info;
2131 		}
2132 		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2133 			HATSTAT_INC(hs_tlb_inval_delayed);
2134 			CPUSET_DEL(cpus_to_shootdown, c);
2135 		}
2136 	}
2137 #endif
2138 
2139 	if (CPUSET_ISNULL(cpus_to_shootdown) ||
2140 	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2141 
2142 #ifdef __xpv
2143 		if (va == DEMAP_ALL_ADDR) {
2144 			xen_flush_tlb();
2145 		} else {
2146 			for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2147 				xen_flush_va((caddr_t)(va + i));
2148 		}
2149 #else
2150 		(void) hati_demap_func((xc_arg_t)hat,
2151 		    (xc_arg_t)range, (xc_arg_t)len);
2152 #endif
2153 
2154 	} else {
2155 
2156 		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2157 #ifdef __xpv
2158 		if (va == DEMAP_ALL_ADDR) {
2159 			xen_gflush_tlb(cpus_to_shootdown);
2160 		} else {
2161 			for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2162 				xen_gflush_va((caddr_t)(va + i),
2163 				    cpus_to_shootdown);
2164 			}
2165 		}
2166 #else
2167 		xc_call((xc_arg_t)hat, (xc_arg_t)range, (xc_arg_t)len,
2168 		    CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2169 #endif
2170 
2171 	}
2172 	kpreempt_enable();
2173 }
2174 
2175 void
2176 hat_tlb_inval(hat_t *hat, uintptr_t va)
2177 {
2178 	/*
2179 	 * Create range for a single page.
2180 	 */
2181 	range_info_t range;
2182 	range.rng_va = va;
2183 	range.rng_cnt = 1; /* one page */
2184 	range.rng_level = MIN_PAGE_LEVEL; /* pages are MMU_PAGESIZE */
2185 
2186 	hat_tlb_inval_range(hat, &range);
2187 }
2188 
2189 /*
2190  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2191  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
2192  * handle releasing of the htables.
2193  */
2194 void
2195 hat_pte_unmap(
2196 	htable_t	*ht,
2197 	uint_t		entry,
2198 	uint_t		flags,
2199 	x86pte_t	old_pte,
2200 	void		*pte_ptr,
2201 	boolean_t	tlb)
2202 {
2203 	hat_t		*hat = ht->ht_hat;
2204 	hment_t		*hm = NULL;
2205 	page_t		*pp = NULL;
2206 	level_t		l = ht->ht_level;
2207 	pfn_t		pfn;
2208 
2209 	/*
2210 	 * We always track the locking counts, even if nothing is unmapped
2211 	 */
2212 	if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2213 		ASSERT(ht->ht_lock_cnt > 0);
2214 		HTABLE_LOCK_DEC(ht);
2215 	}
2216 
2217 	/*
2218 	 * Figure out which page's mapping list lock to acquire using the PFN
2219 	 * passed in "old" PTE. We then attempt to invalidate the PTE.
2220 	 * If another thread, probably a hat_pageunload, has asynchronously
2221 	 * unmapped/remapped this address we'll loop here.
2222 	 */
2223 	ASSERT(ht->ht_busy > 0);
2224 	while (PTE_ISVALID(old_pte)) {
2225 		pfn = PTE2PFN(old_pte, l);
2226 		if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2227 			pp = NULL;
2228 		} else {
2229 #ifdef __xpv
2230 			if (pfn == PFN_INVALID)
2231 				panic("Invalid PFN, but not PT_NOCONSIST");
2232 #endif
2233 			pp = page_numtopp_nolock(pfn);
2234 			if (pp == NULL) {
2235 				panic("no page_t, not NOCONSIST: old_pte="
2236 				    FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2237 				    old_pte, (uintptr_t)ht, entry,
2238 				    (uintptr_t)pte_ptr);
2239 			}
2240 			x86_hm_enter(pp);
2241 		}
2242 
2243 		old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2244 
2245 		/*
2246 		 * If the page hadn't changed we've unmapped it and can proceed
2247 		 */
2248 		if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2249 			break;
2250 
2251 		/*
2252 		 * Otherwise, we'll have to retry with the current old_pte.
2253 		 * Drop the hment lock, since the pfn may have changed.
2254 		 */
2255 		if (pp != NULL) {
2256 			x86_hm_exit(pp);
2257 			pp = NULL;
2258 		} else {
2259 			ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2260 		}
2261 	}
2262 
2263 	/*
2264 	 * If the old mapping wasn't valid, there's nothing more to do
2265 	 */
2266 	if (!PTE_ISVALID(old_pte)) {
2267 		if (pp != NULL)
2268 			x86_hm_exit(pp);
2269 		return;
2270 	}
2271 
2272 	/*
2273 	 * Take care of syncing any MOD/REF bits and removing the hment.
2274 	 */
2275 	if (pp != NULL) {
2276 		if (!(flags & HAT_UNLOAD_NOSYNC))
2277 			hati_sync_pte_to_page(pp, old_pte, l);
2278 		hm = hment_remove(pp, ht, entry);
2279 		x86_hm_exit(pp);
2280 		if (hm != NULL)
2281 			hment_free(hm);
2282 	}
2283 
2284 	/*
2285 	 * Handle book keeping in the htable and hat
2286 	 */
2287 	ASSERT(ht->ht_valid_cnt > 0);
2288 	HTABLE_DEC(ht->ht_valid_cnt);
2289 	PGCNT_DEC(hat, l);
2290 }
2291 
2292 /*
2293  * very cheap unload implementation to special case some kernel addresses
2294  */
2295 static void
2296 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2297 {
2298 	uintptr_t	va = (uintptr_t)addr;
2299 	uintptr_t	eva = va + len;
2300 	pgcnt_t		pg_index;
2301 	htable_t	*ht;
2302 	uint_t		entry;
2303 	x86pte_t	*pte_ptr;
2304 	x86pte_t	old_pte;
2305 
2306 	for (; va < eva; va += MMU_PAGESIZE) {
2307 		/*
2308 		 * Get the PTE
2309 		 */
2310 		pg_index = mmu_btop(va - mmu.kmap_addr);
2311 		pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2312 		old_pte = GET_PTE(pte_ptr);
2313 
2314 		/*
2315 		 * get the htable / entry
2316 		 */
2317 		ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2318 		    >> LEVEL_SHIFT(1)];
2319 		entry = htable_va2entry(va, ht);
2320 
2321 		/*
2322 		 * use mostly common code to unmap it.
2323 		 */
2324 		hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2325 	}
2326 }
2327 
2328 
2329 /*
2330  * unload a range of virtual address space (no callback)
2331  */
2332 void
2333 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2334 {
2335 	uintptr_t va = (uintptr_t)addr;
2336 
2337 	XPV_DISALLOW_MIGRATE();
2338 	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2339 
2340 	/*
2341 	 * special case for performance.
2342 	 */
2343 	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2344 		ASSERT(hat == kas.a_hat);
2345 		hat_kmap_unload(addr, len, flags);
2346 	} else {
2347 		hat_unload_callback(hat, addr, len, flags, NULL);
2348 	}
2349 	XPV_ALLOW_MIGRATE();
2350 }
2351 
2352 /*
2353  * Invalidate the TLB, and perform the callback to the upper level VM system,
2354  * for the specified ranges of contiguous pages.
2355  */
2356 static void
2357 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2358 {
2359 	while (cnt > 0) {
2360 		--cnt;
2361 		hat_tlb_inval_range(hat, &range[cnt]);
2362 
2363 		if (cb != NULL) {
2364 			cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2365 			cb->hcb_end_addr = cb->hcb_start_addr;
2366 			cb->hcb_end_addr += range[cnt].rng_cnt <<
2367 			    LEVEL_SHIFT(range[cnt].rng_level);
2368 			cb->hcb_function(cb);
2369 		}
2370 	}
2371 }
2372 
2373 /*
2374  * Unload a given range of addresses (has optional callback)
2375  *
2376  * Flags:
2377  * define	HAT_UNLOAD		0x00
2378  * define	HAT_UNLOAD_NOSYNC	0x02
2379  * define	HAT_UNLOAD_UNLOCK	0x04
2380  * define	HAT_UNLOAD_OTHER	0x08 - not used
2381  * define	HAT_UNLOAD_UNMAP	0x10 - same as HAT_UNLOAD
2382  */
2383 #define	MAX_UNLOAD_CNT (8)
2384 void
2385 hat_unload_callback(
2386 	hat_t		*hat,
2387 	caddr_t		addr,
2388 	size_t		len,
2389 	uint_t		flags,
2390 	hat_callback_t	*cb)
2391 {
2392 	uintptr_t	vaddr = (uintptr_t)addr;
2393 	uintptr_t	eaddr = vaddr + len;
2394 	htable_t	*ht = NULL;
2395 	uint_t		entry;
2396 	uintptr_t	contig_va = (uintptr_t)-1L;
2397 	range_info_t	r[MAX_UNLOAD_CNT];
2398 	uint_t		r_cnt = 0;
2399 	x86pte_t	old_pte;
2400 
2401 	XPV_DISALLOW_MIGRATE();
2402 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2403 	ASSERT(IS_PAGEALIGNED(vaddr));
2404 	ASSERT(IS_PAGEALIGNED(eaddr));
2405 
2406 	/*
2407 	 * Special case a single page being unloaded for speed. This happens
2408 	 * quite frequently, COW faults after a fork() for example.
2409 	 */
2410 	if (cb == NULL && len == MMU_PAGESIZE) {
2411 		ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2412 		if (ht != NULL) {
2413 			if (PTE_ISVALID(old_pte)) {
2414 				hat_pte_unmap(ht, entry, flags, old_pte,
2415 				    NULL, B_TRUE);
2416 			}
2417 			htable_release(ht);
2418 		}
2419 		XPV_ALLOW_MIGRATE();
2420 		return;
2421 	}
2422 
2423 	while (vaddr < eaddr) {
2424 		old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2425 		if (ht == NULL)
2426 			break;
2427 
2428 		ASSERT(!IN_VA_HOLE(vaddr));
2429 
2430 		if (vaddr < (uintptr_t)addr)
2431 			panic("hat_unload_callback(): unmap inside large page");
2432 
2433 		/*
2434 		 * We'll do the call backs for contiguous ranges
2435 		 */
2436 		if (vaddr != contig_va ||
2437 		    (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2438 			if (r_cnt == MAX_UNLOAD_CNT) {
2439 				handle_ranges(hat, cb, r_cnt, r);
2440 				r_cnt = 0;
2441 			}
2442 			r[r_cnt].rng_va = vaddr;
2443 			r[r_cnt].rng_cnt = 0;
2444 			r[r_cnt].rng_level = ht->ht_level;
2445 			++r_cnt;
2446 		}
2447 
2448 		/*
2449 		 * Unload one mapping (for a single page) from the page tables.
2450 		 * Note that we do not remove the mapping from the TLB yet,
2451 		 * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2452 		 * handle_ranges() will clear the TLB entries with one call to
2453 		 * hat_tlb_inval_range() per contiguous range.  This is
2454 		 * safe because the page can not be reused until the
2455 		 * callback is made (or we return).
2456 		 */
2457 		entry = htable_va2entry(vaddr, ht);
2458 		hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2459 		ASSERT(ht->ht_level <= mmu.max_page_level);
2460 		vaddr += LEVEL_SIZE(ht->ht_level);
2461 		contig_va = vaddr;
2462 		++r[r_cnt - 1].rng_cnt;
2463 	}
2464 	if (ht)
2465 		htable_release(ht);
2466 
2467 	/*
2468 	 * handle last range for callbacks
2469 	 */
2470 	if (r_cnt > 0)
2471 		handle_ranges(hat, cb, r_cnt, r);
2472 	XPV_ALLOW_MIGRATE();
2473 }
2474 
2475 /*
2476  * Invalidate a virtual address translation on a slave CPU during
2477  * panic() dumps.
2478  */
2479 void
2480 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2481 {
2482 	ssize_t sz;
2483 	caddr_t endva = va + size;
2484 
2485 	while (va < endva) {
2486 		sz = hat_getpagesize(hat, va);
2487 		if (sz < 0) {
2488 #ifdef __xpv
2489 			xen_flush_tlb();
2490 #else
2491 			flush_all_tlb_entries();
2492 #endif
2493 			break;
2494 		}
2495 #ifdef __xpv
2496 		xen_flush_va(va);
2497 #else
2498 		mmu_tlbflush_entry(va);
2499 #endif
2500 		va += sz;
2501 	}
2502 }
2503 
2504 /*
2505  * synchronize mapping with software data structures
2506  *
2507  * This interface is currently only used by the working set monitor
2508  * driver.
2509  */
2510 /*ARGSUSED*/
2511 void
2512 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2513 {
2514 	uintptr_t	vaddr = (uintptr_t)addr;
2515 	uintptr_t	eaddr = vaddr + len;
2516 	htable_t	*ht = NULL;
2517 	uint_t		entry;
2518 	x86pte_t	pte;
2519 	x86pte_t	save_pte;
2520 	x86pte_t	new;
2521 	page_t		*pp;
2522 
2523 	ASSERT(!IN_VA_HOLE(vaddr));
2524 	ASSERT(IS_PAGEALIGNED(vaddr));
2525 	ASSERT(IS_PAGEALIGNED(eaddr));
2526 	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2527 
2528 	XPV_DISALLOW_MIGRATE();
2529 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2530 try_again:
2531 		pte = htable_walk(hat, &ht, &vaddr, eaddr);
2532 		if (ht == NULL)
2533 			break;
2534 		entry = htable_va2entry(vaddr, ht);
2535 
2536 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2537 		    PTE_GET(pte, PT_REF | PT_MOD) == 0)
2538 			continue;
2539 
2540 		/*
2541 		 * We need to acquire the mapping list lock to protect
2542 		 * against hat_pageunload(), hat_unload(), etc.
2543 		 */
2544 		pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2545 		if (pp == NULL)
2546 			break;
2547 		x86_hm_enter(pp);
2548 		save_pte = pte;
2549 		pte = x86pte_get(ht, entry);
2550 		if (pte != save_pte) {
2551 			x86_hm_exit(pp);
2552 			goto try_again;
2553 		}
2554 		if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2555 		    PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2556 			x86_hm_exit(pp);
2557 			continue;
2558 		}
2559 
2560 		/*
2561 		 * Need to clear ref or mod bits. We may compete with
2562 		 * hardware updating the R/M bits and have to try again.
2563 		 */
2564 		if (flags == HAT_SYNC_ZERORM) {
2565 			new = pte;
2566 			PTE_CLR(new, PT_REF | PT_MOD);
2567 			pte = hati_update_pte(ht, entry, pte, new);
2568 			if (pte != 0) {
2569 				x86_hm_exit(pp);
2570 				goto try_again;
2571 			}
2572 		} else {
2573 			/*
2574 			 * sync the PTE to the page_t
2575 			 */
2576 			hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2577 		}
2578 		x86_hm_exit(pp);
2579 	}
2580 	if (ht)
2581 		htable_release(ht);
2582 	XPV_ALLOW_MIGRATE();
2583 }
2584 
2585 /*
2586  * void	hat_map(hat, addr, len, flags)
2587  */
2588 /*ARGSUSED*/
2589 void
2590 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2591 {
2592 	/* does nothing */
2593 }
2594 
2595 /*
2596  * uint_t hat_getattr(hat, addr, *attr)
2597  *	returns attr for <hat,addr> in *attr.  returns 0 if there was a
2598  *	mapping and *attr is valid, nonzero if there was no mapping and
2599  *	*attr is not valid.
2600  */
2601 uint_t
2602 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2603 {
2604 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2605 	htable_t	*ht = NULL;
2606 	x86pte_t	pte;
2607 
2608 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2609 
2610 	if (IN_VA_HOLE(vaddr))
2611 		return ((uint_t)-1);
2612 
2613 	ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2614 	if (ht == NULL)
2615 		return ((uint_t)-1);
2616 
2617 	if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2618 		htable_release(ht);
2619 		return ((uint_t)-1);
2620 	}
2621 
2622 	*attr = PROT_READ;
2623 	if (PTE_GET(pte, PT_WRITABLE))
2624 		*attr |= PROT_WRITE;
2625 	if (PTE_GET(pte, PT_USER))
2626 		*attr |= PROT_USER;
2627 	if (!PTE_GET(pte, mmu.pt_nx))
2628 		*attr |= PROT_EXEC;
2629 	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2630 		*attr |= HAT_NOSYNC;
2631 	htable_release(ht);
2632 	return (0);
2633 }
2634 
2635 /*
2636  * hat_updateattr() applies the given attribute change to an existing mapping
2637  */
2638 #define	HAT_LOAD_ATTR		1
2639 #define	HAT_SET_ATTR		2
2640 #define	HAT_CLR_ATTR		3
2641 
2642 static void
2643 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2644 {
2645 	uintptr_t	vaddr = (uintptr_t)addr;
2646 	uintptr_t	eaddr = (uintptr_t)addr + len;
2647 	htable_t	*ht = NULL;
2648 	uint_t		entry;
2649 	x86pte_t	oldpte, newpte;
2650 	page_t		*pp;
2651 
2652 	XPV_DISALLOW_MIGRATE();
2653 	ASSERT(IS_PAGEALIGNED(vaddr));
2654 	ASSERT(IS_PAGEALIGNED(eaddr));
2655 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2656 	for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2657 try_again:
2658 		oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2659 		if (ht == NULL)
2660 			break;
2661 		if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2662 			continue;
2663 
2664 		pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2665 		if (pp == NULL)
2666 			continue;
2667 		x86_hm_enter(pp);
2668 
2669 		newpte = oldpte;
2670 		/*
2671 		 * We found a page table entry in the desired range,
2672 		 * figure out the new attributes.
2673 		 */
2674 		if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2675 			if ((attr & PROT_WRITE) &&
2676 			    !PTE_GET(oldpte, PT_WRITABLE))
2677 				newpte |= PT_WRITABLE;
2678 
2679 			if ((attr & HAT_NOSYNC) &&
2680 			    PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2681 				newpte |= PT_NOSYNC;
2682 
2683 			if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2684 				newpte &= ~mmu.pt_nx;
2685 		}
2686 
2687 		if (what == HAT_LOAD_ATTR) {
2688 			if (!(attr & PROT_WRITE) &&
2689 			    PTE_GET(oldpte, PT_WRITABLE))
2690 				newpte &= ~PT_WRITABLE;
2691 
2692 			if (!(attr & HAT_NOSYNC) &&
2693 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2694 				newpte &= ~PT_SOFTWARE;
2695 
2696 			if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2697 				newpte |= mmu.pt_nx;
2698 		}
2699 
2700 		if (what == HAT_CLR_ATTR) {
2701 			if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2702 				newpte &= ~PT_WRITABLE;
2703 
2704 			if ((attr & HAT_NOSYNC) &&
2705 			    PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2706 				newpte &= ~PT_SOFTWARE;
2707 
2708 			if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2709 				newpte |= mmu.pt_nx;
2710 		}
2711 
2712 		/*
2713 		 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2714 		 * x86pte_set() depends on this.
2715 		 */
2716 		if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2717 			newpte |= PT_REF | PT_MOD;
2718 
2719 		/*
2720 		 * what about PROT_READ or others? this code only handles:
2721 		 * EXEC, WRITE, NOSYNC
2722 		 */
2723 
2724 		/*
2725 		 * If new PTE really changed, update the table.
2726 		 */
2727 		if (newpte != oldpte) {
2728 			entry = htable_va2entry(vaddr, ht);
2729 			oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2730 			if (oldpte != 0) {
2731 				x86_hm_exit(pp);
2732 				goto try_again;
2733 			}
2734 		}
2735 		x86_hm_exit(pp);
2736 	}
2737 	if (ht)
2738 		htable_release(ht);
2739 	XPV_ALLOW_MIGRATE();
2740 }
2741 
2742 /*
2743  * Various wrappers for hat_updateattr()
2744  */
2745 void
2746 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2747 {
2748 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2749 	hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2750 }
2751 
2752 void
2753 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2754 {
2755 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2756 	hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2757 }
2758 
2759 void
2760 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2761 {
2762 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2763 	hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2764 }
2765 
2766 void
2767 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2768 {
2769 	ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2770 	hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2771 }
2772 
2773 /*
2774  * size_t hat_getpagesize(hat, addr)
2775  *	returns pagesize in bytes for <hat, addr>. returns -1 of there is
2776  *	no mapping. This is an advisory call.
2777  */
2778 ssize_t
2779 hat_getpagesize(hat_t *hat, caddr_t addr)
2780 {
2781 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2782 	htable_t	*ht;
2783 	size_t		pagesize;
2784 
2785 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2786 	if (IN_VA_HOLE(vaddr))
2787 		return (-1);
2788 	ht = htable_getpage(hat, vaddr, NULL);
2789 	if (ht == NULL)
2790 		return (-1);
2791 	pagesize = LEVEL_SIZE(ht->ht_level);
2792 	htable_release(ht);
2793 	return (pagesize);
2794 }
2795 
2796 
2797 
2798 /*
2799  * pfn_t hat_getpfnum(hat, addr)
2800  *	returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2801  */
2802 pfn_t
2803 hat_getpfnum(hat_t *hat, caddr_t addr)
2804 {
2805 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2806 	htable_t	*ht;
2807 	uint_t		entry;
2808 	pfn_t		pfn = PFN_INVALID;
2809 
2810 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2811 	if (khat_running == 0)
2812 		return (PFN_INVALID);
2813 
2814 	if (IN_VA_HOLE(vaddr))
2815 		return (PFN_INVALID);
2816 
2817 	XPV_DISALLOW_MIGRATE();
2818 	/*
2819 	 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2820 	 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2821 	 * this up.
2822 	 */
2823 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2824 		x86pte_t pte;
2825 		pgcnt_t pg_index;
2826 
2827 		pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2828 		pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2829 		if (PTE_ISVALID(pte))
2830 			/*LINTED [use of constant 0 causes a lint warning] */
2831 			pfn = PTE2PFN(pte, 0);
2832 		XPV_ALLOW_MIGRATE();
2833 		return (pfn);
2834 	}
2835 
2836 	ht = htable_getpage(hat, vaddr, &entry);
2837 	if (ht == NULL) {
2838 		XPV_ALLOW_MIGRATE();
2839 		return (PFN_INVALID);
2840 	}
2841 	ASSERT(vaddr >= ht->ht_vaddr);
2842 	ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2843 	pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2844 	if (ht->ht_level > 0)
2845 		pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2846 	htable_release(ht);
2847 	XPV_ALLOW_MIGRATE();
2848 	return (pfn);
2849 }
2850 
2851 /*
2852  * int hat_probe(hat, addr)
2853  *	return 0 if no valid mapping is present.  Faster version
2854  *	of hat_getattr in certain architectures.
2855  */
2856 int
2857 hat_probe(hat_t *hat, caddr_t addr)
2858 {
2859 	uintptr_t	vaddr = ALIGN2PAGE(addr);
2860 	uint_t		entry;
2861 	htable_t	*ht;
2862 	pgcnt_t		pg_off;
2863 
2864 	ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2865 	ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2866 	if (IN_VA_HOLE(vaddr))
2867 		return (0);
2868 
2869 	/*
2870 	 * Most common use of hat_probe is from segmap. We special case it
2871 	 * for performance.
2872 	 */
2873 	if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2874 		pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2875 		if (mmu.pae_hat)
2876 			return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2877 		else
2878 			return (PTE_ISVALID(
2879 			    ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2880 	}
2881 
2882 	ht = htable_getpage(hat, vaddr, &entry);
2883 	htable_release(ht);
2884 	return (ht != NULL);
2885 }
2886 
2887 /*
2888  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2889  */
2890 static int
2891 is_it_dism(hat_t *hat, caddr_t va)
2892 {
2893 	struct seg *seg;
2894 	struct shm_data *shmd;
2895 	struct spt_data *sptd;
2896 
2897 	seg = as_findseg(hat->hat_as, va, 0);
2898 	ASSERT(seg != NULL);
2899 	ASSERT(seg->s_base <= va);
2900 	shmd = (struct shm_data *)seg->s_data;
2901 	ASSERT(shmd != NULL);
2902 	sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2903 	ASSERT(sptd != NULL);
2904 	if (sptd->spt_flags & SHM_PAGEABLE)
2905 		return (1);
2906 	return (0);
2907 }
2908 
2909 /*
2910  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2911  * except that we use the ism_hat's existing mappings to determine the pages
2912  * and protections to use for this hat. If we find a full properly aligned
2913  * and sized pagetable, we will attempt to share the pagetable itself.
2914  */
2915 /*ARGSUSED*/
2916 int
2917 hat_share(
2918 	hat_t		*hat,
2919 	caddr_t		addr,
2920 	hat_t		*ism_hat,
2921 	caddr_t		src_addr,
2922 	size_t		len,	/* almost useless value, see below.. */
2923 	uint_t		ismszc)
2924 {
2925 	uintptr_t	vaddr_start = (uintptr_t)addr;
2926 	uintptr_t	vaddr;
2927 	uintptr_t	eaddr = vaddr_start + len;
2928 	uintptr_t	ism_addr_start = (uintptr_t)src_addr;
2929 	uintptr_t	ism_addr = ism_addr_start;
2930 	uintptr_t	e_ism_addr = ism_addr + len;
2931 	htable_t	*ism_ht = NULL;
2932 	htable_t	*ht;
2933 	x86pte_t	pte;
2934 	page_t		*pp;
2935 	pfn_t		pfn;
2936 	level_t		l;
2937 	pgcnt_t		pgcnt;
2938 	uint_t		prot;
2939 	int		is_dism;
2940 	int		flags;
2941 
2942 	/*
2943 	 * We might be asked to share an empty DISM hat by as_dup()
2944 	 */
2945 	ASSERT(hat != kas.a_hat);
2946 	ASSERT(eaddr <= _userlimit);
2947 	if (!(ism_hat->hat_flags & HAT_SHARED)) {
2948 		ASSERT(hat_get_mapped_size(ism_hat) == 0);
2949 		return (0);
2950 	}
2951 	XPV_DISALLOW_MIGRATE();
2952 
2953 	/*
2954 	 * The SPT segment driver often passes us a size larger than there are
2955 	 * valid mappings. That's because it rounds the segment size up to a
2956 	 * large pagesize, even if the actual memory mapped by ism_hat is less.
2957 	 */
2958 	ASSERT(IS_PAGEALIGNED(vaddr_start));
2959 	ASSERT(IS_PAGEALIGNED(ism_addr_start));
2960 	ASSERT(ism_hat->hat_flags & HAT_SHARED);
2961 	is_dism = is_it_dism(hat, addr);
2962 	while (ism_addr < e_ism_addr) {
2963 		/*
2964 		 * use htable_walk to get the next valid ISM mapping
2965 		 */
2966 		pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2967 		if (ism_ht == NULL)
2968 			break;
2969 
2970 		/*
2971 		 * First check to see if we already share the page table.
2972 		 */
2973 		l = ism_ht->ht_level;
2974 		vaddr = vaddr_start + (ism_addr - ism_addr_start);
2975 		ht = htable_lookup(hat, vaddr, l);
2976 		if (ht != NULL) {
2977 			if (ht->ht_flags & HTABLE_SHARED_PFN)
2978 				goto shared;
2979 			htable_release(ht);
2980 			goto not_shared;
2981 		}
2982 
2983 		/*
2984 		 * Can't ever share top table.
2985 		 */
2986 		if (l == mmu.max_level)
2987 			goto not_shared;
2988 
2989 		/*
2990 		 * Avoid level mismatches later due to DISM faults.
2991 		 */
2992 		if (is_dism && l > 0)
2993 			goto not_shared;
2994 
2995 		/*
2996 		 * addresses and lengths must align
2997 		 * table must be fully populated
2998 		 * no lower level page tables
2999 		 */
3000 		if (ism_addr != ism_ht->ht_vaddr ||
3001 		    (vaddr & LEVEL_OFFSET(l + 1)) != 0)
3002 			goto not_shared;
3003 
3004 		/*
3005 		 * The range of address space must cover a full table.
3006 		 */
3007 		if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
3008 			goto not_shared;
3009 
3010 		/*
3011 		 * All entries in the ISM page table must be leaf PTEs.
3012 		 */
3013 		if (l > 0) {
3014 			int e;
3015 
3016 			/*
3017 			 * We know the 0th is from htable_walk() above.
3018 			 */
3019 			for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3020 				x86pte_t pte;
3021 				pte = x86pte_get(ism_ht, e);
3022 				if (!PTE_ISPAGE(pte, l))
3023 					goto not_shared;
3024 			}
3025 		}
3026 
3027 		/*
3028 		 * share the page table
3029 		 */
3030 		ht = htable_create(hat, vaddr, l, ism_ht);
3031 shared:
3032 		ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3033 		ASSERT(ht->ht_shares == ism_ht);
3034 		hat->hat_ism_pgcnt +=
3035 		    (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3036 		    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3037 		ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3038 		htable_release(ht);
3039 		ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3040 		htable_release(ism_ht);
3041 		ism_ht = NULL;
3042 		continue;
3043 
3044 not_shared:
3045 		/*
3046 		 * Unable to share the page table. Instead we will
3047 		 * create new mappings from the values in the ISM mappings.
3048 		 * Figure out what level size mappings to use;
3049 		 */
3050 		for (l = ism_ht->ht_level; l > 0; --l) {
3051 			if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3052 			    (vaddr & LEVEL_OFFSET(l)) == 0)
3053 				break;
3054 		}
3055 
3056 		/*
3057 		 * The ISM mapping might be larger than the share area,
3058 		 * be careful to truncate it if needed.
3059 		 */
3060 		if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3061 			pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3062 		} else {
3063 			pgcnt = mmu_btop(eaddr - vaddr);
3064 			l = 0;
3065 		}
3066 
3067 		pfn = PTE2PFN(pte, ism_ht->ht_level);
3068 		ASSERT(pfn != PFN_INVALID);
3069 		while (pgcnt > 0) {
3070 			/*
3071 			 * Make a new pte for the PFN for this level.
3072 			 * Copy protections for the pte from the ISM pte.
3073 			 */
3074 			pp = page_numtopp_nolock(pfn);
3075 			ASSERT(pp != NULL);
3076 
3077 			prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3078 			if (PTE_GET(pte, PT_WRITABLE))
3079 				prot |= PROT_WRITE;
3080 			if (!PTE_GET(pte, PT_NX))
3081 				prot |= PROT_EXEC;
3082 
3083 			flags = HAT_LOAD;
3084 			if (!is_dism)
3085 				flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3086 			while (hati_load_common(hat, vaddr, pp, prot, flags,
3087 			    l, pfn) != 0) {
3088 				if (l == 0)
3089 					panic("hati_load_common() failure");
3090 				--l;
3091 			}
3092 
3093 			vaddr += LEVEL_SIZE(l);
3094 			ism_addr += LEVEL_SIZE(l);
3095 			pfn += mmu_btop(LEVEL_SIZE(l));
3096 			pgcnt -= mmu_btop(LEVEL_SIZE(l));
3097 		}
3098 	}
3099 	if (ism_ht != NULL)
3100 		htable_release(ism_ht);
3101 	XPV_ALLOW_MIGRATE();
3102 	return (0);
3103 }
3104 
3105 
3106 /*
3107  * hat_unshare() is similar to hat_unload_callback(), but
3108  * we have to look for empty shared pagetables. Note that
3109  * hat_unshare() is always invoked against an entire segment.
3110  */
3111 /*ARGSUSED*/
3112 void
3113 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3114 {
3115 	uint64_t	vaddr = (uintptr_t)addr;
3116 	uintptr_t	eaddr = vaddr + len;
3117 	htable_t	*ht = NULL;
3118 	uint_t		need_demaps = 0;
3119 	int		flags = HAT_UNLOAD_UNMAP;
3120 	level_t		l;
3121 
3122 	ASSERT(hat != kas.a_hat);
3123 	ASSERT(eaddr <= _userlimit);
3124 	ASSERT(IS_PAGEALIGNED(vaddr));
3125 	ASSERT(IS_PAGEALIGNED(eaddr));
3126 	XPV_DISALLOW_MIGRATE();
3127 
3128 	/*
3129 	 * First go through and remove any shared pagetables.
3130 	 *
3131 	 * Note that it's ok to delay the TLB shootdown till the entire range is
3132 	 * finished, because if hat_pageunload() were to unload a shared
3133 	 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3134 	 */
3135 	l = mmu.max_page_level;
3136 	if (l == mmu.max_level)
3137 		--l;
3138 	for (; l >= 0; --l) {
3139 		for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3140 		    vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3141 			ASSERT(!IN_VA_HOLE(vaddr));
3142 			/*
3143 			 * find a pagetable that maps the current address
3144 			 */
3145 			ht = htable_lookup(hat, vaddr, l);
3146 			if (ht == NULL)
3147 				continue;
3148 			if (ht->ht_flags & HTABLE_SHARED_PFN) {
3149 				/*
3150 				 * clear page count, set valid_cnt to 0,
3151 				 * let htable_release() finish the job
3152 				 */
3153 				hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3154 				    (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3155 				ht->ht_valid_cnt = 0;
3156 				need_demaps = 1;
3157 			}
3158 			htable_release(ht);
3159 		}
3160 	}
3161 
3162 	/*
3163 	 * flush the TLBs - since we're probably dealing with MANY mappings
3164 	 * we do just one CR3 reload.
3165 	 */
3166 	if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3167 		hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3168 
3169 	/*
3170 	 * Now go back and clean up any unaligned mappings that
3171 	 * couldn't share pagetables.
3172 	 */
3173 	if (!is_it_dism(hat, addr))
3174 		flags |= HAT_UNLOAD_UNLOCK;
3175 	hat_unload(hat, addr, len, flags);
3176 	XPV_ALLOW_MIGRATE();
3177 }
3178 
3179 
3180 /*
3181  * hat_reserve() does nothing
3182  */
3183 /*ARGSUSED*/
3184 void
3185 hat_reserve(struct as *as, caddr_t addr, size_t len)
3186 {
3187 }
3188 
3189 
3190 /*
3191  * Called when all mappings to a page should have write permission removed.
3192  * Mostly stolen from hat_pagesync()
3193  */
3194 static void
3195 hati_page_clrwrt(struct page *pp)
3196 {
3197 	hment_t		*hm = NULL;
3198 	htable_t	*ht;
3199 	uint_t		entry;
3200 	x86pte_t	old;
3201 	x86pte_t	new;
3202 	uint_t		pszc = 0;
3203 
3204 	XPV_DISALLOW_MIGRATE();
3205 next_size:
3206 	/*
3207 	 * walk thru the mapping list clearing write permission
3208 	 */
3209 	x86_hm_enter(pp);
3210 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3211 		if (ht->ht_level < pszc)
3212 			continue;
3213 		old = x86pte_get(ht, entry);
3214 
3215 		for (;;) {
3216 			/*
3217 			 * Is this mapping of interest?
3218 			 */
3219 			if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3220 			    PTE_GET(old, PT_WRITABLE) == 0)
3221 				break;
3222 
3223 			/*
3224 			 * Clear ref/mod writable bits. This requires cross
3225 			 * calls to ensure any executing TLBs see cleared bits.
3226 			 */
3227 			new = old;
3228 			PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3229 			old = hati_update_pte(ht, entry, old, new);
3230 			if (old != 0)
3231 				continue;
3232 
3233 			break;
3234 		}
3235 	}
3236 	x86_hm_exit(pp);
3237 	while (pszc < pp->p_szc) {
3238 		page_t *tpp;
3239 		pszc++;
3240 		tpp = PP_GROUPLEADER(pp, pszc);
3241 		if (pp != tpp) {
3242 			pp = tpp;
3243 			goto next_size;
3244 		}
3245 	}
3246 	XPV_ALLOW_MIGRATE();
3247 }
3248 
3249 /*
3250  * void hat_page_setattr(pp, flag)
3251  * void hat_page_clrattr(pp, flag)
3252  *	used to set/clr ref/mod bits.
3253  */
3254 void
3255 hat_page_setattr(struct page *pp, uint_t flag)
3256 {
3257 	vnode_t		*vp = pp->p_vnode;
3258 	kmutex_t	*vphm = NULL;
3259 	page_t		**listp;
3260 	int		noshuffle;
3261 
3262 	noshuffle = flag & P_NSH;
3263 	flag &= ~P_NSH;
3264 
3265 	if (PP_GETRM(pp, flag) == flag)
3266 		return;
3267 
3268 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3269 	    !noshuffle) {
3270 		vphm = page_vnode_mutex(vp);
3271 		mutex_enter(vphm);
3272 	}
3273 
3274 	PP_SETRM(pp, flag);
3275 
3276 	if (vphm != NULL) {
3277 
3278 		/*
3279 		 * Some File Systems examine v_pages for NULL w/o
3280 		 * grabbing the vphm mutex. Must not let it become NULL when
3281 		 * pp is the only page on the list.
3282 		 */
3283 		if (pp->p_vpnext != pp) {
3284 			page_vpsub(&vp->v_pages, pp);
3285 			if (vp->v_pages != NULL)
3286 				listp = &vp->v_pages->p_vpprev->p_vpnext;
3287 			else
3288 				listp = &vp->v_pages;
3289 			page_vpadd(listp, pp);
3290 		}
3291 		mutex_exit(vphm);
3292 	}
3293 }
3294 
3295 void
3296 hat_page_clrattr(struct page *pp, uint_t flag)
3297 {
3298 	vnode_t		*vp = pp->p_vnode;
3299 	ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3300 
3301 	/*
3302 	 * Caller is expected to hold page's io lock for VMODSORT to work
3303 	 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3304 	 * bit is cleared.
3305 	 * We don't have assert to avoid tripping some existing third party
3306 	 * code. The dirty page is moved back to top of the v_page list
3307 	 * after IO is done in pvn_write_done().
3308 	 */
3309 	PP_CLRRM(pp, flag);
3310 
3311 	if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3312 
3313 		/*
3314 		 * VMODSORT works by removing write permissions and getting
3315 		 * a fault when a page is made dirty. At this point
3316 		 * we need to remove write permission from all mappings
3317 		 * to this page.
3318 		 */
3319 		hati_page_clrwrt(pp);
3320 	}
3321 }
3322 
3323 /*
3324  *	If flag is specified, returns 0 if attribute is disabled
3325  *	and non zero if enabled.  If flag specifes multiple attributes
3326  *	then returns 0 if ALL attributes are disabled.  This is an advisory
3327  *	call.
3328  */
3329 uint_t
3330 hat_page_getattr(struct page *pp, uint_t flag)
3331 {
3332 	return (PP_GETRM(pp, flag));
3333 }
3334 
3335 
3336 /*
3337  * common code used by hat_pageunload() and hment_steal()
3338  */
3339 hment_t *
3340 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3341 {
3342 	x86pte_t old_pte;
3343 	pfn_t pfn = pp->p_pagenum;
3344 	hment_t *hm;
3345 
3346 	/*
3347 	 * We need to acquire a hold on the htable in order to
3348 	 * do the invalidate. We know the htable must exist, since
3349 	 * unmap's don't release the htable until after removing any
3350 	 * hment. Having x86_hm_enter() keeps that from proceeding.
3351 	 */
3352 	htable_acquire(ht);
3353 
3354 	/*
3355 	 * Invalidate the PTE and remove the hment.
3356 	 */
3357 	old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3358 	if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3359 		panic("x86pte_inval() failure found PTE = " FMT_PTE
3360 		    " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3361 		    old_pte, pfn, (uintptr_t)ht, entry);
3362 	}
3363 
3364 	/*
3365 	 * Clean up all the htable information for this mapping
3366 	 */
3367 	ASSERT(ht->ht_valid_cnt > 0);
3368 	HTABLE_DEC(ht->ht_valid_cnt);
3369 	PGCNT_DEC(ht->ht_hat, ht->ht_level);
3370 
3371 	/*
3372 	 * sync ref/mod bits to the page_t
3373 	 */
3374 	if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3375 		hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3376 
3377 	/*
3378 	 * Remove the mapping list entry for this page.
3379 	 */
3380 	hm = hment_remove(pp, ht, entry);
3381 
3382 	/*
3383 	 * drop the mapping list lock so that we might free the
3384 	 * hment and htable.
3385 	 */
3386 	x86_hm_exit(pp);
3387 	htable_release(ht);
3388 	return (hm);
3389 }
3390 
3391 extern int	vpm_enable;
3392 /*
3393  * Unload all translations to a page. If the page is a subpage of a large
3394  * page, the large page mappings are also removed.
3395  *
3396  * The forceflags are unused.
3397  */
3398 
3399 /*ARGSUSED*/
3400 static int
3401 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3402 {
3403 	page_t		*cur_pp = pp;
3404 	hment_t		*hm;
3405 	hment_t		*prev;
3406 	htable_t	*ht;
3407 	uint_t		entry;
3408 	level_t		level;
3409 
3410 	XPV_DISALLOW_MIGRATE();
3411 
3412 	/*
3413 	 * prevent recursion due to kmem_free()
3414 	 */
3415 	++curthread->t_hatdepth;
3416 	ASSERT(curthread->t_hatdepth < 16);
3417 
3418 #if defined(__amd64)
3419 	/*
3420 	 * clear the vpm ref.
3421 	 */
3422 	if (vpm_enable) {
3423 		pp->p_vpmref = 0;
3424 	}
3425 #endif
3426 	/*
3427 	 * The loop with next_size handles pages with multiple pagesize mappings
3428 	 */
3429 next_size:
3430 	for (;;) {
3431 
3432 		/*
3433 		 * Get a mapping list entry
3434 		 */
3435 		x86_hm_enter(cur_pp);
3436 		for (prev = NULL; ; prev = hm) {
3437 			hm = hment_walk(cur_pp, &ht, &entry, prev);
3438 			if (hm == NULL) {
3439 				x86_hm_exit(cur_pp);
3440 
3441 				/*
3442 				 * If not part of a larger page, we're done.
3443 				 */
3444 				if (cur_pp->p_szc <= pg_szcd) {
3445 					ASSERT(curthread->t_hatdepth > 0);
3446 					--curthread->t_hatdepth;
3447 					XPV_ALLOW_MIGRATE();
3448 					return (0);
3449 				}
3450 
3451 				/*
3452 				 * Else check the next larger page size.
3453 				 * hat_page_demote() may decrease p_szc
3454 				 * but that's ok we'll just take an extra
3455 				 * trip discover there're no larger mappings
3456 				 * and return.
3457 				 */
3458 				++pg_szcd;
3459 				cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3460 				goto next_size;
3461 			}
3462 
3463 			/*
3464 			 * If this mapping size matches, remove it.
3465 			 */
3466 			level = ht->ht_level;
3467 			if (level == pg_szcd)
3468 				break;
3469 		}
3470 
3471 		/*
3472 		 * Remove the mapping list entry for this page.
3473 		 * Note this does the x86_hm_exit() for us.
3474 		 */
3475 		hm = hati_page_unmap(cur_pp, ht, entry);
3476 		if (hm != NULL)
3477 			hment_free(hm);
3478 	}
3479 }
3480 
3481 int
3482 hat_pageunload(struct page *pp, uint_t forceflag)
3483 {
3484 	ASSERT(PAGE_EXCL(pp));
3485 	return (hati_pageunload(pp, 0, forceflag));
3486 }
3487 
3488 /*
3489  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3490  * page level that included pp.
3491  *
3492  * pp must be locked EXCL. Even though no other constituent pages are locked
3493  * it's legal to unload large mappings to pp because all constituent pages of
3494  * large locked mappings have to be locked SHARED.  therefore if we have EXCL
3495  * lock on one of constituent pages none of the large mappings to pp are
3496  * locked.
3497  *
3498  * Change (always decrease) p_szc field starting from the last constituent
3499  * page and ending with root constituent page so that root's pszc always shows
3500  * the area where hat_page_demote() may be active.
3501  *
3502  * This mechanism is only used for file system pages where it's not always
3503  * possible to get EXCL locks on all constituent pages to demote the size code
3504  * (as is done for anonymous or kernel large pages).
3505  */
3506 void
3507 hat_page_demote(page_t *pp)
3508 {
3509 	uint_t		pszc;
3510 	uint_t		rszc;
3511 	uint_t		szc;
3512 	page_t		*rootpp;
3513 	page_t		*firstpp;
3514 	page_t		*lastpp;
3515 	pgcnt_t		pgcnt;
3516 
3517 	ASSERT(PAGE_EXCL(pp));
3518 	ASSERT(!PP_ISFREE(pp));
3519 	ASSERT(page_szc_lock_assert(pp));
3520 
3521 	if (pp->p_szc == 0)
3522 		return;
3523 
3524 	rootpp = PP_GROUPLEADER(pp, 1);
3525 	(void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3526 
3527 	/*
3528 	 * all large mappings to pp are gone
3529 	 * and no new can be setup since pp is locked exclusively.
3530 	 *
3531 	 * Lock the root to make sure there's only one hat_page_demote()
3532 	 * outstanding within the area of this root's pszc.
3533 	 *
3534 	 * Second potential hat_page_demote() is already eliminated by upper
3535 	 * VM layer via page_szc_lock() but we don't rely on it and use our
3536 	 * own locking (so that upper layer locking can be changed without
3537 	 * assumptions that hat depends on upper layer VM to prevent multiple
3538 	 * hat_page_demote() to be issued simultaneously to the same large
3539 	 * page).
3540 	 */
3541 again:
3542 	pszc = pp->p_szc;
3543 	if (pszc == 0)
3544 		return;
3545 	rootpp = PP_GROUPLEADER(pp, pszc);
3546 	x86_hm_enter(rootpp);
3547 	/*
3548 	 * If root's p_szc is different from pszc we raced with another
3549 	 * hat_page_demote().  Drop the lock and try to find the root again.
3550 	 * If root's p_szc is greater than pszc previous hat_page_demote() is
3551 	 * not done yet.  Take and release mlist lock of root's root to wait
3552 	 * for previous hat_page_demote() to complete.
3553 	 */
3554 	if ((rszc = rootpp->p_szc) != pszc) {
3555 		x86_hm_exit(rootpp);
3556 		if (rszc > pszc) {
3557 			/* p_szc of a locked non free page can't increase */
3558 			ASSERT(pp != rootpp);
3559 
3560 			rootpp = PP_GROUPLEADER(rootpp, rszc);
3561 			x86_hm_enter(rootpp);
3562 			x86_hm_exit(rootpp);
3563 		}
3564 		goto again;
3565 	}
3566 	ASSERT(pp->p_szc == pszc);
3567 
3568 	/*
3569 	 * Decrement by 1 p_szc of every constituent page of a region that
3570 	 * covered pp. For example if original szc is 3 it gets changed to 2
3571 	 * everywhere except in region 2 that covered pp. Region 2 that
3572 	 * covered pp gets demoted to 1 everywhere except in region 1 that
3573 	 * covered pp. The region 1 that covered pp is demoted to region
3574 	 * 0. It's done this way because from region 3 we removed level 3
3575 	 * mappings, from region 2 that covered pp we removed level 2 mappings
3576 	 * and from region 1 that covered pp we removed level 1 mappings.  All
3577 	 * changes are done from from high pfn's to low pfn's so that roots
3578 	 * are changed last allowing one to know the largest region where
3579 	 * hat_page_demote() is stil active by only looking at the root page.
3580 	 *
3581 	 * This algorithm is implemented in 2 while loops. First loop changes
3582 	 * p_szc of pages to the right of pp's level 1 region and second
3583 	 * loop changes p_szc of pages of level 1 region that covers pp
3584 	 * and all pages to the left of level 1 region that covers pp.
3585 	 * In the first loop p_szc keeps dropping with every iteration
3586 	 * and in the second loop it keeps increasing with every iteration.
3587 	 *
3588 	 * First loop description: Demote pages to the right of pp outside of
3589 	 * level 1 region that covers pp.  In every iteration of the while
3590 	 * loop below find the last page of szc region and the first page of
3591 	 * (szc - 1) region that is immediately to the right of (szc - 1)
3592 	 * region that covers pp.  From last such page to first such page
3593 	 * change every page's szc to szc - 1. Decrement szc and continue
3594 	 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3595 	 * of szc region skip to the next iteration.
3596 	 */
3597 	szc = pszc;
3598 	while (szc > 1) {
3599 		lastpp = PP_GROUPLEADER(pp, szc);
3600 		pgcnt = page_get_pagecnt(szc);
3601 		lastpp += pgcnt - 1;
3602 		firstpp = PP_GROUPLEADER(pp, (szc - 1));
3603 		pgcnt = page_get_pagecnt(szc - 1);
3604 		if (lastpp - firstpp < pgcnt) {
3605 			szc--;
3606 			continue;
3607 		}
3608 		firstpp += pgcnt;
3609 		while (lastpp != firstpp) {
3610 			ASSERT(lastpp->p_szc == pszc);
3611 			lastpp->p_szc = szc - 1;
3612 			lastpp--;
3613 		}
3614 		firstpp->p_szc = szc - 1;
3615 		szc--;
3616 	}
3617 
3618 	/*
3619 	 * Second loop description:
3620 	 * First iteration changes p_szc to 0 of every
3621 	 * page of level 1 region that covers pp.
3622 	 * Subsequent iterations find last page of szc region
3623 	 * immediately to the left of szc region that covered pp
3624 	 * and first page of (szc + 1) region that covers pp.
3625 	 * From last to first page change p_szc of every page to szc.
3626 	 * Increment szc and continue looping until szc is pszc.
3627 	 * If pp belongs to the fist szc region of (szc + 1) region
3628 	 * skip to the next iteration.
3629 	 *
3630 	 */
3631 	szc = 0;
3632 	while (szc < pszc) {
3633 		firstpp = PP_GROUPLEADER(pp, (szc + 1));
3634 		if (szc == 0) {
3635 			pgcnt = page_get_pagecnt(1);
3636 			lastpp = firstpp + (pgcnt - 1);
3637 		} else {
3638 			lastpp = PP_GROUPLEADER(pp, szc);
3639 			if (firstpp == lastpp) {
3640 				szc++;
3641 				continue;
3642 			}
3643 			lastpp--;
3644 			pgcnt = page_get_pagecnt(szc);
3645 		}
3646 		while (lastpp != firstpp) {
3647 			ASSERT(lastpp->p_szc == pszc);
3648 			lastpp->p_szc = szc;
3649 			lastpp--;
3650 		}
3651 		firstpp->p_szc = szc;
3652 		if (firstpp == rootpp)
3653 			break;
3654 		szc++;
3655 	}
3656 	x86_hm_exit(rootpp);
3657 }
3658 
3659 /*
3660  * get hw stats from hardware into page struct and reset hw stats
3661  * returns attributes of page
3662  * Flags for hat_pagesync, hat_getstat, hat_sync
3663  *
3664  * define	HAT_SYNC_ZERORM		0x01
3665  *
3666  * Additional flags for hat_pagesync
3667  *
3668  * define	HAT_SYNC_STOPON_REF	0x02
3669  * define	HAT_SYNC_STOPON_MOD	0x04
3670  * define	HAT_SYNC_STOPON_RM	0x06
3671  * define	HAT_SYNC_STOPON_SHARED	0x08
3672  */
3673 uint_t
3674 hat_pagesync(struct page *pp, uint_t flags)
3675 {
3676 	hment_t		*hm = NULL;
3677 	htable_t	*ht;
3678 	uint_t		entry;
3679 	x86pte_t	old, save_old;
3680 	x86pte_t	new;
3681 	uchar_t		nrmbits = P_REF|P_MOD|P_RO;
3682 	extern ulong_t	po_share;
3683 	page_t		*save_pp = pp;
3684 	uint_t		pszc = 0;
3685 
3686 	ASSERT(PAGE_LOCKED(pp) || panicstr);
3687 
3688 	if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3689 		return (pp->p_nrm & nrmbits);
3690 
3691 	if ((flags & HAT_SYNC_ZERORM) == 0) {
3692 
3693 		if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3694 			return (pp->p_nrm & nrmbits);
3695 
3696 		if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3697 			return (pp->p_nrm & nrmbits);
3698 
3699 		if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3700 		    hat_page_getshare(pp) > po_share) {
3701 			if (PP_ISRO(pp))
3702 				PP_SETREF(pp);
3703 			return (pp->p_nrm & nrmbits);
3704 		}
3705 	}
3706 
3707 	XPV_DISALLOW_MIGRATE();
3708 next_size:
3709 	/*
3710 	 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3711 	 */
3712 	x86_hm_enter(pp);
3713 	while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3714 		if (ht->ht_level < pszc)
3715 			continue;
3716 		old = x86pte_get(ht, entry);
3717 try_again:
3718 
3719 		ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3720 
3721 		if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3722 			continue;
3723 
3724 		save_old = old;
3725 		if ((flags & HAT_SYNC_ZERORM) != 0) {
3726 
3727 			/*
3728 			 * Need to clear ref or mod bits. Need to demap
3729 			 * to make sure any executing TLBs see cleared bits.
3730 			 */
3731 			new = old;
3732 			PTE_CLR(new, PT_REF | PT_MOD);
3733 			old = hati_update_pte(ht, entry, old, new);
3734 			if (old != 0)
3735 				goto try_again;
3736 
3737 			old = save_old;
3738 		}
3739 
3740 		/*
3741 		 * Sync the PTE
3742 		 */
3743 		if (!(flags & HAT_SYNC_ZERORM) &&
3744 		    PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3745 			hati_sync_pte_to_page(pp, old, ht->ht_level);
3746 
3747 		/*
3748 		 * can stop short if we found a ref'd or mod'd page
3749 		 */
3750 		if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3751 		    (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3752 			x86_hm_exit(pp);
3753 			goto done;
3754 		}
3755 	}
3756 	x86_hm_exit(pp);
3757 	while (pszc < pp->p_szc) {
3758 		page_t *tpp;
3759 		pszc++;
3760 		tpp = PP_GROUPLEADER(pp, pszc);
3761 		if (pp != tpp) {
3762 			pp = tpp;
3763 			goto next_size;
3764 		}
3765 	}
3766 done:
3767 	XPV_ALLOW_MIGRATE();
3768 	return (save_pp->p_nrm & nrmbits);
3769 }
3770 
3771 /*
3772  * returns approx number of mappings to this pp.  A return of 0 implies
3773  * there are no mappings to the page.
3774  */
3775 ulong_t
3776 hat_page_getshare(page_t *pp)
3777 {
3778 	uint_t cnt;
3779 	cnt = hment_mapcnt(pp);
3780 #if defined(__amd64)
3781 	if (vpm_enable && pp->p_vpmref) {
3782 		cnt += 1;
3783 	}
3784 #endif
3785 	return (cnt);
3786 }
3787 
3788 /*
3789  * Return 1 the number of mappings exceeds sh_thresh. Return 0
3790  * otherwise.
3791  */
3792 int
3793 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3794 {
3795 	return (hat_page_getshare(pp) > sh_thresh);
3796 }
3797 
3798 /*
3799  * hat_softlock isn't supported anymore
3800  */
3801 /*ARGSUSED*/
3802 faultcode_t
3803 hat_softlock(
3804 	hat_t *hat,
3805 	caddr_t addr,
3806 	size_t *len,
3807 	struct page **page_array,
3808 	uint_t flags)
3809 {
3810 	return (FC_NOSUPPORT);
3811 }
3812 
3813 
3814 
3815 /*
3816  * Routine to expose supported HAT features to platform independent code.
3817  */
3818 /*ARGSUSED*/
3819 int
3820 hat_supported(enum hat_features feature, void *arg)
3821 {
3822 	switch (feature) {
3823 
3824 	case HAT_SHARED_PT:	/* this is really ISM */
3825 		return (1);
3826 
3827 	case HAT_DYNAMIC_ISM_UNMAP:
3828 		return (0);
3829 
3830 	case HAT_VMODSORT:
3831 		return (1);
3832 
3833 	case HAT_SHARED_REGIONS:
3834 		return (0);
3835 
3836 	default:
3837 		panic("hat_supported() - unknown feature");
3838 	}
3839 	return (0);
3840 }
3841 
3842 /*
3843  * Called when a thread is exiting and has been switched to the kernel AS
3844  */
3845 void
3846 hat_thread_exit(kthread_t *thd)
3847 {
3848 	ASSERT(thd->t_procp->p_as == &kas);
3849 	XPV_DISALLOW_MIGRATE();
3850 	hat_switch(thd->t_procp->p_as->a_hat);
3851 	XPV_ALLOW_MIGRATE();
3852 }
3853 
3854 /*
3855  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3856  */
3857 /*ARGSUSED*/
3858 void
3859 hat_setup(hat_t *hat, int flags)
3860 {
3861 	XPV_DISALLOW_MIGRATE();
3862 	kpreempt_disable();
3863 
3864 	hat_switch(hat);
3865 
3866 	kpreempt_enable();
3867 	XPV_ALLOW_MIGRATE();
3868 }
3869 
3870 /*
3871  * Prepare for a CPU private mapping for the given address.
3872  *
3873  * The address can only be used from a single CPU and can be remapped
3874  * using hat_mempte_remap().  Return the address of the PTE.
3875  *
3876  * We do the htable_create() if necessary and increment the valid count so
3877  * the htable can't disappear.  We also hat_devload() the page table into
3878  * kernel so that the PTE is quickly accessed.
3879  */
3880 hat_mempte_t
3881 hat_mempte_setup(caddr_t addr)
3882 {
3883 	uintptr_t	va = (uintptr_t)addr;
3884 	htable_t	*ht;
3885 	uint_t		entry;
3886 	x86pte_t	oldpte;
3887 	hat_mempte_t	p;
3888 
3889 	ASSERT(IS_PAGEALIGNED(va));
3890 	ASSERT(!IN_VA_HOLE(va));
3891 	++curthread->t_hatdepth;
3892 	XPV_DISALLOW_MIGRATE();
3893 	ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3894 	if (ht == NULL) {
3895 		ht = htable_create(kas.a_hat, va, 0, NULL);
3896 		entry = htable_va2entry(va, ht);
3897 		ASSERT(ht->ht_level == 0);
3898 		oldpte = x86pte_get(ht, entry);
3899 	}
3900 	if (PTE_ISVALID(oldpte))
3901 		panic("hat_mempte_setup(): address already mapped"
3902 		    "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3903 
3904 	/*
3905 	 * increment ht_valid_cnt so that the pagetable can't disappear
3906 	 */
3907 	HTABLE_INC(ht->ht_valid_cnt);
3908 
3909 	/*
3910 	 * return the PTE physical address to the caller.
3911 	 */
3912 	htable_release(ht);
3913 	XPV_ALLOW_MIGRATE();
3914 	p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3915 	--curthread->t_hatdepth;
3916 	return (p);
3917 }
3918 
3919 /*
3920  * Release a CPU private mapping for the given address.
3921  * We decrement the htable valid count so it might be destroyed.
3922  */
3923 /*ARGSUSED1*/
3924 void
3925 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3926 {
3927 	htable_t	*ht;
3928 
3929 	XPV_DISALLOW_MIGRATE();
3930 	/*
3931 	 * invalidate any left over mapping and decrement the htable valid count
3932 	 */
3933 #ifdef __xpv
3934 	if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3935 	    UVMF_INVLPG | UVMF_LOCAL))
3936 		panic("HYPERVISOR_update_va_mapping() failed");
3937 #else
3938 	{
3939 		x86pte_t *pteptr;
3940 
3941 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
3942 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3943 		if (mmu.pae_hat)
3944 			*pteptr = 0;
3945 		else
3946 			*(x86pte32_t *)pteptr = 0;
3947 		mmu_tlbflush_entry(addr);
3948 		x86pte_mapout();
3949 	}
3950 #endif
3951 
3952 	ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3953 	if (ht == NULL)
3954 		panic("hat_mempte_release(): invalid address");
3955 	ASSERT(ht->ht_level == 0);
3956 	HTABLE_DEC(ht->ht_valid_cnt);
3957 	htable_release(ht);
3958 	XPV_ALLOW_MIGRATE();
3959 }
3960 
3961 /*
3962  * Apply a temporary CPU private mapping to a page. We flush the TLB only
3963  * on this CPU, so this ought to have been called with preemption disabled.
3964  */
3965 void
3966 hat_mempte_remap(
3967 	pfn_t		pfn,
3968 	caddr_t		addr,
3969 	hat_mempte_t	pte_pa,
3970 	uint_t		attr,
3971 	uint_t		flags)
3972 {
3973 	uintptr_t	va = (uintptr_t)addr;
3974 	x86pte_t	pte;
3975 
3976 	/*
3977 	 * Remap the given PTE to the new page's PFN. Invalidate only
3978 	 * on this CPU.
3979 	 */
3980 #ifdef DEBUG
3981 	htable_t	*ht;
3982 	uint_t		entry;
3983 
3984 	ASSERT(IS_PAGEALIGNED(va));
3985 	ASSERT(!IN_VA_HOLE(va));
3986 	ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3987 	ASSERT(ht != NULL);
3988 	ASSERT(ht->ht_level == 0);
3989 	ASSERT(ht->ht_valid_cnt > 0);
3990 	ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3991 	htable_release(ht);
3992 #endif
3993 	XPV_DISALLOW_MIGRATE();
3994 	pte = hati_mkpte(pfn, attr, 0, flags);
3995 #ifdef __xpv
3996 	if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3997 		panic("HYPERVISOR_update_va_mapping() failed");
3998 #else
3999 	{
4000 		x86pte_t *pteptr;
4001 
4002 		pteptr = x86pte_mapin(mmu_btop(pte_pa),
4003 		    (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
4004 		if (mmu.pae_hat)
4005 			*(x86pte_t *)pteptr = pte;
4006 		else
4007 			*(x86pte32_t *)pteptr = (x86pte32_t)pte;
4008 		mmu_tlbflush_entry(addr);
4009 		x86pte_mapout();
4010 	}
4011 #endif
4012 	XPV_ALLOW_MIGRATE();
4013 }
4014 
4015 
4016 
4017 /*
4018  * Hat locking functions
4019  * XXX - these two functions are currently being used by hatstats
4020  * 	they can be removed by using a per-as mutex for hatstats.
4021  */
4022 void
4023 hat_enter(hat_t *hat)
4024 {
4025 	mutex_enter(&hat->hat_mutex);
4026 }
4027 
4028 void
4029 hat_exit(hat_t *hat)
4030 {
4031 	mutex_exit(&hat->hat_mutex);
4032 }
4033 
4034 /*
4035  * HAT part of cpu initialization.
4036  */
4037 void
4038 hat_cpu_online(struct cpu *cpup)
4039 {
4040 	if (cpup != CPU) {
4041 		x86pte_cpu_init(cpup);
4042 		hat_vlp_setup(cpup);
4043 	}
4044 	CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4045 }
4046 
4047 /*
4048  * HAT part of cpu deletion.
4049  * (currently, we only call this after the cpu is safely passivated.)
4050  */
4051 void
4052 hat_cpu_offline(struct cpu *cpup)
4053 {
4054 	ASSERT(cpup != CPU);
4055 
4056 	CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4057 	hat_vlp_teardown(cpup);
4058 	x86pte_cpu_fini(cpup);
4059 }
4060 
4061 /*
4062  * Function called after all CPUs are brought online.
4063  * Used to remove low address boot mappings.
4064  */
4065 void
4066 clear_boot_mappings(uintptr_t low, uintptr_t high)
4067 {
4068 	uintptr_t vaddr = low;
4069 	htable_t *ht = NULL;
4070 	level_t level;
4071 	uint_t entry;
4072 	x86pte_t pte;
4073 
4074 	/*
4075 	 * On 1st CPU we can unload the prom mappings, basically we blow away
4076 	 * all virtual mappings under _userlimit.
4077 	 */
4078 	while (vaddr < high) {
4079 		pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4080 		if (ht == NULL)
4081 			break;
4082 
4083 		level = ht->ht_level;
4084 		entry = htable_va2entry(vaddr, ht);
4085 		ASSERT(level <= mmu.max_page_level);
4086 		ASSERT(PTE_ISPAGE(pte, level));
4087 
4088 		/*
4089 		 * Unload the mapping from the page tables.
4090 		 */
4091 		(void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
4092 		ASSERT(ht->ht_valid_cnt > 0);
4093 		HTABLE_DEC(ht->ht_valid_cnt);
4094 		PGCNT_DEC(ht->ht_hat, ht->ht_level);
4095 
4096 		vaddr += LEVEL_SIZE(ht->ht_level);
4097 	}
4098 	if (ht)
4099 		htable_release(ht);
4100 }
4101 
4102 /*
4103  * Atomically update a new translation for a single page.  If the
4104  * currently installed PTE doesn't match the value we expect to find,
4105  * it's not updated and we return the PTE we found.
4106  *
4107  * If activating nosync or NOWRITE and the page was modified we need to sync
4108  * with the page_t. Also sync with page_t if clearing ref/mod bits.
4109  */
4110 static x86pte_t
4111 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4112 {
4113 	page_t		*pp;
4114 	uint_t		rm = 0;
4115 	x86pte_t	replaced;
4116 
4117 	if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4118 	    PTE_GET(expected, PT_MOD | PT_REF) &&
4119 	    (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4120 	    !PTE_GET(new, PT_MOD | PT_REF))) {
4121 
4122 		ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4123 		pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4124 		ASSERT(pp != NULL);
4125 		if (PTE_GET(expected, PT_MOD))
4126 			rm |= P_MOD;
4127 		if (PTE_GET(expected, PT_REF))
4128 			rm |= P_REF;
4129 		PTE_CLR(new, PT_MOD | PT_REF);
4130 	}
4131 
4132 	replaced = x86pte_update(ht, entry, expected, new);
4133 	if (replaced != expected)
4134 		return (replaced);
4135 
4136 	if (rm) {
4137 		/*
4138 		 * sync to all constituent pages of a large page
4139 		 */
4140 		pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4141 		ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4142 		while (pgcnt-- > 0) {
4143 			/*
4144 			 * hat_page_demote() can't decrease
4145 			 * pszc below this mapping size
4146 			 * since large mapping existed after we
4147 			 * took mlist lock.
4148 			 */
4149 			ASSERT(pp->p_szc >= ht->ht_level);
4150 			hat_page_setattr(pp, rm);
4151 			++pp;
4152 		}
4153 	}
4154 
4155 	return (0);
4156 }
4157 
4158 /* ARGSUSED */
4159 void
4160 hat_join_srd(struct hat *hat, vnode_t *evp)
4161 {
4162 }
4163 
4164 /* ARGSUSED */
4165 hat_region_cookie_t
4166 hat_join_region(struct hat *hat,
4167     caddr_t r_saddr,
4168     size_t r_size,
4169     void *r_obj,
4170     u_offset_t r_objoff,
4171     uchar_t r_perm,
4172     uchar_t r_pgszc,
4173     hat_rgn_cb_func_t r_cb_function,
4174     uint_t flags)
4175 {
4176 	panic("No shared region support on x86");
4177 	return (HAT_INVALID_REGION_COOKIE);
4178 }
4179 
4180 /* ARGSUSED */
4181 void
4182 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4183 {
4184 	panic("No shared region support on x86");
4185 }
4186 
4187 /* ARGSUSED */
4188 void
4189 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4190 {
4191 	panic("No shared region support on x86");
4192 }
4193 
4194 
4195 /*
4196  * Kernel Physical Mapping (kpm) facility
4197  *
4198  * Most of the routines needed to support segkpm are almost no-ops on the
4199  * x86 platform.  We map in the entire segment when it is created and leave
4200  * it mapped in, so there is no additional work required to set up and tear
4201  * down individual mappings.  All of these routines were created to support
4202  * SPARC platforms that have to avoid aliasing in their virtually indexed
4203  * caches.
4204  *
4205  * Most of the routines have sanity checks in them (e.g. verifying that the
4206  * passed-in page is locked).  We don't actually care about most of these
4207  * checks on x86, but we leave them in place to identify problems in the
4208  * upper levels.
4209  */
4210 
4211 /*
4212  * Map in a locked page and return the vaddr.
4213  */
4214 /*ARGSUSED*/
4215 caddr_t
4216 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4217 {
4218 	caddr_t		vaddr;
4219 
4220 #ifdef DEBUG
4221 	if (kpm_enable == 0) {
4222 		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4223 		return ((caddr_t)NULL);
4224 	}
4225 
4226 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4227 		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4228 		return ((caddr_t)NULL);
4229 	}
4230 #endif
4231 
4232 	vaddr = hat_kpm_page2va(pp, 1);
4233 
4234 	return (vaddr);
4235 }
4236 
4237 /*
4238  * Mapout a locked page.
4239  */
4240 /*ARGSUSED*/
4241 void
4242 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4243 {
4244 #ifdef DEBUG
4245 	if (kpm_enable == 0) {
4246 		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4247 		return;
4248 	}
4249 
4250 	if (IS_KPM_ADDR(vaddr) == 0) {
4251 		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4252 		return;
4253 	}
4254 
4255 	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4256 		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4257 		return;
4258 	}
4259 #endif
4260 }
4261 
4262 /*
4263  * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4264  * memory addresses that are not described by a page_t.  It can
4265  * also be used for normal pages that are not locked, but beware
4266  * this is dangerous - no locking is performed, so the identity of
4267  * the page could change.  hat_kpm_mapin_pfn is not supported when
4268  * vac_colors > 1, because the chosen va depends on the page identity,
4269  * which could change.
4270  * The caller must only pass pfn's for valid physical addresses; violation
4271  * of this rule will cause panic.
4272  */
4273 caddr_t
4274 hat_kpm_mapin_pfn(pfn_t pfn)
4275 {
4276 	caddr_t paddr, vaddr;
4277 
4278 	if (kpm_enable == 0)
4279 		return ((caddr_t)NULL);
4280 
4281 	paddr = (caddr_t)ptob(pfn);
4282 	vaddr = (uintptr_t)kpm_vbase + paddr;
4283 
4284 	return ((caddr_t)vaddr);
4285 }
4286 
4287 /*ARGSUSED*/
4288 void
4289 hat_kpm_mapout_pfn(pfn_t pfn)
4290 {
4291 	/* empty */
4292 }
4293 
4294 /*
4295  * Return the kpm virtual address for a specific pfn
4296  */
4297 caddr_t
4298 hat_kpm_pfn2va(pfn_t pfn)
4299 {
4300 	uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4301 
4302 	ASSERT(!pfn_is_foreign(pfn));
4303 	return ((caddr_t)vaddr);
4304 }
4305 
4306 /*
4307  * Return the kpm virtual address for the page at pp.
4308  */
4309 /*ARGSUSED*/
4310 caddr_t
4311 hat_kpm_page2va(struct page *pp, int checkswap)
4312 {
4313 	return (hat_kpm_pfn2va(pp->p_pagenum));
4314 }
4315 
4316 /*
4317  * Return the page frame number for the kpm virtual address vaddr.
4318  */
4319 pfn_t
4320 hat_kpm_va2pfn(caddr_t vaddr)
4321 {
4322 	pfn_t		pfn;
4323 
4324 	ASSERT(IS_KPM_ADDR(vaddr));
4325 
4326 	pfn = (pfn_t)btop(vaddr - kpm_vbase);
4327 
4328 	return (pfn);
4329 }
4330 
4331 
4332 /*
4333  * Return the page for the kpm virtual address vaddr.
4334  */
4335 page_t *
4336 hat_kpm_vaddr2page(caddr_t vaddr)
4337 {
4338 	pfn_t		pfn;
4339 
4340 	ASSERT(IS_KPM_ADDR(vaddr));
4341 
4342 	pfn = hat_kpm_va2pfn(vaddr);
4343 
4344 	return (page_numtopp_nolock(pfn));
4345 }
4346 
4347 /*
4348  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4349  * KPM page.  This should never happen on x86
4350  */
4351 int
4352 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4353 {
4354 	panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
4355 	    (void *)hat, (void *)vaddr);
4356 
4357 	return (0);
4358 }
4359 
4360 /*ARGSUSED*/
4361 void
4362 hat_kpm_mseghash_clear(int nentries)
4363 {}
4364 
4365 /*ARGSUSED*/
4366 void
4367 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4368 {}
4369 
4370 #ifndef	__xpv
4371 void
4372 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4373 	offset_t kpm_pages_off)
4374 {
4375 	_NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4376 	pfn_t base, end;
4377 
4378 	/*
4379 	 * kphysm_add_memory_dynamic() does not set nkpmpgs
4380 	 * when page_t memory is externally allocated.  That
4381 	 * code must properly calculate nkpmpgs in all cases
4382 	 * if nkpmpgs needs to be used at some point.
4383 	 */
4384 
4385 	/*
4386 	 * The meta (page_t) pages for dynamically added memory are allocated
4387 	 * either from the incoming memory itself or from existing memory.
4388 	 * In the former case the base of the incoming pages will be different
4389 	 * than the base of the dynamic segment so call memseg_get_start() to
4390 	 * get the actual base of the incoming memory for each case.
4391 	 */
4392 
4393 	base = memseg_get_start(msp);
4394 	end = msp->pages_end;
4395 
4396 	hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4397 	    mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4398 	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4399 }
4400 
4401 void
4402 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4403 {
4404 	_NOTE(ARGUNUSED(msp));
4405 }
4406 
4407 void
4408 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4409 {
4410 	_NOTE(ARGUNUSED(msp));
4411 }
4412 
4413 /*
4414  * Return end of metadata for an already setup memseg.
4415  * X86 platforms don't need per-page meta data to support kpm.
4416  */
4417 caddr_t
4418 hat_kpm_mseg_reuse(struct memseg *msp)
4419 {
4420 	return ((caddr_t)msp->epages);
4421 }
4422 
4423 void
4424 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4425 {
4426 	_NOTE(ARGUNUSED(msp, mspp));
4427 	ASSERT(0);
4428 }
4429 
4430 void
4431 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4432 	struct memseg *lo, struct memseg *mid, struct memseg *hi)
4433 {
4434 	_NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4435 	ASSERT(0);
4436 }
4437 
4438 /*
4439  * Walk the memsegs chain, applying func to each memseg span.
4440  */
4441 void
4442 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4443 {
4444 	pfn_t	pbase, pend;
4445 	void	*base;
4446 	size_t	size;
4447 	struct memseg *msp;
4448 
4449 	for (msp = memsegs; msp; msp = msp->next) {
4450 		pbase = msp->pages_base;
4451 		pend = msp->pages_end;
4452 		base = ptob(pbase) + kpm_vbase;
4453 		size = ptob(pend - pbase);
4454 		func(arg, base, size);
4455 	}
4456 }
4457 
4458 #else	/* __xpv */
4459 
4460 /*
4461  * There are specific Hypervisor calls to establish and remove mappings
4462  * to grant table references and the privcmd driver. We have to ensure
4463  * that a page table actually exists.
4464  */
4465 void
4466 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4467 {
4468 	maddr_t base_ma;
4469 	htable_t *ht;
4470 	uint_t entry;
4471 
4472 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4473 	XPV_DISALLOW_MIGRATE();
4474 	ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4475 
4476 	/*
4477 	 * if an address for pte_ma is passed in, return the MA of the pte
4478 	 * for this specific address.  This address is only valid as long
4479 	 * as the htable stays locked.
4480 	 */
4481 	if (pte_ma != NULL) {
4482 		entry = htable_va2entry((uintptr_t)addr, ht);
4483 		base_ma = pa_to_ma(ptob(ht->ht_pfn));
4484 		*pte_ma = base_ma + (entry << mmu.pte_size_shift);
4485 	}
4486 	XPV_ALLOW_MIGRATE();
4487 }
4488 
4489 void
4490 hat_release_mapping(hat_t *hat, caddr_t addr)
4491 {
4492 	htable_t *ht;
4493 
4494 	ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4495 	XPV_DISALLOW_MIGRATE();
4496 	ht = htable_lookup(hat, (uintptr_t)addr, 0);
4497 	ASSERT(ht != NULL);
4498 	ASSERT(ht->ht_busy >= 2);
4499 	htable_release(ht);
4500 	htable_release(ht);
4501 	XPV_ALLOW_MIGRATE();
4502 }
4503 #endif	/* __xpv */
4504