/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads. See hat_switch() and reload_pae32() for details.
 *
 *	vlp_page[0..3] - level==2 PTEs for kernel HAT
 *	vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
 *	vlp_page[8..11] - level==2 PTE for user thread on cpu 1
 *	etc...
 */
static x86pte_t *vlp_page;
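
/*
 * Illustrative sketch (not part of the original source): given the layout
 * described above, a CPU's private group of VLP_NUM_PTES (4) level==2 PTEs
 * starts at offset (cpu_id + 1) * 4 within vlp_page. The helper name is
 * hypothetical; reload_pae32() below does the real work.
 */
#if 0	/* example only, not compiled */
static x86pte_t *
example_vlp_slot(cpu_t *cpu)
{
	/* slots 0..3 belong to the kernel HAT, then 4 per CPU */
	return (vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES);
}
#endif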

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs. To implement this the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 * hat created by hat_alloc(). This means that kernelbase must be:
 *
 *	  4Meg aligned for 32 bit kernels
 *	512Gig aligned for x86_64 64 bit kernel
 *
 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 * to each user hat.
 */
typedef struct hat_kernel_range {
	level_t		hkr_level;
	uintptr_t	hkr_start_va;
	uintptr_t	hkr_end_va;	/* zero means to end of memory */
} hat_kernel_range_t;
#define	NUM_KERNEL_RANGE 2
static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
static int num_kernel_ranges;

uint_t use_boot_reserve = 1;	/* cleared after early boot process */
uint_t can_steal_post_boot = 0;	/* set late in boot to enable stealing */

/* export 1g page size to user applications if set */
int	enable_1gpg = 1;

#ifdef DEBUG
uint_t	map1gcnt;
#endif


/*
 * A cpuset for all cpus. This is used for kernel address cross calls, since
 * the kernel addresses apply to all cpus.
 */
cpuset_t khat_cpuset;

/*
 * management stuff for hat structures
 */
kmutex_t	hat_list_lock;
kcondvar_t	hat_list_cv;
kmem_cache_t	*hat_cache;
kmem_cache_t	*hat_hash_cache;
kmem_cache_t	*vlp_hash_cache;

/*
 * Simple statistics
 */
struct hatstats hatstat;

/*
 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 * correctly. For such hypervisors we must set PT_USER for kernel
 * entries ourselves (normally the emulation would set PT_USER for
 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
 * thus set appropriately. Note that dboot/kbm is OK, as only the full
 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 * incorrect.
 */
int pt_kern;

/*
 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 */
extern void atomic_orb(uchar_t *addr, uchar_t val);
extern void atomic_andb(uchar_t *addr, uchar_t val);

#define	PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
#define	PP_ISMOD(pp)		PP_GETRM(pp, P_MOD)
#define	PP_ISREF(pp)		PP_GETRM(pp, P_REF)
#define	PP_ISRO(pp)		PP_GETRM(pp, P_RO)

#define	PP_SETRM(pp, rm)	atomic_orb(&(pp->p_nrm), rm)
#define	PP_SETMOD(pp)		PP_SETRM(pp, P_MOD)
#define	PP_SETREF(pp)		PP_SETRM(pp, P_REF)
#define	PP_SETRO(pp)		PP_SETRM(pp, P_RO)

#define	PP_CLRRM(pp, rm)	atomic_andb(&(pp->p_nrm), ~(rm))
#define	PP_CLRMOD(pp)		PP_CLRRM(pp, P_MOD)
#define	PP_CLRREF(pp)		PP_CLRRM(pp, P_REF)
#define	PP_CLRRO(pp)		PP_CLRRM(pp, P_RO)
#define	PP_CLRALL(pp)		PP_CLRRM(pp, P_MOD | P_REF | P_RO)

/*
 * kmem cache constructor for struct hat
 */
/*ARGSUSED*/
static int
hati_constructor(void *buf, void *handle, int kmflags)
{
	hat_t	*hat = buf;

	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	bzero(hat->hat_pages_mapped,
	    sizeof (pgcnt_t) * (mmu.max_page_level + 1));
	hat->hat_ism_pgcnt = 0;
	hat->hat_stats = 0;
	hat->hat_flags = 0;
	CPUSET_ZERO(hat->hat_cpus);
	hat->hat_htable = NULL;
	hat->hat_ht_hash = NULL;
	return (0);
}
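
/*
 * Illustrative sketch (not part of the original source): typical use of the
 * PP_* accessors above when manipulating the ref/mod/ro state of a page_t.
 */
#if 0	/* example only, not compiled */
static void
example_mark_page_dirty(page_t *pp)
{
	PP_SETRM(pp, P_REF | P_MOD);	/* atomic or into pp->p_nrm */
	ASSERT(PP_ISMOD(pp) && PP_ISREF(pp));
	PP_CLRALL(pp);			/* clear ref, mod and ro together */
}
#endif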

/*
 * Allocate a hat structure for as. We also create the top level
 * htable and initialize it to contain the kernel hat entries.
 */
hat_t *
hat_alloc(struct as *as)
{
	hat_t			*hat;
	htable_t		*ht;	/* top level htable */
	uint_t			use_vlp;
	uint_t			r;
	hat_kernel_range_t	*rp;
	uintptr_t		va;
	uintptr_t		eva;
	uint_t			start;
	uint_t			cnt;
	htable_t		*src;

	/*
	 * Once we start creating user process HATs we can enable
	 * the htable_steal() code.
	 */
	if (can_steal_post_boot == 0)
		can_steal_post_boot = 1;

	ASSERT(AS_WRITE_HELD(as, &as->a_lock));
	hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
	hat->hat_as = as;
	mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	ASSERT(hat->hat_flags == 0);

#if defined(__xpv)
	/*
	 * No VLP stuff on the hypervisor due to the 64-bit split top level
	 * page tables.  On 32-bit it's not needed as the hypervisor takes
	 * care of copying the top level PTEs to a below 4Gig page.
	 */
	use_vlp = 0;
#else	/* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
#if defined(__amd64)
	use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
#elif defined(__i386)
	use_vlp = mmu.pae_hat;
#endif
#endif	/* __xpv */
	if (use_vlp) {
		hat->hat_flags = HAT_VLP;
		bzero(hat->hat_vlp_ptes, VLP_SIZE);
	}

	/*
	 * Allocate the htable hash
	 */
	if ((hat->hat_flags & HAT_VLP)) {
		hat->hat_num_hash = mmu.vlp_hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
	} else {
		hat->hat_num_hash = mmu.hash_cnt;
		hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
	}
	bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));

	/*
	 * Initialize Kernel HAT entries at the top of the top level page
	 * tables for the new hat.
	 */
	hat->hat_htable = NULL;
	hat->hat_ht_cached = NULL;
	XPV_DISALLOW_MIGRATE();
	ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
	hat->hat_htable = ht;

#if defined(__amd64)
	if (hat->hat_flags & HAT_VLP)
		goto init_done;
#endif

	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += cnt * LEVEL_SIZE(rp->hkr_level)) {

			if (rp->hkr_level == TOP_LEVEL(hat))
				ht = hat->hat_htable;
			else
				ht = htable_create(hat, va, rp->hkr_level,
				    NULL);

			start = htable_va2entry(va, ht);
			cnt = HTABLE_NUM_PTES(ht) - start;
			eva = va +
			    ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
			if (rp->hkr_end_va != 0 &&
			    (eva > rp->hkr_end_va || eva == 0))
				cnt = htable_va2entry(rp->hkr_end_va, ht) -
				    start;

#if defined(__i386) && !defined(__xpv)
			if (ht->ht_flags & HTABLE_VLP) {
				bcopy(&vlp_page[start],
				    &hat->hat_vlp_ptes[start],
				    cnt * sizeof (x86pte_t));
				continue;
			}
#endif
			src = htable_lookup(kas.a_hat, va, rp->hkr_level);
			ASSERT(src != NULL);
			x86pte_copy(src, ht, start, cnt);
			htable_release(src);
		}
	}

init_done:

#if defined(__xpv)
	/*
	 * Pin top level page tables after initializing them
	 */
	xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
#if defined(__amd64)
	xen_pin(hat->hat_user_ptable, mmu.max_level);
#endif
#endif
	XPV_ALLOW_MIGRATE();

	/*
	 * Put it at the start of the global list of all hats (used by stealing)
	 *
	 * kas.a_hat is not in the list but is instead used to find the
	 * first and last items in the list.
	 *
	 * - kas.a_hat->hat_next points to the start of the user hats.
	 *   The list ends where hat->hat_next == NULL
	 *
	 * - kas.a_hat->hat_prev points to the last of the user hats.
	 *   The list begins where hat->hat_prev == NULL
	 */
	mutex_enter(&hat_list_lock);
	hat->hat_prev = NULL;
	hat->hat_next = kas.a_hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat;
	else
		kas.a_hat->hat_prev = hat;
	kas.a_hat->hat_next = hat;
	mutex_exit(&hat_list_lock);

	return (hat);
}
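
/*
 * Illustrative sketch (not part of the original source): walking the global
 * hat list anchored at kas.a_hat, as htable_steal() does. hat_list_lock
 * must be held across the traversal.
 */
#if 0	/* example only, not compiled */
static void
example_walk_hat_list(void)
{
	hat_t *h;

	mutex_enter(&hat_list_lock);
	for (h = kas.a_hat->hat_next; h != NULL; h = h->hat_next) {
		/* inspect h, e.g. skip hats that are being torn down */
		if (h->hat_flags & HAT_FREEING)
			continue;
	}
	mutex_exit(&hat_list_lock);
}
#endif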

/*
 * process has finished executing but as has not been cleaned up yet.
 */
/*ARGSUSED*/
void
hat_free_start(hat_t *hat)
{
	ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));

	/*
	 * If the hat is currently a stealing victim, wait for the stealing
	 * to finish.  Once we mark it as HAT_FREEING, htable_steal()
	 * won't look at its pagetables anymore.
	 */
	mutex_enter(&hat_list_lock);
	while (hat->hat_flags & HAT_VICTIM)
		cv_wait(&hat_list_cv, &hat_list_lock);
	hat->hat_flags |= HAT_FREEING;
	mutex_exit(&hat_list_lock);
}

/*
 * An address space is being destroyed, so we destroy the associated hat.
 */
void
hat_free_end(hat_t *hat)
{
	kmem_cache_t *cache;

	ASSERT(hat->hat_flags & HAT_FREEING);

	/*
	 * must not be running on the given hat
	 */
	ASSERT(CPU->cpu_current_hat != hat);

	/*
	 * Remove it from the list of HATs
	 */
	mutex_enter(&hat_list_lock);
	if (hat->hat_prev)
		hat->hat_prev->hat_next = hat->hat_next;
	else
		kas.a_hat->hat_next = hat->hat_next;
	if (hat->hat_next)
		hat->hat_next->hat_prev = hat->hat_prev;
	else
		kas.a_hat->hat_prev = hat->hat_prev;
	mutex_exit(&hat_list_lock);
	hat->hat_next = hat->hat_prev = NULL;

#if defined(__xpv)
	/*
	 * On the hypervisor, unpin top level page table(s)
	 */
	xen_unpin(hat->hat_htable->ht_pfn);
#if defined(__amd64)
	xen_unpin(hat->hat_user_ptable);
#endif
#endif

	/*
	 * Make a pass through the htables freeing them all up.
	 */
	htable_purge_hat(hat);

	/*
	 * Decide which kmem cache the hash table came from, then free it.
	 */
	if (hat->hat_flags & HAT_VLP)
		cache = vlp_hash_cache;
	else
		cache = hat_hash_cache;
	kmem_cache_free(cache, hat->hat_ht_hash);
	hat->hat_ht_hash = NULL;

	hat->hat_flags = 0;
	kmem_cache_free(hat_cache, hat);
}

/*
 * round kernelbase down to a supported value to use for _userlimit
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HAT's running PAE.
 */
uintptr_t
hat_kernelbase(uintptr_t va)
{
#if defined(__i386)
	va &= LEVEL_MASK(1);
#endif
	if (IN_VA_HOLE(va))
		panic("_userlimit %p will fall in VA hole\n", (void *)va);
	return (va);
}
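
/*
 * Illustrative sketch (not part of the original source): how the VA hole
 * checked by IN_VA_HOLE() above is derived in mmu_init() below. With 48
 * implemented VA bits, non-canonical addresses span
 * [0x0000800000000000, 0xffff7fffffffffff].
 */
#if 0	/* example only, not compiled */
static void
example_va_hole_bounds(uint_t va_bits)	/* e.g. va_bits == 48 */
{
	uintptr_t hole_lo = 1UL << (va_bits - 1);   /* 0x0000800000000000 */
	uintptr_t hole_hi = 0UL - hole_lo - 1;	    /* 0xffff7fffffffffff */

	ASSERT(hole_lo < hole_hi);
}
#endif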

/*
 * Initialize hat data structures based on processor MMU information.
 */
void
mmu_init(void)
{
	uint_t max_htables;
	uint_t pa_bits;
	uint_t va_bits;
	int i;

	/*
	 * If CPU enabled the page table global bit, use it for the kernel
	 * This is bit 7 in CR4 (PGE - Page Global Enable).
	 */
	if ((x86_feature & X86_PGE) != 0 && (getcr4() & CR4_PGE) != 0)
		mmu.pt_global = PT_GLOBAL;

	/*
	 * Detect NX and PAE usage.
	 */
	mmu.pae_hat = kbm_pae_support;
	if (kbm_nx_support)
		mmu.pt_nx = PT_NX;
	else
		mmu.pt_nx = 0;

	/*
	 * Use CPU info to set various MMU parameters
	 */
	cpuid_get_addrsize(CPU, &pa_bits, &va_bits);

	if (va_bits < sizeof (void *) * NBBY) {
		mmu.hole_start = (1ul << (va_bits - 1));
		mmu.hole_end = 0ul - mmu.hole_start - 1;
	} else {
		mmu.hole_end = 0;
		mmu.hole_start = mmu.hole_end - 1;
	}
#if defined(OPTERON_ERRATUM_121)
	/*
	 * If erratum 121 has already been detected at this time, hole_start
	 * contains the value to be subtracted from mmu.hole_start.
	 */
	ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
	hole_start = mmu.hole_start - hole_start;
#else
	hole_start = mmu.hole_start;
#endif
	hole_end = mmu.hole_end;

	mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
	if (mmu.pae_hat == 0 && pa_bits > 32)
		mmu.highest_pfn = PFN_4G - 1;

	if (mmu.pae_hat) {
		mmu.pte_size = 8;	/* 8 byte PTEs */
		mmu.pte_size_shift = 3;
	} else {
		mmu.pte_size = 4;	/* 4 byte PTEs */
		mmu.pte_size_shift = 2;
	}

	if (mmu.pae_hat && (x86_feature & X86_PAE) == 0)
		panic("Processor does not support PAE");

	if ((x86_feature & X86_CX8) == 0)
		panic("Processor does not support cmpxchg8b instruction");

	/*
	 * Initialize parameters based on the 64 or 32 bit kernels and
	 * for the 32 bit kernel decide if we should use PAE.
	 */
	if (kbm_largepage_support) {
		if (x86_feature & X86_1GPG) {
			mmu.max_page_level = 2;
			mmu.umax_page_level = (enable_1gpg) ? 2 : 1;
		} else {
			mmu.max_page_level = 1;
			mmu.umax_page_level = 1;
		}
	} else {
		mmu.max_page_level = 0;
		mmu.umax_page_level = 0;
	}
	mmu_page_sizes = mmu.max_page_level + 1;
	mmu_exported_page_sizes = mmu.umax_page_level + 1;

	/* restrict legacy applications from using pagesizes 1g and above */
	mmu_legacy_page_sizes =
	    (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;


#if defined(__amd64)

	mmu.num_level = 4;
	mmu.max_level = 3;
	mmu.ptes_per_table = 512;
	mmu.top_level_count = 512;

	mmu.level_shift[0] = 12;
	mmu.level_shift[1] = 21;
	mmu.level_shift[2] = 30;
	mmu.level_shift[3] = 39;

#elif defined(__i386)

	if (mmu.pae_hat) {
		mmu.num_level = 3;
		mmu.max_level = 2;
		mmu.ptes_per_table = 512;
		mmu.top_level_count = 4;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 21;
		mmu.level_shift[2] = 30;

	} else {
		mmu.num_level = 2;
		mmu.max_level = 1;
		mmu.ptes_per_table = 1024;
		mmu.top_level_count = 1024;

		mmu.level_shift[0] = 12;
		mmu.level_shift[1] = 22;
	}

#endif	/* __i386 */

	for (i = 0; i < mmu.num_level; ++i) {
		mmu.level_size[i] = 1UL << mmu.level_shift[i];
		mmu.level_offset[i] = mmu.level_size[i] - 1;
		mmu.level_mask[i] = ~mmu.level_offset[i];
	}

	for (i = 0; i <= mmu.max_page_level; ++i) {
		mmu.pte_bits[i] = PT_VALID | pt_kern;
		if (i > 0)
			mmu.pte_bits[i] |= PT_PAGESIZE;
	}

	/*
	 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
	 */
	for (i = 1; i < mmu.num_level; ++i)
		mmu.ptp_bits[i] = PT_PTPBITS;

#if defined(__i386)
	mmu.ptp_bits[2] = PT_VALID;
#endif

	/*
	 * Compute how many hash table entries to have per process for htables.
	 * We start with 1 page's worth of entries.
	 *
	 * If physical memory is small, reduce the amount needed to cover it.
	 */
	max_htables = physmax / mmu.ptes_per_table;
	mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
	while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
		mmu.hash_cnt >>= 1;
	mmu.vlp_hash_cnt = mmu.hash_cnt;

#if defined(__amd64)
	/*
	 * If running in 64 bits and physical memory is large,
	 * increase the size of the cache to cover all of memory for
	 * a 64 bit process.
	 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}
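
/*
 * Illustrative sketch (not part of the original source): the page sizes
 * implied by the amd64 level_shift values set above are
 * 4KB (1 << 12), 2MB (1 << 21), 1GB (1 << 30) and 512GB per top level
 * entry (1 << 39). The hypothetical helper below shows how a VA selects
 * its PTE within a pagetable at a given level.
 */
#if 0	/* example only, not compiled */
static uint_t
example_pte_index(uintptr_t va, level_t l)
{
	/* index of va's PTE within a level l pagetable */
	return ((va >> mmu.level_shift[l]) & (mmu.ptes_per_table - 1));
}
#endif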

/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL,
		    NULL, NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics.  Allocating on the fly would mean us
	 * running the risk of suffering recursive mutex enters or
	 * deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}
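
/*
 * Illustrative sketch (not part of the original source): the default htable
 * hash sizing computed in mmu_init() above. One 4K page of 8-byte pointers
 * gives 512 buckets; HASH_MAX_LENGTH (4) then bounds the expected chain
 * length when the hash is grown for large memory.
 */
#if 0	/* example only, not compiled */
static uint_t
example_default_hash_cnt(void)
{
	return (MMU_PAGESIZE / sizeof (htable_t *));	/* 4096/8 == 512 */
}
#endif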

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
	 */
	hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
	hci->hci_vlp_pfn =
	    hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
	ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
	bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);

	pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
	ASSERT(pfn != PFN_INVALID);
	hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
#endif /* __amd64 && !__xpv */
}

/*ARGSUSED*/
static void
hat_vlp_teardown(cpu_t *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci;

	if ((hci = cpu->cpu_hat_info) == NULL)
		return;
	if (hci->hci_vlp_l2ptes)
		kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
	if (hci->hci_vlp_l3ptes)
		kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
#endif
}

#define	NEXT_HKR(r, l, s, e) {			\
	kernel_ranges[r].hkr_level = l;		\
	kernel_ranges[r].hkr_start_va = s;	\
	kernel_ranges[r].hkr_end_va = e;	\
	++r;					\
}

/*
 * Finish filling in the kernel hat.
 * Pre-fill all top level kernel page table entries for the kernel's
 * part of the address range. From this point on we can't use any new
 * kernel large pages if they need PTE's at max_level.
 *
 * create the kmap mappings.
 */
void
hat_init_finish(void)
{
	size_t		size;
	uint_t		r = 0;
	uintptr_t	va;
	hat_kernel_range_t *rp;


	/*
	 * We are now effectively running on the kernel hat.
	 * Clearing use_boot_reserve shuts off using the pre-allocated boot
	 * reserve for all HAT allocations. From here on, the reserves are
	 * only used when avoiding recursion in kmem_alloc().
	 */
	use_boot_reserve = 0;
	htable_adjust_reserve();

	/*
	 * User HATs are initialized with copies of all kernel mappings in
	 * higher level page tables. Ensure that those entries exist.
	 */
#if defined(__amd64)

	NEXT_HKR(r, 3, kernelbase, 0);
#if defined(__xpv)
	NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
#endif

#elif defined(__i386)

#if !defined(__xpv)
	if (mmu.pae_hat) {
		va = kernelbase;
		if ((va & LEVEL_MASK(2)) != va) {
			va = P2ROUNDUP(va, LEVEL_SIZE(2));
			NEXT_HKR(r, 1, kernelbase, va);
		}
		if (va != 0)
			NEXT_HKR(r, 2, va, 0);
	} else
#endif	/* __xpv */
		NEXT_HKR(r, 1, kernelbase, 0);

#endif	/* __i386 */

	num_kernel_ranges = r;

	/*
	 * Create all the kernel pagetables that will have entries
	 * shared to user HATs.
	 */
	for (r = 0; r < num_kernel_ranges; ++r) {
		rp = &kernel_ranges[r];
		for (va = rp->hkr_start_va; va != rp->hkr_end_va;
		    va += LEVEL_SIZE(rp->hkr_level)) {
			htable_t *ht;

			if (IN_HYPERVISOR_VA(va))
				continue;

			/* can/must skip if a page mapping already exists */
			if (rp->hkr_level <= mmu.max_page_level &&
			    (ht = htable_getpage(kas.a_hat, va, NULL)) !=
			    NULL) {
				htable_release(ht);
				continue;
			}

			(void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
			    NULL);
		}
	}

	/*
	 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
	 * page holding the top level pagetable. We use the remainder for
	 * the "per CPU" page tables for VLP processes.
	 * Map the top level kernel pagetable into the kernel to make
	 * it easy to use bcopy to access these tables.
	 */
	if (mmu.pae_hat) {
		vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
		hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
		    kas.a_hat->hat_htable->ht_pfn,
#if !defined(__xpv)
		    PROT_WRITE |
#endif
		    PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
		    HAT_LOAD | HAT_LOAD_NOCONSIST);
	}
	hat_vlp_setup(CPU);

	/*
	 * Create kmap (cached mappings of kernel PTEs)
	 * for 32 bit we map from segmap_start .. ekernelheap
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
	 */
#if defined(__i386)
	size = (uintptr_t)ekernelheap - segmap_start;
#elif defined(__amd64)
	size = segmapsize;
#endif
	hat_kmap_init((uintptr_t)segmap_start, size);
}

/*
 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 * are 32 bit, so for safety we must use cas64() to install these.
 */
#ifdef __i386
static void
reload_pae32(hat_t *hat, cpu_t *cpu)
{
	x86pte_t *src;
	x86pte_t *dest;
	x86pte_t pte;
	int i;

	/*
	 * Load the 4 entries of the level 2 page table into this
	 * cpu's range of the vlp_page and point cr3 at them.
	 */
	ASSERT(mmu.pae_hat);
	src = hat->hat_vlp_ptes;
	dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
	for (i = 0; i < VLP_NUM_PTES; ++i) {
		for (;;) {
			pte = dest[i];
			if (pte == src[i])
				break;
			if (cas64(dest + i, pte, src[i]) != src[i])
				break;
		}
	}
}
#endif

/*
 * Switch to a new active hat, maintaining bit masks to track active CPUs.
 *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 * remains a 32-bit value.
 */
void
hat_switch(hat_t *hat)
{
	uint64_t	newcr3;
	cpu_t		*cpu = CPU;
	hat_t		*old = cpu->cpu_current_hat;

	/*
	 * set up this information first, so we don't miss any cross calls
	 */
	if (old != NULL) {
		if (old == hat)
			return;
		if (old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
	}

	/*
	 * Add this CPU to the active set for this HAT.
	 */
	if (hat != kas.a_hat) {
		CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
	}
	cpu->cpu_current_hat = hat;

	/*
	 * now go ahead and load cr3
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
#elif defined(__i386)
		reload_pae32(hat, cpu);
		newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
		    (cpu->cpu_id + 1) * VLP_SIZE;
#endif
	} else {
		newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
	}
#ifdef __xpv
	{
		struct mmuext_op t[2];
		uint_t retcnt;
		uint_t opcnt = 1;

		t[0].cmd = MMUEXT_NEW_BASEPTR;
		t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
#if defined(__amd64)
		/*
		 * There's an interesting problem here, as to what to
		 * actually specify when switching to the kernel hat.
		 * For now we'll reuse the kernel hat again.
		 */
		t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
		if (hat == kas.a_hat)
			t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
		else
			t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
		++opcnt;
#endif	/* __amd64 */
		if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmu_update() failed");
		ASSERT(retcnt == opcnt);

	}
#else
	setcr3(newcr3);
#endif
	ASSERT(cpu == CPU);
}

/*
 * Utility to return a valid x86pte_t from protections, pfn, and level number
 */
static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
{
	x86pte_t	pte;
	uint_t		cache_attr = attr & HAT_ORDER_MASK;

	pte = MAKEPTE(pfn, level);

	if (attr & PROT_WRITE)
		PTE_SET(pte, PT_WRITABLE);

	if (attr & PROT_USER)
		PTE_SET(pte, PT_USER);

	if (!(attr & PROT_EXEC))
		PTE_SET(pte, mmu.pt_nx);

	/*
	 * Set the software bits used to track ref/mod sync's and hments.
	 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
	 */
	if (flags & HAT_LOAD_NOCONSIST)
		PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
	else if (attr & HAT_NOSYNC)
		PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);

	/*
	 * Set the caching attributes in the PTE. The combination
	 * of attributes are poorly defined, so we pay attention
	 * to them in the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but is too late to change!
	 */
	if (cache_attr == HAT_STRICTORDER) {
		PTE_SET(pte, PT_NOCACHE);

	/*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
	} else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
		/* nothing to set */;
	} else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
		PTE_SET(pte, PT_NOCACHE);
		if (x86_feature & X86_PAT)
			PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
		else
			PTE_SET(pte, PT_WRITETHRU);
	} else {
		panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
	}

	return (pte);
}
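
/*
 * Illustrative sketch (not part of the original source): the PTE bits
 * hati_mkpte() above produces for a writable, non-executable, fully
 * cacheable kernel page: PT_VALID | PT_WRITABLE plus NX when supported,
 * with no PT_USER and no PT_NOCACHE.
 */
#if 0	/* example only, not compiled */
static x86pte_t
example_kernel_data_pte(pfn_t pfn)
{
	return (hati_mkpte(pfn, PROT_READ | PROT_WRITE | HAT_STORECACHING_OK,
	    0, HAT_LOAD));
}
#endif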

/*
 * Duplicate address translations of the parent to the child.
 * This function really isn't used anymore.
 */
/*ARGSUSED*/
int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
{
	ASSERT((uintptr_t)addr < kernelbase);
	ASSERT(new != kas.a_hat);
	ASSERT(old != kas.a_hat);
	return (0);
}

/*
 * Allocate any hat resources required for a process being swapped in.
 */
/*ARGSUSED*/
void
hat_swapin(hat_t *hat)
{
	/* do nothing - we let everything fault back in */
}

/*
 * Unload all translations associated with an address space of a process
 * that is being swapped out.
 */
void
hat_swapout(hat_t *hat)
{
	uintptr_t	vaddr = (uintptr_t)0;
	uintptr_t	eaddr = _userlimit;
	htable_t	*ht = NULL;
	level_t		l;

	XPV_DISALLOW_MIGRATE();
	/*
	 * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
	 * seg_spt and shared pagetables can't be swapped out.
	 * Take a look at segspt_shmswapout() - it's a big no-op.
	 *
	 * Instead we'll walk through all the address space and unload
	 * any mappings which we are sure are not shared, not locked.
	 */
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
		eaddr = (uintptr_t)hat->hat_as->a_userlimit;

	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		/*
		 * If the page table is shared skip its entire range.
		 */
		l = ht->ht_level;
		if (ht->ht_flags & HTABLE_SHARED_PFN) {
			vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
			htable_release(ht);
			ht = NULL;
			continue;
		}

		/*
		 * If the page table has no locked entries, unload this one.
		 */
		if (ht->ht_lock_cnt == 0)
			hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
			    HAT_UNLOAD_UNMAP);

		/*
		 * If we have a level 0 page table with locked entries,
		 * skip the entire page table, otherwise skip just one entry.
		 */
		if (ht->ht_lock_cnt > 0 && l == 0)
			vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
		else
			vaddr += LEVEL_SIZE(l);
	}
	if (ht)
		htable_release(ht);

	/*
	 * We're in swapout because the system is low on memory, so
	 * go back and flush all the htables off the cached list.
	 */
	htable_purge_hat(hat);
	XPV_ALLOW_MIGRATE();
}

/*
 * returns number of bytes that have valid mappings in hat.
 */
size_t
hat_get_mapped_size(hat_t *hat)
{
	size_t total = 0;
	int l;

	for (l = 0; l <= mmu.max_page_level; l++)
		total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
	total += hat->hat_ism_pgcnt;

	return (total);
}

/*
 * enable/disable collection of stats for hat.
 */
int
hat_stats_enable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, 1);
	return (1);
}

void
hat_stats_disable(hat_t *hat)
{
	atomic_add_32(&hat->hat_stats, -1);
}

/*
 * Utility to sync the ref/mod bits from a page table entry to the page_t
 * We must be holding the mapping list lock when this is called.
 */
static void
hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
{
	uint_t	rm = 0;
	pgcnt_t	pgcnt;

	if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
		return;

	if (PTE_GET(pte, PT_REF))
		rm |= P_REF;

	if (PTE_GET(pte, PT_MOD))
		rm |= P_MOD;

	if (rm == 0)
		return;

	/*
	 * sync to all constituent pages of a large page
	 */
	ASSERT(x86_hm_held(pp));
	pgcnt = page_get_pagecnt(level);
	ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
	for (; pgcnt > 0; --pgcnt) {
		/*
		 * hat_page_demote() can't decrease
		 * pszc below this mapping size
		 * since this large mapping existed after we
		 * took mlist lock.
		 */
		ASSERT(pp->p_szc >= level);
		hat_page_setattr(pp, rm);
		++pp;
	}
}

/*
 * This is the set of PTE bits for PFN, permissions and caching
 * that are allowed to change on a HAT_LOAD_REMAP
 */
#define	PT_REMAP_BITS							\
	(PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |		\
	PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)

#define	REMAPASSERT(EX)	if (!(EX)) panic("hati_pte_map: " #EX)
/*
 * Do the low-level work to get a mapping entered into a HAT's pagetables
 * and in the mapping list of the associated page_t.
 */
static int
hati_pte_map(
	htable_t	*ht,
	uint_t		entry,
	page_t		*pp,
	x86pte_t	pte,
	int		flags,
	void		*pte_ptr)
{
	hat_t		*hat = ht->ht_hat;
	x86pte_t	old_pte;
	level_t		l = ht->ht_level;
	hment_t		*hm;
	uint_t		is_consist;
	int		rv = 0;

	/*
	 * Is this a consistent (ie. need mapping list lock) mapping?
	 */
	is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);

	/*
	 * Track locked mapping count in the htable.  Do this first,
	 * as we track locking even if there already is a mapping present.
	 */
	if ((flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat)
		HTABLE_LOCK_INC(ht);

	/*
	 * Acquire the page's mapping list lock and get an hment to use.
	 * Note that hment_prepare() might return NULL.
	 */
	if (is_consist) {
		x86_hm_enter(pp);
		hm = hment_prepare(ht, entry, pp);
	}

	/*
	 * Set the new pte, retrieving the old one at the same time.
	 */
	old_pte = x86pte_set(ht, entry, pte, pte_ptr);

	/*
	 * did we get a large page / page table collision?
	 */
	if (old_pte == LPAGE_ERROR) {
		rv = -1;
		goto done;
	}

	/*
	 * If the mapping didn't change there is nothing more to do.
	 */
	if (PTE_EQUIV(pte, old_pte))
		goto done;

	/*
	 * Install a new mapping in the page's mapping list
	 */
	if (!PTE_ISVALID(old_pte)) {
		if (is_consist) {
			hment_assign(ht, entry, pp, hm);
			x86_hm_exit(pp);
		} else {
			ASSERT(flags & HAT_LOAD_NOCONSIST);
		}
#if defined(__amd64)
		if (ht->ht_flags & HTABLE_VLP) {
			cpu_t *cpu = CPU;
			x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
			VLP_COPY(hat->hat_vlp_ptes, vlpptep);
		}
#endif
		HTABLE_INC(ht->ht_valid_cnt);
		PGCNT_INC(hat, l);
		return (rv);
	}

	/*
	 * Remap's are more complicated:
	 * - HAT_LOAD_REMAP must be specified if changing the pfn.
	 *   We also require that NOCONSIST be specified.
	 * - Otherwise only permission or caching bits may change.
	 */
	if (!PTE_ISPAGE(old_pte, l))
		panic("non-null/page mapping pte=" FMT_PTE, old_pte);

	if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
		REMAPASSERT(flags & HAT_LOAD_REMAP);
		REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
		REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
		REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
		    pf_is_memory(PTE2PFN(pte, l)));
		REMAPASSERT(!is_consist);
	}

	/*
	 * We only let remaps change certain bits in the PTE.
	 */
	if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
		panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
		    old_pte, pte);

	/*
	 * We don't create any mapping list entries on a remap, so release
	 * any allocated hment after we drop the mapping list lock.
	 */
done:
	if (is_consist) {
		x86_hm_exit(pp);
		if (hm != NULL)
			hment_free(hm);
	}
	return (rv);
}
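
/*
 * Illustrative sketch (not part of the original source): the flag
 * combination the remap checks above require when changing the pfn of an
 * existing valid mapping; the caller and page are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_remap(hat_t *hat, caddr_t va, page_t *new_pp)
{
	/*
	 * The original mapping must itself have been NOCONSIST for the
	 * pfn to be allowed to change.
	 */
	hat_memload(hat, va, new_pp, PROT_READ | PROT_WRITE,
	    HAT_LOAD_REMAP | HAT_LOAD_NOCONSIST);
}
#endif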

/*
 * Internal routine to load a single page table entry. This only fails if
 * we attempt to overwrite a page table link with a large page.
 */
static int
hati_load_common(
	hat_t		*hat,
	uintptr_t	va,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags,
	level_t		level,
	pfn_t		pfn)
{
	htable_t	*ht;
	uint_t		entry;
	x86pte_t	pte;
	int		rv = 0;

	/*
	 * The number 16 is arbitrary and here to catch a recursion problem
	 * early before we blow out the kernel stack.
	 */
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);

	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));

	if (flags & HAT_LOAD_SHARE)
		hat->hat_flags |= HAT_SHARED;

	/*
	 * Find the page table that maps this page if it already exists.
	 */
	ht = htable_lookup(hat, va, level);

	/*
	 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
	 */
	if (pp == NULL)
		flags |= HAT_LOAD_NOCONSIST;

	if (ht == NULL) {
		ht = htable_create(hat, va, level, NULL);
		ASSERT(ht != NULL);
	}
	entry = htable_va2entry(va, ht);

	/*
	 * a bunch of paranoid error checking
	 */
	ASSERT(ht->ht_busy > 0);
	if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
		panic("hati_load_common: bad htable %p, va %p", ht, (void *)va);
	ASSERT(ht->ht_level == level);

	/*
	 * construct the new PTE
	 */
	if (hat == kas.a_hat)
		attr &= ~PROT_USER;
	pte = hati_mkpte(pfn, attr, level, flags);
	if (hat == kas.a_hat && va >= kernelbase)
		PTE_SET(pte, mmu.pt_global);

	/*
	 * establish the mapping
	 */
	rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);

	/*
	 * release the htable and any reserves
	 */
	htable_release(ht);
	--curthread->t_hatdepth;
	return (rv);
}

/*
 * special case of hat_memload to deal with some kernel addrs for performance
 */
static void
hat_kmap_load(
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	x86pte_t	pte;
	pfn_t		pfn = page_pptonum(pp);
	pgcnt_t		pg_off = mmu_btop(va - mmu.kmap_addr);
	htable_t	*ht;
	uint_t		entry;
	void		*pte_ptr;

	/*
	 * construct the requested PTE
	 */
	attr &= ~PROT_USER;
	attr |= HAT_STORECACHING_OK;
	pte = hati_mkpte(pfn, attr, 0, flags);
	PTE_SET(pte, mmu.pt_global);

	/*
	 * Figure out the pte_ptr and htable and use common code to finish up
	 */
	if (mmu.pae_hat)
		pte_ptr = mmu.kmap_ptes + pg_off;
	else
		pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
	ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
	    LEVEL_SHIFT(1)];
	entry = htable_va2entry(va, ht);
	++curthread->t_hatdepth;
	ASSERT(curthread->t_hatdepth < 16);
	(void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
	--curthread->t_hatdepth;
}

/*
 * hat_memload() - load a translation to the given page struct
 *
 * Flags for hat_memload/hat_devload/hat_*attr.
 *
 *	HAT_LOAD	Default flags to load a translation to the page.
 *
 *	HAT_LOAD_LOCK	Lock down mapping resources; hat_map(), hat_memload(),
 *			and hat_devload().
 *
 *	HAT_LOAD_NOCONSIST	Do not add mapping to page_t mapping list.
 *			sets PT_NOCONSIST
 *
 *	HAT_LOAD_SHARE	A flag to hat_memload() to indicate h/w page tables
 *			that map some user pages (not kas) are shared by more
 *			than one process (eg. ISM).
 *
 *	HAT_LOAD_REMAP	Reload a valid pte with a different page frame.
 *
 *	HAT_NO_KALLOC	Do not kmem_alloc while creating the mapping; at this
 *			point, it's setting up mapping to allocate internal
 *			hat layer data structures.  This flag forces hat layer
 *			to tap its reserves in order to prevent infinite
 *			recursion.
 *
 * The following is a protection attribute (like PROT_READ, etc.)
 *
 *	HAT_NOSYNC	set PT_NOSYNC - this mapping's ref/mod bits
 *			are never cleared.
 *
 * Installing new valid PTE's and creation of the mapping list
 * entry are controlled under the same lock. It's derived from the
 * page_t being mapped.
 */
static uint_t supported_memload_flags =
	HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
	HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;

void
hat_memload(
	hat_t		*hat,
	caddr_t		addr,
	page_t		*pp,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	level_t		level = 0;
	pfn_t		pfn = page_pptonum(pp);

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va < _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	ASSERT(!IN_VA_HOLE(va));
	ASSERT(!PP_ISFREE(pp));

	/*
	 * kernel address special case for performance.
	 */
	if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
		ASSERT(hat == kas.a_hat);
		hat_kmap_load(addr, pp, attr, flags);
		XPV_ALLOW_MIGRATE();
		return;
	}

	/*
	 * This is used for memory with normal caching enabled, so
	 * always set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;
	if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
		panic("unexpected hati_load_common() failure");
	XPV_ALLOW_MIGRATE();
}
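
/*
 * Illustrative sketch (not part of the original source): a typical caller
 * of hat_memload() mapping an allocated, locked page into an address
 * space; as, va and pp are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_memload(struct as *as, caddr_t va, page_t *pp)
{
	ASSERT(AS_LOCK_HELD(as, &as->a_lock));

	/* map one 4K page, readable and writable, and lock the mapping */
	hat_memload(as->a_hat, va, pp, PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK);
}
#endif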

/* ARGSUSED */
void
hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
    uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
{
	hat_memload(hat, addr, pp, attr, flags);
}

/*
 * Load the given array of page structs using large pages when possible
 */
void
hat_memload_array(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	page_t		**pages,
	uint_t		attr,
	uint_t		flags)
{
	uintptr_t	va = (uintptr_t)addr;
	uintptr_t	eaddr = va + len;
	level_t		level;
	size_t		pgsize;
	pgcnt_t		pgindx = 0;
	pfn_t		pfn;
	pgcnt_t		i;

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || va + len <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_memload_flags) == flags);

	/*
	 * memload is used for memory with full caching enabled, so
	 * set HAT_STORECACHING_OK.
	 */
	attr |= HAT_STORECACHING_OK;

	/*
	 * handle all pages using largest possible pagesize
	 */
	while (va < eaddr) {
		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		pfn = page_pptonum(pages[pgindx]);
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;

			if (!IS_P2ALIGNED(va, pgsize) ||
			    (eaddr - va) < pgsize ||
			    !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
				continue;

			/*
			 * To use a large mapping of this size, all the
			 * pages we are passed must be sequential subpages
			 * of the large page.
			 * hat_page_demote() can't change p_szc because
			 * all pages are locked.
			 */
			if (pages[pgindx]->p_szc >= level) {
				for (i = 0; i < mmu_btop(pgsize); ++i) {
					if (pfn + i !=
					    page_pptonum(pages[pgindx + i]))
						break;
					ASSERT(pages[pgindx + i]->p_szc >=
					    level);
					ASSERT(pages[pgindx] + i ==
					    pages[pgindx + i]);
				}
				if (i == mmu_btop(pgsize)) {
#ifdef DEBUG
					if (level == 2)
						map1gcnt++;
#endif
					break;
				}
			}
		}

		/*
		 * Load this page mapping. If the load fails, try a smaller
		 * pagesize.
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pages[pgindx], attr,
		    flags, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pgindx += mmu_btop(pgsize);
	}
	XPV_ALLOW_MIGRATE();
}
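
/*
 * Illustrative sketch (not part of the original source): the alignment
 * tests used above before choosing a 2MB (level 1) mapping. The virtual
 * address, physical address and remaining length must all be 2MB
 * aligned/sized.
 */
#if 0	/* example only, not compiled */
static int
example_can_use_2mb(uintptr_t va, uint64_t pa, size_t remaining)
{
	size_t pgsize = LEVEL_SIZE(1);	/* 2MB on amd64 and 32 bit PAE */

	return (IS_P2ALIGNED(va, pgsize) &&
	    IS_P2ALIGNED(pa, pgsize) &&
	    remaining >= pgsize);
}
#endif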

/* ARGSUSED */
void
hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
    struct page **pps, uint_t attr, uint_t flags,
    hat_region_cookie_t rcookie)
{
	hat_memload_array(hat, addr, len, pps, attr, flags);
}

/*
 * void hat_devload(hat, addr, len, pf, attr, flags)
 *	load/lock the given page frame number
 *
 * Advisory ordering attributes. Apply only to device mappings.
 *
 * HAT_STRICTORDER: the CPU must issue the references in order, as the
 *	programmer specified.  This is the default.
 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
 *	of reordering; store or load with store or load).
 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
 *	to consecutive locations (for example, turn two consecutive byte
 *	stores into one halfword store), and it may batch individual loads
 *	(for example, turn two consecutive byte loads into one halfword load).
 *	This also implies re-ordering.
 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
 *	until another store occurs.  The default is to fetch new data
 *	on every load.  This also implies merging.
 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
 *	the device (perhaps with other data) at a later time.  The default is
 *	to push the data right away.  This also implies load caching.
 *
 * Equivalent of hat_memload(), but can be used for device memory where
 * there are no page_t's and we support additional flags (write merging, etc).
 * Note that we can have large page mappings with this interface.
 */
int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
	HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
	HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;

void
hat_devload(
	hat_t		*hat,
	caddr_t		addr,
	size_t		len,
	pfn_t		pfn,
	uint_t		attr,
	int		flags)
{
	uintptr_t	va = ALIGN2PAGE(addr);
	uintptr_t	eva = va + len;
	level_t		level;
	size_t		pgsize;
	page_t		*pp;
	int		f;	/* per PTE copy of flags  - maybe modified */
	uint_t		a;	/* per PTE copy of attr */

	XPV_DISALLOW_MIGRATE();
	ASSERT(IS_PAGEALIGNED(va));
	ASSERT(hat == kas.a_hat || eva <= _userlimit);
	ASSERT(hat == kas.a_hat ||
	    AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	ASSERT((flags & supported_devload_flags) == flags);

	/*
	 * handle all pages
	 */
	while (va < eva) {

		/*
		 * decide what level mapping to use (ie. pagesize)
		 */
		for (level = mmu.max_page_level; ; --level) {
			pgsize = LEVEL_SIZE(level);
			if (level == 0)
				break;
			if (IS_P2ALIGNED(va, pgsize) &&
			    (eva - va) >= pgsize &&
			    IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
#ifdef DEBUG
				if (level == 2)
					map1gcnt++;
#endif
				break;
			}
		}

		/*
		 * If this is just memory then allow caching (this happens
		 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
		 * to override that. If we don't have a page_t then make sure
		 * NOCONSIST is set.
		 */
		a = attr;
		f = flags;
		if (!pf_is_memory(pfn))
			f |= HAT_LOAD_NOCONSIST;
		else if (!(a & HAT_PLAT_NOCACHE))
			a |= HAT_STORECACHING_OK;

		if (f & HAT_LOAD_NOCONSIST)
			pp = NULL;
		else
			pp = page_numtopp_nolock(pfn);

		/*
		 * load this page mapping
		 */
		ASSERT(!IN_VA_HOLE(va));
		while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
			if (level == 0)
				panic("unexpected hati_load_common() failure");
			--level;
			pgsize = LEVEL_SIZE(level);
		}

		/*
		 * move to next page
		 */
		va += pgsize;
		pfn += mmu_btop(pgsize);
	}
	XPV_ALLOW_MIGRATE();
}

/*
 * void hat_unlock(hat, addr, len)
 *	unlock the mappings to a given range of addresses
 *
 * Locks are tracked by ht_lock_cnt in the htable.
 */
void
hat_unlock(hat_t *hat, caddr_t addr, size_t len)
{
	uintptr_t	vaddr = (uintptr_t)addr;
	uintptr_t	eaddr = vaddr + len;
	htable_t	*ht = NULL;

	/*
	 * kernel entries are always locked, we don't track lock counts
	 */
	ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
	ASSERT(IS_PAGEALIGNED(vaddr));
	ASSERT(IS_PAGEALIGNED(eaddr));
	if (hat == kas.a_hat)
		return;
	if (eaddr > _userlimit)
		panic("hat_unlock() address out of range - above _userlimit");

	XPV_DISALLOW_MIGRATE();
	ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
	while (vaddr < eaddr) {
		(void) htable_walk(hat, &ht, &vaddr, eaddr);
		if (ht == NULL)
			break;

		ASSERT(!IN_VA_HOLE(vaddr));

		if (ht->ht_lock_cnt < 1)
			panic("hat_unlock(): lock_cnt < 1, "
			    "htable=%p, vaddr=%p\n", ht, (caddr_t)vaddr);
		HTABLE_LOCK_DEC(ht);

		vaddr += LEVEL_SIZE(ht->ht_level);
	}
	if (ht)
		htable_release(ht);
	XPV_ALLOW_MIGRATE();
}

/* ARGSUSED */
void
hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
    hat_region_cookie_t rcookie)
{
	panic("No shared region support on x86");
}

#if !defined(__xpv)
/*
 * Cross call service routine to demap a virtual page on
 * the current CPU or flush all mappings in TLB.
 */
/*ARGSUSED*/
static int
hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
{
	hat_t	*hat = (hat_t *)a1;
	caddr_t	addr = (caddr_t)a2;

	/*
	 * If the target hat isn't the kernel and this CPU isn't operating
	 * in the target hat, we can ignore the cross call.
	 */
	if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
		return (0);

	/*
	 * For a normal address, we just flush one page mapping
	 */
	if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
		mmu_tlbflush_entry(addr);
		return (0);
	}

	/*
	 * Otherwise we reload cr3 to effect a complete TLB flush.
	 *
	 * A reload of cr3 on a VLP process also means we must recopy in
	 * the pte values from the struct hat
	 */
	if (hat->hat_flags & HAT_VLP) {
#if defined(__amd64)
		x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;

		VLP_COPY(hat->hat_vlp_ptes, vlpptep);
#elif defined(__i386)
		reload_pae32(hat, CPU);
#endif
	}
	reload_cr3();
	return (0);
}

/*
 * Flush all TLB entries, including global (ie. kernel) ones.
 */
static void
flush_all_tlb_entries(void)
{
	ulong_t cr4 = getcr4();

	if (cr4 & CR4_PGE) {
		setcr4(cr4 & ~(ulong_t)CR4_PGE);
		setcr4(cr4);

		/*
		 * 32 bit PAE also needs to always reload_cr3()
		 */
		if (mmu.max_level == 2)
			reload_cr3();
	} else {
		reload_cr3();
	}
}

#define	TLB_CPU_HALTED	(01ul)
#define	TLB_INVAL_ALL	(02ul)
#define	CAS_TLB_INFO(cpu, old, new)	\
	caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))

/*
 * Record that a CPU is going idle
 */
void
tlb_going_idle(void)
{
	atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
}

/*
 * Service a delayed TLB flush if coming out of being idle.
 */
void
tlb_service(void)
{
	ulong_t flags = getflags();
	ulong_t tlb_info;
	ulong_t found;

	/*
	 * Be sure interrupts are off while doing this so that
	 * higher level interrupts correctly wait for flushes to finish.
	 */
	if (flags & PS_IE)
		flags = intr_clear();

	/*
	 * We only have to do something if coming out of being idle.
	 */
	tlb_info = CPU->cpu_m.mcpu_tlb_info;
	if (tlb_info & TLB_CPU_HALTED) {
		ASSERT(CPU->cpu_current_hat == kas.a_hat);

		/*
		 * Atomic clear and fetch of old state.
		 */
		while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
			ASSERT(found & TLB_CPU_HALTED);
			tlb_info = found;
			SMT_PAUSE();
		}
		if (tlb_info & TLB_INVAL_ALL)
			flush_all_tlb_entries();
	}

	/*
	 * Restore interrupt enable control bit.
	 */
	if (flags & PS_IE)
		sti();
}
#endif /* !__xpv */

/*
 * Internal routine to do cross calls to invalidate a range of pages on
 * all CPUs using a given hat.
 */
void
hat_tlb_inval(hat_t *hat, uintptr_t va)
{
	extern int	flushes_require_xcalls;	/* from mp_startup.c */
	cpuset_t	justme;
	cpuset_t	cpus_to_shootdown;
#ifndef __xpv
	cpuset_t	check_cpus;
	cpu_t		*cpup;
	int		c;
#endif

	/*
	 * If the hat is being destroyed, there are no more users, so
	 * demap need not do anything.
	 */
	if (hat->hat_flags & HAT_FREEING)
		return;

	/*
	 * If demapping from a shared pagetable, we best demap the
	 * entire set of user TLBs, since we don't know what addresses
	 * these were shared at.
	 */
	if (hat->hat_flags & HAT_SHARED) {
		hat = kas.a_hat;
		va = DEMAP_ALL_ADDR;
	}

	/*
	 * if not running with multiple CPUs, don't use cross calls
	 */
	if (panicstr || !flushes_require_xcalls) {
#ifdef __xpv
		if (va == DEMAP_ALL_ADDR)
			xen_flush_tlb();
		else
			xen_flush_va((caddr_t)va);
#else
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
#endif
		return;
	}


	/*
	 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
	 * Otherwise it's just CPUs currently executing in this hat.
	 */
	kpreempt_disable();
	CPUSET_ONLY(justme, CPU->cpu_id);
	if (hat == kas.a_hat)
		cpus_to_shootdown = khat_cpuset;
	else
		cpus_to_shootdown = hat->hat_cpus;

#ifndef __xpv
	/*
	 * If any CPUs in the set are idle, just request a delayed flush
	 * and avoid waking them up.
	 */
	check_cpus = cpus_to_shootdown;
	for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
		ulong_t tlb_info;

		if (!CPU_IN_SET(check_cpus, c))
			continue;
		CPUSET_DEL(check_cpus, c);
		cpup = cpu[c];
		if (cpup == NULL)
			continue;

		tlb_info = cpup->cpu_m.mcpu_tlb_info;
		while (tlb_info == TLB_CPU_HALTED) {
			(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
			    TLB_CPU_HALTED | TLB_INVAL_ALL);
			SMT_PAUSE();
			tlb_info = cpup->cpu_m.mcpu_tlb_info;
		}
		if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
			HATSTAT_INC(hs_tlb_inval_delayed);
			CPUSET_DEL(cpus_to_shootdown, c);
		}
	}
#endif

	if (CPUSET_ISNULL(cpus_to_shootdown) ||
	    CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {

#ifdef __xpv
		if (va == DEMAP_ALL_ADDR)
			xen_flush_tlb();
		else
			xen_flush_va((caddr_t)va);
#else
		(void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL);
#endif

	} else {

		CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
#ifdef __xpv
		if (va == DEMAP_ALL_ADDR)
			xen_gflush_tlb(cpus_to_shootdown);
		else
			xen_gflush_va((caddr_t)va, cpus_to_shootdown);
#else
		xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, X_CALL_HIPRI,
		    cpus_to_shootdown, hati_demap_func);
#endif

	}
	kpreempt_enable();
}
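
/*
 * Illustrative sketch (not part of the original source): typical shootdown
 * requests. A single-page invalidation passes the page aligned VA; a full
 * flush of all translations passes DEMAP_ALL_ADDR against the kernel hat.
 */
#if 0	/* example only, not compiled */
static void
example_shootdowns(hat_t *hat, uintptr_t va)
{
	hat_tlb_inval(hat, va & LEVEL_MASK(0));		/* one page */
	hat_tlb_inval(kas.a_hat, DEMAP_ALL_ADDR);	/* all CPUs, all TLBs */
}
#endif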
2157 * 2158 * On the hypervisor we must always remove mappings, as a 2159 * writable mapping left behind could cause a page table 2160 * allocation to fail. 2161 */ 2162 #if !defined(__xpv) 2163 if (hat->hat_flags & HAT_FREEING) 2164 old_pte = x86pte_get(ht, entry); 2165 else 2166 #endif 2167 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr); 2168 2169 /* 2170 * If the page hadn't changed we've unmapped it and can proceed 2171 */ 2172 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn) 2173 break; 2174 2175 /* 2176 * Otherwise, we'll have to retry with the current old_pte. 2177 * Drop the hment lock, since the pfn may have changed. 2178 */ 2179 if (pp != NULL) { 2180 x86_hm_exit(pp); 2181 pp = NULL; 2182 } else { 2183 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 2184 } 2185 } 2186 2187 /* 2188 * If the old mapping wasn't valid, there's nothing more to do 2189 */ 2190 if (!PTE_ISVALID(old_pte)) { 2191 if (pp != NULL) 2192 x86_hm_exit(pp); 2193 return; 2194 } 2195 2196 /* 2197 * Take care of syncing any MOD/REF bits and removing the hment. 2198 */ 2199 if (pp != NULL) { 2200 if (!(flags & HAT_UNLOAD_NOSYNC)) 2201 hati_sync_pte_to_page(pp, old_pte, l); 2202 hm = hment_remove(pp, ht, entry); 2203 x86_hm_exit(pp); 2204 if (hm != NULL) 2205 hment_free(hm); 2206 } 2207 2208 /* 2209 * Handle book keeping in the htable and hat 2210 */ 2211 ASSERT(ht->ht_valid_cnt > 0); 2212 HTABLE_DEC(ht->ht_valid_cnt); 2213 PGCNT_DEC(hat, l); 2214 } 2215 2216 /* 2217 * very cheap unload implementation to special case some kernel addresses 2218 */ 2219 static void 2220 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags) 2221 { 2222 uintptr_t va = (uintptr_t)addr; 2223 uintptr_t eva = va + len; 2224 pgcnt_t pg_index; 2225 htable_t *ht; 2226 uint_t entry; 2227 x86pte_t *pte_ptr; 2228 x86pte_t old_pte; 2229 2230 for (; va < eva; va += MMU_PAGESIZE) { 2231 /* 2232 * Get the PTE 2233 */ 2234 pg_index = mmu_btop(va - mmu.kmap_addr); 2235 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); 2236 old_pte = GET_PTE(pte_ptr); 2237 2238 /* 2239 * get the htable / entry 2240 */ 2241 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) 2242 >> LEVEL_SHIFT(1)]; 2243 entry = htable_va2entry(va, ht); 2244 2245 /* 2246 * use mostly common code to unmap it. 2247 */ 2248 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr); 2249 } 2250 } 2251 2252 2253 /* 2254 * unload a range of virtual address space (no callback) 2255 */ 2256 void 2257 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2258 { 2259 uintptr_t va = (uintptr_t)addr; 2260 2261 XPV_DISALLOW_MIGRATE(); 2262 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 2263 2264 /* 2265 * special case for performance. 2266 */ 2267 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 2268 ASSERT(hat == kas.a_hat); 2269 hat_kmap_unload(addr, len, flags); 2270 } else { 2271 hat_unload_callback(hat, addr, len, flags, NULL); 2272 } 2273 XPV_ALLOW_MIGRATE(); 2274 } 2275 2276 /* 2277 * Do the callbacks for ranges being unloaded. 
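* Each range_info_t below describes rng_cnt contiguous mappings of
* LEVEL_SIZE(rng_level) bytes starting at rng_va. As an illustrative
* example, { rng_va = va, rng_cnt = 3, rng_level = 0 } yields one
* callback covering [va, va + 3 * MMU_PAGESIZE).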
2278 */
2279 typedef struct range_info {
2280 uintptr_t rng_va;
2281 ulong_t rng_cnt;
2282 level_t rng_level;
2283 } range_info_t;
2284
2285 static void
2286 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range)
2287 {
2288 /*
2289 * do callbacks to upper level VM system
2290 */
2291 while (cb != NULL && cnt > 0) {
2292 --cnt;
2293 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2294 cb->hcb_end_addr = cb->hcb_start_addr;
2295 cb->hcb_end_addr +=
2296 range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2297 cb->hcb_function(cb);
2298 }
2299 }
2300
2301 /*
2302 * Unload a given range of addresses (has optional callback)
2303 *
2304 * Flags:
2305 * define HAT_UNLOAD 0x00
2306 * define HAT_UNLOAD_NOSYNC 0x02
2307 * define HAT_UNLOAD_UNLOCK 0x04
2308 * define HAT_UNLOAD_OTHER 0x08 - not used
2309 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2310 */
2311 #define MAX_UNLOAD_CNT (8)
2312 void
2313 hat_unload_callback(
2314 hat_t *hat,
2315 caddr_t addr,
2316 size_t len,
2317 uint_t flags,
2318 hat_callback_t *cb)
2319 {
2320 uintptr_t vaddr = (uintptr_t)addr;
2321 uintptr_t eaddr = vaddr + len;
2322 htable_t *ht = NULL;
2323 uint_t entry;
2324 uintptr_t contig_va = (uintptr_t)-1L;
2325 range_info_t r[MAX_UNLOAD_CNT];
2326 uint_t r_cnt = 0;
2327 x86pte_t old_pte;
2328
2329 XPV_DISALLOW_MIGRATE();
2330 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2331 ASSERT(IS_PAGEALIGNED(vaddr));
2332 ASSERT(IS_PAGEALIGNED(eaddr));
2333
2334 /*
2335 * Special case a single page being unloaded for speed. This happens
2336 * quite frequently; e.g. COW faults after a fork().
2337 */
2338 if (cb == NULL && len == MMU_PAGESIZE) {
2339 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2340 if (ht != NULL) {
2341 if (PTE_ISVALID(old_pte))
2342 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2343 htable_release(ht);
2344 }
2345 XPV_ALLOW_MIGRATE();
2346 return;
2347 }
2348
2349 while (vaddr < eaddr) {
2350 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2351 if (ht == NULL)
2352 break;
2353
2354 ASSERT(!IN_VA_HOLE(vaddr));
2355
2356 if (vaddr < (uintptr_t)addr)
2357 panic("hat_unload_callback(): unmap inside large page");
2358
2359 /*
2360 * We'll do the callbacks for contiguous ranges
2361 */
2362 if (vaddr != contig_va ||
2363 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2364 if (r_cnt == MAX_UNLOAD_CNT) {
2365 handle_ranges(cb, r_cnt, r);
2366 r_cnt = 0;
2367 }
2368 r[r_cnt].rng_va = vaddr;
2369 r[r_cnt].rng_cnt = 0;
2370 r[r_cnt].rng_level = ht->ht_level;
2371 ++r_cnt;
2372 }
2373
2374 /*
2375 * Unload one mapping from the page tables.
2376 */
2377 entry = htable_va2entry(vaddr, ht);
2378 hat_pte_unmap(ht, entry, flags, old_pte, NULL);
2379 ASSERT(ht->ht_level <= mmu.max_page_level);
2380 vaddr += LEVEL_SIZE(ht->ht_level);
2381 contig_va = vaddr;
2382 ++r[r_cnt - 1].rng_cnt;
2383 }
2384 if (ht)
2385 htable_release(ht);
2386
2387 /*
2388 * handle last range for callbacks
2389 */
2390 if (r_cnt > 0)
2391 handle_ranges(cb, r_cnt, r);
2392 XPV_ALLOW_MIGRATE();
2393 }
2394
2395 /*
2396 * synchronize mapping with software data structures
2397 *
2398 * This interface is currently only used by the working set monitor
2399 * driver.
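*
* A minimal usage sketch (hypothetical caller; addr and len must be
* page aligned):
*
*	hat_sync(as->a_hat, addr, len, HAT_SYNC_ZERORM);
*
* This folds the hardware REF/MOD bits of every mapped page in the
* range into the page_t and clears them in the PTEs.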
2400 */ 2401 /*ARGSUSED*/ 2402 void 2403 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2404 { 2405 uintptr_t vaddr = (uintptr_t)addr; 2406 uintptr_t eaddr = vaddr + len; 2407 htable_t *ht = NULL; 2408 uint_t entry; 2409 x86pte_t pte; 2410 x86pte_t save_pte; 2411 x86pte_t new; 2412 page_t *pp; 2413 2414 ASSERT(!IN_VA_HOLE(vaddr)); 2415 ASSERT(IS_PAGEALIGNED(vaddr)); 2416 ASSERT(IS_PAGEALIGNED(eaddr)); 2417 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2418 2419 XPV_DISALLOW_MIGRATE(); 2420 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2421 try_again: 2422 pte = htable_walk(hat, &ht, &vaddr, eaddr); 2423 if (ht == NULL) 2424 break; 2425 entry = htable_va2entry(vaddr, ht); 2426 2427 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2428 PTE_GET(pte, PT_REF | PT_MOD) == 0) 2429 continue; 2430 2431 /* 2432 * We need to acquire the mapping list lock to protect 2433 * against hat_pageunload(), hat_unload(), etc. 2434 */ 2435 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level)); 2436 if (pp == NULL) 2437 break; 2438 x86_hm_enter(pp); 2439 save_pte = pte; 2440 pte = x86pte_get(ht, entry); 2441 if (pte != save_pte) { 2442 x86_hm_exit(pp); 2443 goto try_again; 2444 } 2445 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2446 PTE_GET(pte, PT_REF | PT_MOD) == 0) { 2447 x86_hm_exit(pp); 2448 continue; 2449 } 2450 2451 /* 2452 * Need to clear ref or mod bits. We may compete with 2453 * hardware updating the R/M bits and have to try again. 2454 */ 2455 if (flags == HAT_SYNC_ZERORM) { 2456 new = pte; 2457 PTE_CLR(new, PT_REF | PT_MOD); 2458 pte = hati_update_pte(ht, entry, pte, new); 2459 if (pte != 0) { 2460 x86_hm_exit(pp); 2461 goto try_again; 2462 } 2463 } else { 2464 /* 2465 * sync the PTE to the page_t 2466 */ 2467 hati_sync_pte_to_page(pp, save_pte, ht->ht_level); 2468 } 2469 x86_hm_exit(pp); 2470 } 2471 if (ht) 2472 htable_release(ht); 2473 XPV_ALLOW_MIGRATE(); 2474 } 2475 2476 /* 2477 * void hat_map(hat, addr, len, flags) 2478 */ 2479 /*ARGSUSED*/ 2480 void 2481 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2482 { 2483 /* does nothing */ 2484 } 2485 2486 /* 2487 * uint_t hat_getattr(hat, addr, *attr) 2488 * returns attr for <hat,addr> in *attr. returns 0 if there was a 2489 * mapping and *attr is valid, nonzero if there was no mapping and 2490 * *attr is not valid. 
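*
* Usage sketch (illustrative only):
*
*	uint_t attr;
*
*	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
*		... the mapping at addr is writable ...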
2491 */ 2492 uint_t 2493 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr) 2494 { 2495 uintptr_t vaddr = ALIGN2PAGE(addr); 2496 htable_t *ht = NULL; 2497 x86pte_t pte; 2498 2499 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2500 2501 if (IN_VA_HOLE(vaddr)) 2502 return ((uint_t)-1); 2503 2504 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); 2505 if (ht == NULL) 2506 return ((uint_t)-1); 2507 2508 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) { 2509 htable_release(ht); 2510 return ((uint_t)-1); 2511 } 2512 2513 *attr = PROT_READ; 2514 if (PTE_GET(pte, PT_WRITABLE)) 2515 *attr |= PROT_WRITE; 2516 if (PTE_GET(pte, PT_USER)) 2517 *attr |= PROT_USER; 2518 if (!PTE_GET(pte, mmu.pt_nx)) 2519 *attr |= PROT_EXEC; 2520 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 2521 *attr |= HAT_NOSYNC; 2522 htable_release(ht); 2523 return (0); 2524 } 2525 2526 /* 2527 * hat_updateattr() applies the given attribute change to an existing mapping 2528 */ 2529 #define HAT_LOAD_ATTR 1 2530 #define HAT_SET_ATTR 2 2531 #define HAT_CLR_ATTR 3 2532 2533 static void 2534 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) 2535 { 2536 uintptr_t vaddr = (uintptr_t)addr; 2537 uintptr_t eaddr = (uintptr_t)addr + len; 2538 htable_t *ht = NULL; 2539 uint_t entry; 2540 x86pte_t oldpte, newpte; 2541 page_t *pp; 2542 2543 XPV_DISALLOW_MIGRATE(); 2544 ASSERT(IS_PAGEALIGNED(vaddr)); 2545 ASSERT(IS_PAGEALIGNED(eaddr)); 2546 ASSERT(hat == kas.a_hat || 2547 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2548 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2549 try_again: 2550 oldpte = htable_walk(hat, &ht, &vaddr, eaddr); 2551 if (ht == NULL) 2552 break; 2553 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST) 2554 continue; 2555 2556 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level)); 2557 if (pp == NULL) 2558 continue; 2559 x86_hm_enter(pp); 2560 2561 newpte = oldpte; 2562 /* 2563 * We found a page table entry in the desired range, 2564 * figure out the new attributes. 2565 */ 2566 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) { 2567 if ((attr & PROT_WRITE) && 2568 !PTE_GET(oldpte, PT_WRITABLE)) 2569 newpte |= PT_WRITABLE; 2570 2571 if ((attr & HAT_NOSYNC) && 2572 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC) 2573 newpte |= PT_NOSYNC; 2574 2575 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) 2576 newpte &= ~mmu.pt_nx; 2577 } 2578 2579 if (what == HAT_LOAD_ATTR) { 2580 if (!(attr & PROT_WRITE) && 2581 PTE_GET(oldpte, PT_WRITABLE)) 2582 newpte &= ~PT_WRITABLE; 2583 2584 if (!(attr & HAT_NOSYNC) && 2585 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2586 newpte &= ~PT_SOFTWARE; 2587 2588 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2589 newpte |= mmu.pt_nx; 2590 } 2591 2592 if (what == HAT_CLR_ATTR) { 2593 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE)) 2594 newpte &= ~PT_WRITABLE; 2595 2596 if ((attr & HAT_NOSYNC) && 2597 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2598 newpte &= ~PT_SOFTWARE; 2599 2600 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2601 newpte |= mmu.pt_nx; 2602 } 2603 2604 /* 2605 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set. 2606 * x86pte_set() depends on this. 2607 */ 2608 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC) 2609 newpte |= PT_REF | PT_MOD; 2610 2611 /* 2612 * what about PROT_READ or others? this code only handles: 2613 * EXEC, WRITE, NOSYNC 2614 */ 2615 2616 /* 2617 * If new PTE really changed, update the table. 
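* (hati_update_pte() returns 0 on success, or the PTE it actually
* found when we lose a race with hardware R/M updates, in which case
* we drop the mlist lock and rewalk; see the retry below.)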
2618 */
2619 if (newpte != oldpte) {
2620 entry = htable_va2entry(vaddr, ht);
2621 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2622 if (oldpte != 0) {
2623 x86_hm_exit(pp);
2624 goto try_again;
2625 }
2626 }
2627 x86_hm_exit(pp);
2628 }
2629 if (ht)
2630 htable_release(ht);
2631 XPV_ALLOW_MIGRATE();
2632 }
2633
2634 /*
2635 * Various wrappers for hat_updateattr()
2636 */
2637 void
2638 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2639 {
2640 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2641 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2642 }
2643
2644 void
2645 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2646 {
2647 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2648 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2649 }
2650
2651 void
2652 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2653 {
2654 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2655 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2656 }
2657
2658 void
2659 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2660 {
2661 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2662 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2663 }
2664
2665 /*
2666 * size_t hat_getpagesize(hat, addr)
2667 * returns pagesize in bytes for <hat, addr>. returns -1 if there is
2668 * no mapping. This is an advisory call.
2669 */
2670 ssize_t
2671 hat_getpagesize(hat_t *hat, caddr_t addr)
2672 {
2673 uintptr_t vaddr = ALIGN2PAGE(addr);
2674 htable_t *ht;
2675 size_t pagesize;
2676
2677 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2678 if (IN_VA_HOLE(vaddr))
2679 return (-1);
2680 ht = htable_getpage(hat, vaddr, NULL);
2681 if (ht == NULL)
2682 return (-1);
2683 pagesize = LEVEL_SIZE(ht->ht_level);
2684 htable_release(ht);
2685 return (pagesize);
2686 }
2687
2688
2689
2690 /*
2691 * pfn_t hat_getpfnum(hat, addr)
2692 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2693 */
2694 pfn_t
2695 hat_getpfnum(hat_t *hat, caddr_t addr)
2696 {
2697 uintptr_t vaddr = ALIGN2PAGE(addr);
2698 htable_t *ht;
2699 uint_t entry;
2700 pfn_t pfn = PFN_INVALID;
2701
2702 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2703 if (khat_running == 0)
2704 return (PFN_INVALID);
2705
2706 if (IN_VA_HOLE(vaddr))
2707 return (PFN_INVALID);
2708
2709 XPV_DISALLOW_MIGRATE();
2710 /*
2711 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2712 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2713 * this up.
2714 */
2715 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2716 x86pte_t pte;
2717 pgcnt_t pg_index;
2718
2719 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2720 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2721 if (PTE_ISVALID(pte))
2722 /*LINTED [use of constant 0 causes a lint warning] */
2723 pfn = PTE2PFN(pte, 0);
2724 XPV_ALLOW_MIGRATE();
2725 return (pfn);
2726 }
2727
2728 ht = htable_getpage(hat, vaddr, &entry);
2729 if (ht == NULL) {
2730 XPV_ALLOW_MIGRATE();
2731 return (PFN_INVALID);
2732 }
2733 ASSERT(vaddr >= ht->ht_vaddr);
2734 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2735 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2736 if (ht->ht_level > 0)
2737 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2738 htable_release(ht);
2739 XPV_ALLOW_MIGRATE();
2740 return (pfn);
2741 }
2742
2743 /*
2744 * hat_getkpfnum() is an obsolete DDI routine, and its use is discouraged.
2745 * Use hat_getpfnum(kas.a_hat, ...)
instead. 2746 * 2747 * We'd like to return PFN_INVALID if the mappings have underlying page_t's 2748 * but can't right now due to the fact that some software has grown to use 2749 * this interface incorrectly. So for now when the interface is misused, 2750 * return a warning to the user that in the future it won't work in the 2751 * way they're abusing it, and carry on. 2752 * 2753 * Note that hat_getkpfnum() is never supported on amd64. 2754 */ 2755 #if !defined(__amd64) 2756 pfn_t 2757 hat_getkpfnum(caddr_t addr) 2758 { 2759 pfn_t pfn; 2760 int badcaller = 0; 2761 2762 if (khat_running == 0) 2763 panic("hat_getkpfnum(): called too early\n"); 2764 if ((uintptr_t)addr < kernelbase) 2765 return (PFN_INVALID); 2766 2767 XPV_DISALLOW_MIGRATE(); 2768 if (segkpm && IS_KPM_ADDR(addr)) { 2769 badcaller = 1; 2770 pfn = hat_kpm_va2pfn(addr); 2771 } else { 2772 pfn = hat_getpfnum(kas.a_hat, addr); 2773 badcaller = pf_is_memory(pfn); 2774 } 2775 2776 if (badcaller) 2777 hat_getkpfnum_badcall(caller()); 2778 XPV_ALLOW_MIGRATE(); 2779 return (pfn); 2780 } 2781 #endif /* __amd64 */ 2782 2783 /* 2784 * int hat_probe(hat, addr) 2785 * return 0 if no valid mapping is present. Faster version 2786 * of hat_getattr in certain architectures. 2787 */ 2788 int 2789 hat_probe(hat_t *hat, caddr_t addr) 2790 { 2791 uintptr_t vaddr = ALIGN2PAGE(addr); 2792 uint_t entry; 2793 htable_t *ht; 2794 pgcnt_t pg_off; 2795 2796 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2797 ASSERT(hat == kas.a_hat || 2798 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2799 if (IN_VA_HOLE(vaddr)) 2800 return (0); 2801 2802 /* 2803 * Most common use of hat_probe is from segmap. We special case it 2804 * for performance. 2805 */ 2806 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2807 pg_off = mmu_btop(vaddr - mmu.kmap_addr); 2808 if (mmu.pae_hat) 2809 return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 2810 else 2811 return (PTE_ISVALID( 2812 ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 2813 } 2814 2815 ht = htable_getpage(hat, vaddr, &entry); 2816 htable_release(ht); 2817 return (ht != NULL); 2818 } 2819 2820 /* 2821 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 2822 */ 2823 static int 2824 is_it_dism(hat_t *hat, caddr_t va) 2825 { 2826 struct seg *seg; 2827 struct shm_data *shmd; 2828 struct spt_data *sptd; 2829 2830 seg = as_findseg(hat->hat_as, va, 0); 2831 ASSERT(seg != NULL); 2832 ASSERT(seg->s_base <= va); 2833 shmd = (struct shm_data *)seg->s_data; 2834 ASSERT(shmd != NULL); 2835 sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2836 ASSERT(sptd != NULL); 2837 if (sptd->spt_flags & SHM_PAGEABLE) 2838 return (1); 2839 return (0); 2840 } 2841 2842 /* 2843 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 2844 * except that we use the ism_hat's existing mappings to determine the pages 2845 * and protections to use for this hat. If we find a full properly aligned 2846 * and sized pagetable, we will attempt to share the pagetable itself. 2847 */ 2848 /*ARGSUSED*/ 2849 int 2850 hat_share( 2851 hat_t *hat, 2852 caddr_t addr, 2853 hat_t *ism_hat, 2854 caddr_t src_addr, 2855 size_t len, /* almost useless value, see below.. 
*/ 2856 uint_t ismszc) 2857 { 2858 uintptr_t vaddr_start = (uintptr_t)addr; 2859 uintptr_t vaddr; 2860 uintptr_t eaddr = vaddr_start + len; 2861 uintptr_t ism_addr_start = (uintptr_t)src_addr; 2862 uintptr_t ism_addr = ism_addr_start; 2863 uintptr_t e_ism_addr = ism_addr + len; 2864 htable_t *ism_ht = NULL; 2865 htable_t *ht; 2866 x86pte_t pte; 2867 page_t *pp; 2868 pfn_t pfn; 2869 level_t l; 2870 pgcnt_t pgcnt; 2871 uint_t prot; 2872 int is_dism; 2873 int flags; 2874 2875 /* 2876 * We might be asked to share an empty DISM hat by as_dup() 2877 */ 2878 ASSERT(hat != kas.a_hat); 2879 ASSERT(eaddr <= _userlimit); 2880 if (!(ism_hat->hat_flags & HAT_SHARED)) { 2881 ASSERT(hat_get_mapped_size(ism_hat) == 0); 2882 return (0); 2883 } 2884 XPV_DISALLOW_MIGRATE(); 2885 2886 /* 2887 * The SPT segment driver often passes us a size larger than there are 2888 * valid mappings. That's because it rounds the segment size up to a 2889 * large pagesize, even if the actual memory mapped by ism_hat is less. 2890 */ 2891 ASSERT(IS_PAGEALIGNED(vaddr_start)); 2892 ASSERT(IS_PAGEALIGNED(ism_addr_start)); 2893 ASSERT(ism_hat->hat_flags & HAT_SHARED); 2894 is_dism = is_it_dism(hat, addr); 2895 while (ism_addr < e_ism_addr) { 2896 /* 2897 * use htable_walk to get the next valid ISM mapping 2898 */ 2899 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 2900 if (ism_ht == NULL) 2901 break; 2902 2903 /* 2904 * First check to see if we already share the page table. 2905 */ 2906 l = ism_ht->ht_level; 2907 vaddr = vaddr_start + (ism_addr - ism_addr_start); 2908 ht = htable_lookup(hat, vaddr, l); 2909 if (ht != NULL) { 2910 if (ht->ht_flags & HTABLE_SHARED_PFN) 2911 goto shared; 2912 htable_release(ht); 2913 goto not_shared; 2914 } 2915 2916 /* 2917 * Can't ever share top table. 2918 */ 2919 if (l == mmu.max_level) 2920 goto not_shared; 2921 2922 /* 2923 * Avoid level mismatches later due to DISM faults. 2924 */ 2925 if (is_dism && l > 0) 2926 goto not_shared; 2927 2928 /* 2929 * addresses and lengths must align 2930 * table must be fully populated 2931 * no lower level page tables 2932 */ 2933 if (ism_addr != ism_ht->ht_vaddr || 2934 (vaddr & LEVEL_OFFSET(l + 1)) != 0) 2935 goto not_shared; 2936 2937 /* 2938 * The range of address space must cover a full table. 2939 */ 2940 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 2941 goto not_shared; 2942 2943 /* 2944 * All entries in the ISM page table must be leaf PTEs. 2945 */ 2946 if (l > 0) { 2947 int e; 2948 2949 /* 2950 * We know the 0th is from htable_walk() above. 2951 */ 2952 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 2953 x86pte_t pte; 2954 pte = x86pte_get(ism_ht, e); 2955 if (!PTE_ISPAGE(pte, l)) 2956 goto not_shared; 2957 } 2958 } 2959 2960 /* 2961 * share the page table 2962 */ 2963 ht = htable_create(hat, vaddr, l, ism_ht); 2964 shared: 2965 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 2966 ASSERT(ht->ht_shares == ism_ht); 2967 hat->hat_ism_pgcnt += 2968 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 2969 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 2970 ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 2971 htable_release(ht); 2972 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 2973 htable_release(ism_ht); 2974 ism_ht = NULL; 2975 continue; 2976 2977 not_shared: 2978 /* 2979 * Unable to share the page table. Instead we will 2980 * create new mappings from the values in the ISM mappings. 
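* (For instance, a 2M ISM mapping whose target vaddr is only 4K
* aligned would be rebuilt as 512 4K mappings; illustrative amd64
* sizes.)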
2981 * Figure out what level size mappings to use; 2982 */ 2983 for (l = ism_ht->ht_level; l > 0; --l) { 2984 if (LEVEL_SIZE(l) <= eaddr - vaddr && 2985 (vaddr & LEVEL_OFFSET(l)) == 0) 2986 break; 2987 } 2988 2989 /* 2990 * The ISM mapping might be larger than the share area, 2991 * be careful to truncate it if needed. 2992 */ 2993 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 2994 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 2995 } else { 2996 pgcnt = mmu_btop(eaddr - vaddr); 2997 l = 0; 2998 } 2999 3000 pfn = PTE2PFN(pte, ism_ht->ht_level); 3001 ASSERT(pfn != PFN_INVALID); 3002 while (pgcnt > 0) { 3003 /* 3004 * Make a new pte for the PFN for this level. 3005 * Copy protections for the pte from the ISM pte. 3006 */ 3007 pp = page_numtopp_nolock(pfn); 3008 ASSERT(pp != NULL); 3009 3010 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 3011 if (PTE_GET(pte, PT_WRITABLE)) 3012 prot |= PROT_WRITE; 3013 if (!PTE_GET(pte, PT_NX)) 3014 prot |= PROT_EXEC; 3015 3016 flags = HAT_LOAD; 3017 if (!is_dism) 3018 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 3019 while (hati_load_common(hat, vaddr, pp, prot, flags, 3020 l, pfn) != 0) { 3021 if (l == 0) 3022 panic("hati_load_common() failure"); 3023 --l; 3024 } 3025 3026 vaddr += LEVEL_SIZE(l); 3027 ism_addr += LEVEL_SIZE(l); 3028 pfn += mmu_btop(LEVEL_SIZE(l)); 3029 pgcnt -= mmu_btop(LEVEL_SIZE(l)); 3030 } 3031 } 3032 if (ism_ht != NULL) 3033 htable_release(ism_ht); 3034 XPV_ALLOW_MIGRATE(); 3035 return (0); 3036 } 3037 3038 3039 /* 3040 * hat_unshare() is similar to hat_unload_callback(), but 3041 * we have to look for empty shared pagetables. Note that 3042 * hat_unshare() is always invoked against an entire segment. 3043 */ 3044 /*ARGSUSED*/ 3045 void 3046 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 3047 { 3048 uint64_t vaddr = (uintptr_t)addr; 3049 uintptr_t eaddr = vaddr + len; 3050 htable_t *ht = NULL; 3051 uint_t need_demaps = 0; 3052 int flags = HAT_UNLOAD_UNMAP; 3053 level_t l; 3054 3055 ASSERT(hat != kas.a_hat); 3056 ASSERT(eaddr <= _userlimit); 3057 ASSERT(IS_PAGEALIGNED(vaddr)); 3058 ASSERT(IS_PAGEALIGNED(eaddr)); 3059 XPV_DISALLOW_MIGRATE(); 3060 3061 /* 3062 * First go through and remove any shared pagetables. 3063 * 3064 * Note that it's ok to delay the TLB shootdown till the entire range is 3065 * finished, because if hat_pageunload() were to unload a shared 3066 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 3067 */ 3068 l = mmu.max_page_level; 3069 if (l == mmu.max_level) 3070 --l; 3071 for (; l >= 0; --l) { 3072 for (vaddr = (uintptr_t)addr; vaddr < eaddr; 3073 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 3074 ASSERT(!IN_VA_HOLE(vaddr)); 3075 /* 3076 * find a pagetable that maps the current address 3077 */ 3078 ht = htable_lookup(hat, vaddr, l); 3079 if (ht == NULL) 3080 continue; 3081 if (ht->ht_flags & HTABLE_SHARED_PFN) { 3082 /* 3083 * clear page count, set valid_cnt to 0, 3084 * let htable_release() finish the job 3085 */ 3086 hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 3087 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3088 ht->ht_valid_cnt = 0; 3089 need_demaps = 1; 3090 } 3091 htable_release(ht); 3092 } 3093 } 3094 3095 /* 3096 * flush the TLBs - since we're probably dealing with MANY mappings 3097 * we do just one CR3 reload. 3098 */ 3099 if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 3100 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 3101 3102 /* 3103 * Now go back and clean up any unaligned mappings that 3104 * couldn't share pagetables. 
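* Locked ISM PTEs were loaded with HAT_LOAD_LOCK in hat_share(), so
* their unload must pass HAT_UNLOAD_UNLOCK to drop the lock counts;
* pageable DISM mappings were never locked, hence the check below.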
3105 */
3106 if (!is_it_dism(hat, addr))
3107 flags |= HAT_UNLOAD_UNLOCK;
3108 hat_unload(hat, addr, len, flags);
3109 XPV_ALLOW_MIGRATE();
3110 }
3111
3112
3113 /*
3114 * hat_reserve() does nothing
3115 */
3116 /*ARGSUSED*/
3117 void
3118 hat_reserve(struct as *as, caddr_t addr, size_t len)
3119 {
3120 }
3121
3122
3123 /*
3124 * Called when all mappings to a page should have write permission removed.
3125 * Mostly stolen from hat_pagesync()
3126 */
3127 static void
3128 hati_page_clrwrt(struct page *pp)
3129 {
3130 hment_t *hm = NULL;
3131 htable_t *ht;
3132 uint_t entry;
3133 x86pte_t old;
3134 x86pte_t new;
3135 uint_t pszc = 0;
3136
3137 XPV_DISALLOW_MIGRATE();
3138 next_size:
3139 /*
3140 * walk thru the mapping list clearing write permission
3141 */
3142 x86_hm_enter(pp);
3143 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3144 if (ht->ht_level < pszc)
3145 continue;
3146 old = x86pte_get(ht, entry);
3147
3148 for (;;) {
3149 /*
3150 * Is this mapping of interest?
3151 */
3152 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3153 PTE_GET(old, PT_WRITABLE) == 0)
3154 break;
3155
3156 /*
3157 * Clear ref/mod writable bits. This requires cross
3158 * calls to ensure any executing TLBs see cleared bits.
3159 */
3160 new = old;
3161 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3162 old = hati_update_pte(ht, entry, old, new);
3163 if (old != 0)
3164 continue;
3165
3166 break;
3167 }
3168 }
3169 x86_hm_exit(pp);
3170 while (pszc < pp->p_szc) {
3171 page_t *tpp;
3172 pszc++;
3173 tpp = PP_GROUPLEADER(pp, pszc);
3174 if (pp != tpp) {
3175 pp = tpp;
3176 goto next_size;
3177 }
3178 }
3179 XPV_ALLOW_MIGRATE();
3180 }
3181
3182 /*
3183 * void hat_page_setattr(pp, flag)
3184 * void hat_page_clrattr(pp, flag)
3185 * used to set/clr ref/mod bits.
3186 */
3187 void
3188 hat_page_setattr(struct page *pp, uint_t flag)
3189 {
3190 vnode_t *vp = pp->p_vnode;
3191 kmutex_t *vphm = NULL;
3192 page_t **listp;
3193 int noshuffle;
3194
3195 noshuffle = flag & P_NSH;
3196 flag &= ~P_NSH;
3197
3198 if (PP_GETRM(pp, flag) == flag)
3199 return;
3200
3201 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3202 !noshuffle) {
3203 vphm = page_vnode_mutex(vp);
3204 mutex_enter(vphm);
3205 }
3206
3207 PP_SETRM(pp, flag);
3208
3209 if (vphm != NULL) {
3210
3211 /*
3212 * Some File Systems examine v_pages for NULL w/o
3213 * grabbing the vphm mutex. Must not let it become NULL when
3214 * pp is the only page on the list.
3215 */
3216 if (pp->p_vpnext != pp) {
3217 page_vpsub(&vp->v_pages, pp);
3218 if (vp->v_pages != NULL)
3219 listp = &vp->v_pages->p_vpprev->p_vpnext;
3220 else
3221 listp = &vp->v_pages;
3222 page_vpadd(listp, pp);
3223 }
3224 mutex_exit(vphm);
3225 }
3226 }
3227
3228 void
3229 hat_page_clrattr(struct page *pp, uint_t flag)
3230 {
3231 vnode_t *vp = pp->p_vnode;
3232 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3233
3234 /*
3235 * Caller is expected to hold the page's io lock for VMODSORT to work
3236 * correctly with pvn_vplist_dirty() and pvn_getdirty() when the mod
3237 * bit is cleared.
3238 * We don't have an assert here, to avoid tripping some existing
3239 * third party code. The dirty page is moved back to the top of the
3240 * v_pages list after IO is done in pvn_write_done().
3241 */
3242 PP_CLRRM(pp, flag);
3243
3244 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3245
3246 /*
3247 * VMODSORT works by removing write permissions and getting
3248 * a fault when a page is made dirty. At this point
3249 * we need to remove write permission from all mappings
3250 * to this page.
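* Roughly: once P_MOD is cleared here and hati_page_clrwrt() strips
* PT_WRITABLE, the next store to the page faults, so the VM observes
* the page becoming dirty again.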
3251 */
3252 hati_page_clrwrt(pp);
3253 }
3254 }
3255
3256 /*
3257 * If flag is specified, returns 0 if attribute is disabled
3258 * and nonzero if enabled. If flag specifies multiple attributes
3259 * then returns 0 if ALL attributes are disabled. This is an advisory
3260 * call.
3261 */
3262 uint_t
3263 hat_page_getattr(struct page *pp, uint_t flag)
3264 {
3265 return (PP_GETRM(pp, flag));
3266 }
3267
3268
3269 /*
3270 * common code used by hat_pageunload() and hment_steal()
3271 */
3272 hment_t *
3273 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3274 {
3275 x86pte_t old_pte;
3276 pfn_t pfn = pp->p_pagenum;
3277 hment_t *hm;
3278
3279 /*
3280 * We need to acquire a hold on the htable in order to
3281 * do the invalidate. We know the htable must exist, since
3282 * unmaps don't release the htable until after removing any
3283 * hment. Having x86_hm_enter() keeps that from proceeding.
3284 */
3285 htable_acquire(ht);
3286
3287 /*
3288 * Invalidate the PTE and remove the hment.
3289 */
3290 old_pte = x86pte_inval(ht, entry, 0, NULL);
3291 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3292 panic("x86pte_inval() failure found PTE = " FMT_PTE
3293 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3294 old_pte, pfn, (uintptr_t)ht, entry);
3295 }
3296
3297 /*
3298 * Clean up all the htable information for this mapping
3299 */
3300 ASSERT(ht->ht_valid_cnt > 0);
3301 HTABLE_DEC(ht->ht_valid_cnt);
3302 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3303
3304 /*
3305 * sync ref/mod bits to the page_t
3306 */
3307 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3308 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3309
3310 /*
3311 * Remove the mapping list entry for this page.
3312 */
3313 hm = hment_remove(pp, ht, entry);
3314
3315 /*
3316 * drop the mapping list lock so that we might free the
3317 * hment and htable.
3318 */
3319 x86_hm_exit(pp);
3320 htable_release(ht);
3321 return (hm);
3322 }
3323
3324 extern int vpm_enable;
3325 /*
3326 * Unload all translations to a page. If the page is a subpage of a large
3327 * page, the large page mappings are also removed.
3328 *
3329 * The forceflags are unused.
3330 */
3331
3332 /*ARGSUSED*/
3333 static int
3334 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3335 {
3336 page_t *cur_pp = pp;
3337 hment_t *hm;
3338 hment_t *prev;
3339 htable_t *ht;
3340 uint_t entry;
3341 level_t level;
3342
3343 XPV_DISALLOW_MIGRATE();
3344 #if defined(__amd64)
3345 /*
3346 * clear the vpm ref.
3347 */
3348 if (vpm_enable) {
3349 pp->p_vpmref = 0;
3350 }
3351 #endif
3352 /*
3353 * The loop with next_size handles pages with multiple pagesize mappings
3354 */
3355 next_size:
3356 for (;;) {
3357
3358 /*
3359 * Get a mapping list entry
3360 */
3361 x86_hm_enter(cur_pp);
3362 for (prev = NULL; ; prev = hm) {
3363 hm = hment_walk(cur_pp, &ht, &entry, prev);
3364 if (hm == NULL) {
3365 x86_hm_exit(cur_pp);
3366
3367 /*
3368 * If not part of a larger page, we're done.
3369 */
3370 if (cur_pp->p_szc <= pg_szcd) {
3371 XPV_ALLOW_MIGRATE();
3372 return (0);
3373 }
3374
3375 /*
3376 * Else check the next larger page size.
3377 * hat_page_demote() may decrease p_szc
3378 * but that's ok; we'll just take an extra
3379 * trip to discover there are no larger
3380 * mappings and return.
3381 */
3382 ++pg_szcd;
3383 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3384 goto next_size;
3385 }
3386
3387 /*
3388 * If this mapping size matches, remove it.
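* (ht_level uses the same scale as p_szc, which is why the two are
* compared directly below: 0 for 4K pages, 1 for 2M/4M pages, etc.)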
3389 */
3390 level = ht->ht_level;
3391 if (level == pg_szcd)
3392 break;
3393 }
3394
3395 /*
3396 * Remove the mapping list entry for this page.
3397 * Note this does the x86_hm_exit() for us.
3398 */
3399 hm = hati_page_unmap(cur_pp, ht, entry);
3400 if (hm != NULL)
3401 hment_free(hm);
3402 }
3403 }
3404
3405 int
3406 hat_pageunload(struct page *pp, uint_t forceflag)
3407 {
3408 ASSERT(PAGE_EXCL(pp));
3409 return (hati_pageunload(pp, 0, forceflag));
3410 }
3411
3412 /*
3413 * Unload all large mappings to pp and reduce by 1 the p_szc field of every
3414 * large page level that included pp.
3415 *
3416 * pp must be locked EXCL. Even though no other constituent pages are locked
3417 * it's legal to unload large mappings to pp because all constituent pages of
3418 * large locked mappings have to be locked SHARED. Therefore if we have an
3419 * EXCL lock on one of the constituent pages none of the large mappings to pp
3420 * are locked.
3421 *
3422 * Change (always decrease) the p_szc field starting from the last constituent
3423 * page and ending with the root constituent page, so that the root's pszc
3424 * always shows the area where hat_page_demote() may be active.
3425 *
3426 * This mechanism is only used for file system pages where it's not always
3427 * possible to get EXCL locks on all constituent pages to demote the size code
3428 * (as is done for anonymous or kernel large pages).
3429 */
3430 void
3431 hat_page_demote(page_t *pp)
3432 {
3433 uint_t pszc;
3434 uint_t rszc;
3435 uint_t szc;
3436 page_t *rootpp;
3437 page_t *firstpp;
3438 page_t *lastpp;
3439 pgcnt_t pgcnt;
3440
3441 ASSERT(PAGE_EXCL(pp));
3442 ASSERT(!PP_ISFREE(pp));
3443 ASSERT(page_szc_lock_assert(pp));
3444
3445 if (pp->p_szc == 0)
3446 return;
3447
3448 rootpp = PP_GROUPLEADER(pp, 1);
3449 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3450
3451 /*
3452 * All large mappings to pp are gone
3453 * and no new ones can be set up since pp is locked exclusively.
3454 *
3455 * Lock the root to make sure there's only one hat_page_demote()
3456 * outstanding within the area of this root's pszc.
3457 *
3458 * A second potential hat_page_demote() is already eliminated by the
3459 * upper VM layer via page_szc_lock() but we don't rely on it and use
3460 * our own locking (so that upper layer locking can be changed without
3461 * assumptions that hat depends on upper layer VM to prevent multiple
3462 * hat_page_demote() calls being issued simultaneously to the same
3463 * large page).
3464 */
3465 again:
3466 pszc = pp->p_szc;
3467 if (pszc == 0)
3468 return;
3469 rootpp = PP_GROUPLEADER(pp, pszc);
3470 x86_hm_enter(rootpp);
3471 /*
3472 * If root's p_szc is different from pszc we raced with another
3473 * hat_page_demote(). Drop the lock and try to find the root again.
3474 * If root's p_szc is greater than pszc a previous hat_page_demote()
3475 * is not done yet. Take and release the mlist lock of root's root to
3476 * wait for the previous hat_page_demote() to complete.
3477 */
3478 if ((rszc = rootpp->p_szc) != pszc) {
3479 x86_hm_exit(rootpp);
3480 if (rszc > pszc) {
3481 /* p_szc of a locked non free page can't increase */
3482 ASSERT(pp != rootpp);
3483
3484 rootpp = PP_GROUPLEADER(rootpp, rszc);
3485 x86_hm_enter(rootpp);
3486 x86_hm_exit(rootpp);
3487 }
3488 goto again;
3489 }
3490 ASSERT(pp->p_szc == pszc);
3491
3492 /*
3493 * Decrement by 1 the p_szc of every constituent page of a region that
3494 * covered pp. For example if the original szc is 3 it gets changed to 2
3495 * everywhere except in region 2 that covered pp.
Region 2 that
3496 * covered pp gets demoted to 1 everywhere except in region 1 that
3497 * covered pp. The region 1 that covered pp is demoted to region
3498 * 0. It's done this way because from region 3 we removed level 3
3499 * mappings, from region 2 that covered pp we removed level 2 mappings
3500 * and from region 1 that covered pp we removed level 1 mappings. All
3501 * changes are done from high pfn's to low pfn's so that roots
3502 * are changed last, allowing one to know the largest region where
3503 * hat_page_demote() is still active by only looking at the root page.
3504 *
3505 * This algorithm is implemented in 2 while loops. The first loop
3506 * changes p_szc of pages to the right of pp's level 1 region and the
3507 * second loop changes p_szc of pages of the level 1 region that covers
3508 * pp and all pages to the left of the level 1 region that covers pp.
3509 * In the first loop p_szc keeps dropping with every iteration
3510 * and in the second loop it keeps increasing with every iteration.
3511 *
3512 * First loop description: Demote pages to the right of pp outside of
3513 * the level 1 region that covers pp. In every iteration of the while
3514 * loop below find the last page of the szc region and the first page
3515 * of the (szc - 1) region that is immediately to the right of the
3516 * (szc - 1) region that covers pp. From the last such page to the
3517 * first such page change every page's szc to szc - 1. Decrement szc
3518 * and continue looping until szc is 1. If pp belongs to the last
3519 * (szc - 1) region of the szc region, skip to the next iteration.
3520 */
3521 szc = pszc;
3522 while (szc > 1) {
3523 lastpp = PP_GROUPLEADER(pp, szc);
3524 pgcnt = page_get_pagecnt(szc);
3525 lastpp += pgcnt - 1;
3526 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3527 pgcnt = page_get_pagecnt(szc - 1);
3528 if (lastpp - firstpp < pgcnt) {
3529 szc--;
3530 continue;
3531 }
3532 firstpp += pgcnt;
3533 while (lastpp != firstpp) {
3534 ASSERT(lastpp->p_szc == pszc);
3535 lastpp->p_szc = szc - 1;
3536 lastpp--;
3537 }
3538 firstpp->p_szc = szc - 1;
3539 szc--;
3540 }
3541
3542 /*
3543 * Second loop description:
3544 * The first iteration changes p_szc to 0 of every
3545 * page of the level 1 region that covers pp.
3546 * Subsequent iterations find the last page of the szc region
3547 * immediately to the left of the szc region that covered pp
3548 * and the first page of the (szc + 1) region that covers pp.
3549 * From the last to the first page change p_szc of every page to szc.
3550 * Increment szc and continue looping until szc is pszc.
3551 * If pp belongs to the first szc region of the (szc + 1) region,
3552 * skip to the next iteration.
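*
* Worked example (illustrative amd64 sizes): starting from
* pszc == 2, every constituent page outside the 2M region covering
* pp ends up with p_szc == 1, while the 2M region covering pp itself
* ends up with p_szc == 0.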
3553 * 3554 */ 3555 szc = 0; 3556 while (szc < pszc) { 3557 firstpp = PP_GROUPLEADER(pp, (szc + 1)); 3558 if (szc == 0) { 3559 pgcnt = page_get_pagecnt(1); 3560 lastpp = firstpp + (pgcnt - 1); 3561 } else { 3562 lastpp = PP_GROUPLEADER(pp, szc); 3563 if (firstpp == lastpp) { 3564 szc++; 3565 continue; 3566 } 3567 lastpp--; 3568 pgcnt = page_get_pagecnt(szc); 3569 } 3570 while (lastpp != firstpp) { 3571 ASSERT(lastpp->p_szc == pszc); 3572 lastpp->p_szc = szc; 3573 lastpp--; 3574 } 3575 firstpp->p_szc = szc; 3576 if (firstpp == rootpp) 3577 break; 3578 szc++; 3579 } 3580 x86_hm_exit(rootpp); 3581 } 3582 3583 /* 3584 * get hw stats from hardware into page struct and reset hw stats 3585 * returns attributes of page 3586 * Flags for hat_pagesync, hat_getstat, hat_sync 3587 * 3588 * define HAT_SYNC_ZERORM 0x01 3589 * 3590 * Additional flags for hat_pagesync 3591 * 3592 * define HAT_SYNC_STOPON_REF 0x02 3593 * define HAT_SYNC_STOPON_MOD 0x04 3594 * define HAT_SYNC_STOPON_RM 0x06 3595 * define HAT_SYNC_STOPON_SHARED 0x08 3596 */ 3597 uint_t 3598 hat_pagesync(struct page *pp, uint_t flags) 3599 { 3600 hment_t *hm = NULL; 3601 htable_t *ht; 3602 uint_t entry; 3603 x86pte_t old, save_old; 3604 x86pte_t new; 3605 uchar_t nrmbits = P_REF|P_MOD|P_RO; 3606 extern ulong_t po_share; 3607 page_t *save_pp = pp; 3608 uint_t pszc = 0; 3609 3610 ASSERT(PAGE_LOCKED(pp) || panicstr); 3611 3612 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 3613 return (pp->p_nrm & nrmbits); 3614 3615 if ((flags & HAT_SYNC_ZERORM) == 0) { 3616 3617 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 3618 return (pp->p_nrm & nrmbits); 3619 3620 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 3621 return (pp->p_nrm & nrmbits); 3622 3623 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 3624 hat_page_getshare(pp) > po_share) { 3625 if (PP_ISRO(pp)) 3626 PP_SETREF(pp); 3627 return (pp->p_nrm & nrmbits); 3628 } 3629 } 3630 3631 XPV_DISALLOW_MIGRATE(); 3632 next_size: 3633 /* 3634 * walk thru the mapping list syncing (and clearing) ref/mod bits. 3635 */ 3636 x86_hm_enter(pp); 3637 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3638 if (ht->ht_level < pszc) 3639 continue; 3640 old = x86pte_get(ht, entry); 3641 try_again: 3642 3643 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 3644 3645 if (PTE_GET(old, PT_REF | PT_MOD) == 0) 3646 continue; 3647 3648 save_old = old; 3649 if ((flags & HAT_SYNC_ZERORM) != 0) { 3650 3651 /* 3652 * Need to clear ref or mod bits. Need to demap 3653 * to make sure any executing TLBs see cleared bits. 3654 */ 3655 new = old; 3656 PTE_CLR(new, PT_REF | PT_MOD); 3657 old = hati_update_pte(ht, entry, old, new); 3658 if (old != 0) 3659 goto try_again; 3660 3661 old = save_old; 3662 } 3663 3664 /* 3665 * Sync the PTE 3666 */ 3667 if (!(flags & HAT_SYNC_ZERORM) && 3668 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 3669 hati_sync_pte_to_page(pp, old, ht->ht_level); 3670 3671 /* 3672 * can stop short if we found a ref'd or mod'd page 3673 */ 3674 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 3675 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 3676 x86_hm_exit(pp); 3677 goto done; 3678 } 3679 } 3680 x86_hm_exit(pp); 3681 while (pszc < pp->p_szc) { 3682 page_t *tpp; 3683 pszc++; 3684 tpp = PP_GROUPLEADER(pp, pszc); 3685 if (pp != tpp) { 3686 pp = tpp; 3687 goto next_size; 3688 } 3689 } 3690 done: 3691 XPV_ALLOW_MIGRATE(); 3692 return (save_pp->p_nrm & nrmbits); 3693 } 3694 3695 /* 3696 * returns approx number of mappings to this pp. 
A return of 0 implies
3697 * there are no mappings to the page.
3698 */
3699 ulong_t
3700 hat_page_getshare(page_t *pp)
3701 {
3702 uint_t cnt;
3703 cnt = hment_mapcnt(pp);
3704 #if defined(__amd64)
3705 if (vpm_enable && pp->p_vpmref) {
3706 cnt += 1;
3707 }
3708 #endif
3709 return (cnt);
3710 }
3711
3712 /*
3713 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3714 * otherwise.
3715 */
3716 int
3717 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3718 {
3719 return (hat_page_getshare(pp) > sh_thresh);
3720 }
3721
3722 /*
3723 * hat_softlock isn't supported anymore
3724 */
3725 /*ARGSUSED*/
3726 faultcode_t
3727 hat_softlock(
3728 hat_t *hat,
3729 caddr_t addr,
3730 size_t *len,
3731 struct page **page_array,
3732 uint_t flags)
3733 {
3734 return (FC_NOSUPPORT);
3735 }
3736
3737
3738
3739 /*
3740 * Routine to expose supported HAT features to platform independent code.
3741 */
3742 /*ARGSUSED*/
3743 int
3744 hat_supported(enum hat_features feature, void *arg)
3745 {
3746 switch (feature) {
3747
3748 case HAT_SHARED_PT: /* this is really ISM */
3749 return (1);
3750
3751 case HAT_DYNAMIC_ISM_UNMAP:
3752 return (0);
3753
3754 case HAT_VMODSORT:
3755 return (1);
3756
3757 case HAT_SHARED_REGIONS:
3758 return (0);
3759
3760 default:
3761 panic("hat_supported() - unknown feature");
3762 }
3763 return (0);
3764 }
3765
3766 /*
3767 * Called when a thread is exiting and has been switched to the kernel AS
3768 */
3769 void
3770 hat_thread_exit(kthread_t *thd)
3771 {
3772 ASSERT(thd->t_procp->p_as == &kas);
3773 XPV_DISALLOW_MIGRATE();
3774 hat_switch(thd->t_procp->p_as->a_hat);
3775 XPV_ALLOW_MIGRATE();
3776 }
3777
3778 /*
3779 * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3780 */
3781 /*ARGSUSED*/
3782 void
3783 hat_setup(hat_t *hat, int flags)
3784 {
3785 XPV_DISALLOW_MIGRATE();
3786 kpreempt_disable();
3787
3788 hat_switch(hat);
3789
3790 kpreempt_enable();
3791 XPV_ALLOW_MIGRATE();
3792 }
3793
3794 /*
3795 * Prepare for a CPU private mapping for the given address.
3796 *
3797 * The address can only be used from a single CPU and can be remapped
3798 * using hat_mempte_remap(). Return the address of the PTE.
3799 *
3800 * We do the htable_create() if necessary and increment the valid count so
3801 * the htable can't disappear. We also hat_devload() the page table into
3802 * the kernel so that the PTE is quickly accessed.
3803 */
3804 hat_mempte_t
3805 hat_mempte_setup(caddr_t addr)
3806 {
3807 uintptr_t va = (uintptr_t)addr;
3808 htable_t *ht;
3809 uint_t entry;
3810 x86pte_t oldpte;
3811 hat_mempte_t p;
3812
3813 ASSERT(IS_PAGEALIGNED(va));
3814 ASSERT(!IN_VA_HOLE(va));
3815 ++curthread->t_hatdepth;
3816 XPV_DISALLOW_MIGRATE();
3817 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3818 if (ht == NULL) {
3819 ht = htable_create(kas.a_hat, va, 0, NULL);
3820 entry = htable_va2entry(va, ht);
3821 ASSERT(ht->ht_level == 0);
3822 oldpte = x86pte_get(ht, entry);
3823 }
3824 if (PTE_ISVALID(oldpte))
3825 panic("hat_mempte_setup(): address already mapped "
3826 "ht=%p, entry=%d, pte=" FMT_PTE, ht, entry, oldpte);
3827
3828 /*
3829 * increment ht_valid_cnt so that the pagetable can't disappear
3830 */
3831 HTABLE_INC(ht->ht_valid_cnt);
3832
3833 /*
3834 * return the PTE physical address to the caller.
3835 */
3836 htable_release(ht);
3837 XPV_ALLOW_MIGRATE();
3838 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3839 --curthread->t_hatdepth;
3840 return (p);
3841 }
3842
3843 /*
3844 * Release a CPU private mapping for the given address.
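* (Hypothetical pairing of these interfaces:
*	p = hat_mempte_setup(va);
*	... hat_mempte_remap(pfn, va, p, attr, flags); ...
*	hat_mempte_release(va, p);)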
3845 * We decrement the htable valid count so it might be destroyed. 3846 */ 3847 /*ARGSUSED1*/ 3848 void 3849 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 3850 { 3851 htable_t *ht; 3852 3853 XPV_DISALLOW_MIGRATE(); 3854 /* 3855 * invalidate any left over mapping and decrement the htable valid count 3856 */ 3857 #ifdef __xpv 3858 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 3859 UVMF_INVLPG | UVMF_LOCAL)) 3860 panic("HYPERVISOR_update_va_mapping() failed"); 3861 #else 3862 { 3863 x86pte_t *pteptr; 3864 3865 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3866 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3867 if (mmu.pae_hat) 3868 *pteptr = 0; 3869 else 3870 *(x86pte32_t *)pteptr = 0; 3871 mmu_tlbflush_entry(addr); 3872 x86pte_mapout(); 3873 } 3874 #endif 3875 3876 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 3877 if (ht == NULL) 3878 panic("hat_mempte_release(): invalid address"); 3879 ASSERT(ht->ht_level == 0); 3880 HTABLE_DEC(ht->ht_valid_cnt); 3881 htable_release(ht); 3882 XPV_ALLOW_MIGRATE(); 3883 } 3884 3885 /* 3886 * Apply a temporary CPU private mapping to a page. We flush the TLB only 3887 * on this CPU, so this ought to have been called with preemption disabled. 3888 */ 3889 void 3890 hat_mempte_remap( 3891 pfn_t pfn, 3892 caddr_t addr, 3893 hat_mempte_t pte_pa, 3894 uint_t attr, 3895 uint_t flags) 3896 { 3897 uintptr_t va = (uintptr_t)addr; 3898 x86pte_t pte; 3899 3900 /* 3901 * Remap the given PTE to the new page's PFN. Invalidate only 3902 * on this CPU. 3903 */ 3904 #ifdef DEBUG 3905 htable_t *ht; 3906 uint_t entry; 3907 3908 ASSERT(IS_PAGEALIGNED(va)); 3909 ASSERT(!IN_VA_HOLE(va)); 3910 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 3911 ASSERT(ht != NULL); 3912 ASSERT(ht->ht_level == 0); 3913 ASSERT(ht->ht_valid_cnt > 0); 3914 ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 3915 htable_release(ht); 3916 #endif 3917 XPV_DISALLOW_MIGRATE(); 3918 pte = hati_mkpte(pfn, attr, 0, flags); 3919 #ifdef __xpv 3920 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 3921 panic("HYPERVISOR_update_va_mapping() failed"); 3922 #else 3923 { 3924 x86pte_t *pteptr; 3925 3926 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3927 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3928 if (mmu.pae_hat) 3929 *(x86pte_t *)pteptr = pte; 3930 else 3931 *(x86pte32_t *)pteptr = (x86pte32_t)pte; 3932 mmu_tlbflush_entry(addr); 3933 x86pte_mapout(); 3934 } 3935 #endif 3936 XPV_ALLOW_MIGRATE(); 3937 } 3938 3939 3940 3941 /* 3942 * Hat locking functions 3943 * XXX - these two functions are currently being used by hatstats 3944 * they can be removed by using a per-as mutex for hatstats. 3945 */ 3946 void 3947 hat_enter(hat_t *hat) 3948 { 3949 mutex_enter(&hat->hat_mutex); 3950 } 3951 3952 void 3953 hat_exit(hat_t *hat) 3954 { 3955 mutex_exit(&hat->hat_mutex); 3956 } 3957 3958 /* 3959 * HAT part of cpu initialization. 3960 */ 3961 void 3962 hat_cpu_online(struct cpu *cpup) 3963 { 3964 if (cpup != CPU) { 3965 x86pte_cpu_init(cpup); 3966 hat_vlp_setup(cpup); 3967 } 3968 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 3969 } 3970 3971 /* 3972 * HAT part of cpu deletion. 3973 * (currently, we only call this after the cpu is safely passivated.) 3974 */ 3975 void 3976 hat_cpu_offline(struct cpu *cpup) 3977 { 3978 ASSERT(cpup != CPU); 3979 3980 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 3981 x86pte_cpu_fini(cpup); 3982 hat_vlp_teardown(cpup); 3983 } 3984 3985 /* 3986 * Function called after all CPUs are brought online. 
3987 * Used to remove low address boot mappings. 3988 */ 3989 void 3990 clear_boot_mappings(uintptr_t low, uintptr_t high) 3991 { 3992 uintptr_t vaddr = low; 3993 htable_t *ht = NULL; 3994 level_t level; 3995 uint_t entry; 3996 x86pte_t pte; 3997 3998 /* 3999 * On 1st CPU we can unload the prom mappings, basically we blow away 4000 * all virtual mappings under _userlimit. 4001 */ 4002 while (vaddr < high) { 4003 pte = htable_walk(kas.a_hat, &ht, &vaddr, high); 4004 if (ht == NULL) 4005 break; 4006 4007 level = ht->ht_level; 4008 entry = htable_va2entry(vaddr, ht); 4009 ASSERT(level <= mmu.max_page_level); 4010 ASSERT(PTE_ISPAGE(pte, level)); 4011 4012 /* 4013 * Unload the mapping from the page tables. 4014 */ 4015 (void) x86pte_inval(ht, entry, 0, NULL); 4016 ASSERT(ht->ht_valid_cnt > 0); 4017 HTABLE_DEC(ht->ht_valid_cnt); 4018 PGCNT_DEC(ht->ht_hat, ht->ht_level); 4019 4020 vaddr += LEVEL_SIZE(ht->ht_level); 4021 } 4022 if (ht) 4023 htable_release(ht); 4024 } 4025 4026 /* 4027 * Atomically update a new translation for a single page. If the 4028 * currently installed PTE doesn't match the value we expect to find, 4029 * it's not updated and we return the PTE we found. 4030 * 4031 * If activating nosync or NOWRITE and the page was modified we need to sync 4032 * with the page_t. Also sync with page_t if clearing ref/mod bits. 4033 */ 4034 static x86pte_t 4035 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 4036 { 4037 page_t *pp; 4038 uint_t rm = 0; 4039 x86pte_t replaced; 4040 4041 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 4042 PTE_GET(expected, PT_MOD | PT_REF) && 4043 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 4044 !PTE_GET(new, PT_MOD | PT_REF))) { 4045 4046 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 4047 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 4048 ASSERT(pp != NULL); 4049 if (PTE_GET(expected, PT_MOD)) 4050 rm |= P_MOD; 4051 if (PTE_GET(expected, PT_REF)) 4052 rm |= P_REF; 4053 PTE_CLR(new, PT_MOD | PT_REF); 4054 } 4055 4056 replaced = x86pte_update(ht, entry, expected, new); 4057 if (replaced != expected) 4058 return (replaced); 4059 4060 if (rm) { 4061 /* 4062 * sync to all constituent pages of a large page 4063 */ 4064 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 4065 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 4066 while (pgcnt-- > 0) { 4067 /* 4068 * hat_page_demote() can't decrease 4069 * pszc below this mapping size 4070 * since large mapping existed after we 4071 * took mlist lock. 
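* (Our caller holds the mapping list lock via x86_hm_enter(), which
* is what pins p_szc for the constituent walk below.)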
4072 */ 4073 ASSERT(pp->p_szc >= ht->ht_level); 4074 hat_page_setattr(pp, rm); 4075 ++pp; 4076 } 4077 } 4078 4079 return (0); 4080 } 4081 4082 /* ARGSUSED */ 4083 void 4084 hat_join_srd(struct hat *hat, vnode_t *evp) 4085 { 4086 } 4087 4088 /* ARGSUSED */ 4089 hat_region_cookie_t 4090 hat_join_region(struct hat *hat, 4091 caddr_t r_saddr, 4092 size_t r_size, 4093 void *r_obj, 4094 u_offset_t r_objoff, 4095 uchar_t r_perm, 4096 uchar_t r_pgszc, 4097 hat_rgn_cb_func_t r_cb_function, 4098 uint_t flags) 4099 { 4100 panic("No shared region support on x86"); 4101 return (HAT_INVALID_REGION_COOKIE); 4102 } 4103 4104 /* ARGSUSED */ 4105 void 4106 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags) 4107 { 4108 panic("No shared region support on x86"); 4109 } 4110 4111 /* ARGSUSED */ 4112 void 4113 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie) 4114 { 4115 panic("No shared region support on x86"); 4116 } 4117 4118 4119 /* 4120 * Kernel Physical Mapping (kpm) facility 4121 * 4122 * Most of the routines needed to support segkpm are almost no-ops on the 4123 * x86 platform. We map in the entire segment when it is created and leave 4124 * it mapped in, so there is no additional work required to set up and tear 4125 * down individual mappings. All of these routines were created to support 4126 * SPARC platforms that have to avoid aliasing in their virtually indexed 4127 * caches. 4128 * 4129 * Most of the routines have sanity checks in them (e.g. verifying that the 4130 * passed-in page is locked). We don't actually care about most of these 4131 * checks on x86, but we leave them in place to identify problems in the 4132 * upper levels. 4133 */ 4134 4135 /* 4136 * Map in a locked page and return the vaddr. 4137 */ 4138 /*ARGSUSED*/ 4139 caddr_t 4140 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 4141 { 4142 caddr_t vaddr; 4143 4144 #ifdef DEBUG 4145 if (kpm_enable == 0) { 4146 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 4147 return ((caddr_t)NULL); 4148 } 4149 4150 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4151 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 4152 return ((caddr_t)NULL); 4153 } 4154 #endif 4155 4156 vaddr = hat_kpm_page2va(pp, 1); 4157 4158 return (vaddr); 4159 } 4160 4161 /* 4162 * Mapout a locked page. 4163 */ 4164 /*ARGSUSED*/ 4165 void 4166 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 4167 { 4168 #ifdef DEBUG 4169 if (kpm_enable == 0) { 4170 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 4171 return; 4172 } 4173 4174 if (IS_KPM_ADDR(vaddr) == 0) { 4175 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 4176 return; 4177 } 4178 4179 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4180 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 4181 return; 4182 } 4183 #endif 4184 } 4185 4186 /* 4187 * Return the kpm virtual address for a specific pfn 4188 */ 4189 caddr_t 4190 hat_kpm_pfn2va(pfn_t pfn) 4191 { 4192 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn); 4193 4194 ASSERT(!pfn_is_foreign(pfn)); 4195 return ((caddr_t)vaddr); 4196 } 4197 4198 /* 4199 * Return the kpm virtual address for the page at pp. 4200 */ 4201 /*ARGSUSED*/ 4202 caddr_t 4203 hat_kpm_page2va(struct page *pp, int checkswap) 4204 { 4205 return (hat_kpm_pfn2va(pp->p_pagenum)); 4206 } 4207 4208 /* 4209 * Return the page frame number for the kpm virtual address vaddr. 
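* (The inverse of hat_kpm_pfn2va(): pfn = btop(vaddr - kpm_vbase),
* since segkpm maps physical memory linearly starting at kpm_vbase.)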
4210 */ 4211 pfn_t 4212 hat_kpm_va2pfn(caddr_t vaddr) 4213 { 4214 pfn_t pfn; 4215 4216 ASSERT(IS_KPM_ADDR(vaddr)); 4217 4218 pfn = (pfn_t)btop(vaddr - kpm_vbase); 4219 4220 return (pfn); 4221 } 4222 4223 4224 /* 4225 * Return the page for the kpm virtual address vaddr. 4226 */ 4227 page_t * 4228 hat_kpm_vaddr2page(caddr_t vaddr) 4229 { 4230 pfn_t pfn; 4231 4232 ASSERT(IS_KPM_ADDR(vaddr)); 4233 4234 pfn = hat_kpm_va2pfn(vaddr); 4235 4236 return (page_numtopp_nolock(pfn)); 4237 } 4238 4239 /* 4240 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a 4241 * KPM page. This should never happen on x86 4242 */ 4243 int 4244 hat_kpm_fault(hat_t *hat, caddr_t vaddr) 4245 { 4246 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p", hat, vaddr); 4247 4248 return (0); 4249 } 4250 4251 /*ARGSUSED*/ 4252 void 4253 hat_kpm_mseghash_clear(int nentries) 4254 {} 4255 4256 /*ARGSUSED*/ 4257 void 4258 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 4259 {} 4260 4261 #ifdef __xpv 4262 /* 4263 * There are specific Hypervisor calls to establish and remove mappings 4264 * to grant table references and the privcmd driver. We have to ensure 4265 * that a page table actually exists. 4266 */ 4267 void 4268 hat_prepare_mapping(hat_t *hat, caddr_t addr) 4269 { 4270 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4271 XPV_DISALLOW_MIGRATE(); 4272 (void) htable_create(hat, (uintptr_t)addr, 0, NULL); 4273 XPV_ALLOW_MIGRATE(); 4274 } 4275 4276 void 4277 hat_release_mapping(hat_t *hat, caddr_t addr) 4278 { 4279 htable_t *ht; 4280 4281 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4282 XPV_DISALLOW_MIGRATE(); 4283 ht = htable_lookup(hat, (uintptr_t)addr, 0); 4284 ASSERT(ht != NULL); 4285 ASSERT(ht->ht_busy >= 2); 4286 htable_release(ht); 4287 htable_release(ht); 4288 XPV_ALLOW_MIGRATE(); 4289 } 4290 #endif 4291