/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * VM - Hardware Address Translation management for i386 and amd64
 *
 * Implementation of the interfaces described in <common/vm/hat.h>
 *
 * Nearly all the details of how the hardware is managed should not be
 * visible outside this layer except for misc. machine specific functions
 * that work in conjunction with this code.
 *
 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
 */

#include <sys/machparam.h>
#include <sys/machsystm.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/disp.h>
#include <sys/shm.h>
#include <sys/sysmacros.h>
#include <sys/machparam.h>
#include <sys/vmem.h>
#include <sys/vmsystm.h>
#include <sys/promif.h>
#include <sys/var.h>
#include <sys/x86_archext.h>
#include <sys/atomic.h>
#include <sys/bitmap.h>
#include <sys/controlregs.h>
#include <sys/bootconf.h>
#include <sys/bootsvcs.h>
#include <sys/bootinfo.h>
#include <sys/archsystm.h>

#include <vm/seg_kmem.h>
#include <vm/hat_i86.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>
#include <vm/seg_kp.h>
#include <vm/seg_kpm.h>
#include <vm/vm_dep.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif
#include <vm/kboot_mmu.h>
#include <vm/seg_spt.h>

#include <sys/cmn_err.h>

/*
 * Basic parameters for hat operation.
 */
struct hat_mmu_info mmu;

/*
 * The page that is the kernel's top level pagetable.
 *
 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
 * on this 4K page for its top level page table. The remaining groups of
 * 4 entries are used for per processor copies of user VLP pagetables for
 * running threads. See hat_switch() and reload_pae32() for details.
 *
 *	vlp_page[0..3]  - level==2 PTEs for kernel HAT
 *	vlp_page[4..7]  - level==2 PTEs for user thread on cpu 0
 *	vlp_page[8..11] - level==2 PTE for user thread on cpu 1
 *	etc...
 */
static x86pte_t *vlp_page;

/*
 * forward declaration of internal utility routines
 */
static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
	x86pte_t new);

/*
 * The kernel address space exists in all HATs.
To implement this the 115 * kernel reserves a fixed number of entries in the topmost level(s) of page 116 * tables. The values are setup during startup and then copied to every user 117 * hat created by hat_alloc(). This means that kernelbase must be: 118 * 119 * 4Meg aligned for 32 bit kernels 120 * 512Gig aligned for x86_64 64 bit kernel 121 * 122 * The hat_kernel_range_ts describe what needs to be copied from kernel hat 123 * to each user hat. 124 */ 125 typedef struct hat_kernel_range { 126 level_t hkr_level; 127 uintptr_t hkr_start_va; 128 uintptr_t hkr_end_va; /* zero means to end of memory */ 129 } hat_kernel_range_t; 130 #define NUM_KERNEL_RANGE 2 131 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE]; 132 static int num_kernel_ranges; 133 134 uint_t use_boot_reserve = 1; /* cleared after early boot process */ 135 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */ 136 137 /* 138 * enable_1gpg: controls 1g page support for user applications. 139 * By default, 1g pages are exported to user applications. enable_1gpg can 140 * be set to 0 to not export. 141 */ 142 int enable_1gpg = 1; 143 144 /* 145 * AMD shanghai processors provide better management of 1gb ptes in its tlb. 146 * By default, 1g page support will be disabled for pre-shanghai AMD 147 * processors that don't have optimal tlb support for the 1g page size. 148 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal 149 * processors. 150 */ 151 int chk_optimal_1gtlb = 1; 152 153 154 #ifdef DEBUG 155 uint_t map1gcnt; 156 #endif 157 158 159 /* 160 * A cpuset for all cpus. This is used for kernel address cross calls, since 161 * the kernel addresses apply to all cpus. 162 */ 163 cpuset_t khat_cpuset; 164 165 /* 166 * management stuff for hat structures 167 */ 168 kmutex_t hat_list_lock; 169 kcondvar_t hat_list_cv; 170 kmem_cache_t *hat_cache; 171 kmem_cache_t *hat_hash_cache; 172 kmem_cache_t *vlp_hash_cache; 173 174 /* 175 * Simple statistics 176 */ 177 struct hatstats hatstat; 178 179 /* 180 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs 181 * correctly. For such hypervisors we must set PT_USER for kernel 182 * entries ourselves (normally the emulation would set PT_USER for 183 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is 184 * thus set appropriately. Note that dboot/kbm is OK, as only the full 185 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never 186 * incorrect. 187 */ 188 int pt_kern; 189 190 /* 191 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's. 
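 *
 * These macros operate on the p_nrm byte in the page_t using atomic_orb()
 * and atomic_andb(), so individual REF/MOD/RO bits can be set or cleared
 * without holding a lock; for example PP_SETMOD(pp) below is an atomic OR
 * of P_MOD into pp->p_nrm and PP_CLRALL(pp) atomically clears all three bits.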
192 */ 193 extern void atomic_orb(uchar_t *addr, uchar_t val); 194 extern void atomic_andb(uchar_t *addr, uchar_t val); 195 196 #ifndef __xpv 197 extern pfn_t memseg_get_start(struct memseg *); 198 #endif 199 200 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask) 201 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD) 202 #define PP_ISREF(pp) PP_GETRM(pp, P_REF) 203 #define PP_ISRO(pp) PP_GETRM(pp, P_RO) 204 205 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm) 206 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD) 207 #define PP_SETREF(pp) PP_SETRM(pp, P_REF) 208 #define PP_SETRO(pp) PP_SETRM(pp, P_RO) 209 210 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm)) 211 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD) 212 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF) 213 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO) 214 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO) 215 216 /* 217 * kmem cache constructor for struct hat 218 */ 219 /*ARGSUSED*/ 220 static int 221 hati_constructor(void *buf, void *handle, int kmflags) 222 { 223 hat_t *hat = buf; 224 225 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 226 bzero(hat->hat_pages_mapped, 227 sizeof (pgcnt_t) * (mmu.max_page_level + 1)); 228 hat->hat_ism_pgcnt = 0; 229 hat->hat_stats = 0; 230 hat->hat_flags = 0; 231 CPUSET_ZERO(hat->hat_cpus); 232 hat->hat_htable = NULL; 233 hat->hat_ht_hash = NULL; 234 return (0); 235 } 236 237 /* 238 * Allocate a hat structure for as. We also create the top level 239 * htable and initialize it to contain the kernel hat entries. 240 */ 241 hat_t * 242 hat_alloc(struct as *as) 243 { 244 hat_t *hat; 245 htable_t *ht; /* top level htable */ 246 uint_t use_vlp; 247 uint_t r; 248 hat_kernel_range_t *rp; 249 uintptr_t va; 250 uintptr_t eva; 251 uint_t start; 252 uint_t cnt; 253 htable_t *src; 254 255 /* 256 * Once we start creating user process HATs we can enable 257 * the htable_steal() code. 258 */ 259 if (can_steal_post_boot == 0) 260 can_steal_post_boot = 1; 261 262 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 263 hat = kmem_cache_alloc(hat_cache, KM_SLEEP); 264 hat->hat_as = as; 265 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 266 ASSERT(hat->hat_flags == 0); 267 268 #if defined(__xpv) 269 /* 270 * No VLP stuff on the hypervisor due to the 64-bit split top level 271 * page tables. On 32-bit it's not needed as the hypervisor takes 272 * care of copying the top level PTEs to a below 4Gig page. 273 */ 274 use_vlp = 0; 275 #else /* __xpv */ 276 /* 32 bit processes uses a VLP style hat when running with PAE */ 277 #if defined(__amd64) 278 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32); 279 #elif defined(__i386) 280 use_vlp = mmu.pae_hat; 281 #endif 282 #endif /* __xpv */ 283 if (use_vlp) { 284 hat->hat_flags = HAT_VLP; 285 bzero(hat->hat_vlp_ptes, VLP_SIZE); 286 } 287 288 /* 289 * Allocate the htable hash 290 */ 291 if ((hat->hat_flags & HAT_VLP)) { 292 hat->hat_num_hash = mmu.vlp_hash_cnt; 293 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP); 294 } else { 295 hat->hat_num_hash = mmu.hash_cnt; 296 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP); 297 } 298 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *)); 299 300 /* 301 * Initialize Kernel HAT entries at the top of the top level page 302 * tables for the new hat. 
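 *
 * The loop below walks the kernel_ranges[] built in hat_init_finish() and,
 * for each range, either copies the PTEs from the matching kernel htable
 * with x86pte_copy(), or, in the 32 bit non-xpv VLP case, bcopy()s them
 * straight out of vlp_page into the new hat's hat_vlp_ptes.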
303 */ 304 hat->hat_htable = NULL; 305 hat->hat_ht_cached = NULL; 306 XPV_DISALLOW_MIGRATE(); 307 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL); 308 hat->hat_htable = ht; 309 310 #if defined(__amd64) 311 if (hat->hat_flags & HAT_VLP) 312 goto init_done; 313 #endif 314 315 for (r = 0; r < num_kernel_ranges; ++r) { 316 rp = &kernel_ranges[r]; 317 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 318 va += cnt * LEVEL_SIZE(rp->hkr_level)) { 319 320 if (rp->hkr_level == TOP_LEVEL(hat)) 321 ht = hat->hat_htable; 322 else 323 ht = htable_create(hat, va, rp->hkr_level, 324 NULL); 325 326 start = htable_va2entry(va, ht); 327 cnt = HTABLE_NUM_PTES(ht) - start; 328 eva = va + 329 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level)); 330 if (rp->hkr_end_va != 0 && 331 (eva > rp->hkr_end_va || eva == 0)) 332 cnt = htable_va2entry(rp->hkr_end_va, ht) - 333 start; 334 335 #if defined(__i386) && !defined(__xpv) 336 if (ht->ht_flags & HTABLE_VLP) { 337 bcopy(&vlp_page[start], 338 &hat->hat_vlp_ptes[start], 339 cnt * sizeof (x86pte_t)); 340 continue; 341 } 342 #endif 343 src = htable_lookup(kas.a_hat, va, rp->hkr_level); 344 ASSERT(src != NULL); 345 x86pte_copy(src, ht, start, cnt); 346 htable_release(src); 347 } 348 } 349 350 init_done: 351 352 #if defined(__xpv) 353 /* 354 * Pin top level page tables after initializing them 355 */ 356 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level); 357 #if defined(__amd64) 358 xen_pin(hat->hat_user_ptable, mmu.max_level); 359 #endif 360 #endif 361 XPV_ALLOW_MIGRATE(); 362 363 /* 364 * Put it at the start of the global list of all hats (used by stealing) 365 * 366 * kas.a_hat is not in the list but is instead used to find the 367 * first and last items in the list. 368 * 369 * - kas.a_hat->hat_next points to the start of the user hats. 370 * The list ends where hat->hat_next == NULL 371 * 372 * - kas.a_hat->hat_prev points to the last of the user hats. 373 * The list begins where hat->hat_prev == NULL 374 */ 375 mutex_enter(&hat_list_lock); 376 hat->hat_prev = NULL; 377 hat->hat_next = kas.a_hat->hat_next; 378 if (hat->hat_next) 379 hat->hat_next->hat_prev = hat; 380 else 381 kas.a_hat->hat_prev = hat; 382 kas.a_hat->hat_next = hat; 383 mutex_exit(&hat_list_lock); 384 385 return (hat); 386 } 387 388 /* 389 * process has finished executing but as has not been cleaned up yet. 390 */ 391 /*ARGSUSED*/ 392 void 393 hat_free_start(hat_t *hat) 394 { 395 ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock)); 396 397 /* 398 * If the hat is currently a stealing victim, wait for the stealing 399 * to finish. Once we mark it as HAT_FREEING, htable_steal() 400 * won't look at its pagetables anymore. 401 */ 402 mutex_enter(&hat_list_lock); 403 while (hat->hat_flags & HAT_VICTIM) 404 cv_wait(&hat_list_cv, &hat_list_lock); 405 hat->hat_flags |= HAT_FREEING; 406 mutex_exit(&hat_list_lock); 407 } 408 409 /* 410 * An address space is being destroyed, so we destroy the associated hat. 
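 *
 * By this point hat_free_start() must already have marked the hat
 * HAT_FREEING and no CPU may still be using it as its current hat; both
 * conditions are ASSERTed below before the htables and hash bucket array
 * are torn down.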
411 */ 412 void 413 hat_free_end(hat_t *hat) 414 { 415 kmem_cache_t *cache; 416 417 ASSERT(hat->hat_flags & HAT_FREEING); 418 419 /* 420 * must not be running on the given hat 421 */ 422 ASSERT(CPU->cpu_current_hat != hat); 423 424 /* 425 * Remove it from the list of HATs 426 */ 427 mutex_enter(&hat_list_lock); 428 if (hat->hat_prev) 429 hat->hat_prev->hat_next = hat->hat_next; 430 else 431 kas.a_hat->hat_next = hat->hat_next; 432 if (hat->hat_next) 433 hat->hat_next->hat_prev = hat->hat_prev; 434 else 435 kas.a_hat->hat_prev = hat->hat_prev; 436 mutex_exit(&hat_list_lock); 437 hat->hat_next = hat->hat_prev = NULL; 438 439 #if defined(__xpv) 440 /* 441 * On the hypervisor, unpin top level page table(s) 442 */ 443 xen_unpin(hat->hat_htable->ht_pfn); 444 #if defined(__amd64) 445 xen_unpin(hat->hat_user_ptable); 446 #endif 447 #endif 448 449 /* 450 * Make a pass through the htables freeing them all up. 451 */ 452 htable_purge_hat(hat); 453 454 /* 455 * Decide which kmem cache the hash table came from, then free it. 456 */ 457 if (hat->hat_flags & HAT_VLP) 458 cache = vlp_hash_cache; 459 else 460 cache = hat_hash_cache; 461 kmem_cache_free(cache, hat->hat_ht_hash); 462 hat->hat_ht_hash = NULL; 463 464 hat->hat_flags = 0; 465 kmem_cache_free(hat_cache, hat); 466 } 467 468 /* 469 * round kernelbase down to a supported value to use for _userlimit 470 * 471 * userlimit must be aligned down to an entry in the top level htable. 472 * The one exception is for 32 bit HAT's running PAE. 473 */ 474 uintptr_t 475 hat_kernelbase(uintptr_t va) 476 { 477 #if defined(__i386) 478 va &= LEVEL_MASK(1); 479 #endif 480 if (IN_VA_HOLE(va)) 481 panic("_userlimit %p will fall in VA hole\n", (void *)va); 482 return (va); 483 } 484 485 /* 486 * 487 */ 488 static void 489 set_max_page_level() 490 { 491 level_t lvl; 492 493 if (!kbm_largepage_support) { 494 lvl = 0; 495 } else { 496 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) { 497 lvl = 2; 498 if (chk_optimal_1gtlb && 499 cpuid_opteron_erratum(CPU, 6671130)) { 500 lvl = 1; 501 } 502 if (plat_mnode_xcheck(LEVEL_SIZE(2) >> 503 LEVEL_SHIFT(0))) { 504 lvl = 1; 505 } 506 } else { 507 lvl = 1; 508 } 509 } 510 mmu.max_page_level = lvl; 511 512 if ((lvl == 2) && (enable_1gpg == 0)) 513 mmu.umax_page_level = 1; 514 else 515 mmu.umax_page_level = lvl; 516 } 517 518 /* 519 * Initialize hat data structures based on processor MMU information. 520 */ 521 void 522 mmu_init(void) 523 { 524 uint_t max_htables; 525 uint_t pa_bits; 526 uint_t va_bits; 527 int i; 528 529 /* 530 * If CPU enabled the page table global bit, use it for the kernel 531 * This is bit 7 in CR4 (PGE - Page Global Enable). 532 */ 533 if (is_x86_feature(x86_featureset, X86FSET_PGE) && 534 (getcr4() & CR4_PGE) != 0) 535 mmu.pt_global = PT_GLOBAL; 536 537 /* 538 * Detect NX and PAE usage. 539 */ 540 mmu.pae_hat = kbm_pae_support; 541 if (kbm_nx_support) 542 mmu.pt_nx = PT_NX; 543 else 544 mmu.pt_nx = 0; 545 546 /* 547 * Use CPU info to set various MMU parameters 548 */ 549 cpuid_get_addrsize(CPU, &pa_bits, &va_bits); 550 551 if (va_bits < sizeof (void *) * NBBY) { 552 mmu.hole_start = (1ul << (va_bits - 1)); 553 mmu.hole_end = 0ul - mmu.hole_start - 1; 554 } else { 555 mmu.hole_end = 0; 556 mmu.hole_start = mmu.hole_end - 1; 557 } 558 #if defined(OPTERON_ERRATUM_121) 559 /* 560 * If erratum 121 has already been detected at this time, hole_start 561 * contains the value to be subtracted from mmu.hole_start. 
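 *
 * In other words, the hole_start finally exported to the rest of the VM
 * system is mmu.hole_start minus any previously recorded erratum adjustment,
 * which in effect widens the VA hole, presumably to keep user mappings away
 * from the affected boundary.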
562 */ 563 ASSERT(hole_start == 0 || opteron_erratum_121 != 0); 564 hole_start = mmu.hole_start - hole_start; 565 #else 566 hole_start = mmu.hole_start; 567 #endif 568 hole_end = mmu.hole_end; 569 570 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1); 571 if (mmu.pae_hat == 0 && pa_bits > 32) 572 mmu.highest_pfn = PFN_4G - 1; 573 574 if (mmu.pae_hat) { 575 mmu.pte_size = 8; /* 8 byte PTEs */ 576 mmu.pte_size_shift = 3; 577 } else { 578 mmu.pte_size = 4; /* 4 byte PTEs */ 579 mmu.pte_size_shift = 2; 580 } 581 582 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE)) 583 panic("Processor does not support PAE"); 584 585 if (!is_x86_feature(x86_featureset, X86FSET_CX8)) 586 panic("Processor does not support cmpxchg8b instruction"); 587 588 #if defined(__amd64) 589 590 mmu.num_level = 4; 591 mmu.max_level = 3; 592 mmu.ptes_per_table = 512; 593 mmu.top_level_count = 512; 594 595 mmu.level_shift[0] = 12; 596 mmu.level_shift[1] = 21; 597 mmu.level_shift[2] = 30; 598 mmu.level_shift[3] = 39; 599 600 #elif defined(__i386) 601 602 if (mmu.pae_hat) { 603 mmu.num_level = 3; 604 mmu.max_level = 2; 605 mmu.ptes_per_table = 512; 606 mmu.top_level_count = 4; 607 608 mmu.level_shift[0] = 12; 609 mmu.level_shift[1] = 21; 610 mmu.level_shift[2] = 30; 611 612 } else { 613 mmu.num_level = 2; 614 mmu.max_level = 1; 615 mmu.ptes_per_table = 1024; 616 mmu.top_level_count = 1024; 617 618 mmu.level_shift[0] = 12; 619 mmu.level_shift[1] = 22; 620 } 621 622 #endif /* __i386 */ 623 624 for (i = 0; i < mmu.num_level; ++i) { 625 mmu.level_size[i] = 1UL << mmu.level_shift[i]; 626 mmu.level_offset[i] = mmu.level_size[i] - 1; 627 mmu.level_mask[i] = ~mmu.level_offset[i]; 628 } 629 630 set_max_page_level(); 631 632 mmu_page_sizes = mmu.max_page_level + 1; 633 mmu_exported_page_sizes = mmu.umax_page_level + 1; 634 635 /* restrict legacy applications from using pagesizes 1g and above */ 636 mmu_legacy_page_sizes = 637 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes; 638 639 640 for (i = 0; i <= mmu.max_page_level; ++i) { 641 mmu.pte_bits[i] = PT_VALID | pt_kern; 642 if (i > 0) 643 mmu.pte_bits[i] |= PT_PAGESIZE; 644 } 645 646 /* 647 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level. 648 */ 649 for (i = 1; i < mmu.num_level; ++i) 650 mmu.ptp_bits[i] = PT_PTPBITS; 651 652 #if defined(__i386) 653 mmu.ptp_bits[2] = PT_VALID; 654 #endif 655 656 /* 657 * Compute how many hash table entries to have per process for htables. 658 * We start with 1 page's worth of entries. 659 * 660 * If physical memory is small, reduce the amount need to cover it. 661 */ 662 max_htables = physmax / mmu.ptes_per_table; 663 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *); 664 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables) 665 mmu.hash_cnt >>= 1; 666 mmu.vlp_hash_cnt = mmu.hash_cnt; 667 668 #if defined(__amd64) 669 /* 670 * If running in 64 bits and physical memory is large, 671 * increase the size of the cache to cover all of memory for 672 * a 64 bit process. 
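 *
 * HASH_MAX_LENGTH below is roughly the target average chain length: the
 * loop doubles hash_cnt until max_htables / hash_cnt would be no more than
 * 4 htables per bucket.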
 */
#define	HASH_MAX_LENGTH 4
	while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
		mmu.hash_cnt <<= 1;
#endif
}


/*
 * initialize hat data structures
 */
void
hat_init()
{
#if defined(__i386)
	/*
	 * _userlimit must be aligned correctly
	 */
	if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
		prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
		    (void *)_userlimit, (void *)LEVEL_SIZE(1));
		halt("hat_init(): Unable to continue");
	}
#endif

	cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * initialize kmem caches
	 */
	htable_init();
	hment_init();

	hat_cache = kmem_cache_create("hat_t",
	    sizeof (hat_t), 0, hati_constructor, NULL, NULL,
	    NULL, 0, 0);

	hat_hash_cache = kmem_cache_create("HatHash",
	    mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
	    NULL, 0, 0);

	/*
	 * VLP hats can use a smaller hash table size on large memory machines
	 */
	if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
		vlp_hash_cache = hat_hash_cache;
	} else {
		vlp_hash_cache = kmem_cache_create("HatVlpHash",
		    mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL,
		    NULL, NULL, 0, 0);
	}

	/*
	 * Set up the kernel's hat
	 */
	AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
	kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
	mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
	kas.a_hat->hat_as = &kas;
	kas.a_hat->hat_flags = 0;
	AS_LOCK_EXIT(&kas, &kas.a_lock);

	CPUSET_ZERO(khat_cpuset);
	CPUSET_ADD(khat_cpuset, CPU->cpu_id);

	/*
	 * The kernel hat's next pointer serves as the head of the hat list.
	 * The kernel hat's prev pointer tracks the last hat on the list for
	 * htable_steal() to use.
	 */
	kas.a_hat->hat_next = NULL;
	kas.a_hat->hat_prev = NULL;

	/*
	 * Allocate an htable hash bucket for the kernel
	 * XX64 - tune for 64 bit procs
	 */
	kas.a_hat->hat_num_hash = mmu.hash_cnt;
	kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
	bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));

	/*
	 * zero out the top level and cached htable pointers
	 */
	kas.a_hat->hat_ht_cached = NULL;
	kas.a_hat->hat_htable = NULL;

	/*
	 * Pre-allocate hrm_hashtab before enabling the collection of
	 * refmod statistics. Allocating on the fly would mean running
	 * the risk of suffering recursive mutex enters or deadlocks.
	 */
	hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
	    KM_SLEEP);
}

/*
 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 *
 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 */
/*ARGSUSED*/
static void
hat_vlp_setup(struct cpu *cpu)
{
#if defined(__amd64) && !defined(__xpv)
	struct hat_cpu_info *hci = cpu->cpu_hat_info;
	pfn_t pfn;

	/*
	 * allocate the level==2 page table for the bottom most
	 * 512Gig of address space (this is where 32 bit apps live)
	 */
	ASSERT(hci != NULL);
	hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);

	/*
	 * Allocate a top level pagetable and copy the kernel's
	 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
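 *
 * The first entry of hci_vlp_l3ptes is pointed (via MAKEPTP below) at the
 * hci_vlp_l2ptes page, so the bottom 512 Gig of a VLP process resolves
 * through this per-CPU table while the remaining entries stay identical to
 * the kernel's copies taken from vlp_page.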
795 */ 796 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP); 797 hci->hci_vlp_pfn = 798 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes); 799 ASSERT(hci->hci_vlp_pfn != PFN_INVALID); 800 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE); 801 802 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes); 803 ASSERT(pfn != PFN_INVALID); 804 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2); 805 #endif /* __amd64 && !__xpv */ 806 } 807 808 /*ARGSUSED*/ 809 static void 810 hat_vlp_teardown(cpu_t *cpu) 811 { 812 #if defined(__amd64) && !defined(__xpv) 813 struct hat_cpu_info *hci; 814 815 if ((hci = cpu->cpu_hat_info) == NULL) 816 return; 817 if (hci->hci_vlp_l2ptes) 818 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE); 819 if (hci->hci_vlp_l3ptes) 820 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE); 821 #endif 822 } 823 824 #define NEXT_HKR(r, l, s, e) { \ 825 kernel_ranges[r].hkr_level = l; \ 826 kernel_ranges[r].hkr_start_va = s; \ 827 kernel_ranges[r].hkr_end_va = e; \ 828 ++r; \ 829 } 830 831 /* 832 * Finish filling in the kernel hat. 833 * Pre fill in all top level kernel page table entries for the kernel's 834 * part of the address range. From this point on we can't use any new 835 * kernel large pages if they need PTE's at max_level 836 * 837 * create the kmap mappings. 838 */ 839 void 840 hat_init_finish(void) 841 { 842 size_t size; 843 uint_t r = 0; 844 uintptr_t va; 845 hat_kernel_range_t *rp; 846 847 848 /* 849 * We are now effectively running on the kernel hat. 850 * Clearing use_boot_reserve shuts off using the pre-allocated boot 851 * reserve for all HAT allocations. From here on, the reserves are 852 * only used when avoiding recursion in kmem_alloc(). 853 */ 854 use_boot_reserve = 0; 855 htable_adjust_reserve(); 856 857 /* 858 * User HATs are initialized with copies of all kernel mappings in 859 * higher level page tables. Ensure that those entries exist. 860 */ 861 #if defined(__amd64) 862 863 NEXT_HKR(r, 3, kernelbase, 0); 864 #if defined(__xpv) 865 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END); 866 #endif 867 868 #elif defined(__i386) 869 870 #if !defined(__xpv) 871 if (mmu.pae_hat) { 872 va = kernelbase; 873 if ((va & LEVEL_MASK(2)) != va) { 874 va = P2ROUNDUP(va, LEVEL_SIZE(2)); 875 NEXT_HKR(r, 1, kernelbase, va); 876 } 877 if (va != 0) 878 NEXT_HKR(r, 2, va, 0); 879 } else 880 #endif /* __xpv */ 881 NEXT_HKR(r, 1, kernelbase, 0); 882 883 #endif /* __i386 */ 884 885 num_kernel_ranges = r; 886 887 /* 888 * Create all the kernel pagetables that will have entries 889 * shared to user HATs. 890 */ 891 for (r = 0; r < num_kernel_ranges; ++r) { 892 rp = &kernel_ranges[r]; 893 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 894 va += LEVEL_SIZE(rp->hkr_level)) { 895 htable_t *ht; 896 897 if (IN_HYPERVISOR_VA(va)) 898 continue; 899 900 /* can/must skip if a page mapping already exists */ 901 if (rp->hkr_level <= mmu.max_page_level && 902 (ht = htable_getpage(kas.a_hat, va, NULL)) != 903 NULL) { 904 htable_release(ht); 905 continue; 906 } 907 908 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1, 909 NULL); 910 } 911 } 912 913 /* 914 * 32 bit PAE metal kernels use only 4 of the 512 entries in the 915 * page holding the top level pagetable. We use the remainder for 916 * the "per CPU" page tables for VLP processes. 917 * Map the top level kernel pagetable into the kernel to make 918 * it easy to use bcopy access these tables. 
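 *
 * The hat_devload() below maps the pfn of the kernel's top level htable at
 * vlp_page; on bare metal the mapping is writable, while under the
 * hypervisor it is read-only, since pagetables are never written directly
 * there.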
919 */ 920 if (mmu.pae_hat) { 921 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP); 922 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE, 923 kas.a_hat->hat_htable->ht_pfn, 924 #if !defined(__xpv) 925 PROT_WRITE | 926 #endif 927 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK, 928 HAT_LOAD | HAT_LOAD_NOCONSIST); 929 } 930 hat_vlp_setup(CPU); 931 932 /* 933 * Create kmap (cached mappings of kernel PTEs) 934 * for 32 bit we map from segmap_start .. ekernelheap 935 * for 64 bit we map from segmap_start .. segmap_start + segmapsize; 936 */ 937 #if defined(__i386) 938 size = (uintptr_t)ekernelheap - segmap_start; 939 #elif defined(__amd64) 940 size = segmapsize; 941 #endif 942 hat_kmap_init((uintptr_t)segmap_start, size); 943 } 944 945 /* 946 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references 947 * are 32 bit, so for safety we must use cas64() to install these. 948 */ 949 #ifdef __i386 950 static void 951 reload_pae32(hat_t *hat, cpu_t *cpu) 952 { 953 x86pte_t *src; 954 x86pte_t *dest; 955 x86pte_t pte; 956 int i; 957 958 /* 959 * Load the 4 entries of the level 2 page table into this 960 * cpu's range of the vlp_page and point cr3 at them. 961 */ 962 ASSERT(mmu.pae_hat); 963 src = hat->hat_vlp_ptes; 964 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES; 965 for (i = 0; i < VLP_NUM_PTES; ++i) { 966 for (;;) { 967 pte = dest[i]; 968 if (pte == src[i]) 969 break; 970 if (cas64(dest + i, pte, src[i]) != src[i]) 971 break; 972 } 973 } 974 } 975 #endif 976 977 /* 978 * Switch to a new active hat, maintaining bit masks to track active CPUs. 979 * 980 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it 981 * remains a 32-bit value. 982 */ 983 void 984 hat_switch(hat_t *hat) 985 { 986 uint64_t newcr3; 987 cpu_t *cpu = CPU; 988 hat_t *old = cpu->cpu_current_hat; 989 990 /* 991 * set up this information first, so we don't miss any cross calls 992 */ 993 if (old != NULL) { 994 if (old == hat) 995 return; 996 if (old != kas.a_hat) 997 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id); 998 } 999 1000 /* 1001 * Add this CPU to the active set for this HAT. 1002 */ 1003 if (hat != kas.a_hat) { 1004 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id); 1005 } 1006 cpu->cpu_current_hat = hat; 1007 1008 /* 1009 * now go ahead and load cr3 1010 */ 1011 if (hat->hat_flags & HAT_VLP) { 1012 #if defined(__amd64) 1013 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1014 1015 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1016 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn); 1017 #elif defined(__i386) 1018 reload_pae32(hat, cpu); 1019 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) + 1020 (cpu->cpu_id + 1) * VLP_SIZE; 1021 #endif 1022 } else { 1023 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn); 1024 } 1025 #ifdef __xpv 1026 { 1027 struct mmuext_op t[2]; 1028 uint_t retcnt; 1029 uint_t opcnt = 1; 1030 1031 t[0].cmd = MMUEXT_NEW_BASEPTR; 1032 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1033 #if defined(__amd64) 1034 /* 1035 * There's an interesting problem here, as to what to 1036 * actually specify when switching to the kernel hat. 1037 * For now we'll reuse the kernel hat again. 
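 *
 * On 64 bit xpv the hypervisor keeps split kernel and user top level
 * tables: for a user hat the user base pointer is set to hat_user_ptable
 * below, while for the kernel hat the kernel's own base is simply reused.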
1038 */ 1039 t[1].cmd = MMUEXT_NEW_USER_BASEPTR; 1040 if (hat == kas.a_hat) 1041 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1042 else 1043 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable); 1044 ++opcnt; 1045 #endif /* __amd64 */ 1046 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0) 1047 panic("HYPERVISOR_mmu_update() failed"); 1048 ASSERT(retcnt == opcnt); 1049 1050 } 1051 #else 1052 setcr3(newcr3); 1053 #endif 1054 ASSERT(cpu == CPU); 1055 } 1056 1057 /* 1058 * Utility to return a valid x86pte_t from protections, pfn, and level number 1059 */ 1060 static x86pte_t 1061 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags) 1062 { 1063 x86pte_t pte; 1064 uint_t cache_attr = attr & HAT_ORDER_MASK; 1065 1066 pte = MAKEPTE(pfn, level); 1067 1068 if (attr & PROT_WRITE) 1069 PTE_SET(pte, PT_WRITABLE); 1070 1071 if (attr & PROT_USER) 1072 PTE_SET(pte, PT_USER); 1073 1074 if (!(attr & PROT_EXEC)) 1075 PTE_SET(pte, mmu.pt_nx); 1076 1077 /* 1078 * Set the software bits used track ref/mod sync's and hments. 1079 * If not using REF/MOD, set them to avoid h/w rewriting PTEs. 1080 */ 1081 if (flags & HAT_LOAD_NOCONSIST) 1082 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD); 1083 else if (attr & HAT_NOSYNC) 1084 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD); 1085 1086 /* 1087 * Set the caching attributes in the PTE. The combination 1088 * of attributes are poorly defined, so we pay attention 1089 * to them in the given order. 1090 * 1091 * The test for HAT_STRICTORDER is different because it's defined 1092 * as "0" - which was a stupid thing to do, but is too late to change! 1093 */ 1094 if (cache_attr == HAT_STRICTORDER) { 1095 PTE_SET(pte, PT_NOCACHE); 1096 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */ 1097 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) { 1098 /* nothing to set */; 1099 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) { 1100 PTE_SET(pte, PT_NOCACHE); 1101 if (is_x86_feature(x86_featureset, X86FSET_PAT)) 1102 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE); 1103 else 1104 PTE_SET(pte, PT_WRITETHRU); 1105 } else { 1106 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr); 1107 } 1108 1109 return (pte); 1110 } 1111 1112 /* 1113 * Duplicate address translations of the parent to the child. 1114 * This function really isn't used anymore. 1115 */ 1116 /*ARGSUSED*/ 1117 int 1118 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag) 1119 { 1120 ASSERT((uintptr_t)addr < kernelbase); 1121 ASSERT(new != kas.a_hat); 1122 ASSERT(old != kas.a_hat); 1123 return (0); 1124 } 1125 1126 /* 1127 * returns number of bytes that have valid mappings in hat. 1128 */ 1129 size_t 1130 hat_get_mapped_size(hat_t *hat) 1131 { 1132 size_t total = 0; 1133 int l; 1134 1135 for (l = 0; l <= mmu.max_page_level; l++) 1136 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l)); 1137 total += hat->hat_ism_pgcnt; 1138 1139 return (total); 1140 } 1141 1142 /* 1143 * enable/disable collection of stats for hat. 1144 */ 1145 int 1146 hat_stats_enable(hat_t *hat) 1147 { 1148 atomic_add_32(&hat->hat_stats, 1); 1149 return (1); 1150 } 1151 1152 void 1153 hat_stats_disable(hat_t *hat) 1154 { 1155 atomic_add_32(&hat->hat_stats, -1); 1156 } 1157 1158 /* 1159 * Utility to sync the ref/mod bits from a page table entry to the page_t 1160 * We must be holding the mapping list lock when this is called. 
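 *
 * PTEs marked PT_NOSYNC are skipped entirely, and for a large page mapping
 * the REF/MOD state is propagated to every constituent page_t, not just
 * the first one.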
1161 */ 1162 static void 1163 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level) 1164 { 1165 uint_t rm = 0; 1166 pgcnt_t pgcnt; 1167 1168 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 1169 return; 1170 1171 if (PTE_GET(pte, PT_REF)) 1172 rm |= P_REF; 1173 1174 if (PTE_GET(pte, PT_MOD)) 1175 rm |= P_MOD; 1176 1177 if (rm == 0) 1178 return; 1179 1180 /* 1181 * sync to all constituent pages of a large page 1182 */ 1183 ASSERT(x86_hm_held(pp)); 1184 pgcnt = page_get_pagecnt(level); 1185 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 1186 for (; pgcnt > 0; --pgcnt) { 1187 /* 1188 * hat_page_demote() can't decrease 1189 * pszc below this mapping size 1190 * since this large mapping existed after we 1191 * took mlist lock. 1192 */ 1193 ASSERT(pp->p_szc >= level); 1194 hat_page_setattr(pp, rm); 1195 ++pp; 1196 } 1197 } 1198 1199 /* 1200 * This the set of PTE bits for PFN, permissions and caching 1201 * that are allowed to change on a HAT_LOAD_REMAP 1202 */ 1203 #define PT_REMAP_BITS \ 1204 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \ 1205 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD) 1206 1207 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX) 1208 /* 1209 * Do the low-level work to get a mapping entered into a HAT's pagetables 1210 * and in the mapping list of the associated page_t. 1211 */ 1212 static int 1213 hati_pte_map( 1214 htable_t *ht, 1215 uint_t entry, 1216 page_t *pp, 1217 x86pte_t pte, 1218 int flags, 1219 void *pte_ptr) 1220 { 1221 hat_t *hat = ht->ht_hat; 1222 x86pte_t old_pte; 1223 level_t l = ht->ht_level; 1224 hment_t *hm; 1225 uint_t is_consist; 1226 uint_t is_locked; 1227 int rv = 0; 1228 1229 /* 1230 * Is this a consistent (ie. need mapping list lock) mapping? 1231 */ 1232 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0); 1233 1234 /* 1235 * Track locked mapping count in the htable. Do this first, 1236 * as we track locking even if there already is a mapping present. 1237 */ 1238 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat; 1239 if (is_locked) 1240 HTABLE_LOCK_INC(ht); 1241 1242 /* 1243 * Acquire the page's mapping list lock and get an hment to use. 1244 * Note that hment_prepare() might return NULL. 1245 */ 1246 if (is_consist) { 1247 x86_hm_enter(pp); 1248 hm = hment_prepare(ht, entry, pp); 1249 } 1250 1251 /* 1252 * Set the new pte, retrieving the old one at the same time. 1253 */ 1254 old_pte = x86pte_set(ht, entry, pte, pte_ptr); 1255 1256 /* 1257 * Did we get a large page / page table collision? 1258 */ 1259 if (old_pte == LPAGE_ERROR) { 1260 if (is_locked) 1261 HTABLE_LOCK_DEC(ht); 1262 rv = -1; 1263 goto done; 1264 } 1265 1266 /* 1267 * If the mapping didn't change there is nothing more to do. 1268 */ 1269 if (PTE_EQUIV(pte, old_pte)) 1270 goto done; 1271 1272 /* 1273 * Install a new mapping in the page's mapping list 1274 */ 1275 if (!PTE_ISVALID(old_pte)) { 1276 if (is_consist) { 1277 hment_assign(ht, entry, pp, hm); 1278 x86_hm_exit(pp); 1279 } else { 1280 ASSERT(flags & HAT_LOAD_NOCONSIST); 1281 } 1282 #if defined(__amd64) 1283 if (ht->ht_flags & HTABLE_VLP) { 1284 cpu_t *cpu = CPU; 1285 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1286 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1287 } 1288 #endif 1289 HTABLE_INC(ht->ht_valid_cnt); 1290 PGCNT_INC(hat, l); 1291 return (rv); 1292 } 1293 1294 /* 1295 * Remap's are more complicated: 1296 * - HAT_LOAD_REMAP must be specified if changing the pfn. 1297 * We also require that NOCONSIST be specified. 
1298 * - Otherwise only permission or caching bits may change. 1299 */ 1300 if (!PTE_ISPAGE(old_pte, l)) 1301 panic("non-null/page mapping pte=" FMT_PTE, old_pte); 1302 1303 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) { 1304 REMAPASSERT(flags & HAT_LOAD_REMAP); 1305 REMAPASSERT(flags & HAT_LOAD_NOCONSIST); 1306 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 1307 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) == 1308 pf_is_memory(PTE2PFN(pte, l))); 1309 REMAPASSERT(!is_consist); 1310 } 1311 1312 /* 1313 * We only let remaps change the certain bits in the PTE. 1314 */ 1315 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS)) 1316 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n", 1317 old_pte, pte); 1318 1319 /* 1320 * We don't create any mapping list entries on a remap, so release 1321 * any allocated hment after we drop the mapping list lock. 1322 */ 1323 done: 1324 if (is_consist) { 1325 x86_hm_exit(pp); 1326 if (hm != NULL) 1327 hment_free(hm); 1328 } 1329 return (rv); 1330 } 1331 1332 /* 1333 * Internal routine to load a single page table entry. This only fails if 1334 * we attempt to overwrite a page table link with a large page. 1335 */ 1336 static int 1337 hati_load_common( 1338 hat_t *hat, 1339 uintptr_t va, 1340 page_t *pp, 1341 uint_t attr, 1342 uint_t flags, 1343 level_t level, 1344 pfn_t pfn) 1345 { 1346 htable_t *ht; 1347 uint_t entry; 1348 x86pte_t pte; 1349 int rv = 0; 1350 1351 /* 1352 * The number 16 is arbitrary and here to catch a recursion problem 1353 * early before we blow out the kernel stack. 1354 */ 1355 ++curthread->t_hatdepth; 1356 ASSERT(curthread->t_hatdepth < 16); 1357 1358 ASSERT(hat == kas.a_hat || 1359 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1360 1361 if (flags & HAT_LOAD_SHARE) 1362 hat->hat_flags |= HAT_SHARED; 1363 1364 /* 1365 * Find the page table that maps this page if it already exists. 1366 */ 1367 ht = htable_lookup(hat, va, level); 1368 1369 /* 1370 * We must have HAT_LOAD_NOCONSIST if page_t is NULL. 
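 *
 * With no page_t there is no mapping list to maintain, so the flag is
 * simply forced on here rather than depending on the caller to pass it.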
1371 */ 1372 if (pp == NULL) 1373 flags |= HAT_LOAD_NOCONSIST; 1374 1375 if (ht == NULL) { 1376 ht = htable_create(hat, va, level, NULL); 1377 ASSERT(ht != NULL); 1378 } 1379 entry = htable_va2entry(va, ht); 1380 1381 /* 1382 * a bunch of paranoid error checking 1383 */ 1384 ASSERT(ht->ht_busy > 0); 1385 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) 1386 panic("hati_load_common: bad htable %p, va %p", 1387 (void *)ht, (void *)va); 1388 ASSERT(ht->ht_level == level); 1389 1390 /* 1391 * construct the new PTE 1392 */ 1393 if (hat == kas.a_hat) 1394 attr &= ~PROT_USER; 1395 pte = hati_mkpte(pfn, attr, level, flags); 1396 if (hat == kas.a_hat && va >= kernelbase) 1397 PTE_SET(pte, mmu.pt_global); 1398 1399 /* 1400 * establish the mapping 1401 */ 1402 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL); 1403 1404 /* 1405 * release the htable and any reserves 1406 */ 1407 htable_release(ht); 1408 --curthread->t_hatdepth; 1409 return (rv); 1410 } 1411 1412 /* 1413 * special case of hat_memload to deal with some kernel addrs for performance 1414 */ 1415 static void 1416 hat_kmap_load( 1417 caddr_t addr, 1418 page_t *pp, 1419 uint_t attr, 1420 uint_t flags) 1421 { 1422 uintptr_t va = (uintptr_t)addr; 1423 x86pte_t pte; 1424 pfn_t pfn = page_pptonum(pp); 1425 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr); 1426 htable_t *ht; 1427 uint_t entry; 1428 void *pte_ptr; 1429 1430 /* 1431 * construct the requested PTE 1432 */ 1433 attr &= ~PROT_USER; 1434 attr |= HAT_STORECACHING_OK; 1435 pte = hati_mkpte(pfn, attr, 0, flags); 1436 PTE_SET(pte, mmu.pt_global); 1437 1438 /* 1439 * Figure out the pte_ptr and htable and use common code to finish up 1440 */ 1441 if (mmu.pae_hat) 1442 pte_ptr = mmu.kmap_ptes + pg_off; 1443 else 1444 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off; 1445 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >> 1446 LEVEL_SHIFT(1)]; 1447 entry = htable_va2entry(va, ht); 1448 ++curthread->t_hatdepth; 1449 ASSERT(curthread->t_hatdepth < 16); 1450 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr); 1451 --curthread->t_hatdepth; 1452 } 1453 1454 /* 1455 * hat_memload() - load a translation to the given page struct 1456 * 1457 * Flags for hat_memload/hat_devload/hat_*attr. 1458 * 1459 * HAT_LOAD Default flags to load a translation to the page. 1460 * 1461 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(), 1462 * and hat_devload(). 1463 * 1464 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list. 1465 * sets PT_NOCONSIST 1466 * 1467 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables 1468 * that map some user pages (not kas) is shared by more 1469 * than one process (eg. ISM). 1470 * 1471 * HAT_LOAD_REMAP Reload a valid pte with a different page frame. 1472 * 1473 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this 1474 * point, it's setting up mapping to allocate internal 1475 * hat layer data structures. This flag forces hat layer 1476 * to tap its reserves in order to prevent infinite 1477 * recursion. 1478 * 1479 * The following is a protection attribute (like PROT_READ, etc.) 1480 * 1481 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits 1482 * are never cleared. 1483 * 1484 * Installing new valid PTE's and creation of the mapping list 1485 * entry are controlled under the same lock. It's derived from the 1486 * page_t being mapped. 
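 *
 * As a purely illustrative sketch (hypothetical caller, not from this
 * file), a typical consistent load of a single page might look like:
 *
 *	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 * which, for a non-kmap address, reaches hati_load_common() at level 0
 * with the pfn taken from pp and, since no NOCONSIST flag is passed, adds
 * an hment for pp under its mapping list lock.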
1487 */ 1488 static uint_t supported_memload_flags = 1489 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST | 1490 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT; 1491 1492 void 1493 hat_memload( 1494 hat_t *hat, 1495 caddr_t addr, 1496 page_t *pp, 1497 uint_t attr, 1498 uint_t flags) 1499 { 1500 uintptr_t va = (uintptr_t)addr; 1501 level_t level = 0; 1502 pfn_t pfn = page_pptonum(pp); 1503 1504 XPV_DISALLOW_MIGRATE(); 1505 ASSERT(IS_PAGEALIGNED(va)); 1506 ASSERT(hat == kas.a_hat || va < _userlimit); 1507 ASSERT(hat == kas.a_hat || 1508 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1509 ASSERT((flags & supported_memload_flags) == flags); 1510 1511 ASSERT(!IN_VA_HOLE(va)); 1512 ASSERT(!PP_ISFREE(pp)); 1513 1514 /* 1515 * kernel address special case for performance. 1516 */ 1517 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 1518 ASSERT(hat == kas.a_hat); 1519 hat_kmap_load(addr, pp, attr, flags); 1520 XPV_ALLOW_MIGRATE(); 1521 return; 1522 } 1523 1524 /* 1525 * This is used for memory with normal caching enabled, so 1526 * always set HAT_STORECACHING_OK. 1527 */ 1528 attr |= HAT_STORECACHING_OK; 1529 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) 1530 panic("unexpected hati_load_common() failure"); 1531 XPV_ALLOW_MIGRATE(); 1532 } 1533 1534 /* ARGSUSED */ 1535 void 1536 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 1537 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 1538 { 1539 hat_memload(hat, addr, pp, attr, flags); 1540 } 1541 1542 /* 1543 * Load the given array of page structs using large pages when possible 1544 */ 1545 void 1546 hat_memload_array( 1547 hat_t *hat, 1548 caddr_t addr, 1549 size_t len, 1550 page_t **pages, 1551 uint_t attr, 1552 uint_t flags) 1553 { 1554 uintptr_t va = (uintptr_t)addr; 1555 uintptr_t eaddr = va + len; 1556 level_t level; 1557 size_t pgsize; 1558 pgcnt_t pgindx = 0; 1559 pfn_t pfn; 1560 pgcnt_t i; 1561 1562 XPV_DISALLOW_MIGRATE(); 1563 ASSERT(IS_PAGEALIGNED(va)); 1564 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 1565 ASSERT(hat == kas.a_hat || 1566 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1567 ASSERT((flags & supported_memload_flags) == flags); 1568 1569 /* 1570 * memload is used for memory with full caching enabled, so 1571 * set HAT_STORECACHING_OK. 1572 */ 1573 attr |= HAT_STORECACHING_OK; 1574 1575 /* 1576 * handle all pages using largest possible pagesize 1577 */ 1578 while (va < eaddr) { 1579 /* 1580 * decide what level mapping to use (ie. pagesize) 1581 */ 1582 pfn = page_pptonum(pages[pgindx]); 1583 for (level = mmu.max_page_level; ; --level) { 1584 pgsize = LEVEL_SIZE(level); 1585 if (level == 0) 1586 break; 1587 1588 if (!IS_P2ALIGNED(va, pgsize) || 1589 (eaddr - va) < pgsize || 1590 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize)) 1591 continue; 1592 1593 /* 1594 * To use a large mapping of this size, all the 1595 * pages we are passed must be sequential subpages 1596 * of the large page. 1597 * hat_page_demote() can't change p_szc because 1598 * all pages are locked. 1599 */ 1600 if (pages[pgindx]->p_szc >= level) { 1601 for (i = 0; i < mmu_btop(pgsize); ++i) { 1602 if (pfn + i != 1603 page_pptonum(pages[pgindx + i])) 1604 break; 1605 ASSERT(pages[pgindx + i]->p_szc >= 1606 level); 1607 ASSERT(pages[pgindx] + i == 1608 pages[pgindx + i]); 1609 } 1610 if (i == mmu_btop(pgsize)) { 1611 #ifdef DEBUG 1612 if (level == 2) 1613 map1gcnt++; 1614 #endif 1615 break; 1616 } 1617 } 1618 } 1619 1620 /* 1621 * Load this page mapping. 
If the load fails, try a smaller 1622 * pagesize. 1623 */ 1624 ASSERT(!IN_VA_HOLE(va)); 1625 while (hati_load_common(hat, va, pages[pgindx], attr, 1626 flags, level, pfn) != 0) { 1627 if (level == 0) 1628 panic("unexpected hati_load_common() failure"); 1629 --level; 1630 pgsize = LEVEL_SIZE(level); 1631 } 1632 1633 /* 1634 * move to next page 1635 */ 1636 va += pgsize; 1637 pgindx += mmu_btop(pgsize); 1638 } 1639 XPV_ALLOW_MIGRATE(); 1640 } 1641 1642 /* ARGSUSED */ 1643 void 1644 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 1645 struct page **pps, uint_t attr, uint_t flags, 1646 hat_region_cookie_t rcookie) 1647 { 1648 hat_memload_array(hat, addr, len, pps, attr, flags); 1649 } 1650 1651 /* 1652 * void hat_devload(hat, addr, len, pf, attr, flags) 1653 * load/lock the given page frame number 1654 * 1655 * Advisory ordering attributes. Apply only to device mappings. 1656 * 1657 * HAT_STRICTORDER: the CPU must issue the references in order, as the 1658 * programmer specified. This is the default. 1659 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds 1660 * of reordering; store or load with store or load). 1661 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores 1662 * to consecutive locations (for example, turn two consecutive byte 1663 * stores into one halfword store), and it may batch individual loads 1664 * (for example, turn two consecutive byte loads into one halfword load). 1665 * This also implies re-ordering. 1666 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it 1667 * until another store occurs. The default is to fetch new data 1668 * on every load. This also implies merging. 1669 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to 1670 * the device (perhaps with other data) at a later time. The default is 1671 * to push the data right away. This also implies load caching. 1672 * 1673 * Equivalent of hat_memload(), but can be used for device memory where 1674 * there are no page_t's and we support additional flags (write merging, etc). 1675 * Note that we can have large page mappings with this interface. 1676 */ 1677 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK | 1678 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK | 1679 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK; 1680 1681 void 1682 hat_devload( 1683 hat_t *hat, 1684 caddr_t addr, 1685 size_t len, 1686 pfn_t pfn, 1687 uint_t attr, 1688 int flags) 1689 { 1690 uintptr_t va = ALIGN2PAGE(addr); 1691 uintptr_t eva = va + len; 1692 level_t level; 1693 size_t pgsize; 1694 page_t *pp; 1695 int f; /* per PTE copy of flags - maybe modified */ 1696 uint_t a; /* per PTE copy of attr */ 1697 1698 XPV_DISALLOW_MIGRATE(); 1699 ASSERT(IS_PAGEALIGNED(va)); 1700 ASSERT(hat == kas.a_hat || eva <= _userlimit); 1701 ASSERT(hat == kas.a_hat || 1702 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1703 ASSERT((flags & supported_devload_flags) == flags); 1704 1705 /* 1706 * handle all pages 1707 */ 1708 while (va < eva) { 1709 1710 /* 1711 * decide what level mapping to use (ie. 
pagesize) 1712 */ 1713 for (level = mmu.max_page_level; ; --level) { 1714 pgsize = LEVEL_SIZE(level); 1715 if (level == 0) 1716 break; 1717 if (IS_P2ALIGNED(va, pgsize) && 1718 (eva - va) >= pgsize && 1719 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) { 1720 #ifdef DEBUG 1721 if (level == 2) 1722 map1gcnt++; 1723 #endif 1724 break; 1725 } 1726 } 1727 1728 /* 1729 * If this is just memory then allow caching (this happens 1730 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used 1731 * to override that. If we don't have a page_t then make sure 1732 * NOCONSIST is set. 1733 */ 1734 a = attr; 1735 f = flags; 1736 if (!pf_is_memory(pfn)) 1737 f |= HAT_LOAD_NOCONSIST; 1738 else if (!(a & HAT_PLAT_NOCACHE)) 1739 a |= HAT_STORECACHING_OK; 1740 1741 if (f & HAT_LOAD_NOCONSIST) 1742 pp = NULL; 1743 else 1744 pp = page_numtopp_nolock(pfn); 1745 1746 /* 1747 * Check to make sure we are really trying to map a valid 1748 * memory page. The caller wishing to intentionally map 1749 * free memory pages will have passed the HAT_LOAD_NOCONSIST 1750 * flag, then pp will be NULL. 1751 */ 1752 if (pp != NULL) { 1753 if (PP_ISFREE(pp)) { 1754 panic("hat_devload: loading " 1755 "a mapping to free page %p", (void *)pp); 1756 } 1757 1758 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1759 panic("hat_devload: loading a mapping " 1760 "to an unlocked page %p", 1761 (void *)pp); 1762 } 1763 } 1764 1765 /* 1766 * load this page mapping 1767 */ 1768 ASSERT(!IN_VA_HOLE(va)); 1769 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) { 1770 if (level == 0) 1771 panic("unexpected hati_load_common() failure"); 1772 --level; 1773 pgsize = LEVEL_SIZE(level); 1774 } 1775 1776 /* 1777 * move to next page 1778 */ 1779 va += pgsize; 1780 pfn += mmu_btop(pgsize); 1781 } 1782 XPV_ALLOW_MIGRATE(); 1783 } 1784 1785 /* 1786 * void hat_unlock(hat, addr, len) 1787 * unlock the mappings to a given range of addresses 1788 * 1789 * Locks are tracked by ht_lock_cnt in the htable. 1790 */ 1791 void 1792 hat_unlock(hat_t *hat, caddr_t addr, size_t len) 1793 { 1794 uintptr_t vaddr = (uintptr_t)addr; 1795 uintptr_t eaddr = vaddr + len; 1796 htable_t *ht = NULL; 1797 1798 /* 1799 * kernel entries are always locked, we don't track lock counts 1800 */ 1801 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 1802 ASSERT(IS_PAGEALIGNED(vaddr)); 1803 ASSERT(IS_PAGEALIGNED(eaddr)); 1804 if (hat == kas.a_hat) 1805 return; 1806 if (eaddr > _userlimit) 1807 panic("hat_unlock() address out of range - above _userlimit"); 1808 1809 XPV_DISALLOW_MIGRATE(); 1810 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1811 while (vaddr < eaddr) { 1812 (void) htable_walk(hat, &ht, &vaddr, eaddr); 1813 if (ht == NULL) 1814 break; 1815 1816 ASSERT(!IN_VA_HOLE(vaddr)); 1817 1818 if (ht->ht_lock_cnt < 1) 1819 panic("hat_unlock(): lock_cnt < 1, " 1820 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr); 1821 HTABLE_LOCK_DEC(ht); 1822 1823 vaddr += LEVEL_SIZE(ht->ht_level); 1824 } 1825 if (ht) 1826 htable_release(ht); 1827 XPV_ALLOW_MIGRATE(); 1828 } 1829 1830 /* ARGSUSED */ 1831 void 1832 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len, 1833 hat_region_cookie_t rcookie) 1834 { 1835 panic("No shared region support on x86"); 1836 } 1837 1838 #if !defined(__xpv) 1839 /* 1840 * Cross call service routine to demap a virtual page on 1841 * the current CPU or flush all mappings in TLB. 
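 *
 * a1 is the hat being invalidated and a2 is either a single virtual address
 * or DEMAP_ALL_ADDR; in the latter case cr3 is reloaded (recopying the VLP
 * ptes first when needed) to flush the entire TLB. The call is ignored when
 * this CPU isn't running in the target hat and the target isn't the kernel.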
1842 */ 1843 /*ARGSUSED*/ 1844 static int 1845 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 1846 { 1847 hat_t *hat = (hat_t *)a1; 1848 caddr_t addr = (caddr_t)a2; 1849 1850 /* 1851 * If the target hat isn't the kernel and this CPU isn't operating 1852 * in the target hat, we can ignore the cross call. 1853 */ 1854 if (hat != kas.a_hat && hat != CPU->cpu_current_hat) 1855 return (0); 1856 1857 /* 1858 * For a normal address, we just flush one page mapping 1859 */ 1860 if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 1861 mmu_tlbflush_entry(addr); 1862 return (0); 1863 } 1864 1865 /* 1866 * Otherwise we reload cr3 to effect a complete TLB flush. 1867 * 1868 * A reload of cr3 on a VLP process also means we must also recopy in 1869 * the pte values from the struct hat 1870 */ 1871 if (hat->hat_flags & HAT_VLP) { 1872 #if defined(__amd64) 1873 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes; 1874 1875 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1876 #elif defined(__i386) 1877 reload_pae32(hat, CPU); 1878 #endif 1879 } 1880 reload_cr3(); 1881 return (0); 1882 } 1883 1884 /* 1885 * Flush all TLB entries, including global (ie. kernel) ones. 1886 */ 1887 static void 1888 flush_all_tlb_entries(void) 1889 { 1890 ulong_t cr4 = getcr4(); 1891 1892 if (cr4 & CR4_PGE) { 1893 setcr4(cr4 & ~(ulong_t)CR4_PGE); 1894 setcr4(cr4); 1895 1896 /* 1897 * 32 bit PAE also needs to always reload_cr3() 1898 */ 1899 if (mmu.max_level == 2) 1900 reload_cr3(); 1901 } else { 1902 reload_cr3(); 1903 } 1904 } 1905 1906 #define TLB_CPU_HALTED (01ul) 1907 #define TLB_INVAL_ALL (02ul) 1908 #define CAS_TLB_INFO(cpu, old, new) \ 1909 caslong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new)) 1910 1911 /* 1912 * Record that a CPU is going idle 1913 */ 1914 void 1915 tlb_going_idle(void) 1916 { 1917 atomic_or_long((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED); 1918 } 1919 1920 /* 1921 * Service a delayed TLB flush if coming out of being idle. 1922 * It will be called from cpu idle notification with interrupt disabled. 1923 */ 1924 void 1925 tlb_service(void) 1926 { 1927 ulong_t tlb_info; 1928 ulong_t found; 1929 1930 /* 1931 * We only have to do something if coming out of being idle. 1932 */ 1933 tlb_info = CPU->cpu_m.mcpu_tlb_info; 1934 if (tlb_info & TLB_CPU_HALTED) { 1935 ASSERT(CPU->cpu_current_hat == kas.a_hat); 1936 1937 /* 1938 * Atomic clear and fetch of old state. 1939 */ 1940 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) { 1941 ASSERT(found & TLB_CPU_HALTED); 1942 tlb_info = found; 1943 SMT_PAUSE(); 1944 } 1945 if (tlb_info & TLB_INVAL_ALL) 1946 flush_all_tlb_entries(); 1947 } 1948 } 1949 #endif /* !__xpv */ 1950 1951 /* 1952 * Internal routine to do cross calls to invalidate a range of pages on 1953 * all CPUs using a given hat. 1954 */ 1955 void 1956 hat_tlb_inval(hat_t *hat, uintptr_t va) 1957 { 1958 extern int flushes_require_xcalls; /* from mp_startup.c */ 1959 cpuset_t justme; 1960 cpuset_t cpus_to_shootdown; 1961 #ifndef __xpv 1962 cpuset_t check_cpus; 1963 cpu_t *cpup; 1964 int c; 1965 #endif 1966 1967 /* 1968 * If the hat is being destroyed, there are no more users, so 1969 * demap need not do anything. 1970 */ 1971 if (hat->hat_flags & HAT_FREEING) 1972 return; 1973 1974 /* 1975 * If demapping from a shared pagetable, we best demap the 1976 * entire set of user TLBs, since we don't know what addresses 1977 * these were shared at. 
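 *
 * That is done by retargeting the invalidation at kas.a_hat with
 * DEMAP_ALL_ADDR, so every CPU ends up flushing its whole TLB rather than
 * a single entry.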
1978 */ 1979 if (hat->hat_flags & HAT_SHARED) { 1980 hat = kas.a_hat; 1981 va = DEMAP_ALL_ADDR; 1982 } 1983 1984 /* 1985 * if not running with multiple CPUs, don't use cross calls 1986 */ 1987 if (panicstr || !flushes_require_xcalls) { 1988 #ifdef __xpv 1989 if (va == DEMAP_ALL_ADDR) 1990 xen_flush_tlb(); 1991 else 1992 xen_flush_va((caddr_t)va); 1993 #else 1994 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL); 1995 #endif 1996 return; 1997 } 1998 1999 2000 /* 2001 * Determine CPUs to shootdown. Kernel changes always do all CPUs. 2002 * Otherwise it's just CPUs currently executing in this hat. 2003 */ 2004 kpreempt_disable(); 2005 CPUSET_ONLY(justme, CPU->cpu_id); 2006 if (hat == kas.a_hat) 2007 cpus_to_shootdown = khat_cpuset; 2008 else 2009 cpus_to_shootdown = hat->hat_cpus; 2010 2011 #ifndef __xpv 2012 /* 2013 * If any CPUs in the set are idle, just request a delayed flush 2014 * and avoid waking them up. 2015 */ 2016 check_cpus = cpus_to_shootdown; 2017 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) { 2018 ulong_t tlb_info; 2019 2020 if (!CPU_IN_SET(check_cpus, c)) 2021 continue; 2022 CPUSET_DEL(check_cpus, c); 2023 cpup = cpu[c]; 2024 if (cpup == NULL) 2025 continue; 2026 2027 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2028 while (tlb_info == TLB_CPU_HALTED) { 2029 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED, 2030 TLB_CPU_HALTED | TLB_INVAL_ALL); 2031 SMT_PAUSE(); 2032 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2033 } 2034 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) { 2035 HATSTAT_INC(hs_tlb_inval_delayed); 2036 CPUSET_DEL(cpus_to_shootdown, c); 2037 } 2038 } 2039 #endif 2040 2041 if (CPUSET_ISNULL(cpus_to_shootdown) || 2042 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) { 2043 2044 #ifdef __xpv 2045 if (va == DEMAP_ALL_ADDR) 2046 xen_flush_tlb(); 2047 else 2048 xen_flush_va((caddr_t)va); 2049 #else 2050 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)va, NULL); 2051 #endif 2052 2053 } else { 2054 2055 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id); 2056 #ifdef __xpv 2057 if (va == DEMAP_ALL_ADDR) 2058 xen_gflush_tlb(cpus_to_shootdown); 2059 else 2060 xen_gflush_va((caddr_t)va, cpus_to_shootdown); 2061 #else 2062 xc_call((xc_arg_t)hat, (xc_arg_t)va, NULL, 2063 CPUSET2BV(cpus_to_shootdown), hati_demap_func); 2064 #endif 2065 2066 } 2067 kpreempt_enable(); 2068 } 2069 2070 /* 2071 * Interior routine for HAT_UNLOADs from hat_unload_callback(), 2072 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't 2073 * handle releasing of the htables. 2074 */ 2075 void 2076 hat_pte_unmap( 2077 htable_t *ht, 2078 uint_t entry, 2079 uint_t flags, 2080 x86pte_t old_pte, 2081 void *pte_ptr) 2082 { 2083 hat_t *hat = ht->ht_hat; 2084 hment_t *hm = NULL; 2085 page_t *pp = NULL; 2086 level_t l = ht->ht_level; 2087 pfn_t pfn; 2088 2089 /* 2090 * We always track the locking counts, even if nothing is unmapped 2091 */ 2092 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) { 2093 ASSERT(ht->ht_lock_cnt > 0); 2094 HTABLE_LOCK_DEC(ht); 2095 } 2096 2097 /* 2098 * Figure out which page's mapping list lock to acquire using the PFN 2099 * passed in "old" PTE. We then attempt to invalidate the PTE. 2100 * If another thread, probably a hat_pageunload, has asynchronously 2101 * unmapped/remapped this address we'll loop here. 
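 *
 * Each iteration looks up the page_t from the pfn in the old PTE, takes
 * its mapping list lock, and calls x86pte_inval(); if the PTE that was
 * actually replaced names a different pfn, the lock is dropped and we
 * retry with the newer value.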
2102 */ 2103 ASSERT(ht->ht_busy > 0); 2104 while (PTE_ISVALID(old_pte)) { 2105 pfn = PTE2PFN(old_pte, l); 2106 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) { 2107 pp = NULL; 2108 } else { 2109 #ifdef __xpv 2110 if (pfn == PFN_INVALID) 2111 panic("Invalid PFN, but not PT_NOCONSIST"); 2112 #endif 2113 pp = page_numtopp_nolock(pfn); 2114 if (pp == NULL) { 2115 panic("no page_t, not NOCONSIST: old_pte=" 2116 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx", 2117 old_pte, (uintptr_t)ht, entry, 2118 (uintptr_t)pte_ptr); 2119 } 2120 x86_hm_enter(pp); 2121 } 2122 2123 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr); 2124 2125 /* 2126 * If the page hadn't changed we've unmapped it and can proceed 2127 */ 2128 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn) 2129 break; 2130 2131 /* 2132 * Otherwise, we'll have to retry with the current old_pte. 2133 * Drop the hment lock, since the pfn may have changed. 2134 */ 2135 if (pp != NULL) { 2136 x86_hm_exit(pp); 2137 pp = NULL; 2138 } else { 2139 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 2140 } 2141 } 2142 2143 /* 2144 * If the old mapping wasn't valid, there's nothing more to do 2145 */ 2146 if (!PTE_ISVALID(old_pte)) { 2147 if (pp != NULL) 2148 x86_hm_exit(pp); 2149 return; 2150 } 2151 2152 /* 2153 * Take care of syncing any MOD/REF bits and removing the hment. 2154 */ 2155 if (pp != NULL) { 2156 if (!(flags & HAT_UNLOAD_NOSYNC)) 2157 hati_sync_pte_to_page(pp, old_pte, l); 2158 hm = hment_remove(pp, ht, entry); 2159 x86_hm_exit(pp); 2160 if (hm != NULL) 2161 hment_free(hm); 2162 } 2163 2164 /* 2165 * Handle book keeping in the htable and hat 2166 */ 2167 ASSERT(ht->ht_valid_cnt > 0); 2168 HTABLE_DEC(ht->ht_valid_cnt); 2169 PGCNT_DEC(hat, l); 2170 } 2171 2172 /* 2173 * very cheap unload implementation to special case some kernel addresses 2174 */ 2175 static void 2176 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags) 2177 { 2178 uintptr_t va = (uintptr_t)addr; 2179 uintptr_t eva = va + len; 2180 pgcnt_t pg_index; 2181 htable_t *ht; 2182 uint_t entry; 2183 x86pte_t *pte_ptr; 2184 x86pte_t old_pte; 2185 2186 for (; va < eva; va += MMU_PAGESIZE) { 2187 /* 2188 * Get the PTE 2189 */ 2190 pg_index = mmu_btop(va - mmu.kmap_addr); 2191 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); 2192 old_pte = GET_PTE(pte_ptr); 2193 2194 /* 2195 * get the htable / entry 2196 */ 2197 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) 2198 >> LEVEL_SHIFT(1)]; 2199 entry = htable_va2entry(va, ht); 2200 2201 /* 2202 * use mostly common code to unmap it. 2203 */ 2204 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr); 2205 } 2206 } 2207 2208 2209 /* 2210 * unload a range of virtual address space (no callback) 2211 */ 2212 void 2213 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2214 { 2215 uintptr_t va = (uintptr_t)addr; 2216 2217 XPV_DISALLOW_MIGRATE(); 2218 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 2219 2220 /* 2221 * special case for performance. 2222 */ 2223 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 2224 ASSERT(hat == kas.a_hat); 2225 hat_kmap_unload(addr, len, flags); 2226 } else { 2227 hat_unload_callback(hat, addr, len, flags, NULL); 2228 } 2229 XPV_ALLOW_MIGRATE(); 2230 } 2231 2232 /* 2233 * Do the callbacks for ranges being unloaded. 
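 *
 * For illustration only (hypothetical caller and callback, not taken from
 * this file), a segment driver that wants to know exactly which ranges were
 * torn down might do:
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(as->a_hat, addr, len, HAT_UNLOAD, &cb);
 *
 * handle_ranges() below then invokes hcb_function once per contiguous run of
 * unloaded mappings, with hcb_start_addr and hcb_end_addr set to its bounds.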
2234 */ 2235 typedef struct range_info { 2236 uintptr_t rng_va; 2237 ulong_t rng_cnt; 2238 level_t rng_level; 2239 } range_info_t; 2240 2241 static void 2242 handle_ranges(hat_callback_t *cb, uint_t cnt, range_info_t *range) 2243 { 2244 /* 2245 * do callbacks to upper level VM system 2246 */ 2247 while (cb != NULL && cnt > 0) { 2248 --cnt; 2249 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va; 2250 cb->hcb_end_addr = cb->hcb_start_addr; 2251 cb->hcb_end_addr += 2252 range[cnt].rng_cnt << LEVEL_SIZE(range[cnt].rng_level); 2253 cb->hcb_function(cb); 2254 } 2255 } 2256 2257 /* 2258 * Unload a given range of addresses (has optional callback) 2259 * 2260 * Flags: 2261 * define HAT_UNLOAD 0x00 2262 * define HAT_UNLOAD_NOSYNC 0x02 2263 * define HAT_UNLOAD_UNLOCK 0x04 2264 * define HAT_UNLOAD_OTHER 0x08 - not used 2265 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD 2266 */ 2267 #define MAX_UNLOAD_CNT (8) 2268 void 2269 hat_unload_callback( 2270 hat_t *hat, 2271 caddr_t addr, 2272 size_t len, 2273 uint_t flags, 2274 hat_callback_t *cb) 2275 { 2276 uintptr_t vaddr = (uintptr_t)addr; 2277 uintptr_t eaddr = vaddr + len; 2278 htable_t *ht = NULL; 2279 uint_t entry; 2280 uintptr_t contig_va = (uintptr_t)-1L; 2281 range_info_t r[MAX_UNLOAD_CNT]; 2282 uint_t r_cnt = 0; 2283 x86pte_t old_pte; 2284 2285 XPV_DISALLOW_MIGRATE(); 2286 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2287 ASSERT(IS_PAGEALIGNED(vaddr)); 2288 ASSERT(IS_PAGEALIGNED(eaddr)); 2289 2290 /* 2291 * Special case a single page being unloaded for speed. This happens 2292 * quite frequently, COW faults after a fork() for example. 2293 */ 2294 if (cb == NULL && len == MMU_PAGESIZE) { 2295 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0); 2296 if (ht != NULL) { 2297 if (PTE_ISVALID(old_pte)) 2298 hat_pte_unmap(ht, entry, flags, old_pte, NULL); 2299 htable_release(ht); 2300 } 2301 XPV_ALLOW_MIGRATE(); 2302 return; 2303 } 2304 2305 while (vaddr < eaddr) { 2306 old_pte = htable_walk(hat, &ht, &vaddr, eaddr); 2307 if (ht == NULL) 2308 break; 2309 2310 ASSERT(!IN_VA_HOLE(vaddr)); 2311 2312 if (vaddr < (uintptr_t)addr) 2313 panic("hat_unload_callback(): unmap inside large page"); 2314 2315 /* 2316 * We'll do the call backs for contiguous ranges 2317 */ 2318 if (vaddr != contig_va || 2319 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) { 2320 if (r_cnt == MAX_UNLOAD_CNT) { 2321 handle_ranges(cb, r_cnt, r); 2322 r_cnt = 0; 2323 } 2324 r[r_cnt].rng_va = vaddr; 2325 r[r_cnt].rng_cnt = 0; 2326 r[r_cnt].rng_level = ht->ht_level; 2327 ++r_cnt; 2328 } 2329 2330 /* 2331 * Unload one mapping from the page tables. 2332 */ 2333 entry = htable_va2entry(vaddr, ht); 2334 hat_pte_unmap(ht, entry, flags, old_pte, NULL); 2335 ASSERT(ht->ht_level <= mmu.max_page_level); 2336 vaddr += LEVEL_SIZE(ht->ht_level); 2337 contig_va = vaddr; 2338 ++r[r_cnt - 1].rng_cnt; 2339 } 2340 if (ht) 2341 htable_release(ht); 2342 2343 /* 2344 * handle last range for callbacks 2345 */ 2346 if (r_cnt > 0) 2347 handle_ranges(cb, r_cnt, r); 2348 XPV_ALLOW_MIGRATE(); 2349 } 2350 2351 /* 2352 * Invalidate a virtual address translation on a slave CPU during 2353 * panic() dumps. 
2354 */ 2355 void 2356 hat_flush_range(hat_t *hat, caddr_t va, size_t size) 2357 { 2358 ssize_t sz; 2359 caddr_t endva = va + size; 2360 2361 while (va < endva) { 2362 sz = hat_getpagesize(hat, va); 2363 if (sz < 0) { 2364 #ifdef __xpv 2365 xen_flush_tlb(); 2366 #else 2367 flush_all_tlb_entries(); 2368 #endif 2369 break; 2370 } 2371 #ifdef __xpv 2372 xen_flush_va(va); 2373 #else 2374 mmu_tlbflush_entry(va); 2375 #endif 2376 va += sz; 2377 } 2378 } 2379 2380 /* 2381 * synchronize mapping with software data structures 2382 * 2383 * This interface is currently only used by the working set monitor 2384 * driver. 2385 */ 2386 /*ARGSUSED*/ 2387 void 2388 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2389 { 2390 uintptr_t vaddr = (uintptr_t)addr; 2391 uintptr_t eaddr = vaddr + len; 2392 htable_t *ht = NULL; 2393 uint_t entry; 2394 x86pte_t pte; 2395 x86pte_t save_pte; 2396 x86pte_t new; 2397 page_t *pp; 2398 2399 ASSERT(!IN_VA_HOLE(vaddr)); 2400 ASSERT(IS_PAGEALIGNED(vaddr)); 2401 ASSERT(IS_PAGEALIGNED(eaddr)); 2402 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2403 2404 XPV_DISALLOW_MIGRATE(); 2405 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2406 try_again: 2407 pte = htable_walk(hat, &ht, &vaddr, eaddr); 2408 if (ht == NULL) 2409 break; 2410 entry = htable_va2entry(vaddr, ht); 2411 2412 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2413 PTE_GET(pte, PT_REF | PT_MOD) == 0) 2414 continue; 2415 2416 /* 2417 * We need to acquire the mapping list lock to protect 2418 * against hat_pageunload(), hat_unload(), etc. 2419 */ 2420 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level)); 2421 if (pp == NULL) 2422 break; 2423 x86_hm_enter(pp); 2424 save_pte = pte; 2425 pte = x86pte_get(ht, entry); 2426 if (pte != save_pte) { 2427 x86_hm_exit(pp); 2428 goto try_again; 2429 } 2430 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2431 PTE_GET(pte, PT_REF | PT_MOD) == 0) { 2432 x86_hm_exit(pp); 2433 continue; 2434 } 2435 2436 /* 2437 * Need to clear ref or mod bits. We may compete with 2438 * hardware updating the R/M bits and have to try again. 2439 */ 2440 if (flags == HAT_SYNC_ZERORM) { 2441 new = pte; 2442 PTE_CLR(new, PT_REF | PT_MOD); 2443 pte = hati_update_pte(ht, entry, pte, new); 2444 if (pte != 0) { 2445 x86_hm_exit(pp); 2446 goto try_again; 2447 } 2448 } else { 2449 /* 2450 * sync the PTE to the page_t 2451 */ 2452 hati_sync_pte_to_page(pp, save_pte, ht->ht_level); 2453 } 2454 x86_hm_exit(pp); 2455 } 2456 if (ht) 2457 htable_release(ht); 2458 XPV_ALLOW_MIGRATE(); 2459 } 2460 2461 /* 2462 * void hat_map(hat, addr, len, flags) 2463 */ 2464 /*ARGSUSED*/ 2465 void 2466 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2467 { 2468 /* does nothing */ 2469 } 2470 2471 /* 2472 * uint_t hat_getattr(hat, addr, *attr) 2473 * returns attr for <hat,addr> in *attr. returns 0 if there was a 2474 * mapping and *attr is valid, nonzero if there was no mapping and 2475 * *attr is not valid. 
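 *
 * For illustration only (hypothetical caller, not taken from this file):
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(as->a_hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *		... addr is currently mapped writable ...
 *
 * Only PROT_READ/WRITE/USER/EXEC and HAT_NOSYNC are reported; see the body
 * below for the exact PTE bits examined.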
2476 */ 2477 uint_t 2478 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr) 2479 { 2480 uintptr_t vaddr = ALIGN2PAGE(addr); 2481 htable_t *ht = NULL; 2482 x86pte_t pte; 2483 2484 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2485 2486 if (IN_VA_HOLE(vaddr)) 2487 return ((uint_t)-1); 2488 2489 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); 2490 if (ht == NULL) 2491 return ((uint_t)-1); 2492 2493 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) { 2494 htable_release(ht); 2495 return ((uint_t)-1); 2496 } 2497 2498 *attr = PROT_READ; 2499 if (PTE_GET(pte, PT_WRITABLE)) 2500 *attr |= PROT_WRITE; 2501 if (PTE_GET(pte, PT_USER)) 2502 *attr |= PROT_USER; 2503 if (!PTE_GET(pte, mmu.pt_nx)) 2504 *attr |= PROT_EXEC; 2505 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 2506 *attr |= HAT_NOSYNC; 2507 htable_release(ht); 2508 return (0); 2509 } 2510 2511 /* 2512 * hat_updateattr() applies the given attribute change to an existing mapping 2513 */ 2514 #define HAT_LOAD_ATTR 1 2515 #define HAT_SET_ATTR 2 2516 #define HAT_CLR_ATTR 3 2517 2518 static void 2519 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) 2520 { 2521 uintptr_t vaddr = (uintptr_t)addr; 2522 uintptr_t eaddr = (uintptr_t)addr + len; 2523 htable_t *ht = NULL; 2524 uint_t entry; 2525 x86pte_t oldpte, newpte; 2526 page_t *pp; 2527 2528 XPV_DISALLOW_MIGRATE(); 2529 ASSERT(IS_PAGEALIGNED(vaddr)); 2530 ASSERT(IS_PAGEALIGNED(eaddr)); 2531 ASSERT(hat == kas.a_hat || 2532 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2533 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2534 try_again: 2535 oldpte = htable_walk(hat, &ht, &vaddr, eaddr); 2536 if (ht == NULL) 2537 break; 2538 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST) 2539 continue; 2540 2541 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level)); 2542 if (pp == NULL) 2543 continue; 2544 x86_hm_enter(pp); 2545 2546 newpte = oldpte; 2547 /* 2548 * We found a page table entry in the desired range, 2549 * figure out the new attributes. 2550 */ 2551 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) { 2552 if ((attr & PROT_WRITE) && 2553 !PTE_GET(oldpte, PT_WRITABLE)) 2554 newpte |= PT_WRITABLE; 2555 2556 if ((attr & HAT_NOSYNC) && 2557 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC) 2558 newpte |= PT_NOSYNC; 2559 2560 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) 2561 newpte &= ~mmu.pt_nx; 2562 } 2563 2564 if (what == HAT_LOAD_ATTR) { 2565 if (!(attr & PROT_WRITE) && 2566 PTE_GET(oldpte, PT_WRITABLE)) 2567 newpte &= ~PT_WRITABLE; 2568 2569 if (!(attr & HAT_NOSYNC) && 2570 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2571 newpte &= ~PT_SOFTWARE; 2572 2573 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2574 newpte |= mmu.pt_nx; 2575 } 2576 2577 if (what == HAT_CLR_ATTR) { 2578 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE)) 2579 newpte &= ~PT_WRITABLE; 2580 2581 if ((attr & HAT_NOSYNC) && 2582 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2583 newpte &= ~PT_SOFTWARE; 2584 2585 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2586 newpte |= mmu.pt_nx; 2587 } 2588 2589 /* 2590 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set. 2591 * x86pte_set() depends on this. 2592 */ 2593 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC) 2594 newpte |= PT_REF | PT_MOD; 2595 2596 /* 2597 * what about PROT_READ or others? this code only handles: 2598 * EXEC, WRITE, NOSYNC 2599 */ 2600 2601 /* 2602 * If new PTE really changed, update the table. 
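 *
 * (Illustrative note: hati_update_pte() returns 0 on success, or the PTE
 * value it actually found when the compare-and-swap loses a race; the
 * goto try_again below simply re-walks the same vaddr with that fresher
 * state, the retry idiom used throughout this file.)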
2603 */ 2604 if (newpte != oldpte) { 2605 entry = htable_va2entry(vaddr, ht); 2606 oldpte = hati_update_pte(ht, entry, oldpte, newpte); 2607 if (oldpte != 0) { 2608 x86_hm_exit(pp); 2609 goto try_again; 2610 } 2611 } 2612 x86_hm_exit(pp); 2613 } 2614 if (ht) 2615 htable_release(ht); 2616 XPV_ALLOW_MIGRATE(); 2617 } 2618 2619 /* 2620 * Various wrappers for hat_updateattr() 2621 */ 2622 void 2623 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2624 { 2625 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2626 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR); 2627 } 2628 2629 void 2630 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2631 { 2632 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2633 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR); 2634 } 2635 2636 void 2637 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2638 { 2639 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2640 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR); 2641 } 2642 2643 void 2644 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot) 2645 { 2646 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2647 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR); 2648 } 2649 2650 /* 2651 * size_t hat_getpagesize(hat, addr) 2652 * returns pagesize in bytes for <hat, addr>. returns -1 of there is 2653 * no mapping. This is an advisory call. 2654 */ 2655 ssize_t 2656 hat_getpagesize(hat_t *hat, caddr_t addr) 2657 { 2658 uintptr_t vaddr = ALIGN2PAGE(addr); 2659 htable_t *ht; 2660 size_t pagesize; 2661 2662 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2663 if (IN_VA_HOLE(vaddr)) 2664 return (-1); 2665 ht = htable_getpage(hat, vaddr, NULL); 2666 if (ht == NULL) 2667 return (-1); 2668 pagesize = LEVEL_SIZE(ht->ht_level); 2669 htable_release(ht); 2670 return (pagesize); 2671 } 2672 2673 2674 2675 /* 2676 * pfn_t hat_getpfnum(hat, addr) 2677 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid. 2678 */ 2679 pfn_t 2680 hat_getpfnum(hat_t *hat, caddr_t addr) 2681 { 2682 uintptr_t vaddr = ALIGN2PAGE(addr); 2683 htable_t *ht; 2684 uint_t entry; 2685 pfn_t pfn = PFN_INVALID; 2686 2687 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2688 if (khat_running == 0) 2689 return (PFN_INVALID); 2690 2691 if (IN_VA_HOLE(vaddr)) 2692 return (PFN_INVALID); 2693 2694 XPV_DISALLOW_MIGRATE(); 2695 /* 2696 * A very common use of hat_getpfnum() is from the DDI for kernel pages. 2697 * Use the kmap_ptes (which also covers the 32 bit heap) to speed 2698 * this up. 2699 */ 2700 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2701 x86pte_t pte; 2702 pgcnt_t pg_index; 2703 2704 pg_index = mmu_btop(vaddr - mmu.kmap_addr); 2705 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index)); 2706 if (PTE_ISVALID(pte)) 2707 /*LINTED [use of constant 0 causes a lint warning] */ 2708 pfn = PTE2PFN(pte, 0); 2709 XPV_ALLOW_MIGRATE(); 2710 return (pfn); 2711 } 2712 2713 ht = htable_getpage(hat, vaddr, &entry); 2714 if (ht == NULL) { 2715 XPV_ALLOW_MIGRATE(); 2716 return (PFN_INVALID); 2717 } 2718 ASSERT(vaddr >= ht->ht_vaddr); 2719 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht)); 2720 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level); 2721 if (ht->ht_level > 0) 2722 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level)); 2723 htable_release(ht); 2724 XPV_ALLOW_MIGRATE(); 2725 return (pfn); 2726 } 2727 2728 /* 2729 * int hat_probe(hat, addr) 2730 * return 0 if no valid mapping is present. 
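 *
 * For illustration only (hypothetical caller, not taken from this file),
 * code that needs just a yes/no answer, such as segmap, can use
 *
 *	if (hat_probe(kas.a_hat, addr))
 *		... a valid mapping exists, no attributes are returned ...
 *
 * instead of paying for a full hat_getattr() lookup.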
Faster version 2731 * of hat_getattr in certain architectures. 2732 */ 2733 int 2734 hat_probe(hat_t *hat, caddr_t addr) 2735 { 2736 uintptr_t vaddr = ALIGN2PAGE(addr); 2737 uint_t entry; 2738 htable_t *ht; 2739 pgcnt_t pg_off; 2740 2741 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2742 ASSERT(hat == kas.a_hat || 2743 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2744 if (IN_VA_HOLE(vaddr)) 2745 return (0); 2746 2747 /* 2748 * Most common use of hat_probe is from segmap. We special case it 2749 * for performance. 2750 */ 2751 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2752 pg_off = mmu_btop(vaddr - mmu.kmap_addr); 2753 if (mmu.pae_hat) 2754 return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 2755 else 2756 return (PTE_ISVALID( 2757 ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 2758 } 2759 2760 ht = htable_getpage(hat, vaddr, &entry); 2761 htable_release(ht); 2762 return (ht != NULL); 2763 } 2764 2765 /* 2766 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 2767 */ 2768 static int 2769 is_it_dism(hat_t *hat, caddr_t va) 2770 { 2771 struct seg *seg; 2772 struct shm_data *shmd; 2773 struct spt_data *sptd; 2774 2775 seg = as_findseg(hat->hat_as, va, 0); 2776 ASSERT(seg != NULL); 2777 ASSERT(seg->s_base <= va); 2778 shmd = (struct shm_data *)seg->s_data; 2779 ASSERT(shmd != NULL); 2780 sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2781 ASSERT(sptd != NULL); 2782 if (sptd->spt_flags & SHM_PAGEABLE) 2783 return (1); 2784 return (0); 2785 } 2786 2787 /* 2788 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 2789 * except that we use the ism_hat's existing mappings to determine the pages 2790 * and protections to use for this hat. If we find a full properly aligned 2791 * and sized pagetable, we will attempt to share the pagetable itself. 2792 */ 2793 /*ARGSUSED*/ 2794 int 2795 hat_share( 2796 hat_t *hat, 2797 caddr_t addr, 2798 hat_t *ism_hat, 2799 caddr_t src_addr, 2800 size_t len, /* almost useless value, see below.. */ 2801 uint_t ismszc) 2802 { 2803 uintptr_t vaddr_start = (uintptr_t)addr; 2804 uintptr_t vaddr; 2805 uintptr_t eaddr = vaddr_start + len; 2806 uintptr_t ism_addr_start = (uintptr_t)src_addr; 2807 uintptr_t ism_addr = ism_addr_start; 2808 uintptr_t e_ism_addr = ism_addr + len; 2809 htable_t *ism_ht = NULL; 2810 htable_t *ht; 2811 x86pte_t pte; 2812 page_t *pp; 2813 pfn_t pfn; 2814 level_t l; 2815 pgcnt_t pgcnt; 2816 uint_t prot; 2817 int is_dism; 2818 int flags; 2819 2820 /* 2821 * We might be asked to share an empty DISM hat by as_dup() 2822 */ 2823 ASSERT(hat != kas.a_hat); 2824 ASSERT(eaddr <= _userlimit); 2825 if (!(ism_hat->hat_flags & HAT_SHARED)) { 2826 ASSERT(hat_get_mapped_size(ism_hat) == 0); 2827 return (0); 2828 } 2829 XPV_DISALLOW_MIGRATE(); 2830 2831 /* 2832 * The SPT segment driver often passes us a size larger than there are 2833 * valid mappings. That's because it rounds the segment size up to a 2834 * large pagesize, even if the actual memory mapped by ism_hat is less. 2835 */ 2836 ASSERT(IS_PAGEALIGNED(vaddr_start)); 2837 ASSERT(IS_PAGEALIGNED(ism_addr_start)); 2838 ASSERT(ism_hat->hat_flags & HAT_SHARED); 2839 is_dism = is_it_dism(hat, addr); 2840 while (ism_addr < e_ism_addr) { 2841 /* 2842 * use htable_walk to get the next valid ISM mapping 2843 */ 2844 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 2845 if (ism_ht == NULL) 2846 break; 2847 2848 /* 2849 * First check to see if we already share the page table. 
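 *
 * (Illustrative note: on amd64 a level 0 pagetable can only be shared when
 * the ISM address sits at that table's base, the target vaddr is aligned on
 * the table's 2MB span (LEVEL_SIZE(1)), and at least a full 2MB of the
 * request remains; anything smaller or misaligned takes the not_shared path
 * below and is loaded mapping by mapping instead.)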
2850 */ 2851 l = ism_ht->ht_level; 2852 vaddr = vaddr_start + (ism_addr - ism_addr_start); 2853 ht = htable_lookup(hat, vaddr, l); 2854 if (ht != NULL) { 2855 if (ht->ht_flags & HTABLE_SHARED_PFN) 2856 goto shared; 2857 htable_release(ht); 2858 goto not_shared; 2859 } 2860 2861 /* 2862 * Can't ever share top table. 2863 */ 2864 if (l == mmu.max_level) 2865 goto not_shared; 2866 2867 /* 2868 * Avoid level mismatches later due to DISM faults. 2869 */ 2870 if (is_dism && l > 0) 2871 goto not_shared; 2872 2873 /* 2874 * addresses and lengths must align 2875 * table must be fully populated 2876 * no lower level page tables 2877 */ 2878 if (ism_addr != ism_ht->ht_vaddr || 2879 (vaddr & LEVEL_OFFSET(l + 1)) != 0) 2880 goto not_shared; 2881 2882 /* 2883 * The range of address space must cover a full table. 2884 */ 2885 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 2886 goto not_shared; 2887 2888 /* 2889 * All entries in the ISM page table must be leaf PTEs. 2890 */ 2891 if (l > 0) { 2892 int e; 2893 2894 /* 2895 * We know the 0th is from htable_walk() above. 2896 */ 2897 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 2898 x86pte_t pte; 2899 pte = x86pte_get(ism_ht, e); 2900 if (!PTE_ISPAGE(pte, l)) 2901 goto not_shared; 2902 } 2903 } 2904 2905 /* 2906 * share the page table 2907 */ 2908 ht = htable_create(hat, vaddr, l, ism_ht); 2909 shared: 2910 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 2911 ASSERT(ht->ht_shares == ism_ht); 2912 hat->hat_ism_pgcnt += 2913 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 2914 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 2915 ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 2916 htable_release(ht); 2917 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 2918 htable_release(ism_ht); 2919 ism_ht = NULL; 2920 continue; 2921 2922 not_shared: 2923 /* 2924 * Unable to share the page table. Instead we will 2925 * create new mappings from the values in the ISM mappings. 2926 * Figure out what level size mappings to use; 2927 */ 2928 for (l = ism_ht->ht_level; l > 0; --l) { 2929 if (LEVEL_SIZE(l) <= eaddr - vaddr && 2930 (vaddr & LEVEL_OFFSET(l)) == 0) 2931 break; 2932 } 2933 2934 /* 2935 * The ISM mapping might be larger than the share area, 2936 * be careful to truncate it if needed. 2937 */ 2938 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 2939 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 2940 } else { 2941 pgcnt = mmu_btop(eaddr - vaddr); 2942 l = 0; 2943 } 2944 2945 pfn = PTE2PFN(pte, ism_ht->ht_level); 2946 ASSERT(pfn != PFN_INVALID); 2947 while (pgcnt > 0) { 2948 /* 2949 * Make a new pte for the PFN for this level. 2950 * Copy protections for the pte from the ISM pte. 2951 */ 2952 pp = page_numtopp_nolock(pfn); 2953 ASSERT(pp != NULL); 2954 2955 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 2956 if (PTE_GET(pte, PT_WRITABLE)) 2957 prot |= PROT_WRITE; 2958 if (!PTE_GET(pte, PT_NX)) 2959 prot |= PROT_EXEC; 2960 2961 flags = HAT_LOAD; 2962 if (!is_dism) 2963 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 2964 while (hati_load_common(hat, vaddr, pp, prot, flags, 2965 l, pfn) != 0) { 2966 if (l == 0) 2967 panic("hati_load_common() failure"); 2968 --l; 2969 } 2970 2971 vaddr += LEVEL_SIZE(l); 2972 ism_addr += LEVEL_SIZE(l); 2973 pfn += mmu_btop(LEVEL_SIZE(l)); 2974 pgcnt -= mmu_btop(LEVEL_SIZE(l)); 2975 } 2976 } 2977 if (ism_ht != NULL) 2978 htable_release(ism_ht); 2979 XPV_ALLOW_MIGRATE(); 2980 return (0); 2981 } 2982 2983 2984 /* 2985 * hat_unshare() is similar to hat_unload_callback(), but 2986 * we have to look for empty shared pagetables. 
Note that 2987 * hat_unshare() is always invoked against an entire segment. 2988 */ 2989 /*ARGSUSED*/ 2990 void 2991 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 2992 { 2993 uint64_t vaddr = (uintptr_t)addr; 2994 uintptr_t eaddr = vaddr + len; 2995 htable_t *ht = NULL; 2996 uint_t need_demaps = 0; 2997 int flags = HAT_UNLOAD_UNMAP; 2998 level_t l; 2999 3000 ASSERT(hat != kas.a_hat); 3001 ASSERT(eaddr <= _userlimit); 3002 ASSERT(IS_PAGEALIGNED(vaddr)); 3003 ASSERT(IS_PAGEALIGNED(eaddr)); 3004 XPV_DISALLOW_MIGRATE(); 3005 3006 /* 3007 * First go through and remove any shared pagetables. 3008 * 3009 * Note that it's ok to delay the TLB shootdown till the entire range is 3010 * finished, because if hat_pageunload() were to unload a shared 3011 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 3012 */ 3013 l = mmu.max_page_level; 3014 if (l == mmu.max_level) 3015 --l; 3016 for (; l >= 0; --l) { 3017 for (vaddr = (uintptr_t)addr; vaddr < eaddr; 3018 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 3019 ASSERT(!IN_VA_HOLE(vaddr)); 3020 /* 3021 * find a pagetable that maps the current address 3022 */ 3023 ht = htable_lookup(hat, vaddr, l); 3024 if (ht == NULL) 3025 continue; 3026 if (ht->ht_flags & HTABLE_SHARED_PFN) { 3027 /* 3028 * clear page count, set valid_cnt to 0, 3029 * let htable_release() finish the job 3030 */ 3031 hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 3032 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3033 ht->ht_valid_cnt = 0; 3034 need_demaps = 1; 3035 } 3036 htable_release(ht); 3037 } 3038 } 3039 3040 /* 3041 * flush the TLBs - since we're probably dealing with MANY mappings 3042 * we do just one CR3 reload. 3043 */ 3044 if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 3045 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 3046 3047 /* 3048 * Now go back and clean up any unaligned mappings that 3049 * couldn't share pagetables. 3050 */ 3051 if (!is_it_dism(hat, addr)) 3052 flags |= HAT_UNLOAD_UNLOCK; 3053 hat_unload(hat, addr, len, flags); 3054 XPV_ALLOW_MIGRATE(); 3055 } 3056 3057 3058 /* 3059 * hat_reserve() does nothing 3060 */ 3061 /*ARGSUSED*/ 3062 void 3063 hat_reserve(struct as *as, caddr_t addr, size_t len) 3064 { 3065 } 3066 3067 3068 /* 3069 * Called when all mappings to a page should have write permission removed. 3070 * Mostly stolen from hat_pagesync() 3071 */ 3072 static void 3073 hati_page_clrwrt(struct page *pp) 3074 { 3075 hment_t *hm = NULL; 3076 htable_t *ht; 3077 uint_t entry; 3078 x86pte_t old; 3079 x86pte_t new; 3080 uint_t pszc = 0; 3081 3082 XPV_DISALLOW_MIGRATE(); 3083 next_size: 3084 /* 3085 * walk thru the mapping list clearing write permission 3086 */ 3087 x86_hm_enter(pp); 3088 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3089 if (ht->ht_level < pszc) 3090 continue; 3091 old = x86pte_get(ht, entry); 3092 3093 for (;;) { 3094 /* 3095 * Is this mapping of interest? 3096 */ 3097 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum || 3098 PTE_GET(old, PT_WRITABLE) == 0) 3099 break; 3100 3101 /* 3102 * Clear ref/mod writable bits. This requires cross 3103 * calls to ensure any executing TLBs see cleared bits. 
3104 */ 3105 new = old; 3106 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE); 3107 old = hati_update_pte(ht, entry, old, new); 3108 if (old != 0) 3109 continue; 3110 3111 break; 3112 } 3113 } 3114 x86_hm_exit(pp); 3115 while (pszc < pp->p_szc) { 3116 page_t *tpp; 3117 pszc++; 3118 tpp = PP_GROUPLEADER(pp, pszc); 3119 if (pp != tpp) { 3120 pp = tpp; 3121 goto next_size; 3122 } 3123 } 3124 XPV_ALLOW_MIGRATE(); 3125 } 3126 3127 /* 3128 * void hat_page_setattr(pp, flag) 3129 * void hat_page_clrattr(pp, flag) 3130 * used to set/clr ref/mod bits. 3131 */ 3132 void 3133 hat_page_setattr(struct page *pp, uint_t flag) 3134 { 3135 vnode_t *vp = pp->p_vnode; 3136 kmutex_t *vphm = NULL; 3137 page_t **listp; 3138 int noshuffle; 3139 3140 noshuffle = flag & P_NSH; 3141 flag &= ~P_NSH; 3142 3143 if (PP_GETRM(pp, flag) == flag) 3144 return; 3145 3146 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 3147 !noshuffle) { 3148 vphm = page_vnode_mutex(vp); 3149 mutex_enter(vphm); 3150 } 3151 3152 PP_SETRM(pp, flag); 3153 3154 if (vphm != NULL) { 3155 3156 /* 3157 * Some File Systems examine v_pages for NULL without 3158 * grabbing the vphm mutex. Must not let it become NULL when 3159 * pp is the only page on the list. 3160 */ 3161 if (pp->p_vpnext != pp) { 3162 page_vpsub(&vp->v_pages, pp); 3163 if (vp->v_pages != NULL) 3164 listp = &vp->v_pages->p_vpprev->p_vpnext; 3165 else 3166 listp = &vp->v_pages; 3167 page_vpadd(listp, pp); 3168 } 3169 mutex_exit(vphm); 3170 } 3171 } 3172 3173 void 3174 hat_page_clrattr(struct page *pp, uint_t flag) 3175 { 3176 vnode_t *vp = pp->p_vnode; 3177 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 3178 3179 /* 3180 * Caller is expected to hold page's io lock for VMODSORT to work 3181 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 3182 * bit is cleared. 3183 * We don't have an assert here to avoid tripping some existing third party 3184 * code. The dirty page is moved back to the top of the v_page list 3185 * after IO is done in pvn_write_done(). 3186 */ 3187 PP_CLRRM(pp, flag); 3188 3189 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 3190 3191 /* 3192 * VMODSORT works by removing write permissions and getting 3193 * a fault when a page is made dirty. At this point 3194 * we need to remove write permission from all mappings 3195 * to this page. 3196 */ 3197 hati_page_clrwrt(pp); 3198 } 3199 } 3200 3201 /* 3202 * If flag is specified, returns 0 if attribute is disabled 3203 * and nonzero if enabled. If flag specifies multiple attributes 3204 * then returns 0 if ALL attributes are disabled. This is an advisory 3205 * call. 3206 */ 3207 uint_t 3208 hat_page_getattr(struct page *pp, uint_t flag) 3209 { 3210 return (PP_GETRM(pp, flag)); 3211 } 3212 3213 3214 /* 3215 * common code used by hat_pageunload() and hment_steal() 3216 */ 3217 hment_t * 3218 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry) 3219 { 3220 x86pte_t old_pte; 3221 pfn_t pfn = pp->p_pagenum; 3222 hment_t *hm; 3223 3224 /* 3225 * We need to acquire a hold on the htable in order to 3226 * do the invalidate. We know the htable must exist, since 3227 * unmaps don't release the htable until after removing any 3228 * hment. Having x86_hm_enter() keeps that from proceeding. 3229 */ 3230 htable_acquire(ht); 3231 3232 /* 3233 * Invalidate the PTE and remove the hment.
3234 */ 3235 old_pte = x86pte_inval(ht, entry, 0, NULL); 3236 if (PTE2PFN(old_pte, ht->ht_level) != pfn) { 3237 panic("x86pte_inval() failure found PTE = " FMT_PTE 3238 " pfn being unmapped is %lx ht=0x%lx entry=0x%x", 3239 old_pte, pfn, (uintptr_t)ht, entry); 3240 } 3241 3242 /* 3243 * Clean up all the htable information for this mapping 3244 */ 3245 ASSERT(ht->ht_valid_cnt > 0); 3246 HTABLE_DEC(ht->ht_valid_cnt); 3247 PGCNT_DEC(ht->ht_hat, ht->ht_level); 3248 3249 /* 3250 * sync ref/mod bits to the page_t 3251 */ 3252 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC) 3253 hati_sync_pte_to_page(pp, old_pte, ht->ht_level); 3254 3255 /* 3256 * Remove the mapping list entry for this page. 3257 */ 3258 hm = hment_remove(pp, ht, entry); 3259 3260 /* 3261 * drop the mapping list lock so that we might free the 3262 * hment and htable. 3263 */ 3264 x86_hm_exit(pp); 3265 htable_release(ht); 3266 return (hm); 3267 } 3268 3269 extern int vpm_enable; 3270 /* 3271 * Unload all translations to a page. If the page is a subpage of a large 3272 * page, the large page mappings are also removed. 3273 * 3274 * The forceflags are unused. 3275 */ 3276 3277 /*ARGSUSED*/ 3278 static int 3279 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag) 3280 { 3281 page_t *cur_pp = pp; 3282 hment_t *hm; 3283 hment_t *prev; 3284 htable_t *ht; 3285 uint_t entry; 3286 level_t level; 3287 3288 XPV_DISALLOW_MIGRATE(); 3289 3290 /* 3291 * prevent recursion due to kmem_free() 3292 */ 3293 ++curthread->t_hatdepth; 3294 ASSERT(curthread->t_hatdepth < 16); 3295 3296 #if defined(__amd64) 3297 /* 3298 * clear the vpm ref. 3299 */ 3300 if (vpm_enable) { 3301 pp->p_vpmref = 0; 3302 } 3303 #endif 3304 /* 3305 * The loop with next_size handles pages with multiple pagesize mappings 3306 */ 3307 next_size: 3308 for (;;) { 3309 3310 /* 3311 * Get a mapping list entry 3312 */ 3313 x86_hm_enter(cur_pp); 3314 for (prev = NULL; ; prev = hm) { 3315 hm = hment_walk(cur_pp, &ht, &entry, prev); 3316 if (hm == NULL) { 3317 x86_hm_exit(cur_pp); 3318 3319 /* 3320 * If not part of a larger page, we're done. 3321 */ 3322 if (cur_pp->p_szc <= pg_szcd) { 3323 ASSERT(curthread->t_hatdepth > 0); 3324 --curthread->t_hatdepth; 3325 XPV_ALLOW_MIGRATE(); 3326 return (0); 3327 } 3328 3329 /* 3330 * Else check the next larger page size. 3331 * hat_page_demote() may decrease p_szc 3332 * but that's ok we'll just take an extra 3333 * trip discover there're no larger mappings 3334 * and return. 3335 */ 3336 ++pg_szcd; 3337 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd); 3338 goto next_size; 3339 } 3340 3341 /* 3342 * If this mapping size matches, remove it. 3343 */ 3344 level = ht->ht_level; 3345 if (level == pg_szcd) 3346 break; 3347 } 3348 3349 /* 3350 * Remove the mapping list entry for this page. 3351 * Note this does the x86_hm_exit() for us. 3352 */ 3353 hm = hati_page_unmap(cur_pp, ht, entry); 3354 if (hm != NULL) 3355 hment_free(hm); 3356 } 3357 } 3358 3359 int 3360 hat_pageunload(struct page *pp, uint_t forceflag) 3361 { 3362 ASSERT(PAGE_EXCL(pp)); 3363 return (hati_pageunload(pp, 0, forceflag)); 3364 } 3365 3366 /* 3367 * Unload all large mappings to pp and reduce by 1 p_szc field of every large 3368 * page level that included pp. 3369 * 3370 * pp must be locked EXCL. Even though no other constituent pages are locked 3371 * it's legal to unload large mappings to pp because all constituent pages of 3372 * large locked mappings have to be locked SHARED. 
Therefore, if we have an EXCL 3373 * lock on one of the constituent pages, none of the large mappings to pp are 3374 * locked. 3375 * 3376 * Change (always decrease) p_szc field starting from the last constituent 3377 * page and ending with root constituent page so that root's pszc always shows 3378 * the area where hat_page_demote() may be active. 3379 * 3380 * This mechanism is only used for file system pages where it's not always 3381 * possible to get EXCL locks on all constituent pages to demote the size code 3382 * (as is done for anonymous or kernel large pages). 3383 */ 3384 void 3385 hat_page_demote(page_t *pp) 3386 { 3387 uint_t pszc; 3388 uint_t rszc; 3389 uint_t szc; 3390 page_t *rootpp; 3391 page_t *firstpp; 3392 page_t *lastpp; 3393 pgcnt_t pgcnt; 3394 3395 ASSERT(PAGE_EXCL(pp)); 3396 ASSERT(!PP_ISFREE(pp)); 3397 ASSERT(page_szc_lock_assert(pp)); 3398 3399 if (pp->p_szc == 0) 3400 return; 3401 3402 rootpp = PP_GROUPLEADER(pp, 1); 3403 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD); 3404 3405 /* 3406 * all large mappings to pp are gone 3407 * and no new ones can be set up since pp is locked exclusively. 3408 * 3409 * Lock the root to make sure there's only one hat_page_demote() 3410 * outstanding within the area of this root's pszc. 3411 * 3412 * Second potential hat_page_demote() is already eliminated by upper 3413 * VM layer via page_szc_lock() but we don't rely on it and use our 3414 * own locking (so that upper layer locking can be changed without 3415 * the assumption that the hat depends on upper layer VM to prevent multiple 3416 * hat_page_demote() calls from being issued simultaneously to the same large 3417 * page). 3418 */ 3419 again: 3420 pszc = pp->p_szc; 3421 if (pszc == 0) 3422 return; 3423 rootpp = PP_GROUPLEADER(pp, pszc); 3424 x86_hm_enter(rootpp); 3425 /* 3426 * If root's p_szc is different from pszc, we raced with another 3427 * hat_page_demote(). Drop the lock and try to find the root again. 3428 * If root's p_szc is greater than pszc, the previous hat_page_demote() is 3429 * not done yet. Take and release mlist lock of root's root to wait 3430 * for previous hat_page_demote() to complete. 3431 */ 3432 if ((rszc = rootpp->p_szc) != pszc) { 3433 x86_hm_exit(rootpp); 3434 if (rszc > pszc) { 3435 /* p_szc of a locked non-free page can't increase */ 3436 ASSERT(pp != rootpp); 3437 3438 rootpp = PP_GROUPLEADER(rootpp, rszc); 3439 x86_hm_enter(rootpp); 3440 x86_hm_exit(rootpp); 3441 } 3442 goto again; 3443 } 3444 ASSERT(pp->p_szc == pszc); 3445 3446 /* 3447 * Decrement by 1 p_szc of every constituent page of a region that 3448 * covered pp. For example, if the original szc is 3 it gets changed to 2 3449 * everywhere except in region 2 that covered pp. Region 2 that 3450 * covered pp gets demoted to 1 everywhere except in region 1 that 3451 * covered pp. The region 1 that covered pp is demoted to region 3452 * 0. It's done this way because from region 3 we removed level 3 3453 * mappings, from region 2 that covered pp we removed level 2 mappings 3454 * and from region 1 that covered pp we removed level 1 mappings. All 3455 * changes are done from high pfn's to low pfn's so that roots 3456 * are changed last, allowing one to know the largest region where 3457 * hat_page_demote() is still active by only looking at the root page. 3458 * 3459 * This algorithm is implemented in 2 while loops.
The first loop changes 3460 * p_szc of pages to the right of pp's level 1 region and the second 3461 * loop changes p_szc of pages of the level 1 region that covers pp 3462 * and all pages to the left of the level 1 region that covers pp. 3463 * In the first loop p_szc keeps dropping with every iteration 3464 * and in the second loop it keeps increasing with every iteration. 3465 * 3466 * First loop description: Demote pages to the right of pp outside of 3467 * the level 1 region that covers pp. In every iteration of the while 3468 * loop below find the last page of the szc region and the first page of 3469 * the (szc - 1) region that is immediately to the right of the (szc - 1) 3470 * region that covers pp. From the last such page to the first such page 3471 * change every page's szc to szc - 1. Decrement szc and continue 3472 * looping until szc is 1. If pp belongs to the last (szc - 1) region 3473 * of the szc region, skip to the next iteration. 3474 */ 3475 szc = pszc; 3476 while (szc > 1) { 3477 lastpp = PP_GROUPLEADER(pp, szc); 3478 pgcnt = page_get_pagecnt(szc); 3479 lastpp += pgcnt - 1; 3480 firstpp = PP_GROUPLEADER(pp, (szc - 1)); 3481 pgcnt = page_get_pagecnt(szc - 1); 3482 if (lastpp - firstpp < pgcnt) { 3483 szc--; 3484 continue; 3485 } 3486 firstpp += pgcnt; 3487 while (lastpp != firstpp) { 3488 ASSERT(lastpp->p_szc == pszc); 3489 lastpp->p_szc = szc - 1; 3490 lastpp--; 3491 } 3492 firstpp->p_szc = szc - 1; 3493 szc--; 3494 } 3495 3496 /* 3497 * Second loop description: 3498 * First iteration changes p_szc to 0 of every 3499 * page of the level 1 region that covers pp. 3500 * Subsequent iterations find the last page of the szc region 3501 * immediately to the left of the szc region that covered pp 3502 * and the first page of the (szc + 1) region that covers pp. 3503 * From last to first page change p_szc of every page to szc. 3504 * Increment szc and continue looping until szc is pszc. 3505 * If pp belongs to the first szc region of the (szc + 1) region, 3506 * skip to the next iteration.
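 *
 * (Worked example, for illustration: with pszc == 2 and 512-page level 1
 * regions, the first loop leaves every page to the right of pp's level 1
 * region with p_szc == 1, and the second loop leaves pp's own level 1
 * region with p_szc == 0 and everything to its left with p_szc == 1.
 * Within every run the first (root) page is written last.)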
3507 * 3508 */ 3509 szc = 0; 3510 while (szc < pszc) { 3511 firstpp = PP_GROUPLEADER(pp, (szc + 1)); 3512 if (szc == 0) { 3513 pgcnt = page_get_pagecnt(1); 3514 lastpp = firstpp + (pgcnt - 1); 3515 } else { 3516 lastpp = PP_GROUPLEADER(pp, szc); 3517 if (firstpp == lastpp) { 3518 szc++; 3519 continue; 3520 } 3521 lastpp--; 3522 pgcnt = page_get_pagecnt(szc); 3523 } 3524 while (lastpp != firstpp) { 3525 ASSERT(lastpp->p_szc == pszc); 3526 lastpp->p_szc = szc; 3527 lastpp--; 3528 } 3529 firstpp->p_szc = szc; 3530 if (firstpp == rootpp) 3531 break; 3532 szc++; 3533 } 3534 x86_hm_exit(rootpp); 3535 } 3536 3537 /* 3538 * get hw stats from hardware into page struct and reset hw stats 3539 * returns attributes of page 3540 * Flags for hat_pagesync, hat_getstat, hat_sync 3541 * 3542 * define HAT_SYNC_ZERORM 0x01 3543 * 3544 * Additional flags for hat_pagesync 3545 * 3546 * define HAT_SYNC_STOPON_REF 0x02 3547 * define HAT_SYNC_STOPON_MOD 0x04 3548 * define HAT_SYNC_STOPON_RM 0x06 3549 * define HAT_SYNC_STOPON_SHARED 0x08 3550 */ 3551 uint_t 3552 hat_pagesync(struct page *pp, uint_t flags) 3553 { 3554 hment_t *hm = NULL; 3555 htable_t *ht; 3556 uint_t entry; 3557 x86pte_t old, save_old; 3558 x86pte_t new; 3559 uchar_t nrmbits = P_REF|P_MOD|P_RO; 3560 extern ulong_t po_share; 3561 page_t *save_pp = pp; 3562 uint_t pszc = 0; 3563 3564 ASSERT(PAGE_LOCKED(pp) || panicstr); 3565 3566 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 3567 return (pp->p_nrm & nrmbits); 3568 3569 if ((flags & HAT_SYNC_ZERORM) == 0) { 3570 3571 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 3572 return (pp->p_nrm & nrmbits); 3573 3574 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 3575 return (pp->p_nrm & nrmbits); 3576 3577 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 3578 hat_page_getshare(pp) > po_share) { 3579 if (PP_ISRO(pp)) 3580 PP_SETREF(pp); 3581 return (pp->p_nrm & nrmbits); 3582 } 3583 } 3584 3585 XPV_DISALLOW_MIGRATE(); 3586 next_size: 3587 /* 3588 * walk thru the mapping list syncing (and clearing) ref/mod bits. 3589 */ 3590 x86_hm_enter(pp); 3591 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3592 if (ht->ht_level < pszc) 3593 continue; 3594 old = x86pte_get(ht, entry); 3595 try_again: 3596 3597 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 3598 3599 if (PTE_GET(old, PT_REF | PT_MOD) == 0) 3600 continue; 3601 3602 save_old = old; 3603 if ((flags & HAT_SYNC_ZERORM) != 0) { 3604 3605 /* 3606 * Need to clear ref or mod bits. Need to demap 3607 * to make sure any executing TLBs see cleared bits. 3608 */ 3609 new = old; 3610 PTE_CLR(new, PT_REF | PT_MOD); 3611 old = hati_update_pte(ht, entry, old, new); 3612 if (old != 0) 3613 goto try_again; 3614 3615 old = save_old; 3616 } 3617 3618 /* 3619 * Sync the PTE 3620 */ 3621 if (!(flags & HAT_SYNC_ZERORM) && 3622 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 3623 hati_sync_pte_to_page(pp, old, ht->ht_level); 3624 3625 /* 3626 * can stop short if we found a ref'd or mod'd page 3627 */ 3628 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 3629 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 3630 x86_hm_exit(pp); 3631 goto done; 3632 } 3633 } 3634 x86_hm_exit(pp); 3635 while (pszc < pp->p_szc) { 3636 page_t *tpp; 3637 pszc++; 3638 tpp = PP_GROUPLEADER(pp, pszc); 3639 if (pp != tpp) { 3640 pp = tpp; 3641 goto next_size; 3642 } 3643 } 3644 done: 3645 XPV_ALLOW_MIGRATE(); 3646 return (save_pp->p_nrm & nrmbits); 3647 } 3648 3649 /* 3650 * returns approx number of mappings to this pp. 
A return of 0 implies 3651 * there are no mappings to the page. 3652 */ 3653 ulong_t 3654 hat_page_getshare(page_t *pp) 3655 { 3656 uint_t cnt; 3657 cnt = hment_mapcnt(pp); 3658 #if defined(__amd64) 3659 if (vpm_enable && pp->p_vpmref) { 3660 cnt += 1; 3661 } 3662 #endif 3663 return (cnt); 3664 } 3665 3666 /* 3667 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 3668 * otherwise. 3669 */ 3670 int 3671 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 3672 { 3673 return (hat_page_getshare(pp) > sh_thresh); 3674 } 3675 3676 /* 3677 * hat_softlock isn't supported anymore 3678 */ 3679 /*ARGSUSED*/ 3680 faultcode_t 3681 hat_softlock( 3682 hat_t *hat, 3683 caddr_t addr, 3684 size_t *len, 3685 struct page **page_array, 3686 uint_t flags) 3687 { 3688 return (FC_NOSUPPORT); 3689 } 3690 3691 3692 3693 /* 3694 * Routine to expose supported HAT features to platform-independent code. 3695 */ 3696 /*ARGSUSED*/ 3697 int 3698 hat_supported(enum hat_features feature, void *arg) 3699 { 3700 switch (feature) { 3701 3702 case HAT_SHARED_PT: /* this is really ISM */ 3703 return (1); 3704 3705 case HAT_DYNAMIC_ISM_UNMAP: 3706 return (0); 3707 3708 case HAT_VMODSORT: 3709 return (1); 3710 3711 case HAT_SHARED_REGIONS: 3712 return (0); 3713 3714 default: 3715 panic("hat_supported() - unknown feature"); 3716 } 3717 return (0); 3718 } 3719 3720 /* 3721 * Called when a thread is exiting and has been switched to the kernel AS 3722 */ 3723 void 3724 hat_thread_exit(kthread_t *thd) 3725 { 3726 ASSERT(thd->t_procp->p_as == &kas); 3727 XPV_DISALLOW_MIGRATE(); 3728 hat_switch(thd->t_procp->p_as->a_hat); 3729 XPV_ALLOW_MIGRATE(); 3730 } 3731 3732 /* 3733 * Set up the given brand-new hat structure as the new HAT on this cpu's mmu. 3734 */ 3735 /*ARGSUSED*/ 3736 void 3737 hat_setup(hat_t *hat, int flags) 3738 { 3739 XPV_DISALLOW_MIGRATE(); 3740 kpreempt_disable(); 3741 3742 hat_switch(hat); 3743 3744 kpreempt_enable(); 3745 XPV_ALLOW_MIGRATE(); 3746 } 3747 3748 /* 3749 * Prepare for a CPU private mapping for the given address. 3750 * 3751 * The address can only be used from a single CPU and can be remapped 3752 * using hat_mempte_remap(). Return the address of the PTE. 3753 * 3754 * We do the htable_create() if necessary and increment the valid count so 3755 * the htable can't disappear. We also hat_devload() the page table into 3756 * the kernel so that the PTE is quickly accessed. 3757 */ 3758 hat_mempte_t 3759 hat_mempte_setup(caddr_t addr) 3760 { 3761 uintptr_t va = (uintptr_t)addr; 3762 htable_t *ht; 3763 uint_t entry; 3764 x86pte_t oldpte; 3765 hat_mempte_t p; 3766 3767 ASSERT(IS_PAGEALIGNED(va)); 3768 ASSERT(!IN_VA_HOLE(va)); 3769 ++curthread->t_hatdepth; 3770 XPV_DISALLOW_MIGRATE(); 3771 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0); 3772 if (ht == NULL) { 3773 ht = htable_create(kas.a_hat, va, 0, NULL); 3774 entry = htable_va2entry(va, ht); 3775 ASSERT(ht->ht_level == 0); 3776 oldpte = x86pte_get(ht, entry); 3777 } 3778 if (PTE_ISVALID(oldpte)) 3779 panic("hat_mempte_setup(): address already mapped " 3780 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte); 3781 3782 /* 3783 * increment ht_valid_cnt so that the pagetable can't disappear 3784 */ 3785 HTABLE_INC(ht->ht_valid_cnt); 3786 3787 /* 3788 * return the PTE physical address to the caller. 3789 */ 3790 htable_release(ht); 3791 XPV_ALLOW_MIGRATE(); 3792 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry); 3793 --curthread->t_hatdepth; 3794 return (p); 3795 } 3796 3797 /* 3798 * Release a CPU private mapping for the given address.
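 *
 * For illustration only (hypothetical caller; the attributes and flags are
 * example values, not taken from this file), the usual lifecycle of a CPU
 * private mapping is:
 *
 *	hat_mempte_t pte_pa = hat_mempte_setup(addr);
 *	...
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, addr, pte_pa, PROT_READ | PROT_WRITE, 0);
 *	... use the mapping at addr on this CPU only ...
 *	kpreempt_enable();
 *	...
 *	hat_mempte_release(addr, pte_pa);
 *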
3799 * We decrement the htable valid count so it might be destroyed. 3800 */ 3801 /*ARGSUSED1*/ 3802 void 3803 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 3804 { 3805 htable_t *ht; 3806 3807 XPV_DISALLOW_MIGRATE(); 3808 /* 3809 * invalidate any left over mapping and decrement the htable valid count 3810 */ 3811 #ifdef __xpv 3812 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 3813 UVMF_INVLPG | UVMF_LOCAL)) 3814 panic("HYPERVISOR_update_va_mapping() failed"); 3815 #else 3816 { 3817 x86pte_t *pteptr; 3818 3819 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3820 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3821 if (mmu.pae_hat) 3822 *pteptr = 0; 3823 else 3824 *(x86pte32_t *)pteptr = 0; 3825 mmu_tlbflush_entry(addr); 3826 x86pte_mapout(); 3827 } 3828 #endif 3829 3830 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 3831 if (ht == NULL) 3832 panic("hat_mempte_release(): invalid address"); 3833 ASSERT(ht->ht_level == 0); 3834 HTABLE_DEC(ht->ht_valid_cnt); 3835 htable_release(ht); 3836 XPV_ALLOW_MIGRATE(); 3837 } 3838 3839 /* 3840 * Apply a temporary CPU private mapping to a page. We flush the TLB only 3841 * on this CPU, so this ought to have been called with preemption disabled. 3842 */ 3843 void 3844 hat_mempte_remap( 3845 pfn_t pfn, 3846 caddr_t addr, 3847 hat_mempte_t pte_pa, 3848 uint_t attr, 3849 uint_t flags) 3850 { 3851 uintptr_t va = (uintptr_t)addr; 3852 x86pte_t pte; 3853 3854 /* 3855 * Remap the given PTE to the new page's PFN. Invalidate only 3856 * on this CPU. 3857 */ 3858 #ifdef DEBUG 3859 htable_t *ht; 3860 uint_t entry; 3861 3862 ASSERT(IS_PAGEALIGNED(va)); 3863 ASSERT(!IN_VA_HOLE(va)); 3864 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 3865 ASSERT(ht != NULL); 3866 ASSERT(ht->ht_level == 0); 3867 ASSERT(ht->ht_valid_cnt > 0); 3868 ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 3869 htable_release(ht); 3870 #endif 3871 XPV_DISALLOW_MIGRATE(); 3872 pte = hati_mkpte(pfn, attr, 0, flags); 3873 #ifdef __xpv 3874 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 3875 panic("HYPERVISOR_update_va_mapping() failed"); 3876 #else 3877 { 3878 x86pte_t *pteptr; 3879 3880 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3881 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3882 if (mmu.pae_hat) 3883 *(x86pte_t *)pteptr = pte; 3884 else 3885 *(x86pte32_t *)pteptr = (x86pte32_t)pte; 3886 mmu_tlbflush_entry(addr); 3887 x86pte_mapout(); 3888 } 3889 #endif 3890 XPV_ALLOW_MIGRATE(); 3891 } 3892 3893 3894 3895 /* 3896 * Hat locking functions 3897 * XXX - these two functions are currently being used by hatstats 3898 * they can be removed by using a per-as mutex for hatstats. 3899 */ 3900 void 3901 hat_enter(hat_t *hat) 3902 { 3903 mutex_enter(&hat->hat_mutex); 3904 } 3905 3906 void 3907 hat_exit(hat_t *hat) 3908 { 3909 mutex_exit(&hat->hat_mutex); 3910 } 3911 3912 /* 3913 * HAT part of cpu initialization. 3914 */ 3915 void 3916 hat_cpu_online(struct cpu *cpup) 3917 { 3918 if (cpup != CPU) { 3919 x86pte_cpu_init(cpup); 3920 hat_vlp_setup(cpup); 3921 } 3922 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 3923 } 3924 3925 /* 3926 * HAT part of cpu deletion. 3927 * (currently, we only call this after the cpu is safely passivated.) 3928 */ 3929 void 3930 hat_cpu_offline(struct cpu *cpup) 3931 { 3932 ASSERT(cpup != CPU); 3933 3934 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 3935 hat_vlp_teardown(cpup); 3936 x86pte_cpu_fini(cpup); 3937 } 3938 3939 /* 3940 * Function called after all CPUs are brought online. 
3941 * Used to remove low address boot mappings. 3942 */ 3943 void 3944 clear_boot_mappings(uintptr_t low, uintptr_t high) 3945 { 3946 uintptr_t vaddr = low; 3947 htable_t *ht = NULL; 3948 level_t level; 3949 uint_t entry; 3950 x86pte_t pte; 3951 3952 /* 3953 * On 1st CPU we can unload the prom mappings, basically we blow away 3954 * all virtual mappings under _userlimit. 3955 */ 3956 while (vaddr < high) { 3957 pte = htable_walk(kas.a_hat, &ht, &vaddr, high); 3958 if (ht == NULL) 3959 break; 3960 3961 level = ht->ht_level; 3962 entry = htable_va2entry(vaddr, ht); 3963 ASSERT(level <= mmu.max_page_level); 3964 ASSERT(PTE_ISPAGE(pte, level)); 3965 3966 /* 3967 * Unload the mapping from the page tables. 3968 */ 3969 (void) x86pte_inval(ht, entry, 0, NULL); 3970 ASSERT(ht->ht_valid_cnt > 0); 3971 HTABLE_DEC(ht->ht_valid_cnt); 3972 PGCNT_DEC(ht->ht_hat, ht->ht_level); 3973 3974 vaddr += LEVEL_SIZE(ht->ht_level); 3975 } 3976 if (ht) 3977 htable_release(ht); 3978 } 3979 3980 /* 3981 * Atomically update a new translation for a single page. If the 3982 * currently installed PTE doesn't match the value we expect to find, 3983 * it's not updated and we return the PTE we found. 3984 * 3985 * If activating nosync or NOWRITE and the page was modified we need to sync 3986 * with the page_t. Also sync with page_t if clearing ref/mod bits. 3987 */ 3988 static x86pte_t 3989 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 3990 { 3991 page_t *pp; 3992 uint_t rm = 0; 3993 x86pte_t replaced; 3994 3995 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 3996 PTE_GET(expected, PT_MOD | PT_REF) && 3997 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 3998 !PTE_GET(new, PT_MOD | PT_REF))) { 3999 4000 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 4001 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 4002 ASSERT(pp != NULL); 4003 if (PTE_GET(expected, PT_MOD)) 4004 rm |= P_MOD; 4005 if (PTE_GET(expected, PT_REF)) 4006 rm |= P_REF; 4007 PTE_CLR(new, PT_MOD | PT_REF); 4008 } 4009 4010 replaced = x86pte_update(ht, entry, expected, new); 4011 if (replaced != expected) 4012 return (replaced); 4013 4014 if (rm) { 4015 /* 4016 * sync to all constituent pages of a large page 4017 */ 4018 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 4019 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 4020 while (pgcnt-- > 0) { 4021 /* 4022 * hat_page_demote() can't decrease 4023 * pszc below this mapping size 4024 * since large mapping existed after we 4025 * took mlist lock. 
4026 */ 4027 ASSERT(pp->p_szc >= ht->ht_level); 4028 hat_page_setattr(pp, rm); 4029 ++pp; 4030 } 4031 } 4032 4033 return (0); 4034 } 4035 4036 /* ARGSUSED */ 4037 void 4038 hat_join_srd(struct hat *hat, vnode_t *evp) 4039 { 4040 } 4041 4042 /* ARGSUSED */ 4043 hat_region_cookie_t 4044 hat_join_region(struct hat *hat, 4045 caddr_t r_saddr, 4046 size_t r_size, 4047 void *r_obj, 4048 u_offset_t r_objoff, 4049 uchar_t r_perm, 4050 uchar_t r_pgszc, 4051 hat_rgn_cb_func_t r_cb_function, 4052 uint_t flags) 4053 { 4054 panic("No shared region support on x86"); 4055 return (HAT_INVALID_REGION_COOKIE); 4056 } 4057 4058 /* ARGSUSED */ 4059 void 4060 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags) 4061 { 4062 panic("No shared region support on x86"); 4063 } 4064 4065 /* ARGSUSED */ 4066 void 4067 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie) 4068 { 4069 panic("No shared region support on x86"); 4070 } 4071 4072 4073 /* 4074 * Kernel Physical Mapping (kpm) facility 4075 * 4076 * Most of the routines needed to support segkpm are almost no-ops on the 4077 * x86 platform. We map in the entire segment when it is created and leave 4078 * it mapped in, so there is no additional work required to set up and tear 4079 * down individual mappings. All of these routines were created to support 4080 * SPARC platforms that have to avoid aliasing in their virtually indexed 4081 * caches. 4082 * 4083 * Most of the routines have sanity checks in them (e.g. verifying that the 4084 * passed-in page is locked). We don't actually care about most of these 4085 * checks on x86, but we leave them in place to identify problems in the 4086 * upper levels. 4087 */ 4088 4089 /* 4090 * Map in a locked page and return the vaddr. 4091 */ 4092 /*ARGSUSED*/ 4093 caddr_t 4094 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 4095 { 4096 caddr_t vaddr; 4097 4098 #ifdef DEBUG 4099 if (kpm_enable == 0) { 4100 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 4101 return ((caddr_t)NULL); 4102 } 4103 4104 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4105 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 4106 return ((caddr_t)NULL); 4107 } 4108 #endif 4109 4110 vaddr = hat_kpm_page2va(pp, 1); 4111 4112 return (vaddr); 4113 } 4114 4115 /* 4116 * Mapout a locked page. 4117 */ 4118 /*ARGSUSED*/ 4119 void 4120 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 4121 { 4122 #ifdef DEBUG 4123 if (kpm_enable == 0) { 4124 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 4125 return; 4126 } 4127 4128 if (IS_KPM_ADDR(vaddr) == 0) { 4129 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 4130 return; 4131 } 4132 4133 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4134 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 4135 return; 4136 } 4137 #endif 4138 } 4139 4140 /* 4141 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical 4142 * memory addresses that are not described by a page_t. It can 4143 * also be used for normal pages that are not locked, but beware 4144 * this is dangerous - no locking is performed, so the identity of 4145 * the page could change. hat_kpm_mapin_pfn is not supported when 4146 * vac_colors > 1, because the chosen va depends on the page identity, 4147 * which could change. 4148 * The caller must only pass pfn's for valid physical addresses; violation 4149 * of this rule will cause panic. 
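 *
 * (Illustrative note: on x86 the segkpm range is mapped once at startup and
 * left in place, so the address returned here is pure arithmetic,
 * kpm_vbase + ptob(pfn), and hat_kpm_mapout_pfn() below is an empty stub.)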
4150 */ 4151 caddr_t 4152 hat_kpm_mapin_pfn(pfn_t pfn) 4153 { 4154 caddr_t paddr, vaddr; 4155 4156 if (kpm_enable == 0) 4157 return ((caddr_t)NULL); 4158 4159 paddr = (caddr_t)ptob(pfn); 4160 vaddr = (uintptr_t)kpm_vbase + paddr; 4161 4162 return ((caddr_t)vaddr); 4163 } 4164 4165 /*ARGSUSED*/ 4166 void 4167 hat_kpm_mapout_pfn(pfn_t pfn) 4168 { 4169 /* empty */ 4170 } 4171 4172 /* 4173 * Return the kpm virtual address for a specific pfn 4174 */ 4175 caddr_t 4176 hat_kpm_pfn2va(pfn_t pfn) 4177 { 4178 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn); 4179 4180 ASSERT(!pfn_is_foreign(pfn)); 4181 return ((caddr_t)vaddr); 4182 } 4183 4184 /* 4185 * Return the kpm virtual address for the page at pp. 4186 */ 4187 /*ARGSUSED*/ 4188 caddr_t 4189 hat_kpm_page2va(struct page *pp, int checkswap) 4190 { 4191 return (hat_kpm_pfn2va(pp->p_pagenum)); 4192 } 4193 4194 /* 4195 * Return the page frame number for the kpm virtual address vaddr. 4196 */ 4197 pfn_t 4198 hat_kpm_va2pfn(caddr_t vaddr) 4199 { 4200 pfn_t pfn; 4201 4202 ASSERT(IS_KPM_ADDR(vaddr)); 4203 4204 pfn = (pfn_t)btop(vaddr - kpm_vbase); 4205 4206 return (pfn); 4207 } 4208 4209 4210 /* 4211 * Return the page for the kpm virtual address vaddr. 4212 */ 4213 page_t * 4214 hat_kpm_vaddr2page(caddr_t vaddr) 4215 { 4216 pfn_t pfn; 4217 4218 ASSERT(IS_KPM_ADDR(vaddr)); 4219 4220 pfn = hat_kpm_va2pfn(vaddr); 4221 4222 return (page_numtopp_nolock(pfn)); 4223 } 4224 4225 /* 4226 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a 4227 * KPM page. This should never happen on x86 4228 */ 4229 int 4230 hat_kpm_fault(hat_t *hat, caddr_t vaddr) 4231 { 4232 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p", 4233 (void *)hat, (void *)vaddr); 4234 4235 return (0); 4236 } 4237 4238 /*ARGSUSED*/ 4239 void 4240 hat_kpm_mseghash_clear(int nentries) 4241 {} 4242 4243 /*ARGSUSED*/ 4244 void 4245 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 4246 {} 4247 4248 #ifndef __xpv 4249 void 4250 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs, 4251 offset_t kpm_pages_off) 4252 { 4253 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off)); 4254 pfn_t base, end; 4255 4256 /* 4257 * kphysm_add_memory_dynamic() does not set nkpmpgs 4258 * when page_t memory is externally allocated. That 4259 * code must properly calculate nkpmpgs in all cases 4260 * if nkpmpgs needs to be used at some point. 4261 */ 4262 4263 /* 4264 * The meta (page_t) pages for dynamically added memory are allocated 4265 * either from the incoming memory itself or from existing memory. 4266 * In the former case the base of the incoming pages will be different 4267 * than the base of the dynamic segment so call memseg_get_start() to 4268 * get the actual base of the incoming memory for each case. 4269 */ 4270 4271 base = memseg_get_start(msp); 4272 end = msp->pages_end; 4273 4274 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base), 4275 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE, 4276 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST); 4277 } 4278 4279 void 4280 hat_kpm_addmem_mseg_insert(struct memseg *msp) 4281 { 4282 _NOTE(ARGUNUSED(msp)); 4283 } 4284 4285 void 4286 hat_kpm_addmem_memsegs_update(struct memseg *msp) 4287 { 4288 _NOTE(ARGUNUSED(msp)); 4289 } 4290 4291 /* 4292 * Return end of metadata for an already setup memseg. 4293 * X86 platforms don't need per-page meta data to support kpm. 
4294 */ 4295 caddr_t 4296 hat_kpm_mseg_reuse(struct memseg *msp) 4297 { 4298 return ((caddr_t)msp->epages); 4299 } 4300 4301 void 4302 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp) 4303 { 4304 _NOTE(ARGUNUSED(msp, mspp)); 4305 ASSERT(0); 4306 } 4307 4308 void 4309 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp, 4310 struct memseg *lo, struct memseg *mid, struct memseg *hi) 4311 { 4312 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi)); 4313 ASSERT(0); 4314 } 4315 4316 /* 4317 * Walk the memsegs chain, applying func to each memseg span. 4318 */ 4319 void 4320 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg) 4321 { 4322 pfn_t pbase, pend; 4323 void *base; 4324 size_t size; 4325 struct memseg *msp; 4326 4327 for (msp = memsegs; msp; msp = msp->next) { 4328 pbase = msp->pages_base; 4329 pend = msp->pages_end; 4330 base = ptob(pbase) + kpm_vbase; 4331 size = ptob(pend - pbase); 4332 func(arg, base, size); 4333 } 4334 } 4335 4336 #else /* __xpv */ 4337 4338 /* 4339 * There are specific Hypervisor calls to establish and remove mappings 4340 * to grant table references and the privcmd driver. We have to ensure 4341 * that a page table actually exists. 4342 */ 4343 void 4344 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma) 4345 { 4346 maddr_t base_ma; 4347 htable_t *ht; 4348 uint_t entry; 4349 4350 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4351 XPV_DISALLOW_MIGRATE(); 4352 ht = htable_create(hat, (uintptr_t)addr, 0, NULL); 4353 4354 /* 4355 * if an address for pte_ma is passed in, return the MA of the pte 4356 * for this specific address. This address is only valid as long 4357 * as the htable stays locked. 4358 */ 4359 if (pte_ma != NULL) { 4360 entry = htable_va2entry((uintptr_t)addr, ht); 4361 base_ma = pa_to_ma(ptob(ht->ht_pfn)); 4362 *pte_ma = base_ma + (entry << mmu.pte_size_shift); 4363 } 4364 XPV_ALLOW_MIGRATE(); 4365 } 4366 4367 void 4368 hat_release_mapping(hat_t *hat, caddr_t addr) 4369 { 4370 htable_t *ht; 4371 4372 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4373 XPV_DISALLOW_MIGRATE(); 4374 ht = htable_lookup(hat, (uintptr_t)addr, 0); 4375 ASSERT(ht != NULL); 4376 ASSERT(ht->ht_busy >= 2); 4377 htable_release(ht); 4378 htable_release(ht); 4379 XPV_ALLOW_MIGRATE(); 4380 } 4381 #endif /* __xpv */