1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright (c) 2010, Intel Corporation. 26 * All rights reserved. 27 */ 28 /* 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 30 * Copyright (c) 2014, 2015 by Delphix. All rights reserved. 31 */ 32 33 /* 34 * VM - Hardware Address Translation management for i386 and amd64 35 * 36 * Implementation of the interfaces described in <common/vm/hat.h> 37 * 38 * Nearly all the details of how the hardware is managed should not be 39 * visible outside this layer except for misc. machine specific functions 40 * that work in conjunction with this code. 41 * 42 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal. 43 */ 44 45 #include <sys/machparam.h> 46 #include <sys/machsystm.h> 47 #include <sys/mman.h> 48 #include <sys/types.h> 49 #include <sys/systm.h> 50 #include <sys/cpuvar.h> 51 #include <sys/thread.h> 52 #include <sys/proc.h> 53 #include <sys/cpu.h> 54 #include <sys/kmem.h> 55 #include <sys/disp.h> 56 #include <sys/shm.h> 57 #include <sys/sysmacros.h> 58 #include <sys/machparam.h> 59 #include <sys/vmem.h> 60 #include <sys/vmsystm.h> 61 #include <sys/promif.h> 62 #include <sys/var.h> 63 #include <sys/x86_archext.h> 64 #include <sys/atomic.h> 65 #include <sys/bitmap.h> 66 #include <sys/controlregs.h> 67 #include <sys/bootconf.h> 68 #include <sys/bootsvcs.h> 69 #include <sys/bootinfo.h> 70 #include <sys/archsystm.h> 71 72 #include <vm/seg_kmem.h> 73 #include <vm/hat_i86.h> 74 #include <vm/as.h> 75 #include <vm/seg.h> 76 #include <vm/page.h> 77 #include <vm/seg_kp.h> 78 #include <vm/seg_kpm.h> 79 #include <vm/vm_dep.h> 80 #ifdef __xpv 81 #include <sys/hypervisor.h> 82 #endif 83 #include <vm/kboot_mmu.h> 84 #include <vm/seg_spt.h> 85 86 #include <sys/cmn_err.h> 87 88 /* 89 * Basic parameters for hat operation. 90 */ 91 struct hat_mmu_info mmu; 92 93 /* 94 * The page that is the kernel's top level pagetable. 95 * 96 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries 97 * on this 4K page for its top level page table. The remaining groups of 98 * 4 entries are used for per processor copies of user VLP pagetables for 99 * running threads. See hat_switch() and reload_pae32() for details. 100 * 101 * vlp_page[0..3] - level==2 PTEs for kernel HAT 102 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0 103 * vlp_page[8..11] - level==2 PTE for user thread on cpu 1 104 * etc... 
105 */ 106 static x86pte_t *vlp_page; 107 108 /* 109 * forward declaration of internal utility routines 110 */ 111 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, 112 x86pte_t new); 113 114 /* 115 * The kernel address space exists in all HATs. To implement this the 116 * kernel reserves a fixed number of entries in the topmost level(s) of page 117 * tables. The values are setup during startup and then copied to every user 118 * hat created by hat_alloc(). This means that kernelbase must be: 119 * 120 * 4Meg aligned for 32 bit kernels 121 * 512Gig aligned for x86_64 64 bit kernel 122 * 123 * The hat_kernel_range_ts describe what needs to be copied from kernel hat 124 * to each user hat. 125 */ 126 typedef struct hat_kernel_range { 127 level_t hkr_level; 128 uintptr_t hkr_start_va; 129 uintptr_t hkr_end_va; /* zero means to end of memory */ 130 } hat_kernel_range_t; 131 #define NUM_KERNEL_RANGE 2 132 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE]; 133 static int num_kernel_ranges; 134 135 uint_t use_boot_reserve = 1; /* cleared after early boot process */ 136 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */ 137 138 /* 139 * enable_1gpg: controls 1g page support for user applications. 140 * By default, 1g pages are exported to user applications. enable_1gpg can 141 * be set to 0 to not export. 142 */ 143 int enable_1gpg = 1; 144 145 /* 146 * AMD shanghai processors provide better management of 1gb ptes in its tlb. 147 * By default, 1g page support will be disabled for pre-shanghai AMD 148 * processors that don't have optimal tlb support for the 1g page size. 149 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal 150 * processors. 151 */ 152 int chk_optimal_1gtlb = 1; 153 154 155 #ifdef DEBUG 156 uint_t map1gcnt; 157 #endif 158 159 160 /* 161 * A cpuset for all cpus. This is used for kernel address cross calls, since 162 * the kernel addresses apply to all cpus. 163 */ 164 cpuset_t khat_cpuset; 165 166 /* 167 * management stuff for hat structures 168 */ 169 kmutex_t hat_list_lock; 170 kcondvar_t hat_list_cv; 171 kmem_cache_t *hat_cache; 172 kmem_cache_t *hat_hash_cache; 173 kmem_cache_t *vlp_hash_cache; 174 175 /* 176 * Simple statistics 177 */ 178 struct hatstats hatstat; 179 180 /* 181 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs 182 * correctly. For such hypervisors we must set PT_USER for kernel 183 * entries ourselves (normally the emulation would set PT_USER for 184 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is 185 * thus set appropriately. Note that dboot/kbm is OK, as only the full 186 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never 187 * incorrect. 188 */ 189 int pt_kern; 190 191 /* 192 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's. 
193 */ 194 extern void atomic_orb(uchar_t *addr, uchar_t val); 195 extern void atomic_andb(uchar_t *addr, uchar_t val); 196 197 #ifndef __xpv 198 extern pfn_t memseg_get_start(struct memseg *); 199 #endif 200 201 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask) 202 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD) 203 #define PP_ISREF(pp) PP_GETRM(pp, P_REF) 204 #define PP_ISRO(pp) PP_GETRM(pp, P_RO) 205 206 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm) 207 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD) 208 #define PP_SETREF(pp) PP_SETRM(pp, P_REF) 209 #define PP_SETRO(pp) PP_SETRM(pp, P_RO) 210 211 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm)) 212 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD) 213 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF) 214 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO) 215 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO) 216 217 /* 218 * kmem cache constructor for struct hat 219 */ 220 /*ARGSUSED*/ 221 static int 222 hati_constructor(void *buf, void *handle, int kmflags) 223 { 224 hat_t *hat = buf; 225 226 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 227 bzero(hat->hat_pages_mapped, 228 sizeof (pgcnt_t) * (mmu.max_page_level + 1)); 229 hat->hat_ism_pgcnt = 0; 230 hat->hat_stats = 0; 231 hat->hat_flags = 0; 232 CPUSET_ZERO(hat->hat_cpus); 233 hat->hat_htable = NULL; 234 hat->hat_ht_hash = NULL; 235 return (0); 236 } 237 238 /* 239 * Allocate a hat structure for as. We also create the top level 240 * htable and initialize it to contain the kernel hat entries. 241 */ 242 hat_t * 243 hat_alloc(struct as *as) 244 { 245 hat_t *hat; 246 htable_t *ht; /* top level htable */ 247 uint_t use_vlp; 248 uint_t r; 249 hat_kernel_range_t *rp; 250 uintptr_t va; 251 uintptr_t eva; 252 uint_t start; 253 uint_t cnt; 254 htable_t *src; 255 256 /* 257 * Once we start creating user process HATs we can enable 258 * the htable_steal() code. 259 */ 260 if (can_steal_post_boot == 0) 261 can_steal_post_boot = 1; 262 263 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 264 hat = kmem_cache_alloc(hat_cache, KM_SLEEP); 265 hat->hat_as = as; 266 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 267 ASSERT(hat->hat_flags == 0); 268 269 #if defined(__xpv) 270 /* 271 * No VLP stuff on the hypervisor due to the 64-bit split top level 272 * page tables. On 32-bit it's not needed as the hypervisor takes 273 * care of copying the top level PTEs to a below 4Gig page. 274 */ 275 use_vlp = 0; 276 #else /* __xpv */ 277 /* 32 bit processes uses a VLP style hat when running with PAE */ 278 #if defined(__amd64) 279 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32); 280 #elif defined(__i386) 281 use_vlp = mmu.pae_hat; 282 #endif 283 #endif /* __xpv */ 284 if (use_vlp) { 285 hat->hat_flags = HAT_VLP; 286 bzero(hat->hat_vlp_ptes, VLP_SIZE); 287 } 288 289 /* 290 * Allocate the htable hash 291 */ 292 if ((hat->hat_flags & HAT_VLP)) { 293 hat->hat_num_hash = mmu.vlp_hash_cnt; 294 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP); 295 } else { 296 hat->hat_num_hash = mmu.hash_cnt; 297 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP); 298 } 299 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *)); 300 301 /* 302 * Initialize Kernel HAT entries at the top of the top level page 303 * tables for the new hat. 
304 */ 305 hat->hat_htable = NULL; 306 hat->hat_ht_cached = NULL; 307 XPV_DISALLOW_MIGRATE(); 308 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL); 309 hat->hat_htable = ht; 310 311 #if defined(__amd64) 312 if (hat->hat_flags & HAT_VLP) 313 goto init_done; 314 #endif 315 316 for (r = 0; r < num_kernel_ranges; ++r) { 317 rp = &kernel_ranges[r]; 318 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 319 va += cnt * LEVEL_SIZE(rp->hkr_level)) { 320 321 if (rp->hkr_level == TOP_LEVEL(hat)) 322 ht = hat->hat_htable; 323 else 324 ht = htable_create(hat, va, rp->hkr_level, 325 NULL); 326 327 start = htable_va2entry(va, ht); 328 cnt = HTABLE_NUM_PTES(ht) - start; 329 eva = va + 330 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level)); 331 if (rp->hkr_end_va != 0 && 332 (eva > rp->hkr_end_va || eva == 0)) 333 cnt = htable_va2entry(rp->hkr_end_va, ht) - 334 start; 335 336 #if defined(__i386) && !defined(__xpv) 337 if (ht->ht_flags & HTABLE_VLP) { 338 bcopy(&vlp_page[start], 339 &hat->hat_vlp_ptes[start], 340 cnt * sizeof (x86pte_t)); 341 continue; 342 } 343 #endif 344 src = htable_lookup(kas.a_hat, va, rp->hkr_level); 345 ASSERT(src != NULL); 346 x86pte_copy(src, ht, start, cnt); 347 htable_release(src); 348 } 349 } 350 351 init_done: 352 353 #if defined(__xpv) 354 /* 355 * Pin top level page tables after initializing them 356 */ 357 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level); 358 #if defined(__amd64) 359 xen_pin(hat->hat_user_ptable, mmu.max_level); 360 #endif 361 #endif 362 XPV_ALLOW_MIGRATE(); 363 364 /* 365 * Put it at the start of the global list of all hats (used by stealing) 366 * 367 * kas.a_hat is not in the list but is instead used to find the 368 * first and last items in the list. 369 * 370 * - kas.a_hat->hat_next points to the start of the user hats. 371 * The list ends where hat->hat_next == NULL 372 * 373 * - kas.a_hat->hat_prev points to the last of the user hats. 374 * The list begins where hat->hat_prev == NULL 375 */ 376 mutex_enter(&hat_list_lock); 377 hat->hat_prev = NULL; 378 hat->hat_next = kas.a_hat->hat_next; 379 if (hat->hat_next) 380 hat->hat_next->hat_prev = hat; 381 else 382 kas.a_hat->hat_prev = hat; 383 kas.a_hat->hat_next = hat; 384 mutex_exit(&hat_list_lock); 385 386 return (hat); 387 } 388 389 /* 390 * process has finished executing but as has not been cleaned up yet. 391 */ 392 /*ARGSUSED*/ 393 void 394 hat_free_start(hat_t *hat) 395 { 396 ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock)); 397 398 /* 399 * If the hat is currently a stealing victim, wait for the stealing 400 * to finish. Once we mark it as HAT_FREEING, htable_steal() 401 * won't look at its pagetables anymore. 402 */ 403 mutex_enter(&hat_list_lock); 404 while (hat->hat_flags & HAT_VICTIM) 405 cv_wait(&hat_list_cv, &hat_list_lock); 406 hat->hat_flags |= HAT_FREEING; 407 mutex_exit(&hat_list_lock); 408 } 409 410 /* 411 * An address space is being destroyed, so we destroy the associated hat. 
412 */ 413 void 414 hat_free_end(hat_t *hat) 415 { 416 kmem_cache_t *cache; 417 418 ASSERT(hat->hat_flags & HAT_FREEING); 419 420 /* 421 * must not be running on the given hat 422 */ 423 ASSERT(CPU->cpu_current_hat != hat); 424 425 /* 426 * Remove it from the list of HATs 427 */ 428 mutex_enter(&hat_list_lock); 429 if (hat->hat_prev) 430 hat->hat_prev->hat_next = hat->hat_next; 431 else 432 kas.a_hat->hat_next = hat->hat_next; 433 if (hat->hat_next) 434 hat->hat_next->hat_prev = hat->hat_prev; 435 else 436 kas.a_hat->hat_prev = hat->hat_prev; 437 mutex_exit(&hat_list_lock); 438 hat->hat_next = hat->hat_prev = NULL; 439 440 #if defined(__xpv) 441 /* 442 * On the hypervisor, unpin top level page table(s) 443 */ 444 xen_unpin(hat->hat_htable->ht_pfn); 445 #if defined(__amd64) 446 xen_unpin(hat->hat_user_ptable); 447 #endif 448 #endif 449 450 /* 451 * Make a pass through the htables freeing them all up. 452 */ 453 htable_purge_hat(hat); 454 455 /* 456 * Decide which kmem cache the hash table came from, then free it. 457 */ 458 if (hat->hat_flags & HAT_VLP) 459 cache = vlp_hash_cache; 460 else 461 cache = hat_hash_cache; 462 kmem_cache_free(cache, hat->hat_ht_hash); 463 hat->hat_ht_hash = NULL; 464 465 hat->hat_flags = 0; 466 kmem_cache_free(hat_cache, hat); 467 } 468 469 /* 470 * round kernelbase down to a supported value to use for _userlimit 471 * 472 * userlimit must be aligned down to an entry in the top level htable. 473 * The one exception is for 32 bit HAT's running PAE. 474 */ 475 uintptr_t 476 hat_kernelbase(uintptr_t va) 477 { 478 #if defined(__i386) 479 va &= LEVEL_MASK(1); 480 #endif 481 if (IN_VA_HOLE(va)) 482 panic("_userlimit %p will fall in VA hole\n", (void *)va); 483 return (va); 484 } 485 486 /* 487 * 488 */ 489 static void 490 set_max_page_level() 491 { 492 level_t lvl; 493 494 if (!kbm_largepage_support) { 495 lvl = 0; 496 } else { 497 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) { 498 lvl = 2; 499 if (chk_optimal_1gtlb && 500 cpuid_opteron_erratum(CPU, 6671130)) { 501 lvl = 1; 502 } 503 if (plat_mnode_xcheck(LEVEL_SIZE(2) >> 504 LEVEL_SHIFT(0))) { 505 lvl = 1; 506 } 507 } else { 508 lvl = 1; 509 } 510 } 511 mmu.max_page_level = lvl; 512 513 if ((lvl == 2) && (enable_1gpg == 0)) 514 mmu.umax_page_level = 1; 515 else 516 mmu.umax_page_level = lvl; 517 } 518 519 /* 520 * Initialize hat data structures based on processor MMU information. 521 */ 522 void 523 mmu_init(void) 524 { 525 uint_t max_htables; 526 uint_t pa_bits; 527 uint_t va_bits; 528 int i; 529 530 /* 531 * If CPU enabled the page table global bit, use it for the kernel 532 * This is bit 7 in CR4 (PGE - Page Global Enable). 533 */ 534 if (is_x86_feature(x86_featureset, X86FSET_PGE) && 535 (getcr4() & CR4_PGE) != 0) 536 mmu.pt_global = PT_GLOBAL; 537 538 /* 539 * Detect NX and PAE usage. 540 */ 541 mmu.pae_hat = kbm_pae_support; 542 if (kbm_nx_support) 543 mmu.pt_nx = PT_NX; 544 else 545 mmu.pt_nx = 0; 546 547 /* 548 * Use CPU info to set various MMU parameters 549 */ 550 cpuid_get_addrsize(CPU, &pa_bits, &va_bits); 551 552 if (va_bits < sizeof (void *) * NBBY) { 553 mmu.hole_start = (1ul << (va_bits - 1)); 554 mmu.hole_end = 0ul - mmu.hole_start - 1; 555 } else { 556 mmu.hole_end = 0; 557 mmu.hole_start = mmu.hole_end - 1; 558 } 559 #if defined(OPTERON_ERRATUM_121) 560 /* 561 * If erratum 121 has already been detected at this time, hole_start 562 * contains the value to be subtracted from mmu.hole_start. 
563 */ 564 ASSERT(hole_start == 0 || opteron_erratum_121 != 0); 565 hole_start = mmu.hole_start - hole_start; 566 #else 567 hole_start = mmu.hole_start; 568 #endif 569 hole_end = mmu.hole_end; 570 571 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1); 572 if (mmu.pae_hat == 0 && pa_bits > 32) 573 mmu.highest_pfn = PFN_4G - 1; 574 575 if (mmu.pae_hat) { 576 mmu.pte_size = 8; /* 8 byte PTEs */ 577 mmu.pte_size_shift = 3; 578 } else { 579 mmu.pte_size = 4; /* 4 byte PTEs */ 580 mmu.pte_size_shift = 2; 581 } 582 583 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE)) 584 panic("Processor does not support PAE"); 585 586 if (!is_x86_feature(x86_featureset, X86FSET_CX8)) 587 panic("Processor does not support cmpxchg8b instruction"); 588 589 #if defined(__amd64) 590 591 mmu.num_level = 4; 592 mmu.max_level = 3; 593 mmu.ptes_per_table = 512; 594 mmu.top_level_count = 512; 595 596 mmu.level_shift[0] = 12; 597 mmu.level_shift[1] = 21; 598 mmu.level_shift[2] = 30; 599 mmu.level_shift[3] = 39; 600 601 #elif defined(__i386) 602 603 if (mmu.pae_hat) { 604 mmu.num_level = 3; 605 mmu.max_level = 2; 606 mmu.ptes_per_table = 512; 607 mmu.top_level_count = 4; 608 609 mmu.level_shift[0] = 12; 610 mmu.level_shift[1] = 21; 611 mmu.level_shift[2] = 30; 612 613 } else { 614 mmu.num_level = 2; 615 mmu.max_level = 1; 616 mmu.ptes_per_table = 1024; 617 mmu.top_level_count = 1024; 618 619 mmu.level_shift[0] = 12; 620 mmu.level_shift[1] = 22; 621 } 622 623 #endif /* __i386 */ 624 625 for (i = 0; i < mmu.num_level; ++i) { 626 mmu.level_size[i] = 1UL << mmu.level_shift[i]; 627 mmu.level_offset[i] = mmu.level_size[i] - 1; 628 mmu.level_mask[i] = ~mmu.level_offset[i]; 629 } 630 631 set_max_page_level(); 632 633 mmu_page_sizes = mmu.max_page_level + 1; 634 mmu_exported_page_sizes = mmu.umax_page_level + 1; 635 636 /* restrict legacy applications from using pagesizes 1g and above */ 637 mmu_legacy_page_sizes = 638 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes; 639 640 641 for (i = 0; i <= mmu.max_page_level; ++i) { 642 mmu.pte_bits[i] = PT_VALID | pt_kern; 643 if (i > 0) 644 mmu.pte_bits[i] |= PT_PAGESIZE; 645 } 646 647 /* 648 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level. 649 */ 650 for (i = 1; i < mmu.num_level; ++i) 651 mmu.ptp_bits[i] = PT_PTPBITS; 652 653 #if defined(__i386) 654 mmu.ptp_bits[2] = PT_VALID; 655 #endif 656 657 /* 658 * Compute how many hash table entries to have per process for htables. 659 * We start with 1 page's worth of entries. 660 * 661 * If physical memory is small, reduce the amount need to cover it. 662 */ 663 max_htables = physmax / mmu.ptes_per_table; 664 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *); 665 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables) 666 mmu.hash_cnt >>= 1; 667 mmu.vlp_hash_cnt = mmu.hash_cnt; 668 669 #if defined(__amd64) 670 /* 671 * If running in 64 bits and physical memory is large, 672 * increase the size of the cache to cover all of memory for 673 * a 64 bit process. 
674 */ 675 #define HASH_MAX_LENGTH 4 676 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables) 677 mmu.hash_cnt <<= 1; 678 #endif 679 } 680 681 682 /* 683 * initialize hat data structures 684 */ 685 void 686 hat_init() 687 { 688 #if defined(__i386) 689 /* 690 * _userlimit must be aligned correctly 691 */ 692 if ((_userlimit & LEVEL_MASK(1)) != _userlimit) { 693 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n", 694 (void *)_userlimit, (void *)LEVEL_SIZE(1)); 695 halt("hat_init(): Unable to continue"); 696 } 697 #endif 698 699 cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL); 700 701 /* 702 * initialize kmem caches 703 */ 704 htable_init(); 705 hment_init(); 706 707 hat_cache = kmem_cache_create("hat_t", 708 sizeof (hat_t), 0, hati_constructor, NULL, NULL, 709 NULL, 0, 0); 710 711 hat_hash_cache = kmem_cache_create("HatHash", 712 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL, 713 NULL, 0, 0); 714 715 /* 716 * VLP hats can use a smaller hash table size on large memroy machines 717 */ 718 if (mmu.hash_cnt == mmu.vlp_hash_cnt) { 719 vlp_hash_cache = hat_hash_cache; 720 } else { 721 vlp_hash_cache = kmem_cache_create("HatVlpHash", 722 mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL, 723 NULL, 0, 0); 724 } 725 726 /* 727 * Set up the kernel's hat 728 */ 729 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 730 kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP); 731 mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL); 732 kas.a_hat->hat_as = &kas; 733 kas.a_hat->hat_flags = 0; 734 AS_LOCK_EXIT(&kas, &kas.a_lock); 735 736 CPUSET_ZERO(khat_cpuset); 737 CPUSET_ADD(khat_cpuset, CPU->cpu_id); 738 739 /* 740 * The kernel hat's next pointer serves as the head of the hat list . 741 * The kernel hat's prev pointer tracks the last hat on the list for 742 * htable_steal() to use. 743 */ 744 kas.a_hat->hat_next = NULL; 745 kas.a_hat->hat_prev = NULL; 746 747 /* 748 * Allocate an htable hash bucket for the kernel 749 * XX64 - tune for 64 bit procs 750 */ 751 kas.a_hat->hat_num_hash = mmu.hash_cnt; 752 kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP); 753 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *)); 754 755 /* 756 * zero out the top level and cached htable pointers 757 */ 758 kas.a_hat->hat_ht_cached = NULL; 759 kas.a_hat->hat_htable = NULL; 760 761 /* 762 * Pre-allocate hrm_hashtab before enabling the collection of 763 * refmod statistics. Allocating on the fly would mean us 764 * running the risk of suffering recursive mutex enters or 765 * deadlocks. 766 */ 767 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 768 KM_SLEEP); 769 } 770 771 /* 772 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels. 773 * 774 * Each CPU has a set of 2 pagetables that are reused for any 32 bit 775 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and 776 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes. 777 */ 778 /*ARGSUSED*/ 779 static void 780 hat_vlp_setup(struct cpu *cpu) 781 { 782 #if defined(__amd64) && !defined(__xpv) 783 struct hat_cpu_info *hci = cpu->cpu_hat_info; 784 pfn_t pfn; 785 786 /* 787 * allocate the level==2 page table for the bottom most 788 * 512Gig of address space (this is where 32 bit apps live) 789 */ 790 ASSERT(hci != NULL); 791 hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP); 792 793 /* 794 * Allocate a top level pagetable and copy the kernel's 795 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry. 
796 */ 797 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP); 798 hci->hci_vlp_pfn = 799 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes); 800 ASSERT(hci->hci_vlp_pfn != PFN_INVALID); 801 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE); 802 803 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes); 804 ASSERT(pfn != PFN_INVALID); 805 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2); 806 #endif /* __amd64 && !__xpv */ 807 } 808 809 /*ARGSUSED*/ 810 static void 811 hat_vlp_teardown(cpu_t *cpu) 812 { 813 #if defined(__amd64) && !defined(__xpv) 814 struct hat_cpu_info *hci; 815 816 if ((hci = cpu->cpu_hat_info) == NULL) 817 return; 818 if (hci->hci_vlp_l2ptes) 819 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE); 820 if (hci->hci_vlp_l3ptes) 821 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE); 822 #endif 823 } 824 825 #define NEXT_HKR(r, l, s, e) { \ 826 kernel_ranges[r].hkr_level = l; \ 827 kernel_ranges[r].hkr_start_va = s; \ 828 kernel_ranges[r].hkr_end_va = e; \ 829 ++r; \ 830 } 831 832 /* 833 * Finish filling in the kernel hat. 834 * Pre fill in all top level kernel page table entries for the kernel's 835 * part of the address range. From this point on we can't use any new 836 * kernel large pages if they need PTE's at max_level 837 * 838 * create the kmap mappings. 839 */ 840 void 841 hat_init_finish(void) 842 { 843 size_t size; 844 uint_t r = 0; 845 uintptr_t va; 846 hat_kernel_range_t *rp; 847 848 849 /* 850 * We are now effectively running on the kernel hat. 851 * Clearing use_boot_reserve shuts off using the pre-allocated boot 852 * reserve for all HAT allocations. From here on, the reserves are 853 * only used when avoiding recursion in kmem_alloc(). 854 */ 855 use_boot_reserve = 0; 856 htable_adjust_reserve(); 857 858 /* 859 * User HATs are initialized with copies of all kernel mappings in 860 * higher level page tables. Ensure that those entries exist. 861 */ 862 #if defined(__amd64) 863 864 NEXT_HKR(r, 3, kernelbase, 0); 865 #if defined(__xpv) 866 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END); 867 #endif 868 869 #elif defined(__i386) 870 871 #if !defined(__xpv) 872 if (mmu.pae_hat) { 873 va = kernelbase; 874 if ((va & LEVEL_MASK(2)) != va) { 875 va = P2ROUNDUP(va, LEVEL_SIZE(2)); 876 NEXT_HKR(r, 1, kernelbase, va); 877 } 878 if (va != 0) 879 NEXT_HKR(r, 2, va, 0); 880 } else 881 #endif /* __xpv */ 882 NEXT_HKR(r, 1, kernelbase, 0); 883 884 #endif /* __i386 */ 885 886 num_kernel_ranges = r; 887 888 /* 889 * Create all the kernel pagetables that will have entries 890 * shared to user HATs. 891 */ 892 for (r = 0; r < num_kernel_ranges; ++r) { 893 rp = &kernel_ranges[r]; 894 for (va = rp->hkr_start_va; va != rp->hkr_end_va; 895 va += LEVEL_SIZE(rp->hkr_level)) { 896 htable_t *ht; 897 898 if (IN_HYPERVISOR_VA(va)) 899 continue; 900 901 /* can/must skip if a page mapping already exists */ 902 if (rp->hkr_level <= mmu.max_page_level && 903 (ht = htable_getpage(kas.a_hat, va, NULL)) != 904 NULL) { 905 htable_release(ht); 906 continue; 907 } 908 909 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1, 910 NULL); 911 } 912 } 913 914 /* 915 * 32 bit PAE metal kernels use only 4 of the 512 entries in the 916 * page holding the top level pagetable. We use the remainder for 917 * the "per CPU" page tables for VLP processes. 918 * Map the top level kernel pagetable into the kernel to make 919 * it easy to use bcopy access these tables. 
920 */ 921 if (mmu.pae_hat) { 922 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP); 923 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE, 924 kas.a_hat->hat_htable->ht_pfn, 925 #if !defined(__xpv) 926 PROT_WRITE | 927 #endif 928 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK, 929 HAT_LOAD | HAT_LOAD_NOCONSIST); 930 } 931 hat_vlp_setup(CPU); 932 933 /* 934 * Create kmap (cached mappings of kernel PTEs) 935 * for 32 bit we map from segmap_start .. ekernelheap 936 * for 64 bit we map from segmap_start .. segmap_start + segmapsize; 937 */ 938 #if defined(__i386) 939 size = (uintptr_t)ekernelheap - segmap_start; 940 #elif defined(__amd64) 941 size = segmapsize; 942 #endif 943 hat_kmap_init((uintptr_t)segmap_start, size); 944 } 945 946 /* 947 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references 948 * are 32 bit, so for safety we must use atomic_cas_64() to install these. 949 */ 950 #ifdef __i386 951 static void 952 reload_pae32(hat_t *hat, cpu_t *cpu) 953 { 954 x86pte_t *src; 955 x86pte_t *dest; 956 x86pte_t pte; 957 int i; 958 959 /* 960 * Load the 4 entries of the level 2 page table into this 961 * cpu's range of the vlp_page and point cr3 at them. 962 */ 963 ASSERT(mmu.pae_hat); 964 src = hat->hat_vlp_ptes; 965 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES; 966 for (i = 0; i < VLP_NUM_PTES; ++i) { 967 for (;;) { 968 pte = dest[i]; 969 if (pte == src[i]) 970 break; 971 if (atomic_cas_64(dest + i, pte, src[i]) != src[i]) 972 break; 973 } 974 } 975 } 976 #endif 977 978 /* 979 * Switch to a new active hat, maintaining bit masks to track active CPUs. 980 * 981 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it 982 * remains a 32-bit value. 983 */ 984 void 985 hat_switch(hat_t *hat) 986 { 987 uint64_t newcr3; 988 cpu_t *cpu = CPU; 989 hat_t *old = cpu->cpu_current_hat; 990 991 /* 992 * set up this information first, so we don't miss any cross calls 993 */ 994 if (old != NULL) { 995 if (old == hat) 996 return; 997 if (old != kas.a_hat) 998 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id); 999 } 1000 1001 /* 1002 * Add this CPU to the active set for this HAT. 1003 */ 1004 if (hat != kas.a_hat) { 1005 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id); 1006 } 1007 cpu->cpu_current_hat = hat; 1008 1009 /* 1010 * now go ahead and load cr3 1011 */ 1012 if (hat->hat_flags & HAT_VLP) { 1013 #if defined(__amd64) 1014 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1015 1016 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1017 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn); 1018 #elif defined(__i386) 1019 reload_pae32(hat, cpu); 1020 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) + 1021 (cpu->cpu_id + 1) * VLP_SIZE; 1022 #endif 1023 } else { 1024 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn); 1025 } 1026 #ifdef __xpv 1027 { 1028 struct mmuext_op t[2]; 1029 uint_t retcnt; 1030 uint_t opcnt = 1; 1031 1032 t[0].cmd = MMUEXT_NEW_BASEPTR; 1033 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1034 #if defined(__amd64) 1035 /* 1036 * There's an interesting problem here, as to what to 1037 * actually specify when switching to the kernel hat. 1038 * For now we'll reuse the kernel hat again. 
1039 */ 1040 t[1].cmd = MMUEXT_NEW_USER_BASEPTR; 1041 if (hat == kas.a_hat) 1042 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3)); 1043 else 1044 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable); 1045 ++opcnt; 1046 #endif /* __amd64 */ 1047 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0) 1048 panic("HYPERVISOR_mmu_update() failed"); 1049 ASSERT(retcnt == opcnt); 1050 1051 } 1052 #else 1053 setcr3(newcr3); 1054 #endif 1055 ASSERT(cpu == CPU); 1056 } 1057 1058 /* 1059 * Utility to return a valid x86pte_t from protections, pfn, and level number 1060 */ 1061 static x86pte_t 1062 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags) 1063 { 1064 x86pte_t pte; 1065 uint_t cache_attr = attr & HAT_ORDER_MASK; 1066 1067 pte = MAKEPTE(pfn, level); 1068 1069 if (attr & PROT_WRITE) 1070 PTE_SET(pte, PT_WRITABLE); 1071 1072 if (attr & PROT_USER) 1073 PTE_SET(pte, PT_USER); 1074 1075 if (!(attr & PROT_EXEC)) 1076 PTE_SET(pte, mmu.pt_nx); 1077 1078 /* 1079 * Set the software bits used track ref/mod sync's and hments. 1080 * If not using REF/MOD, set them to avoid h/w rewriting PTEs. 1081 */ 1082 if (flags & HAT_LOAD_NOCONSIST) 1083 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD); 1084 else if (attr & HAT_NOSYNC) 1085 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD); 1086 1087 /* 1088 * Set the caching attributes in the PTE. The combination 1089 * of attributes are poorly defined, so we pay attention 1090 * to them in the given order. 1091 * 1092 * The test for HAT_STRICTORDER is different because it's defined 1093 * as "0" - which was a stupid thing to do, but is too late to change! 1094 */ 1095 if (cache_attr == HAT_STRICTORDER) { 1096 PTE_SET(pte, PT_NOCACHE); 1097 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */ 1098 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) { 1099 /* nothing to set */; 1100 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) { 1101 PTE_SET(pte, PT_NOCACHE); 1102 if (is_x86_feature(x86_featureset, X86FSET_PAT)) 1103 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE); 1104 else 1105 PTE_SET(pte, PT_WRITETHRU); 1106 } else { 1107 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr); 1108 } 1109 1110 return (pte); 1111 } 1112 1113 /* 1114 * Duplicate address translations of the parent to the child. 1115 * This function really isn't used anymore. 1116 */ 1117 /*ARGSUSED*/ 1118 int 1119 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag) 1120 { 1121 ASSERT((uintptr_t)addr < kernelbase); 1122 ASSERT(new != kas.a_hat); 1123 ASSERT(old != kas.a_hat); 1124 return (0); 1125 } 1126 1127 /* 1128 * returns number of bytes that have valid mappings in hat. 1129 */ 1130 size_t 1131 hat_get_mapped_size(hat_t *hat) 1132 { 1133 size_t total = 0; 1134 int l; 1135 1136 for (l = 0; l <= mmu.max_page_level; l++) 1137 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l)); 1138 total += hat->hat_ism_pgcnt; 1139 1140 return (total); 1141 } 1142 1143 /* 1144 * enable/disable collection of stats for hat. 1145 */ 1146 int 1147 hat_stats_enable(hat_t *hat) 1148 { 1149 atomic_inc_32(&hat->hat_stats); 1150 return (1); 1151 } 1152 1153 void 1154 hat_stats_disable(hat_t *hat) 1155 { 1156 atomic_dec_32(&hat->hat_stats); 1157 } 1158 1159 /* 1160 * Utility to sync the ref/mod bits from a page table entry to the page_t 1161 * We must be holding the mapping list lock when this is called. 
1162 */ 1163 static void 1164 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level) 1165 { 1166 uint_t rm = 0; 1167 pgcnt_t pgcnt; 1168 1169 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 1170 return; 1171 1172 if (PTE_GET(pte, PT_REF)) 1173 rm |= P_REF; 1174 1175 if (PTE_GET(pte, PT_MOD)) 1176 rm |= P_MOD; 1177 1178 if (rm == 0) 1179 return; 1180 1181 /* 1182 * sync to all constituent pages of a large page 1183 */ 1184 ASSERT(x86_hm_held(pp)); 1185 pgcnt = page_get_pagecnt(level); 1186 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 1187 for (; pgcnt > 0; --pgcnt) { 1188 /* 1189 * hat_page_demote() can't decrease 1190 * pszc below this mapping size 1191 * since this large mapping existed after we 1192 * took mlist lock. 1193 */ 1194 ASSERT(pp->p_szc >= level); 1195 hat_page_setattr(pp, rm); 1196 ++pp; 1197 } 1198 } 1199 1200 /* 1201 * This the set of PTE bits for PFN, permissions and caching 1202 * that are allowed to change on a HAT_LOAD_REMAP 1203 */ 1204 #define PT_REMAP_BITS \ 1205 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \ 1206 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD) 1207 1208 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX) 1209 /* 1210 * Do the low-level work to get a mapping entered into a HAT's pagetables 1211 * and in the mapping list of the associated page_t. 1212 */ 1213 static int 1214 hati_pte_map( 1215 htable_t *ht, 1216 uint_t entry, 1217 page_t *pp, 1218 x86pte_t pte, 1219 int flags, 1220 void *pte_ptr) 1221 { 1222 hat_t *hat = ht->ht_hat; 1223 x86pte_t old_pte; 1224 level_t l = ht->ht_level; 1225 hment_t *hm; 1226 uint_t is_consist; 1227 uint_t is_locked; 1228 int rv = 0; 1229 1230 /* 1231 * Is this a consistent (ie. need mapping list lock) mapping? 1232 */ 1233 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0); 1234 1235 /* 1236 * Track locked mapping count in the htable. Do this first, 1237 * as we track locking even if there already is a mapping present. 1238 */ 1239 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat; 1240 if (is_locked) 1241 HTABLE_LOCK_INC(ht); 1242 1243 /* 1244 * Acquire the page's mapping list lock and get an hment to use. 1245 * Note that hment_prepare() might return NULL. 1246 */ 1247 if (is_consist) { 1248 x86_hm_enter(pp); 1249 hm = hment_prepare(ht, entry, pp); 1250 } 1251 1252 /* 1253 * Set the new pte, retrieving the old one at the same time. 1254 */ 1255 old_pte = x86pte_set(ht, entry, pte, pte_ptr); 1256 1257 /* 1258 * Did we get a large page / page table collision? 1259 */ 1260 if (old_pte == LPAGE_ERROR) { 1261 if (is_locked) 1262 HTABLE_LOCK_DEC(ht); 1263 rv = -1; 1264 goto done; 1265 } 1266 1267 /* 1268 * If the mapping didn't change there is nothing more to do. 1269 */ 1270 if (PTE_EQUIV(pte, old_pte)) 1271 goto done; 1272 1273 /* 1274 * Install a new mapping in the page's mapping list 1275 */ 1276 if (!PTE_ISVALID(old_pte)) { 1277 if (is_consist) { 1278 hment_assign(ht, entry, pp, hm); 1279 x86_hm_exit(pp); 1280 } else { 1281 ASSERT(flags & HAT_LOAD_NOCONSIST); 1282 } 1283 #if defined(__amd64) 1284 if (ht->ht_flags & HTABLE_VLP) { 1285 cpu_t *cpu = CPU; 1286 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes; 1287 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1288 } 1289 #endif 1290 HTABLE_INC(ht->ht_valid_cnt); 1291 PGCNT_INC(hat, l); 1292 return (rv); 1293 } 1294 1295 /* 1296 * Remap's are more complicated: 1297 * - HAT_LOAD_REMAP must be specified if changing the pfn. 1298 * We also require that NOCONSIST be specified. 
1299 * - Otherwise only permission or caching bits may change. 1300 */ 1301 if (!PTE_ISPAGE(old_pte, l)) 1302 panic("non-null/page mapping pte=" FMT_PTE, old_pte); 1303 1304 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) { 1305 REMAPASSERT(flags & HAT_LOAD_REMAP); 1306 REMAPASSERT(flags & HAT_LOAD_NOCONSIST); 1307 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 1308 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) == 1309 pf_is_memory(PTE2PFN(pte, l))); 1310 REMAPASSERT(!is_consist); 1311 } 1312 1313 /* 1314 * We only let remaps change the certain bits in the PTE. 1315 */ 1316 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS)) 1317 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n", 1318 old_pte, pte); 1319 1320 /* 1321 * We don't create any mapping list entries on a remap, so release 1322 * any allocated hment after we drop the mapping list lock. 1323 */ 1324 done: 1325 if (is_consist) { 1326 x86_hm_exit(pp); 1327 if (hm != NULL) 1328 hment_free(hm); 1329 } 1330 return (rv); 1331 } 1332 1333 /* 1334 * Internal routine to load a single page table entry. This only fails if 1335 * we attempt to overwrite a page table link with a large page. 1336 */ 1337 static int 1338 hati_load_common( 1339 hat_t *hat, 1340 uintptr_t va, 1341 page_t *pp, 1342 uint_t attr, 1343 uint_t flags, 1344 level_t level, 1345 pfn_t pfn) 1346 { 1347 htable_t *ht; 1348 uint_t entry; 1349 x86pte_t pte; 1350 int rv = 0; 1351 1352 /* 1353 * The number 16 is arbitrary and here to catch a recursion problem 1354 * early before we blow out the kernel stack. 1355 */ 1356 ++curthread->t_hatdepth; 1357 ASSERT(curthread->t_hatdepth < 16); 1358 1359 ASSERT(hat == kas.a_hat || 1360 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1361 1362 if (flags & HAT_LOAD_SHARE) 1363 hat->hat_flags |= HAT_SHARED; 1364 1365 /* 1366 * Find the page table that maps this page if it already exists. 1367 */ 1368 ht = htable_lookup(hat, va, level); 1369 1370 /* 1371 * We must have HAT_LOAD_NOCONSIST if page_t is NULL. 
1372 */ 1373 if (pp == NULL) 1374 flags |= HAT_LOAD_NOCONSIST; 1375 1376 if (ht == NULL) { 1377 ht = htable_create(hat, va, level, NULL); 1378 ASSERT(ht != NULL); 1379 } 1380 entry = htable_va2entry(va, ht); 1381 1382 /* 1383 * a bunch of paranoid error checking 1384 */ 1385 ASSERT(ht->ht_busy > 0); 1386 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) 1387 panic("hati_load_common: bad htable %p, va %p", 1388 (void *)ht, (void *)va); 1389 ASSERT(ht->ht_level == level); 1390 1391 /* 1392 * construct the new PTE 1393 */ 1394 if (hat == kas.a_hat) 1395 attr &= ~PROT_USER; 1396 pte = hati_mkpte(pfn, attr, level, flags); 1397 if (hat == kas.a_hat && va >= kernelbase) 1398 PTE_SET(pte, mmu.pt_global); 1399 1400 /* 1401 * establish the mapping 1402 */ 1403 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL); 1404 1405 /* 1406 * release the htable and any reserves 1407 */ 1408 htable_release(ht); 1409 --curthread->t_hatdepth; 1410 return (rv); 1411 } 1412 1413 /* 1414 * special case of hat_memload to deal with some kernel addrs for performance 1415 */ 1416 static void 1417 hat_kmap_load( 1418 caddr_t addr, 1419 page_t *pp, 1420 uint_t attr, 1421 uint_t flags) 1422 { 1423 uintptr_t va = (uintptr_t)addr; 1424 x86pte_t pte; 1425 pfn_t pfn = page_pptonum(pp); 1426 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr); 1427 htable_t *ht; 1428 uint_t entry; 1429 void *pte_ptr; 1430 1431 /* 1432 * construct the requested PTE 1433 */ 1434 attr &= ~PROT_USER; 1435 attr |= HAT_STORECACHING_OK; 1436 pte = hati_mkpte(pfn, attr, 0, flags); 1437 PTE_SET(pte, mmu.pt_global); 1438 1439 /* 1440 * Figure out the pte_ptr and htable and use common code to finish up 1441 */ 1442 if (mmu.pae_hat) 1443 pte_ptr = mmu.kmap_ptes + pg_off; 1444 else 1445 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off; 1446 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >> 1447 LEVEL_SHIFT(1)]; 1448 entry = htable_va2entry(va, ht); 1449 ++curthread->t_hatdepth; 1450 ASSERT(curthread->t_hatdepth < 16); 1451 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr); 1452 --curthread->t_hatdepth; 1453 } 1454 1455 /* 1456 * hat_memload() - load a translation to the given page struct 1457 * 1458 * Flags for hat_memload/hat_devload/hat_*attr. 1459 * 1460 * HAT_LOAD Default flags to load a translation to the page. 1461 * 1462 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(), 1463 * and hat_devload(). 1464 * 1465 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list. 1466 * sets PT_NOCONSIST 1467 * 1468 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables 1469 * that map some user pages (not kas) is shared by more 1470 * than one process (eg. ISM). 1471 * 1472 * HAT_LOAD_REMAP Reload a valid pte with a different page frame. 1473 * 1474 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this 1475 * point, it's setting up mapping to allocate internal 1476 * hat layer data structures. This flag forces hat layer 1477 * to tap its reserves in order to prevent infinite 1478 * recursion. 1479 * 1480 * The following is a protection attribute (like PROT_READ, etc.) 1481 * 1482 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits 1483 * are never cleared. 1484 * 1485 * Installing new valid PTE's and creation of the mapping list 1486 * entry are controlled under the same lock. It's derived from the 1487 * page_t being mapped. 
1488 */ 1489 static uint_t supported_memload_flags = 1490 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST | 1491 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT; 1492 1493 void 1494 hat_memload( 1495 hat_t *hat, 1496 caddr_t addr, 1497 page_t *pp, 1498 uint_t attr, 1499 uint_t flags) 1500 { 1501 uintptr_t va = (uintptr_t)addr; 1502 level_t level = 0; 1503 pfn_t pfn = page_pptonum(pp); 1504 1505 XPV_DISALLOW_MIGRATE(); 1506 ASSERT(IS_PAGEALIGNED(va)); 1507 ASSERT(hat == kas.a_hat || va < _userlimit); 1508 ASSERT(hat == kas.a_hat || 1509 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1510 ASSERT((flags & supported_memload_flags) == flags); 1511 1512 ASSERT(!IN_VA_HOLE(va)); 1513 ASSERT(!PP_ISFREE(pp)); 1514 1515 /* 1516 * kernel address special case for performance. 1517 */ 1518 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 1519 ASSERT(hat == kas.a_hat); 1520 hat_kmap_load(addr, pp, attr, flags); 1521 XPV_ALLOW_MIGRATE(); 1522 return; 1523 } 1524 1525 /* 1526 * This is used for memory with normal caching enabled, so 1527 * always set HAT_STORECACHING_OK. 1528 */ 1529 attr |= HAT_STORECACHING_OK; 1530 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0) 1531 panic("unexpected hati_load_common() failure"); 1532 XPV_ALLOW_MIGRATE(); 1533 } 1534 1535 /* ARGSUSED */ 1536 void 1537 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 1538 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 1539 { 1540 hat_memload(hat, addr, pp, attr, flags); 1541 } 1542 1543 /* 1544 * Load the given array of page structs using large pages when possible 1545 */ 1546 void 1547 hat_memload_array( 1548 hat_t *hat, 1549 caddr_t addr, 1550 size_t len, 1551 page_t **pages, 1552 uint_t attr, 1553 uint_t flags) 1554 { 1555 uintptr_t va = (uintptr_t)addr; 1556 uintptr_t eaddr = va + len; 1557 level_t level; 1558 size_t pgsize; 1559 pgcnt_t pgindx = 0; 1560 pfn_t pfn; 1561 pgcnt_t i; 1562 1563 XPV_DISALLOW_MIGRATE(); 1564 ASSERT(IS_PAGEALIGNED(va)); 1565 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 1566 ASSERT(hat == kas.a_hat || 1567 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1568 ASSERT((flags & supported_memload_flags) == flags); 1569 1570 /* 1571 * memload is used for memory with full caching enabled, so 1572 * set HAT_STORECACHING_OK. 1573 */ 1574 attr |= HAT_STORECACHING_OK; 1575 1576 /* 1577 * handle all pages using largest possible pagesize 1578 */ 1579 while (va < eaddr) { 1580 /* 1581 * decide what level mapping to use (ie. pagesize) 1582 */ 1583 pfn = page_pptonum(pages[pgindx]); 1584 for (level = mmu.max_page_level; ; --level) { 1585 pgsize = LEVEL_SIZE(level); 1586 if (level == 0) 1587 break; 1588 1589 if (!IS_P2ALIGNED(va, pgsize) || 1590 (eaddr - va) < pgsize || 1591 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize)) 1592 continue; 1593 1594 /* 1595 * To use a large mapping of this size, all the 1596 * pages we are passed must be sequential subpages 1597 * of the large page. 1598 * hat_page_demote() can't change p_szc because 1599 * all pages are locked. 1600 */ 1601 if (pages[pgindx]->p_szc >= level) { 1602 for (i = 0; i < mmu_btop(pgsize); ++i) { 1603 if (pfn + i != 1604 page_pptonum(pages[pgindx + i])) 1605 break; 1606 ASSERT(pages[pgindx + i]->p_szc >= 1607 level); 1608 ASSERT(pages[pgindx] + i == 1609 pages[pgindx + i]); 1610 } 1611 if (i == mmu_btop(pgsize)) { 1612 #ifdef DEBUG 1613 if (level == 2) 1614 map1gcnt++; 1615 #endif 1616 break; 1617 } 1618 } 1619 } 1620 1621 /* 1622 * Load this page mapping. 
If the load fails, try a smaller 1623 * pagesize. 1624 */ 1625 ASSERT(!IN_VA_HOLE(va)); 1626 while (hati_load_common(hat, va, pages[pgindx], attr, 1627 flags, level, pfn) != 0) { 1628 if (level == 0) 1629 panic("unexpected hati_load_common() failure"); 1630 --level; 1631 pgsize = LEVEL_SIZE(level); 1632 } 1633 1634 /* 1635 * move to next page 1636 */ 1637 va += pgsize; 1638 pgindx += mmu_btop(pgsize); 1639 } 1640 XPV_ALLOW_MIGRATE(); 1641 } 1642 1643 /* ARGSUSED */ 1644 void 1645 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 1646 struct page **pps, uint_t attr, uint_t flags, 1647 hat_region_cookie_t rcookie) 1648 { 1649 hat_memload_array(hat, addr, len, pps, attr, flags); 1650 } 1651 1652 /* 1653 * void hat_devload(hat, addr, len, pf, attr, flags) 1654 * load/lock the given page frame number 1655 * 1656 * Advisory ordering attributes. Apply only to device mappings. 1657 * 1658 * HAT_STRICTORDER: the CPU must issue the references in order, as the 1659 * programmer specified. This is the default. 1660 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds 1661 * of reordering; store or load with store or load). 1662 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores 1663 * to consecutive locations (for example, turn two consecutive byte 1664 * stores into one halfword store), and it may batch individual loads 1665 * (for example, turn two consecutive byte loads into one halfword load). 1666 * This also implies re-ordering. 1667 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it 1668 * until another store occurs. The default is to fetch new data 1669 * on every load. This also implies merging. 1670 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to 1671 * the device (perhaps with other data) at a later time. The default is 1672 * to push the data right away. This also implies load caching. 1673 * 1674 * Equivalent of hat_memload(), but can be used for device memory where 1675 * there are no page_t's and we support additional flags (write merging, etc). 1676 * Note that we can have large page mappings with this interface. 1677 */ 1678 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK | 1679 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK | 1680 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK; 1681 1682 void 1683 hat_devload( 1684 hat_t *hat, 1685 caddr_t addr, 1686 size_t len, 1687 pfn_t pfn, 1688 uint_t attr, 1689 int flags) 1690 { 1691 uintptr_t va = ALIGN2PAGE(addr); 1692 uintptr_t eva = va + len; 1693 level_t level; 1694 size_t pgsize; 1695 page_t *pp; 1696 int f; /* per PTE copy of flags - maybe modified */ 1697 uint_t a; /* per PTE copy of attr */ 1698 1699 XPV_DISALLOW_MIGRATE(); 1700 ASSERT(IS_PAGEALIGNED(va)); 1701 ASSERT(hat == kas.a_hat || eva <= _userlimit); 1702 ASSERT(hat == kas.a_hat || 1703 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1704 ASSERT((flags & supported_devload_flags) == flags); 1705 1706 /* 1707 * handle all pages 1708 */ 1709 while (va < eva) { 1710 1711 /* 1712 * decide what level mapping to use (ie. 
pagesize) 1713 */ 1714 for (level = mmu.max_page_level; ; --level) { 1715 pgsize = LEVEL_SIZE(level); 1716 if (level == 0) 1717 break; 1718 if (IS_P2ALIGNED(va, pgsize) && 1719 (eva - va) >= pgsize && 1720 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) { 1721 #ifdef DEBUG 1722 if (level == 2) 1723 map1gcnt++; 1724 #endif 1725 break; 1726 } 1727 } 1728 1729 /* 1730 * If this is just memory then allow caching (this happens 1731 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used 1732 * to override that. If we don't have a page_t then make sure 1733 * NOCONSIST is set. 1734 */ 1735 a = attr; 1736 f = flags; 1737 if (!pf_is_memory(pfn)) 1738 f |= HAT_LOAD_NOCONSIST; 1739 else if (!(a & HAT_PLAT_NOCACHE)) 1740 a |= HAT_STORECACHING_OK; 1741 1742 if (f & HAT_LOAD_NOCONSIST) 1743 pp = NULL; 1744 else 1745 pp = page_numtopp_nolock(pfn); 1746 1747 /* 1748 * Check to make sure we are really trying to map a valid 1749 * memory page. The caller wishing to intentionally map 1750 * free memory pages will have passed the HAT_LOAD_NOCONSIST 1751 * flag, then pp will be NULL. 1752 */ 1753 if (pp != NULL) { 1754 if (PP_ISFREE(pp)) { 1755 panic("hat_devload: loading " 1756 "a mapping to free page %p", (void *)pp); 1757 } 1758 1759 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 1760 panic("hat_devload: loading a mapping " 1761 "to an unlocked page %p", 1762 (void *)pp); 1763 } 1764 } 1765 1766 /* 1767 * load this page mapping 1768 */ 1769 ASSERT(!IN_VA_HOLE(va)); 1770 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) { 1771 if (level == 0) 1772 panic("unexpected hati_load_common() failure"); 1773 --level; 1774 pgsize = LEVEL_SIZE(level); 1775 } 1776 1777 /* 1778 * move to next page 1779 */ 1780 va += pgsize; 1781 pfn += mmu_btop(pgsize); 1782 } 1783 XPV_ALLOW_MIGRATE(); 1784 } 1785 1786 /* 1787 * void hat_unlock(hat, addr, len) 1788 * unlock the mappings to a given range of addresses 1789 * 1790 * Locks are tracked by ht_lock_cnt in the htable. 1791 */ 1792 void 1793 hat_unlock(hat_t *hat, caddr_t addr, size_t len) 1794 { 1795 uintptr_t vaddr = (uintptr_t)addr; 1796 uintptr_t eaddr = vaddr + len; 1797 htable_t *ht = NULL; 1798 1799 /* 1800 * kernel entries are always locked, we don't track lock counts 1801 */ 1802 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 1803 ASSERT(IS_PAGEALIGNED(vaddr)); 1804 ASSERT(IS_PAGEALIGNED(eaddr)); 1805 if (hat == kas.a_hat) 1806 return; 1807 if (eaddr > _userlimit) 1808 panic("hat_unlock() address out of range - above _userlimit"); 1809 1810 XPV_DISALLOW_MIGRATE(); 1811 ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 1812 while (vaddr < eaddr) { 1813 (void) htable_walk(hat, &ht, &vaddr, eaddr); 1814 if (ht == NULL) 1815 break; 1816 1817 ASSERT(!IN_VA_HOLE(vaddr)); 1818 1819 if (ht->ht_lock_cnt < 1) 1820 panic("hat_unlock(): lock_cnt < 1, " 1821 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr); 1822 HTABLE_LOCK_DEC(ht); 1823 1824 vaddr += LEVEL_SIZE(ht->ht_level); 1825 } 1826 if (ht) 1827 htable_release(ht); 1828 XPV_ALLOW_MIGRATE(); 1829 } 1830 1831 /* ARGSUSED */ 1832 void 1833 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len, 1834 hat_region_cookie_t rcookie) 1835 { 1836 panic("No shared region support on x86"); 1837 } 1838 1839 #if !defined(__xpv) 1840 /* 1841 * Cross call service routine to demap a virtual page on 1842 * the current CPU or flush all mappings in TLB. 
1843 */ 1844 /*ARGSUSED*/ 1845 static int 1846 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3) 1847 { 1848 hat_t *hat = (hat_t *)a1; 1849 caddr_t addr = (caddr_t)a2; 1850 size_t len = (size_t)a3; 1851 1852 /* 1853 * If the target hat isn't the kernel and this CPU isn't operating 1854 * in the target hat, we can ignore the cross call. 1855 */ 1856 if (hat != kas.a_hat && hat != CPU->cpu_current_hat) 1857 return (0); 1858 1859 /* 1860 * For a normal address, we flush a range of contiguous mappings 1861 */ 1862 if ((uintptr_t)addr != DEMAP_ALL_ADDR) { 1863 for (size_t i = 0; i < len; i += MMU_PAGESIZE) 1864 mmu_tlbflush_entry(addr + i); 1865 return (0); 1866 } 1867 1868 /* 1869 * Otherwise we reload cr3 to effect a complete TLB flush. 1870 * 1871 * A reload of cr3 on a VLP process also means we must also recopy in 1872 * the pte values from the struct hat 1873 */ 1874 if (hat->hat_flags & HAT_VLP) { 1875 #if defined(__amd64) 1876 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes; 1877 1878 VLP_COPY(hat->hat_vlp_ptes, vlpptep); 1879 #elif defined(__i386) 1880 reload_pae32(hat, CPU); 1881 #endif 1882 } 1883 reload_cr3(); 1884 return (0); 1885 } 1886 1887 /* 1888 * Flush all TLB entries, including global (ie. kernel) ones. 1889 */ 1890 static void 1891 flush_all_tlb_entries(void) 1892 { 1893 ulong_t cr4 = getcr4(); 1894 1895 if (cr4 & CR4_PGE) { 1896 setcr4(cr4 & ~(ulong_t)CR4_PGE); 1897 setcr4(cr4); 1898 1899 /* 1900 * 32 bit PAE also needs to always reload_cr3() 1901 */ 1902 if (mmu.max_level == 2) 1903 reload_cr3(); 1904 } else { 1905 reload_cr3(); 1906 } 1907 } 1908 1909 #define TLB_CPU_HALTED (01ul) 1910 #define TLB_INVAL_ALL (02ul) 1911 #define CAS_TLB_INFO(cpu, old, new) \ 1912 atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new)) 1913 1914 /* 1915 * Record that a CPU is going idle 1916 */ 1917 void 1918 tlb_going_idle(void) 1919 { 1920 atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED); 1921 } 1922 1923 /* 1924 * Service a delayed TLB flush if coming out of being idle. 1925 * It will be called from cpu idle notification with interrupt disabled. 1926 */ 1927 void 1928 tlb_service(void) 1929 { 1930 ulong_t tlb_info; 1931 ulong_t found; 1932 1933 /* 1934 * We only have to do something if coming out of being idle. 1935 */ 1936 tlb_info = CPU->cpu_m.mcpu_tlb_info; 1937 if (tlb_info & TLB_CPU_HALTED) { 1938 ASSERT(CPU->cpu_current_hat == kas.a_hat); 1939 1940 /* 1941 * Atomic clear and fetch of old state. 1942 */ 1943 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) { 1944 ASSERT(found & TLB_CPU_HALTED); 1945 tlb_info = found; 1946 SMT_PAUSE(); 1947 } 1948 if (tlb_info & TLB_INVAL_ALL) 1949 flush_all_tlb_entries(); 1950 } 1951 } 1952 #endif /* !__xpv */ 1953 1954 /* 1955 * Internal routine to do cross calls to invalidate a range of pages on 1956 * all CPUs using a given hat. 1957 */ 1958 void 1959 hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len) 1960 { 1961 extern int flushes_require_xcalls; /* from mp_startup.c */ 1962 cpuset_t justme; 1963 cpuset_t cpus_to_shootdown; 1964 #ifndef __xpv 1965 cpuset_t check_cpus; 1966 cpu_t *cpup; 1967 int c; 1968 #endif 1969 1970 /* 1971 * If the hat is being destroyed, there are no more users, so 1972 * demap need not do anything. 1973 */ 1974 if (hat->hat_flags & HAT_FREEING) 1975 return; 1976 1977 /* 1978 * If demapping from a shared pagetable, we best demap the 1979 * entire set of user TLBs, since we don't know what addresses 1980 * these were shared at. 
1981 */ 1982 if (hat->hat_flags & HAT_SHARED) { 1983 hat = kas.a_hat; 1984 va = DEMAP_ALL_ADDR; 1985 } 1986 1987 /* 1988 * if not running with multiple CPUs, don't use cross calls 1989 */ 1990 if (panicstr || !flushes_require_xcalls) { 1991 #ifdef __xpv 1992 if (va == DEMAP_ALL_ADDR) { 1993 xen_flush_tlb(); 1994 } else { 1995 for (size_t i = 0; i < len; i += MMU_PAGESIZE) 1996 xen_flush_va((caddr_t)(va + i)); 1997 } 1998 #else 1999 (void) hati_demap_func((xc_arg_t)hat, 2000 (xc_arg_t)va, (xc_arg_t)len); 2001 #endif 2002 return; 2003 } 2004 2005 2006 /* 2007 * Determine CPUs to shootdown. Kernel changes always do all CPUs. 2008 * Otherwise it's just CPUs currently executing in this hat. 2009 */ 2010 kpreempt_disable(); 2011 CPUSET_ONLY(justme, CPU->cpu_id); 2012 if (hat == kas.a_hat) 2013 cpus_to_shootdown = khat_cpuset; 2014 else 2015 cpus_to_shootdown = hat->hat_cpus; 2016 2017 #ifndef __xpv 2018 /* 2019 * If any CPUs in the set are idle, just request a delayed flush 2020 * and avoid waking them up. 2021 */ 2022 check_cpus = cpus_to_shootdown; 2023 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) { 2024 ulong_t tlb_info; 2025 2026 if (!CPU_IN_SET(check_cpus, c)) 2027 continue; 2028 CPUSET_DEL(check_cpus, c); 2029 cpup = cpu[c]; 2030 if (cpup == NULL) 2031 continue; 2032 2033 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2034 while (tlb_info == TLB_CPU_HALTED) { 2035 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED, 2036 TLB_CPU_HALTED | TLB_INVAL_ALL); 2037 SMT_PAUSE(); 2038 tlb_info = cpup->cpu_m.mcpu_tlb_info; 2039 } 2040 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) { 2041 HATSTAT_INC(hs_tlb_inval_delayed); 2042 CPUSET_DEL(cpus_to_shootdown, c); 2043 } 2044 } 2045 #endif 2046 2047 if (CPUSET_ISNULL(cpus_to_shootdown) || 2048 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) { 2049 2050 #ifdef __xpv 2051 if (va == DEMAP_ALL_ADDR) { 2052 xen_flush_tlb(); 2053 } else { 2054 for (size_t i = 0; i < len; i += MMU_PAGESIZE) 2055 xen_flush_va((caddr_t)(va + i)); 2056 } 2057 #else 2058 (void) hati_demap_func((xc_arg_t)hat, 2059 (xc_arg_t)va, (xc_arg_t)len); 2060 #endif 2061 2062 } else { 2063 2064 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id); 2065 #ifdef __xpv 2066 if (va == DEMAP_ALL_ADDR) { 2067 xen_gflush_tlb(cpus_to_shootdown); 2068 } else { 2069 for (size_t i = 0; i < len; i += MMU_PAGESIZE) { 2070 xen_gflush_va((caddr_t)(va + i), 2071 cpus_to_shootdown); 2072 } 2073 } 2074 #else 2075 xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len, 2076 CPUSET2BV(cpus_to_shootdown), hati_demap_func); 2077 #endif 2078 2079 } 2080 kpreempt_enable(); 2081 } 2082 2083 void 2084 hat_tlb_inval(hat_t *hat, uintptr_t va) 2085 { 2086 hat_tlb_inval_range(hat, va, MMU_PAGESIZE); 2087 } 2088 2089 /* 2090 * Interior routine for HAT_UNLOADs from hat_unload_callback(), 2091 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't 2092 * handle releasing of the htables. 2093 */ 2094 void 2095 hat_pte_unmap( 2096 htable_t *ht, 2097 uint_t entry, 2098 uint_t flags, 2099 x86pte_t old_pte, 2100 void *pte_ptr, 2101 boolean_t tlb) 2102 { 2103 hat_t *hat = ht->ht_hat; 2104 hment_t *hm = NULL; 2105 page_t *pp = NULL; 2106 level_t l = ht->ht_level; 2107 pfn_t pfn; 2108 2109 /* 2110 * We always track the locking counts, even if nothing is unmapped 2111 */ 2112 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) { 2113 ASSERT(ht->ht_lock_cnt > 0); 2114 HTABLE_LOCK_DEC(ht); 2115 } 2116 2117 /* 2118 * Figure out which page's mapping list lock to acquire using the PFN 2119 * passed in "old" PTE. 
We then attempt to invalidate the PTE. 2120 * If another thread, probably a hat_pageunload, has asynchronously 2121 * unmapped/remapped this address we'll loop here. 2122 */ 2123 ASSERT(ht->ht_busy > 0); 2124 while (PTE_ISVALID(old_pte)) { 2125 pfn = PTE2PFN(old_pte, l); 2126 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) { 2127 pp = NULL; 2128 } else { 2129 #ifdef __xpv 2130 if (pfn == PFN_INVALID) 2131 panic("Invalid PFN, but not PT_NOCONSIST"); 2132 #endif 2133 pp = page_numtopp_nolock(pfn); 2134 if (pp == NULL) { 2135 panic("no page_t, not NOCONSIST: old_pte=" 2136 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx", 2137 old_pte, (uintptr_t)ht, entry, 2138 (uintptr_t)pte_ptr); 2139 } 2140 x86_hm_enter(pp); 2141 } 2142 2143 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb); 2144 2145 /* 2146 * If the page hadn't changed we've unmapped it and can proceed 2147 */ 2148 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn) 2149 break; 2150 2151 /* 2152 * Otherwise, we'll have to retry with the current old_pte. 2153 * Drop the hment lock, since the pfn may have changed. 2154 */ 2155 if (pp != NULL) { 2156 x86_hm_exit(pp); 2157 pp = NULL; 2158 } else { 2159 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST); 2160 } 2161 } 2162 2163 /* 2164 * If the old mapping wasn't valid, there's nothing more to do 2165 */ 2166 if (!PTE_ISVALID(old_pte)) { 2167 if (pp != NULL) 2168 x86_hm_exit(pp); 2169 return; 2170 } 2171 2172 /* 2173 * Take care of syncing any MOD/REF bits and removing the hment. 2174 */ 2175 if (pp != NULL) { 2176 if (!(flags & HAT_UNLOAD_NOSYNC)) 2177 hati_sync_pte_to_page(pp, old_pte, l); 2178 hm = hment_remove(pp, ht, entry); 2179 x86_hm_exit(pp); 2180 if (hm != NULL) 2181 hment_free(hm); 2182 } 2183 2184 /* 2185 * Handle book keeping in the htable and hat 2186 */ 2187 ASSERT(ht->ht_valid_cnt > 0); 2188 HTABLE_DEC(ht->ht_valid_cnt); 2189 PGCNT_DEC(hat, l); 2190 } 2191 2192 /* 2193 * very cheap unload implementation to special case some kernel addresses 2194 */ 2195 static void 2196 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags) 2197 { 2198 uintptr_t va = (uintptr_t)addr; 2199 uintptr_t eva = va + len; 2200 pgcnt_t pg_index; 2201 htable_t *ht; 2202 uint_t entry; 2203 x86pte_t *pte_ptr; 2204 x86pte_t old_pte; 2205 2206 for (; va < eva; va += MMU_PAGESIZE) { 2207 /* 2208 * Get the PTE 2209 */ 2210 pg_index = mmu_btop(va - mmu.kmap_addr); 2211 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index); 2212 old_pte = GET_PTE(pte_ptr); 2213 2214 /* 2215 * get the htable / entry 2216 */ 2217 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) 2218 >> LEVEL_SHIFT(1)]; 2219 entry = htable_va2entry(va, ht); 2220 2221 /* 2222 * use mostly common code to unmap it. 2223 */ 2224 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE); 2225 } 2226 } 2227 2228 2229 /* 2230 * unload a range of virtual address space (no callback) 2231 */ 2232 void 2233 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2234 { 2235 uintptr_t va = (uintptr_t)addr; 2236 2237 XPV_DISALLOW_MIGRATE(); 2238 ASSERT(hat == kas.a_hat || va + len <= _userlimit); 2239 2240 /* 2241 * special case for performance. 2242 */ 2243 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) { 2244 ASSERT(hat == kas.a_hat); 2245 hat_kmap_unload(addr, len, flags); 2246 } else { 2247 hat_unload_callback(hat, addr, len, flags, NULL); 2248 } 2249 XPV_ALLOW_MIGRATE(); 2250 } 2251 2252 /* 2253 * Do the callbacks for ranges being unloaded. 
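 *
 * (Editorial example: unloading 4MB of a segment backed by 2MB pages
 * typically yields one range_info_t with rng_level == 1 and
 * rng_cnt == 2; handle_ranges() below then issues a single
 * hat_tlb_inval_range(hat, rng_va, 2 << LEVEL_SHIFT(1)) covering both
 * pages and, if a callback was supplied, invokes it once with
 * hcb_start_addr/hcb_end_addr spanning that 4MB run.)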
2254 */ 2255 typedef struct range_info { 2256 uintptr_t rng_va; 2257 ulong_t rng_cnt; 2258 level_t rng_level; 2259 } range_info_t; 2260 2261 /* 2262 * Invalidate the TLB, and perform the callback to the upper level VM system, 2263 * for the specified ranges of contiguous pages. 2264 */ 2265 static void 2266 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range) 2267 { 2268 while (cnt > 0) { 2269 size_t len; 2270 2271 --cnt; 2272 len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level); 2273 hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len); 2274 2275 if (cb != NULL) { 2276 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va; 2277 cb->hcb_end_addr = cb->hcb_start_addr; 2278 cb->hcb_end_addr += len; 2279 cb->hcb_function(cb); 2280 } 2281 } 2282 } 2283 2284 /* 2285 * Unload a given range of addresses (has optional callback) 2286 * 2287 * Flags: 2288 * define HAT_UNLOAD 0x00 2289 * define HAT_UNLOAD_NOSYNC 0x02 2290 * define HAT_UNLOAD_UNLOCK 0x04 2291 * define HAT_UNLOAD_OTHER 0x08 - not used 2292 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD 2293 */ 2294 #define MAX_UNLOAD_CNT (8) 2295 void 2296 hat_unload_callback( 2297 hat_t *hat, 2298 caddr_t addr, 2299 size_t len, 2300 uint_t flags, 2301 hat_callback_t *cb) 2302 { 2303 uintptr_t vaddr = (uintptr_t)addr; 2304 uintptr_t eaddr = vaddr + len; 2305 htable_t *ht = NULL; 2306 uint_t entry; 2307 uintptr_t contig_va = (uintptr_t)-1L; 2308 range_info_t r[MAX_UNLOAD_CNT]; 2309 uint_t r_cnt = 0; 2310 x86pte_t old_pte; 2311 2312 XPV_DISALLOW_MIGRATE(); 2313 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2314 ASSERT(IS_PAGEALIGNED(vaddr)); 2315 ASSERT(IS_PAGEALIGNED(eaddr)); 2316 2317 /* 2318 * Special case a single page being unloaded for speed. This happens 2319 * quite frequently, COW faults after a fork() for example. 2320 */ 2321 if (cb == NULL && len == MMU_PAGESIZE) { 2322 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0); 2323 if (ht != NULL) { 2324 if (PTE_ISVALID(old_pte)) { 2325 hat_pte_unmap(ht, entry, flags, old_pte, 2326 NULL, B_TRUE); 2327 } 2328 htable_release(ht); 2329 } 2330 XPV_ALLOW_MIGRATE(); 2331 return; 2332 } 2333 2334 while (vaddr < eaddr) { 2335 old_pte = htable_walk(hat, &ht, &vaddr, eaddr); 2336 if (ht == NULL) 2337 break; 2338 2339 ASSERT(!IN_VA_HOLE(vaddr)); 2340 2341 if (vaddr < (uintptr_t)addr) 2342 panic("hat_unload_callback(): unmap inside large page"); 2343 2344 /* 2345 * We'll do the call backs for contiguous ranges 2346 */ 2347 if (vaddr != contig_va || 2348 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) { 2349 if (r_cnt == MAX_UNLOAD_CNT) { 2350 handle_ranges(hat, cb, r_cnt, r); 2351 r_cnt = 0; 2352 } 2353 r[r_cnt].rng_va = vaddr; 2354 r[r_cnt].rng_cnt = 0; 2355 r[r_cnt].rng_level = ht->ht_level; 2356 ++r_cnt; 2357 } 2358 2359 /* 2360 * Unload one mapping (for a single page) from the page tables. 2361 * Note that we do not remove the mapping from the TLB yet, 2362 * as indicated by the tlb=FALSE argument to hat_pte_unmap(). 2363 * handle_ranges() will clear the TLB entries with one call to 2364 * hat_tlb_inval_range() per contiguous range. This is 2365 * safe because the page can not be reused until the 2366 * callback is made (or we return). 
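 *
 * (Editorial note: at most MAX_UNLOAD_CNT (8) distinct ranges are
 * accumulated before handle_ranges() is forced to run, so the window
 * during which unmapped-but-not-yet-flushed translations exist is
 * bounded by the small on-stack r[] array.)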
2367 */ 2368 entry = htable_va2entry(vaddr, ht); 2369 hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE); 2370 ASSERT(ht->ht_level <= mmu.max_page_level); 2371 vaddr += LEVEL_SIZE(ht->ht_level); 2372 contig_va = vaddr; 2373 ++r[r_cnt - 1].rng_cnt; 2374 } 2375 if (ht) 2376 htable_release(ht); 2377 2378 /* 2379 * handle last range for callbacks 2380 */ 2381 if (r_cnt > 0) 2382 handle_ranges(hat, cb, r_cnt, r); 2383 XPV_ALLOW_MIGRATE(); 2384 } 2385 2386 /* 2387 * Invalidate a virtual address translation on a slave CPU during 2388 * panic() dumps. 2389 */ 2390 void 2391 hat_flush_range(hat_t *hat, caddr_t va, size_t size) 2392 { 2393 ssize_t sz; 2394 caddr_t endva = va + size; 2395 2396 while (va < endva) { 2397 sz = hat_getpagesize(hat, va); 2398 if (sz < 0) { 2399 #ifdef __xpv 2400 xen_flush_tlb(); 2401 #else 2402 flush_all_tlb_entries(); 2403 #endif 2404 break; 2405 } 2406 #ifdef __xpv 2407 xen_flush_va(va); 2408 #else 2409 mmu_tlbflush_entry(va); 2410 #endif 2411 va += sz; 2412 } 2413 } 2414 2415 /* 2416 * synchronize mapping with software data structures 2417 * 2418 * This interface is currently only used by the working set monitor 2419 * driver. 2420 */ 2421 /*ARGSUSED*/ 2422 void 2423 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2424 { 2425 uintptr_t vaddr = (uintptr_t)addr; 2426 uintptr_t eaddr = vaddr + len; 2427 htable_t *ht = NULL; 2428 uint_t entry; 2429 x86pte_t pte; 2430 x86pte_t save_pte; 2431 x86pte_t new; 2432 page_t *pp; 2433 2434 ASSERT(!IN_VA_HOLE(vaddr)); 2435 ASSERT(IS_PAGEALIGNED(vaddr)); 2436 ASSERT(IS_PAGEALIGNED(eaddr)); 2437 ASSERT(hat == kas.a_hat || eaddr <= _userlimit); 2438 2439 XPV_DISALLOW_MIGRATE(); 2440 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2441 try_again: 2442 pte = htable_walk(hat, &ht, &vaddr, eaddr); 2443 if (ht == NULL) 2444 break; 2445 entry = htable_va2entry(vaddr, ht); 2446 2447 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2448 PTE_GET(pte, PT_REF | PT_MOD) == 0) 2449 continue; 2450 2451 /* 2452 * We need to acquire the mapping list lock to protect 2453 * against hat_pageunload(), hat_unload(), etc. 2454 */ 2455 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level)); 2456 if (pp == NULL) 2457 break; 2458 x86_hm_enter(pp); 2459 save_pte = pte; 2460 pte = x86pte_get(ht, entry); 2461 if (pte != save_pte) { 2462 x86_hm_exit(pp); 2463 goto try_again; 2464 } 2465 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC || 2466 PTE_GET(pte, PT_REF | PT_MOD) == 0) { 2467 x86_hm_exit(pp); 2468 continue; 2469 } 2470 2471 /* 2472 * Need to clear ref or mod bits. We may compete with 2473 * hardware updating the R/M bits and have to try again. 2474 */ 2475 if (flags == HAT_SYNC_ZERORM) { 2476 new = pte; 2477 PTE_CLR(new, PT_REF | PT_MOD); 2478 pte = hati_update_pte(ht, entry, pte, new); 2479 if (pte != 0) { 2480 x86_hm_exit(pp); 2481 goto try_again; 2482 } 2483 } else { 2484 /* 2485 * sync the PTE to the page_t 2486 */ 2487 hati_sync_pte_to_page(pp, save_pte, ht->ht_level); 2488 } 2489 x86_hm_exit(pp); 2490 } 2491 if (ht) 2492 htable_release(ht); 2493 XPV_ALLOW_MIGRATE(); 2494 } 2495 2496 /* 2497 * void hat_map(hat, addr, len, flags) 2498 */ 2499 /*ARGSUSED*/ 2500 void 2501 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags) 2502 { 2503 /* does nothing */ 2504 } 2505 2506 /* 2507 * uint_t hat_getattr(hat, addr, *attr) 2508 * returns attr for <hat,addr> in *attr. returns 0 if there was a 2509 * mapping and *attr is valid, nonzero if there was no mapping and 2510 * *attr is not valid. 
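 *
 * (Editorial usage sketch, not in the original source: a caller that
 * only wants to know whether an address is currently mapped writable
 * might do
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(kas.a_hat, addr, &attr) == 0 &&
 *	    (attr & PROT_WRITE))
 *		... the mapping exists and is writable ...
 *
 * bearing in mind the result is advisory and can change as soon as the
 * htable is released.)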
2511 */ 2512 uint_t 2513 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr) 2514 { 2515 uintptr_t vaddr = ALIGN2PAGE(addr); 2516 htable_t *ht = NULL; 2517 x86pte_t pte; 2518 2519 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2520 2521 if (IN_VA_HOLE(vaddr)) 2522 return ((uint_t)-1); 2523 2524 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level); 2525 if (ht == NULL) 2526 return ((uint_t)-1); 2527 2528 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) { 2529 htable_release(ht); 2530 return ((uint_t)-1); 2531 } 2532 2533 *attr = PROT_READ; 2534 if (PTE_GET(pte, PT_WRITABLE)) 2535 *attr |= PROT_WRITE; 2536 if (PTE_GET(pte, PT_USER)) 2537 *attr |= PROT_USER; 2538 if (!PTE_GET(pte, mmu.pt_nx)) 2539 *attr |= PROT_EXEC; 2540 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC) 2541 *attr |= HAT_NOSYNC; 2542 htable_release(ht); 2543 return (0); 2544 } 2545 2546 /* 2547 * hat_updateattr() applies the given attribute change to an existing mapping 2548 */ 2549 #define HAT_LOAD_ATTR 1 2550 #define HAT_SET_ATTR 2 2551 #define HAT_CLR_ATTR 3 2552 2553 static void 2554 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what) 2555 { 2556 uintptr_t vaddr = (uintptr_t)addr; 2557 uintptr_t eaddr = (uintptr_t)addr + len; 2558 htable_t *ht = NULL; 2559 uint_t entry; 2560 x86pte_t oldpte, newpte; 2561 page_t *pp; 2562 2563 XPV_DISALLOW_MIGRATE(); 2564 ASSERT(IS_PAGEALIGNED(vaddr)); 2565 ASSERT(IS_PAGEALIGNED(eaddr)); 2566 ASSERT(hat == kas.a_hat || 2567 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2568 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) { 2569 try_again: 2570 oldpte = htable_walk(hat, &ht, &vaddr, eaddr); 2571 if (ht == NULL) 2572 break; 2573 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST) 2574 continue; 2575 2576 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level)); 2577 if (pp == NULL) 2578 continue; 2579 x86_hm_enter(pp); 2580 2581 newpte = oldpte; 2582 /* 2583 * We found a page table entry in the desired range, 2584 * figure out the new attributes. 2585 */ 2586 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) { 2587 if ((attr & PROT_WRITE) && 2588 !PTE_GET(oldpte, PT_WRITABLE)) 2589 newpte |= PT_WRITABLE; 2590 2591 if ((attr & HAT_NOSYNC) && 2592 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC) 2593 newpte |= PT_NOSYNC; 2594 2595 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx)) 2596 newpte &= ~mmu.pt_nx; 2597 } 2598 2599 if (what == HAT_LOAD_ATTR) { 2600 if (!(attr & PROT_WRITE) && 2601 PTE_GET(oldpte, PT_WRITABLE)) 2602 newpte &= ~PT_WRITABLE; 2603 2604 if (!(attr & HAT_NOSYNC) && 2605 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2606 newpte &= ~PT_SOFTWARE; 2607 2608 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2609 newpte |= mmu.pt_nx; 2610 } 2611 2612 if (what == HAT_CLR_ATTR) { 2613 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE)) 2614 newpte &= ~PT_WRITABLE; 2615 2616 if ((attr & HAT_NOSYNC) && 2617 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC) 2618 newpte &= ~PT_SOFTWARE; 2619 2620 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx)) 2621 newpte |= mmu.pt_nx; 2622 } 2623 2624 /* 2625 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set. 2626 * x86pte_set() depends on this. 2627 */ 2628 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC) 2629 newpte |= PT_REF | PT_MOD; 2630 2631 /* 2632 * what about PROT_READ or others? this code only handles: 2633 * EXEC, WRITE, NOSYNC 2634 */ 2635 2636 /* 2637 * If new PTE really changed, update the table. 
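 *
 * (Editorial note: hati_update_pte() is a compare-and-swap style
 * update; it returns 0 on success, or the PTE it actually found when
 * that value no longer matches oldpte, e.g. because the hardware set
 * REF/MOD underneath us.  The non-zero case drops the mapping list
 * lock and retries from the htable walk.)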
2638 */ 2639 if (newpte != oldpte) { 2640 entry = htable_va2entry(vaddr, ht); 2641 oldpte = hati_update_pte(ht, entry, oldpte, newpte); 2642 if (oldpte != 0) { 2643 x86_hm_exit(pp); 2644 goto try_again; 2645 } 2646 } 2647 x86_hm_exit(pp); 2648 } 2649 if (ht) 2650 htable_release(ht); 2651 XPV_ALLOW_MIGRATE(); 2652 } 2653 2654 /* 2655 * Various wrappers for hat_updateattr() 2656 */ 2657 void 2658 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2659 { 2660 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2661 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR); 2662 } 2663 2664 void 2665 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2666 { 2667 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2668 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR); 2669 } 2670 2671 void 2672 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr) 2673 { 2674 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2675 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR); 2676 } 2677 2678 void 2679 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot) 2680 { 2681 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit); 2682 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR); 2683 } 2684 2685 /* 2686 * size_t hat_getpagesize(hat, addr) 2687 * returns pagesize in bytes for <hat, addr>. returns -1 of there is 2688 * no mapping. This is an advisory call. 2689 */ 2690 ssize_t 2691 hat_getpagesize(hat_t *hat, caddr_t addr) 2692 { 2693 uintptr_t vaddr = ALIGN2PAGE(addr); 2694 htable_t *ht; 2695 size_t pagesize; 2696 2697 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2698 if (IN_VA_HOLE(vaddr)) 2699 return (-1); 2700 ht = htable_getpage(hat, vaddr, NULL); 2701 if (ht == NULL) 2702 return (-1); 2703 pagesize = LEVEL_SIZE(ht->ht_level); 2704 htable_release(ht); 2705 return (pagesize); 2706 } 2707 2708 2709 2710 /* 2711 * pfn_t hat_getpfnum(hat, addr) 2712 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid. 2713 */ 2714 pfn_t 2715 hat_getpfnum(hat_t *hat, caddr_t addr) 2716 { 2717 uintptr_t vaddr = ALIGN2PAGE(addr); 2718 htable_t *ht; 2719 uint_t entry; 2720 pfn_t pfn = PFN_INVALID; 2721 2722 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2723 if (khat_running == 0) 2724 return (PFN_INVALID); 2725 2726 if (IN_VA_HOLE(vaddr)) 2727 return (PFN_INVALID); 2728 2729 XPV_DISALLOW_MIGRATE(); 2730 /* 2731 * A very common use of hat_getpfnum() is from the DDI for kernel pages. 2732 * Use the kmap_ptes (which also covers the 32 bit heap) to speed 2733 * this up. 2734 */ 2735 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2736 x86pte_t pte; 2737 pgcnt_t pg_index; 2738 2739 pg_index = mmu_btop(vaddr - mmu.kmap_addr); 2740 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index)); 2741 if (PTE_ISVALID(pte)) 2742 /*LINTED [use of constant 0 causes a lint warning] */ 2743 pfn = PTE2PFN(pte, 0); 2744 XPV_ALLOW_MIGRATE(); 2745 return (pfn); 2746 } 2747 2748 ht = htable_getpage(hat, vaddr, &entry); 2749 if (ht == NULL) { 2750 XPV_ALLOW_MIGRATE(); 2751 return (PFN_INVALID); 2752 } 2753 ASSERT(vaddr >= ht->ht_vaddr); 2754 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht)); 2755 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level); 2756 if (ht->ht_level > 0) 2757 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level)); 2758 htable_release(ht); 2759 XPV_ALLOW_MIGRATE(); 2760 return (pfn); 2761 } 2762 2763 /* 2764 * int hat_probe(hat, addr) 2765 * return 0 if no valid mapping is present. 
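 *
 * (Editorial usage note: conceptually
 *
 *	if (hat_probe(kas.a_hat, addr))
 *		... a translation is already present ...
 *
 * segmap is the main consumer, which is why the kmap range gets the
 * special-cased lookup below.)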
Faster version 2766 * of hat_getattr in certain architectures. 2767 */ 2768 int 2769 hat_probe(hat_t *hat, caddr_t addr) 2770 { 2771 uintptr_t vaddr = ALIGN2PAGE(addr); 2772 uint_t entry; 2773 htable_t *ht; 2774 pgcnt_t pg_off; 2775 2776 ASSERT(hat == kas.a_hat || vaddr <= _userlimit); 2777 ASSERT(hat == kas.a_hat || 2778 AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock)); 2779 if (IN_VA_HOLE(vaddr)) 2780 return (0); 2781 2782 /* 2783 * Most common use of hat_probe is from segmap. We special case it 2784 * for performance. 2785 */ 2786 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) { 2787 pg_off = mmu_btop(vaddr - mmu.kmap_addr); 2788 if (mmu.pae_hat) 2789 return (PTE_ISVALID(mmu.kmap_ptes[pg_off])); 2790 else 2791 return (PTE_ISVALID( 2792 ((x86pte32_t *)mmu.kmap_ptes)[pg_off])); 2793 } 2794 2795 ht = htable_getpage(hat, vaddr, &entry); 2796 htable_release(ht); 2797 return (ht != NULL); 2798 } 2799 2800 /* 2801 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM. 2802 */ 2803 static int 2804 is_it_dism(hat_t *hat, caddr_t va) 2805 { 2806 struct seg *seg; 2807 struct shm_data *shmd; 2808 struct spt_data *sptd; 2809 2810 seg = as_findseg(hat->hat_as, va, 0); 2811 ASSERT(seg != NULL); 2812 ASSERT(seg->s_base <= va); 2813 shmd = (struct shm_data *)seg->s_data; 2814 ASSERT(shmd != NULL); 2815 sptd = (struct spt_data *)shmd->shm_sptseg->s_data; 2816 ASSERT(sptd != NULL); 2817 if (sptd->spt_flags & SHM_PAGEABLE) 2818 return (1); 2819 return (0); 2820 } 2821 2822 /* 2823 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(), 2824 * except that we use the ism_hat's existing mappings to determine the pages 2825 * and protections to use for this hat. If we find a full properly aligned 2826 * and sized pagetable, we will attempt to share the pagetable itself. 2827 */ 2828 /*ARGSUSED*/ 2829 int 2830 hat_share( 2831 hat_t *hat, 2832 caddr_t addr, 2833 hat_t *ism_hat, 2834 caddr_t src_addr, 2835 size_t len, /* almost useless value, see below.. */ 2836 uint_t ismszc) 2837 { 2838 uintptr_t vaddr_start = (uintptr_t)addr; 2839 uintptr_t vaddr; 2840 uintptr_t eaddr = vaddr_start + len; 2841 uintptr_t ism_addr_start = (uintptr_t)src_addr; 2842 uintptr_t ism_addr = ism_addr_start; 2843 uintptr_t e_ism_addr = ism_addr + len; 2844 htable_t *ism_ht = NULL; 2845 htable_t *ht; 2846 x86pte_t pte; 2847 page_t *pp; 2848 pfn_t pfn; 2849 level_t l; 2850 pgcnt_t pgcnt; 2851 uint_t prot; 2852 int is_dism; 2853 int flags; 2854 2855 /* 2856 * We might be asked to share an empty DISM hat by as_dup() 2857 */ 2858 ASSERT(hat != kas.a_hat); 2859 ASSERT(eaddr <= _userlimit); 2860 if (!(ism_hat->hat_flags & HAT_SHARED)) { 2861 ASSERT(hat_get_mapped_size(ism_hat) == 0); 2862 return (0); 2863 } 2864 XPV_DISALLOW_MIGRATE(); 2865 2866 /* 2867 * The SPT segment driver often passes us a size larger than there are 2868 * valid mappings. That's because it rounds the segment size up to a 2869 * large pagesize, even if the actual memory mapped by ism_hat is less. 2870 */ 2871 ASSERT(IS_PAGEALIGNED(vaddr_start)); 2872 ASSERT(IS_PAGEALIGNED(ism_addr_start)); 2873 ASSERT(ism_hat->hat_flags & HAT_SHARED); 2874 is_dism = is_it_dism(hat, addr); 2875 while (ism_addr < e_ism_addr) { 2876 /* 2877 * use htable_walk to get the next valid ISM mapping 2878 */ 2879 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr); 2880 if (ism_ht == NULL) 2881 break; 2882 2883 /* 2884 * First check to see if we already share the page table. 
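 *
 * (Editorial example of when sharing can succeed: for a level 0 ISM
 * pagetable on amd64, ism_addr must sit at the very start of that
 * pagetable, vaddr must be aligned to LEVEL_SIZE(1) (2MB), and at
 * least 2MB of the share range must remain; anything smaller or
 * misaligned falls through to the not_shared path, which re-creates
 * the mappings one PTE at a time with the protections copied from the
 * ISM PTEs.)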
2885 */ 2886 l = ism_ht->ht_level; 2887 vaddr = vaddr_start + (ism_addr - ism_addr_start); 2888 ht = htable_lookup(hat, vaddr, l); 2889 if (ht != NULL) { 2890 if (ht->ht_flags & HTABLE_SHARED_PFN) 2891 goto shared; 2892 htable_release(ht); 2893 goto not_shared; 2894 } 2895 2896 /* 2897 * Can't ever share top table. 2898 */ 2899 if (l == mmu.max_level) 2900 goto not_shared; 2901 2902 /* 2903 * Avoid level mismatches later due to DISM faults. 2904 */ 2905 if (is_dism && l > 0) 2906 goto not_shared; 2907 2908 /* 2909 * addresses and lengths must align 2910 * table must be fully populated 2911 * no lower level page tables 2912 */ 2913 if (ism_addr != ism_ht->ht_vaddr || 2914 (vaddr & LEVEL_OFFSET(l + 1)) != 0) 2915 goto not_shared; 2916 2917 /* 2918 * The range of address space must cover a full table. 2919 */ 2920 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1)) 2921 goto not_shared; 2922 2923 /* 2924 * All entries in the ISM page table must be leaf PTEs. 2925 */ 2926 if (l > 0) { 2927 int e; 2928 2929 /* 2930 * We know the 0th is from htable_walk() above. 2931 */ 2932 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) { 2933 x86pte_t pte; 2934 pte = x86pte_get(ism_ht, e); 2935 if (!PTE_ISPAGE(pte, l)) 2936 goto not_shared; 2937 } 2938 } 2939 2940 /* 2941 * share the page table 2942 */ 2943 ht = htable_create(hat, vaddr, l, ism_ht); 2944 shared: 2945 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN); 2946 ASSERT(ht->ht_shares == ism_ht); 2947 hat->hat_ism_pgcnt += 2948 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) << 2949 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 2950 ht->ht_valid_cnt = ism_ht->ht_valid_cnt; 2951 htable_release(ht); 2952 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1); 2953 htable_release(ism_ht); 2954 ism_ht = NULL; 2955 continue; 2956 2957 not_shared: 2958 /* 2959 * Unable to share the page table. Instead we will 2960 * create new mappings from the values in the ISM mappings. 2961 * Figure out what level size mappings to use; 2962 */ 2963 for (l = ism_ht->ht_level; l > 0; --l) { 2964 if (LEVEL_SIZE(l) <= eaddr - vaddr && 2965 (vaddr & LEVEL_OFFSET(l)) == 0) 2966 break; 2967 } 2968 2969 /* 2970 * The ISM mapping might be larger than the share area, 2971 * be careful to truncate it if needed. 2972 */ 2973 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) { 2974 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level)); 2975 } else { 2976 pgcnt = mmu_btop(eaddr - vaddr); 2977 l = 0; 2978 } 2979 2980 pfn = PTE2PFN(pte, ism_ht->ht_level); 2981 ASSERT(pfn != PFN_INVALID); 2982 while (pgcnt > 0) { 2983 /* 2984 * Make a new pte for the PFN for this level. 2985 * Copy protections for the pte from the ISM pte. 2986 */ 2987 pp = page_numtopp_nolock(pfn); 2988 ASSERT(pp != NULL); 2989 2990 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK; 2991 if (PTE_GET(pte, PT_WRITABLE)) 2992 prot |= PROT_WRITE; 2993 if (!PTE_GET(pte, PT_NX)) 2994 prot |= PROT_EXEC; 2995 2996 flags = HAT_LOAD; 2997 if (!is_dism) 2998 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST; 2999 while (hati_load_common(hat, vaddr, pp, prot, flags, 3000 l, pfn) != 0) { 3001 if (l == 0) 3002 panic("hati_load_common() failure"); 3003 --l; 3004 } 3005 3006 vaddr += LEVEL_SIZE(l); 3007 ism_addr += LEVEL_SIZE(l); 3008 pfn += mmu_btop(LEVEL_SIZE(l)); 3009 pgcnt -= mmu_btop(LEVEL_SIZE(l)); 3010 } 3011 } 3012 if (ism_ht != NULL) 3013 htable_release(ism_ht); 3014 XPV_ALLOW_MIGRATE(); 3015 return (0); 3016 } 3017 3018 3019 /* 3020 * hat_unshare() is similar to hat_unload_callback(), but 3021 * we have to look for empty shared pagetables. 
Note that 3022 * hat_unshare() is always invoked against an entire segment. 3023 */ 3024 /*ARGSUSED*/ 3025 void 3026 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc) 3027 { 3028 uint64_t vaddr = (uintptr_t)addr; 3029 uintptr_t eaddr = vaddr + len; 3030 htable_t *ht = NULL; 3031 uint_t need_demaps = 0; 3032 int flags = HAT_UNLOAD_UNMAP; 3033 level_t l; 3034 3035 ASSERT(hat != kas.a_hat); 3036 ASSERT(eaddr <= _userlimit); 3037 ASSERT(IS_PAGEALIGNED(vaddr)); 3038 ASSERT(IS_PAGEALIGNED(eaddr)); 3039 XPV_DISALLOW_MIGRATE(); 3040 3041 /* 3042 * First go through and remove any shared pagetables. 3043 * 3044 * Note that it's ok to delay the TLB shootdown till the entire range is 3045 * finished, because if hat_pageunload() were to unload a shared 3046 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate. 3047 */ 3048 l = mmu.max_page_level; 3049 if (l == mmu.max_level) 3050 --l; 3051 for (; l >= 0; --l) { 3052 for (vaddr = (uintptr_t)addr; vaddr < eaddr; 3053 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) { 3054 ASSERT(!IN_VA_HOLE(vaddr)); 3055 /* 3056 * find a pagetable that maps the current address 3057 */ 3058 ht = htable_lookup(hat, vaddr, l); 3059 if (ht == NULL) 3060 continue; 3061 if (ht->ht_flags & HTABLE_SHARED_PFN) { 3062 /* 3063 * clear page count, set valid_cnt to 0, 3064 * let htable_release() finish the job 3065 */ 3066 hat->hat_ism_pgcnt -= ht->ht_valid_cnt << 3067 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT); 3068 ht->ht_valid_cnt = 0; 3069 need_demaps = 1; 3070 } 3071 htable_release(ht); 3072 } 3073 } 3074 3075 /* 3076 * flush the TLBs - since we're probably dealing with MANY mappings 3077 * we do just one CR3 reload. 3078 */ 3079 if (!(hat->hat_flags & HAT_FREEING) && need_demaps) 3080 hat_tlb_inval(hat, DEMAP_ALL_ADDR); 3081 3082 /* 3083 * Now go back and clean up any unaligned mappings that 3084 * couldn't share pagetables. 3085 */ 3086 if (!is_it_dism(hat, addr)) 3087 flags |= HAT_UNLOAD_UNLOCK; 3088 hat_unload(hat, addr, len, flags); 3089 XPV_ALLOW_MIGRATE(); 3090 } 3091 3092 3093 /* 3094 * hat_reserve() does nothing 3095 */ 3096 /*ARGSUSED*/ 3097 void 3098 hat_reserve(struct as *as, caddr_t addr, size_t len) 3099 { 3100 } 3101 3102 3103 /* 3104 * Called when all mappings to a page should have write permission removed. 3105 * Mostly stolen from hat_pagesync() 3106 */ 3107 static void 3108 hati_page_clrwrt(struct page *pp) 3109 { 3110 hment_t *hm = NULL; 3111 htable_t *ht; 3112 uint_t entry; 3113 x86pte_t old; 3114 x86pte_t new; 3115 uint_t pszc = 0; 3116 3117 XPV_DISALLOW_MIGRATE(); 3118 next_size: 3119 /* 3120 * walk thru the mapping list clearing write permission 3121 */ 3122 x86_hm_enter(pp); 3123 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3124 if (ht->ht_level < pszc) 3125 continue; 3126 old = x86pte_get(ht, entry); 3127 3128 for (;;) { 3129 /* 3130 * Is this mapping of interest? 3131 */ 3132 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum || 3133 PTE_GET(old, PT_WRITABLE) == 0) 3134 break; 3135 3136 /* 3137 * Clear ref/mod writable bits. This requires cross 3138 * calls to ensure any executing TLBs see cleared bits. 
3139 */ 3140 new = old; 3141 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE); 3142 old = hati_update_pte(ht, entry, old, new); 3143 if (old != 0) 3144 continue; 3145 3146 break; 3147 } 3148 } 3149 x86_hm_exit(pp); 3150 while (pszc < pp->p_szc) { 3151 page_t *tpp; 3152 pszc++; 3153 tpp = PP_GROUPLEADER(pp, pszc); 3154 if (pp != tpp) { 3155 pp = tpp; 3156 goto next_size; 3157 } 3158 } 3159 XPV_ALLOW_MIGRATE(); 3160 } 3161 3162 /* 3163 * void hat_page_setattr(pp, flag) 3164 * void hat_page_clrattr(pp, flag) 3165 * used to set/clr ref/mod bits. 3166 */ 3167 void 3168 hat_page_setattr(struct page *pp, uint_t flag) 3169 { 3170 vnode_t *vp = pp->p_vnode; 3171 kmutex_t *vphm = NULL; 3172 page_t **listp; 3173 int noshuffle; 3174 3175 noshuffle = flag & P_NSH; 3176 flag &= ~P_NSH; 3177 3178 if (PP_GETRM(pp, flag) == flag) 3179 return; 3180 3181 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 3182 !noshuffle) { 3183 vphm = page_vnode_mutex(vp); 3184 mutex_enter(vphm); 3185 } 3186 3187 PP_SETRM(pp, flag); 3188 3189 if (vphm != NULL) { 3190 3191 /* 3192 * Some File Systems examine v_pages for NULL w/o 3193 * grabbing the vphm mutex. Must not let it become NULL when 3194 * pp is the only page on the list. 3195 */ 3196 if (pp->p_vpnext != pp) { 3197 page_vpsub(&vp->v_pages, pp); 3198 if (vp->v_pages != NULL) 3199 listp = &vp->v_pages->p_vpprev->p_vpnext; 3200 else 3201 listp = &vp->v_pages; 3202 page_vpadd(listp, pp); 3203 } 3204 mutex_exit(vphm); 3205 } 3206 } 3207 3208 void 3209 hat_page_clrattr(struct page *pp, uint_t flag) 3210 { 3211 vnode_t *vp = pp->p_vnode; 3212 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 3213 3214 /* 3215 * Caller is expected to hold page's io lock for VMODSORT to work 3216 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 3217 * bit is cleared. 3218 * We don't have assert to avoid tripping some existing third party 3219 * code. The dirty page is moved back to top of the v_page list 3220 * after IO is done in pvn_write_done(). 3221 */ 3222 PP_CLRRM(pp, flag); 3223 3224 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 3225 3226 /* 3227 * VMODSORT works by removing write permissions and getting 3228 * a fault when a page is made dirty. At this point 3229 * we need to remove write permission from all mappings 3230 * to this page. 3231 */ 3232 hati_page_clrwrt(pp); 3233 } 3234 } 3235 3236 /* 3237 * If flag is specified, returns 0 if attribute is disabled 3238 * and non zero if enabled. If flag specifes multiple attributes 3239 * then returns 0 if ALL attributes are disabled. This is an advisory 3240 * call. 3241 */ 3242 uint_t 3243 hat_page_getattr(struct page *pp, uint_t flag) 3244 { 3245 return (PP_GETRM(pp, flag)); 3246 } 3247 3248 3249 /* 3250 * common code used by hat_pageunload() and hment_steal() 3251 */ 3252 hment_t * 3253 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry) 3254 { 3255 x86pte_t old_pte; 3256 pfn_t pfn = pp->p_pagenum; 3257 hment_t *hm; 3258 3259 /* 3260 * We need to acquire a hold on the htable in order to 3261 * do the invalidate. We know the htable must exist, since 3262 * unmap's don't release the htable until after removing any 3263 * hment. Having x86_hm_enter() keeps that from proceeding. 3264 */ 3265 htable_acquire(ht); 3266 3267 /* 3268 * Invalidate the PTE and remove the hment. 
3269 */ 3270 old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE); 3271 if (PTE2PFN(old_pte, ht->ht_level) != pfn) { 3272 panic("x86pte_inval() failure found PTE = " FMT_PTE 3273 " pfn being unmapped is %lx ht=0x%lx entry=0x%x", 3274 old_pte, pfn, (uintptr_t)ht, entry); 3275 } 3276 3277 /* 3278 * Clean up all the htable information for this mapping 3279 */ 3280 ASSERT(ht->ht_valid_cnt > 0); 3281 HTABLE_DEC(ht->ht_valid_cnt); 3282 PGCNT_DEC(ht->ht_hat, ht->ht_level); 3283 3284 /* 3285 * sync ref/mod bits to the page_t 3286 */ 3287 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC) 3288 hati_sync_pte_to_page(pp, old_pte, ht->ht_level); 3289 3290 /* 3291 * Remove the mapping list entry for this page. 3292 */ 3293 hm = hment_remove(pp, ht, entry); 3294 3295 /* 3296 * drop the mapping list lock so that we might free the 3297 * hment and htable. 3298 */ 3299 x86_hm_exit(pp); 3300 htable_release(ht); 3301 return (hm); 3302 } 3303 3304 extern int vpm_enable; 3305 /* 3306 * Unload all translations to a page. If the page is a subpage of a large 3307 * page, the large page mappings are also removed. 3308 * 3309 * The forceflags are unused. 3310 */ 3311 3312 /*ARGSUSED*/ 3313 static int 3314 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag) 3315 { 3316 page_t *cur_pp = pp; 3317 hment_t *hm; 3318 hment_t *prev; 3319 htable_t *ht; 3320 uint_t entry; 3321 level_t level; 3322 3323 XPV_DISALLOW_MIGRATE(); 3324 3325 /* 3326 * prevent recursion due to kmem_free() 3327 */ 3328 ++curthread->t_hatdepth; 3329 ASSERT(curthread->t_hatdepth < 16); 3330 3331 #if defined(__amd64) 3332 /* 3333 * clear the vpm ref. 3334 */ 3335 if (vpm_enable) { 3336 pp->p_vpmref = 0; 3337 } 3338 #endif 3339 /* 3340 * The loop with next_size handles pages with multiple pagesize mappings 3341 */ 3342 next_size: 3343 for (;;) { 3344 3345 /* 3346 * Get a mapping list entry 3347 */ 3348 x86_hm_enter(cur_pp); 3349 for (prev = NULL; ; prev = hm) { 3350 hm = hment_walk(cur_pp, &ht, &entry, prev); 3351 if (hm == NULL) { 3352 x86_hm_exit(cur_pp); 3353 3354 /* 3355 * If not part of a larger page, we're done. 3356 */ 3357 if (cur_pp->p_szc <= pg_szcd) { 3358 ASSERT(curthread->t_hatdepth > 0); 3359 --curthread->t_hatdepth; 3360 XPV_ALLOW_MIGRATE(); 3361 return (0); 3362 } 3363 3364 /* 3365 * Else check the next larger page size. 3366 * hat_page_demote() may decrease p_szc 3367 * but that's ok we'll just take an extra 3368 * trip discover there're no larger mappings 3369 * and return. 3370 */ 3371 ++pg_szcd; 3372 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd); 3373 goto next_size; 3374 } 3375 3376 /* 3377 * If this mapping size matches, remove it. 3378 */ 3379 level = ht->ht_level; 3380 if (level == pg_szcd) 3381 break; 3382 } 3383 3384 /* 3385 * Remove the mapping list entry for this page. 3386 * Note this does the x86_hm_exit() for us. 3387 */ 3388 hm = hati_page_unmap(cur_pp, ht, entry); 3389 if (hm != NULL) 3390 hment_free(hm); 3391 } 3392 } 3393 3394 int 3395 hat_pageunload(struct page *pp, uint_t forceflag) 3396 { 3397 ASSERT(PAGE_EXCL(pp)); 3398 return (hati_pageunload(pp, 0, forceflag)); 3399 } 3400 3401 /* 3402 * Unload all large mappings to pp and reduce by 1 p_szc field of every large 3403 * page level that included pp. 3404 * 3405 * pp must be locked EXCL. Even though no other constituent pages are locked 3406 * it's legal to unload large mappings to pp because all constituent pages of 3407 * large locked mappings have to be locked SHARED. 
therefore if we have EXCL 3408 * lock on one of constituent pages none of the large mappings to pp are 3409 * locked. 3410 * 3411 * Change (always decrease) p_szc field starting from the last constituent 3412 * page and ending with root constituent page so that root's pszc always shows 3413 * the area where hat_page_demote() may be active. 3414 * 3415 * This mechanism is only used for file system pages where it's not always 3416 * possible to get EXCL locks on all constituent pages to demote the size code 3417 * (as is done for anonymous or kernel large pages). 3418 */ 3419 void 3420 hat_page_demote(page_t *pp) 3421 { 3422 uint_t pszc; 3423 uint_t rszc; 3424 uint_t szc; 3425 page_t *rootpp; 3426 page_t *firstpp; 3427 page_t *lastpp; 3428 pgcnt_t pgcnt; 3429 3430 ASSERT(PAGE_EXCL(pp)); 3431 ASSERT(!PP_ISFREE(pp)); 3432 ASSERT(page_szc_lock_assert(pp)); 3433 3434 if (pp->p_szc == 0) 3435 return; 3436 3437 rootpp = PP_GROUPLEADER(pp, 1); 3438 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD); 3439 3440 /* 3441 * all large mappings to pp are gone 3442 * and no new can be setup since pp is locked exclusively. 3443 * 3444 * Lock the root to make sure there's only one hat_page_demote() 3445 * outstanding within the area of this root's pszc. 3446 * 3447 * Second potential hat_page_demote() is already eliminated by upper 3448 * VM layer via page_szc_lock() but we don't rely on it and use our 3449 * own locking (so that upper layer locking can be changed without 3450 * assumptions that hat depends on upper layer VM to prevent multiple 3451 * hat_page_demote() to be issued simultaneously to the same large 3452 * page). 3453 */ 3454 again: 3455 pszc = pp->p_szc; 3456 if (pszc == 0) 3457 return; 3458 rootpp = PP_GROUPLEADER(pp, pszc); 3459 x86_hm_enter(rootpp); 3460 /* 3461 * If root's p_szc is different from pszc we raced with another 3462 * hat_page_demote(). Drop the lock and try to find the root again. 3463 * If root's p_szc is greater than pszc previous hat_page_demote() is 3464 * not done yet. Take and release mlist lock of root's root to wait 3465 * for previous hat_page_demote() to complete. 3466 */ 3467 if ((rszc = rootpp->p_szc) != pszc) { 3468 x86_hm_exit(rootpp); 3469 if (rszc > pszc) { 3470 /* p_szc of a locked non free page can't increase */ 3471 ASSERT(pp != rootpp); 3472 3473 rootpp = PP_GROUPLEADER(rootpp, rszc); 3474 x86_hm_enter(rootpp); 3475 x86_hm_exit(rootpp); 3476 } 3477 goto again; 3478 } 3479 ASSERT(pp->p_szc == pszc); 3480 3481 /* 3482 * Decrement by 1 p_szc of every constituent page of a region that 3483 * covered pp. For example if original szc is 3 it gets changed to 2 3484 * everywhere except in region 2 that covered pp. Region 2 that 3485 * covered pp gets demoted to 1 everywhere except in region 1 that 3486 * covered pp. The region 1 that covered pp is demoted to region 3487 * 0. It's done this way because from region 3 we removed level 3 3488 * mappings, from region 2 that covered pp we removed level 2 mappings 3489 * and from region 1 that covered pp we removed level 1 mappings. All 3490 * changes are done from from high pfn's to low pfn's so that roots 3491 * are changed last allowing one to know the largest region where 3492 * hat_page_demote() is stil active by only looking at the root page. 3493 * 3494 * This algorithm is implemented in 2 while loops. 
First loop changes 3495 * p_szc of pages to the right of pp's level 1 region and second 3496 * loop changes p_szc of pages of level 1 region that covers pp 3497 * and all pages to the left of level 1 region that covers pp. 3498 * In the first loop p_szc keeps dropping with every iteration 3499 * and in the second loop it keeps increasing with every iteration. 3500 * 3501 * First loop description: Demote pages to the right of pp outside of 3502 * level 1 region that covers pp. In every iteration of the while 3503 * loop below find the last page of szc region and the first page of 3504 * (szc - 1) region that is immediately to the right of (szc - 1) 3505 * region that covers pp. From last such page to first such page 3506 * change every page's szc to szc - 1. Decrement szc and continue 3507 * looping until szc is 1. If pp belongs to the last (szc - 1) region 3508 * of szc region skip to the next iteration. 3509 */ 3510 szc = pszc; 3511 while (szc > 1) { 3512 lastpp = PP_GROUPLEADER(pp, szc); 3513 pgcnt = page_get_pagecnt(szc); 3514 lastpp += pgcnt - 1; 3515 firstpp = PP_GROUPLEADER(pp, (szc - 1)); 3516 pgcnt = page_get_pagecnt(szc - 1); 3517 if (lastpp - firstpp < pgcnt) { 3518 szc--; 3519 continue; 3520 } 3521 firstpp += pgcnt; 3522 while (lastpp != firstpp) { 3523 ASSERT(lastpp->p_szc == pszc); 3524 lastpp->p_szc = szc - 1; 3525 lastpp--; 3526 } 3527 firstpp->p_szc = szc - 1; 3528 szc--; 3529 } 3530 3531 /* 3532 * Second loop description: 3533 * First iteration changes p_szc to 0 of every 3534 * page of level 1 region that covers pp. 3535 * Subsequent iterations find last page of szc region 3536 * immediately to the left of szc region that covered pp 3537 * and first page of (szc + 1) region that covers pp. 3538 * From last to first page change p_szc of every page to szc. 3539 * Increment szc and continue looping until szc is pszc. 3540 * If pp belongs to the first szc region of (szc + 1) region 3541 * skip to the next iteration.
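 *
 * (Editorial worked example: if pszc == 2, i.e. pp was part of a 1GB
 * large page on amd64, the first loop sets p_szc to 1 for the 4K
 * pages of the 1GB region that lie to the right of pp's 2MB region;
 * the second loop then sets p_szc to 0 inside pp's 2MB region and to
 * 1 for the pages to its left, stopping at rootpp.  The net result is
 * the one described above: szc 2 becomes 1 everywhere except the 2MB
 * region covering pp, which becomes 0.)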
3542 * 3543 */ 3544 szc = 0; 3545 while (szc < pszc) { 3546 firstpp = PP_GROUPLEADER(pp, (szc + 1)); 3547 if (szc == 0) { 3548 pgcnt = page_get_pagecnt(1); 3549 lastpp = firstpp + (pgcnt - 1); 3550 } else { 3551 lastpp = PP_GROUPLEADER(pp, szc); 3552 if (firstpp == lastpp) { 3553 szc++; 3554 continue; 3555 } 3556 lastpp--; 3557 pgcnt = page_get_pagecnt(szc); 3558 } 3559 while (lastpp != firstpp) { 3560 ASSERT(lastpp->p_szc == pszc); 3561 lastpp->p_szc = szc; 3562 lastpp--; 3563 } 3564 firstpp->p_szc = szc; 3565 if (firstpp == rootpp) 3566 break; 3567 szc++; 3568 } 3569 x86_hm_exit(rootpp); 3570 } 3571 3572 /* 3573 * get hw stats from hardware into page struct and reset hw stats 3574 * returns attributes of page 3575 * Flags for hat_pagesync, hat_getstat, hat_sync 3576 * 3577 * define HAT_SYNC_ZERORM 0x01 3578 * 3579 * Additional flags for hat_pagesync 3580 * 3581 * define HAT_SYNC_STOPON_REF 0x02 3582 * define HAT_SYNC_STOPON_MOD 0x04 3583 * define HAT_SYNC_STOPON_RM 0x06 3584 * define HAT_SYNC_STOPON_SHARED 0x08 3585 */ 3586 uint_t 3587 hat_pagesync(struct page *pp, uint_t flags) 3588 { 3589 hment_t *hm = NULL; 3590 htable_t *ht; 3591 uint_t entry; 3592 x86pte_t old, save_old; 3593 x86pte_t new; 3594 uchar_t nrmbits = P_REF|P_MOD|P_RO; 3595 extern ulong_t po_share; 3596 page_t *save_pp = pp; 3597 uint_t pszc = 0; 3598 3599 ASSERT(PAGE_LOCKED(pp) || panicstr); 3600 3601 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD)) 3602 return (pp->p_nrm & nrmbits); 3603 3604 if ((flags & HAT_SYNC_ZERORM) == 0) { 3605 3606 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp)) 3607 return (pp->p_nrm & nrmbits); 3608 3609 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp)) 3610 return (pp->p_nrm & nrmbits); 3611 3612 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 && 3613 hat_page_getshare(pp) > po_share) { 3614 if (PP_ISRO(pp)) 3615 PP_SETREF(pp); 3616 return (pp->p_nrm & nrmbits); 3617 } 3618 } 3619 3620 XPV_DISALLOW_MIGRATE(); 3621 next_size: 3622 /* 3623 * walk thru the mapping list syncing (and clearing) ref/mod bits. 3624 */ 3625 x86_hm_enter(pp); 3626 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) { 3627 if (ht->ht_level < pszc) 3628 continue; 3629 old = x86pte_get(ht, entry); 3630 try_again: 3631 3632 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum); 3633 3634 if (PTE_GET(old, PT_REF | PT_MOD) == 0) 3635 continue; 3636 3637 save_old = old; 3638 if ((flags & HAT_SYNC_ZERORM) != 0) { 3639 3640 /* 3641 * Need to clear ref or mod bits. Need to demap 3642 * to make sure any executing TLBs see cleared bits. 3643 */ 3644 new = old; 3645 PTE_CLR(new, PT_REF | PT_MOD); 3646 old = hati_update_pte(ht, entry, old, new); 3647 if (old != 0) 3648 goto try_again; 3649 3650 old = save_old; 3651 } 3652 3653 /* 3654 * Sync the PTE 3655 */ 3656 if (!(flags & HAT_SYNC_ZERORM) && 3657 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC) 3658 hati_sync_pte_to_page(pp, old, ht->ht_level); 3659 3660 /* 3661 * can stop short if we found a ref'd or mod'd page 3662 */ 3663 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) || 3664 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) { 3665 x86_hm_exit(pp); 3666 goto done; 3667 } 3668 } 3669 x86_hm_exit(pp); 3670 while (pszc < pp->p_szc) { 3671 page_t *tpp; 3672 pszc++; 3673 tpp = PP_GROUPLEADER(pp, pszc); 3674 if (pp != tpp) { 3675 pp = tpp; 3676 goto next_size; 3677 } 3678 } 3679 done: 3680 XPV_ALLOW_MIGRATE(); 3681 return (save_pp->p_nrm & nrmbits); 3682 } 3683 3684 /* 3685 * returns approx number of mappings to this pp. 
A return of 0 implies 3686 * there are no mappings to the page. 3687 */ 3688 ulong_t 3689 hat_page_getshare(page_t *pp) 3690 { 3691 uint_t cnt; 3692 cnt = hment_mapcnt(pp); 3693 #if defined(__amd64) 3694 if (vpm_enable && pp->p_vpmref) { 3695 cnt += 1; 3696 } 3697 #endif 3698 return (cnt); 3699 } 3700 3701 /* 3702 * Return 1 the number of mappings exceeds sh_thresh. Return 0 3703 * otherwise. 3704 */ 3705 int 3706 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 3707 { 3708 return (hat_page_getshare(pp) > sh_thresh); 3709 } 3710 3711 /* 3712 * hat_softlock isn't supported anymore 3713 */ 3714 /*ARGSUSED*/ 3715 faultcode_t 3716 hat_softlock( 3717 hat_t *hat, 3718 caddr_t addr, 3719 size_t *len, 3720 struct page **page_array, 3721 uint_t flags) 3722 { 3723 return (FC_NOSUPPORT); 3724 } 3725 3726 3727 3728 /* 3729 * Routine to expose supported HAT features to platform independent code. 3730 */ 3731 /*ARGSUSED*/ 3732 int 3733 hat_supported(enum hat_features feature, void *arg) 3734 { 3735 switch (feature) { 3736 3737 case HAT_SHARED_PT: /* this is really ISM */ 3738 return (1); 3739 3740 case HAT_DYNAMIC_ISM_UNMAP: 3741 return (0); 3742 3743 case HAT_VMODSORT: 3744 return (1); 3745 3746 case HAT_SHARED_REGIONS: 3747 return (0); 3748 3749 default: 3750 panic("hat_supported() - unknown feature"); 3751 } 3752 return (0); 3753 } 3754 3755 /* 3756 * Called when a thread is exiting and has been switched to the kernel AS 3757 */ 3758 void 3759 hat_thread_exit(kthread_t *thd) 3760 { 3761 ASSERT(thd->t_procp->p_as == &kas); 3762 XPV_DISALLOW_MIGRATE(); 3763 hat_switch(thd->t_procp->p_as->a_hat); 3764 XPV_ALLOW_MIGRATE(); 3765 } 3766 3767 /* 3768 * Setup the given brand new hat structure as the new HAT on this cpu's mmu. 3769 */ 3770 /*ARGSUSED*/ 3771 void 3772 hat_setup(hat_t *hat, int flags) 3773 { 3774 XPV_DISALLOW_MIGRATE(); 3775 kpreempt_disable(); 3776 3777 hat_switch(hat); 3778 3779 kpreempt_enable(); 3780 XPV_ALLOW_MIGRATE(); 3781 } 3782 3783 /* 3784 * Prepare for a CPU private mapping for the given address. 3785 * 3786 * The address can only be used from a single CPU and can be remapped 3787 * using hat_mempte_remap(). Return the address of the PTE. 3788 * 3789 * We do the htable_create() if necessary and increment the valid count so 3790 * the htable can't disappear. We also hat_devload() the page table into 3791 * kernel so that the PTE is quickly accessed. 3792 */ 3793 hat_mempte_t 3794 hat_mempte_setup(caddr_t addr) 3795 { 3796 uintptr_t va = (uintptr_t)addr; 3797 htable_t *ht; 3798 uint_t entry; 3799 x86pte_t oldpte; 3800 hat_mempte_t p; 3801 3802 ASSERT(IS_PAGEALIGNED(va)); 3803 ASSERT(!IN_VA_HOLE(va)); 3804 ++curthread->t_hatdepth; 3805 XPV_DISALLOW_MIGRATE(); 3806 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0); 3807 if (ht == NULL) { 3808 ht = htable_create(kas.a_hat, va, 0, NULL); 3809 entry = htable_va2entry(va, ht); 3810 ASSERT(ht->ht_level == 0); 3811 oldpte = x86pte_get(ht, entry); 3812 } 3813 if (PTE_ISVALID(oldpte)) 3814 panic("hat_mempte_setup(): address already mapped" 3815 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte); 3816 3817 /* 3818 * increment ht_valid_cnt so that the pagetable can't disappear 3819 */ 3820 HTABLE_INC(ht->ht_valid_cnt); 3821 3822 /* 3823 * return the PTE physical address to the caller. 3824 */ 3825 htable_release(ht); 3826 XPV_ALLOW_MIGRATE(); 3827 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry); 3828 --curthread->t_hatdepth; 3829 return (p); 3830 } 3831 3832 /* 3833 * Release a CPU private mapping for the given address. 
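 *
 * (Editorial sketch, not part of the original source: the typical
 * lifecycle of the CPU-private PTE interface looks like
 *
 *	hat_mempte_t pte_pa;
 *	caddr_t va;		page-aligned kernel VA chosen by caller
 *
 *	pte_pa = hat_mempte_setup(va);
 *	kpreempt_disable();
 *	hat_mempte_remap(pfn, va, pte_pa, attr, HAT_LOAD);
 *	... use the page through va on this CPU only ...
 *	kpreempt_enable();
 *	hat_mempte_release(va, pte_pa);
 *
 * where attr stands for whatever protection/caching attributes the
 * caller needs; remap and release flush only the local TLB entry.)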
3834 * We decrement the htable valid count so it might be destroyed. 3835 */ 3836 /*ARGSUSED1*/ 3837 void 3838 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa) 3839 { 3840 htable_t *ht; 3841 3842 XPV_DISALLOW_MIGRATE(); 3843 /* 3844 * invalidate any left over mapping and decrement the htable valid count 3845 */ 3846 #ifdef __xpv 3847 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0, 3848 UVMF_INVLPG | UVMF_LOCAL)) 3849 panic("HYPERVISOR_update_va_mapping() failed"); 3850 #else 3851 { 3852 x86pte_t *pteptr; 3853 3854 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3855 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3856 if (mmu.pae_hat) 3857 *pteptr = 0; 3858 else 3859 *(x86pte32_t *)pteptr = 0; 3860 mmu_tlbflush_entry(addr); 3861 x86pte_mapout(); 3862 } 3863 #endif 3864 3865 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0); 3866 if (ht == NULL) 3867 panic("hat_mempte_release(): invalid address"); 3868 ASSERT(ht->ht_level == 0); 3869 HTABLE_DEC(ht->ht_valid_cnt); 3870 htable_release(ht); 3871 XPV_ALLOW_MIGRATE(); 3872 } 3873 3874 /* 3875 * Apply a temporary CPU private mapping to a page. We flush the TLB only 3876 * on this CPU, so this ought to have been called with preemption disabled. 3877 */ 3878 void 3879 hat_mempte_remap( 3880 pfn_t pfn, 3881 caddr_t addr, 3882 hat_mempte_t pte_pa, 3883 uint_t attr, 3884 uint_t flags) 3885 { 3886 uintptr_t va = (uintptr_t)addr; 3887 x86pte_t pte; 3888 3889 /* 3890 * Remap the given PTE to the new page's PFN. Invalidate only 3891 * on this CPU. 3892 */ 3893 #ifdef DEBUG 3894 htable_t *ht; 3895 uint_t entry; 3896 3897 ASSERT(IS_PAGEALIGNED(va)); 3898 ASSERT(!IN_VA_HOLE(va)); 3899 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0); 3900 ASSERT(ht != NULL); 3901 ASSERT(ht->ht_level == 0); 3902 ASSERT(ht->ht_valid_cnt > 0); 3903 ASSERT(ht->ht_pfn == mmu_btop(pte_pa)); 3904 htable_release(ht); 3905 #endif 3906 XPV_DISALLOW_MIGRATE(); 3907 pte = hati_mkpte(pfn, attr, 0, flags); 3908 #ifdef __xpv 3909 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL)) 3910 panic("HYPERVISOR_update_va_mapping() failed"); 3911 #else 3912 { 3913 x86pte_t *pteptr; 3914 3915 pteptr = x86pte_mapin(mmu_btop(pte_pa), 3916 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL); 3917 if (mmu.pae_hat) 3918 *(x86pte_t *)pteptr = pte; 3919 else 3920 *(x86pte32_t *)pteptr = (x86pte32_t)pte; 3921 mmu_tlbflush_entry(addr); 3922 x86pte_mapout(); 3923 } 3924 #endif 3925 XPV_ALLOW_MIGRATE(); 3926 } 3927 3928 3929 3930 /* 3931 * Hat locking functions 3932 * XXX - these two functions are currently being used by hatstats 3933 * they can be removed by using a per-as mutex for hatstats. 3934 */ 3935 void 3936 hat_enter(hat_t *hat) 3937 { 3938 mutex_enter(&hat->hat_mutex); 3939 } 3940 3941 void 3942 hat_exit(hat_t *hat) 3943 { 3944 mutex_exit(&hat->hat_mutex); 3945 } 3946 3947 /* 3948 * HAT part of cpu initialization. 3949 */ 3950 void 3951 hat_cpu_online(struct cpu *cpup) 3952 { 3953 if (cpup != CPU) { 3954 x86pte_cpu_init(cpup); 3955 hat_vlp_setup(cpup); 3956 } 3957 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id); 3958 } 3959 3960 /* 3961 * HAT part of cpu deletion. 3962 * (currently, we only call this after the cpu is safely passivated.) 3963 */ 3964 void 3965 hat_cpu_offline(struct cpu *cpup) 3966 { 3967 ASSERT(cpup != CPU); 3968 3969 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id); 3970 hat_vlp_teardown(cpup); 3971 x86pte_cpu_fini(cpup); 3972 } 3973 3974 /* 3975 * Function called after all CPUs are brought online. 
3976 * Used to remove low address boot mappings. 3977 */ 3978 void 3979 clear_boot_mappings(uintptr_t low, uintptr_t high) 3980 { 3981 uintptr_t vaddr = low; 3982 htable_t *ht = NULL; 3983 level_t level; 3984 uint_t entry; 3985 x86pte_t pte; 3986 3987 /* 3988 * On 1st CPU we can unload the prom mappings, basically we blow away 3989 * all virtual mappings under _userlimit. 3990 */ 3991 while (vaddr < high) { 3992 pte = htable_walk(kas.a_hat, &ht, &vaddr, high); 3993 if (ht == NULL) 3994 break; 3995 3996 level = ht->ht_level; 3997 entry = htable_va2entry(vaddr, ht); 3998 ASSERT(level <= mmu.max_page_level); 3999 ASSERT(PTE_ISPAGE(pte, level)); 4000 4001 /* 4002 * Unload the mapping from the page tables. 4003 */ 4004 (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE); 4005 ASSERT(ht->ht_valid_cnt > 0); 4006 HTABLE_DEC(ht->ht_valid_cnt); 4007 PGCNT_DEC(ht->ht_hat, ht->ht_level); 4008 4009 vaddr += LEVEL_SIZE(ht->ht_level); 4010 } 4011 if (ht) 4012 htable_release(ht); 4013 } 4014 4015 /* 4016 * Atomically update a new translation for a single page. If the 4017 * currently installed PTE doesn't match the value we expect to find, 4018 * it's not updated and we return the PTE we found. 4019 * 4020 * If activating nosync or NOWRITE and the page was modified we need to sync 4021 * with the page_t. Also sync with page_t if clearing ref/mod bits. 4022 */ 4023 static x86pte_t 4024 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new) 4025 { 4026 page_t *pp; 4027 uint_t rm = 0; 4028 x86pte_t replaced; 4029 4030 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC && 4031 PTE_GET(expected, PT_MOD | PT_REF) && 4032 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) || 4033 !PTE_GET(new, PT_MOD | PT_REF))) { 4034 4035 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level))); 4036 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level)); 4037 ASSERT(pp != NULL); 4038 if (PTE_GET(expected, PT_MOD)) 4039 rm |= P_MOD; 4040 if (PTE_GET(expected, PT_REF)) 4041 rm |= P_REF; 4042 PTE_CLR(new, PT_MOD | PT_REF); 4043 } 4044 4045 replaced = x86pte_update(ht, entry, expected, new); 4046 if (replaced != expected) 4047 return (replaced); 4048 4049 if (rm) { 4050 /* 4051 * sync to all constituent pages of a large page 4052 */ 4053 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level); 4054 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt)); 4055 while (pgcnt-- > 0) { 4056 /* 4057 * hat_page_demote() can't decrease 4058 * pszc below this mapping size 4059 * since large mapping existed after we 4060 * took mlist lock. 
4061 */ 4062 ASSERT(pp->p_szc >= ht->ht_level); 4063 hat_page_setattr(pp, rm); 4064 ++pp; 4065 } 4066 } 4067 4068 return (0); 4069 } 4070 4071 /* ARGSUSED */ 4072 void 4073 hat_join_srd(struct hat *hat, vnode_t *evp) 4074 { 4075 } 4076 4077 /* ARGSUSED */ 4078 hat_region_cookie_t 4079 hat_join_region(struct hat *hat, 4080 caddr_t r_saddr, 4081 size_t r_size, 4082 void *r_obj, 4083 u_offset_t r_objoff, 4084 uchar_t r_perm, 4085 uchar_t r_pgszc, 4086 hat_rgn_cb_func_t r_cb_function, 4087 uint_t flags) 4088 { 4089 panic("No shared region support on x86"); 4090 return (HAT_INVALID_REGION_COOKIE); 4091 } 4092 4093 /* ARGSUSED */ 4094 void 4095 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags) 4096 { 4097 panic("No shared region support on x86"); 4098 } 4099 4100 /* ARGSUSED */ 4101 void 4102 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie) 4103 { 4104 panic("No shared region support on x86"); 4105 } 4106 4107 4108 /* 4109 * Kernel Physical Mapping (kpm) facility 4110 * 4111 * Most of the routines needed to support segkpm are almost no-ops on the 4112 * x86 platform. We map in the entire segment when it is created and leave 4113 * it mapped in, so there is no additional work required to set up and tear 4114 * down individual mappings. All of these routines were created to support 4115 * SPARC platforms that have to avoid aliasing in their virtually indexed 4116 * caches. 4117 * 4118 * Most of the routines have sanity checks in them (e.g. verifying that the 4119 * passed-in page is locked). We don't actually care about most of these 4120 * checks on x86, but we leave them in place to identify problems in the 4121 * upper levels. 4122 */ 4123 4124 /* 4125 * Map in a locked page and return the vaddr. 4126 */ 4127 /*ARGSUSED*/ 4128 caddr_t 4129 hat_kpm_mapin(struct page *pp, struct kpme *kpme) 4130 { 4131 caddr_t vaddr; 4132 4133 #ifdef DEBUG 4134 if (kpm_enable == 0) { 4135 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n"); 4136 return ((caddr_t)NULL); 4137 } 4138 4139 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4140 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n"); 4141 return ((caddr_t)NULL); 4142 } 4143 #endif 4144 4145 vaddr = hat_kpm_page2va(pp, 1); 4146 4147 return (vaddr); 4148 } 4149 4150 /* 4151 * Mapout a locked page. 4152 */ 4153 /*ARGSUSED*/ 4154 void 4155 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr) 4156 { 4157 #ifdef DEBUG 4158 if (kpm_enable == 0) { 4159 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n"); 4160 return; 4161 } 4162 4163 if (IS_KPM_ADDR(vaddr) == 0) { 4164 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n"); 4165 return; 4166 } 4167 4168 if (pp == NULL || PAGE_LOCKED(pp) == 0) { 4169 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n"); 4170 return; 4171 } 4172 #endif 4173 } 4174 4175 /* 4176 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical 4177 * memory addresses that are not described by a page_t. It can 4178 * also be used for normal pages that are not locked, but beware 4179 * this is dangerous - no locking is performed, so the identity of 4180 * the page could change. hat_kpm_mapin_pfn is not supported when 4181 * vac_colors > 1, because the chosen va depends on the page identity, 4182 * which could change. 4183 * The caller must only pass pfn's for valid physical addresses; violation 4184 * of this rule will cause panic. 
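 *
 * (Editorial illustration: on x86 the kpm segment is a linear mapping
 * of physical memory, so the translations are pure arithmetic, e.g.
 *
 *	caddr_t va = hat_kpm_pfn2va(pfn);	kpm_vbase + mmu_ptob(pfn)
 *	pfn_t pf   = hat_kpm_va2pfn(va);	the inverse mapping
 *
 * and hat_kpm_mapin()/hat_kpm_mapout() therefore do no mapping work.)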
4185 */ 4186 caddr_t 4187 hat_kpm_mapin_pfn(pfn_t pfn) 4188 { 4189 caddr_t paddr, vaddr; 4190 4191 if (kpm_enable == 0) 4192 return ((caddr_t)NULL); 4193 4194 paddr = (caddr_t)ptob(pfn); 4195 vaddr = (uintptr_t)kpm_vbase + paddr; 4196 4197 return ((caddr_t)vaddr); 4198 } 4199 4200 /*ARGSUSED*/ 4201 void 4202 hat_kpm_mapout_pfn(pfn_t pfn) 4203 { 4204 /* empty */ 4205 } 4206 4207 /* 4208 * Return the kpm virtual address for a specific pfn 4209 */ 4210 caddr_t 4211 hat_kpm_pfn2va(pfn_t pfn) 4212 { 4213 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn); 4214 4215 ASSERT(!pfn_is_foreign(pfn)); 4216 return ((caddr_t)vaddr); 4217 } 4218 4219 /* 4220 * Return the kpm virtual address for the page at pp. 4221 */ 4222 /*ARGSUSED*/ 4223 caddr_t 4224 hat_kpm_page2va(struct page *pp, int checkswap) 4225 { 4226 return (hat_kpm_pfn2va(pp->p_pagenum)); 4227 } 4228 4229 /* 4230 * Return the page frame number for the kpm virtual address vaddr. 4231 */ 4232 pfn_t 4233 hat_kpm_va2pfn(caddr_t vaddr) 4234 { 4235 pfn_t pfn; 4236 4237 ASSERT(IS_KPM_ADDR(vaddr)); 4238 4239 pfn = (pfn_t)btop(vaddr - kpm_vbase); 4240 4241 return (pfn); 4242 } 4243 4244 4245 /* 4246 * Return the page for the kpm virtual address vaddr. 4247 */ 4248 page_t * 4249 hat_kpm_vaddr2page(caddr_t vaddr) 4250 { 4251 pfn_t pfn; 4252 4253 ASSERT(IS_KPM_ADDR(vaddr)); 4254 4255 pfn = hat_kpm_va2pfn(vaddr); 4256 4257 return (page_numtopp_nolock(pfn)); 4258 } 4259 4260 /* 4261 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a 4262 * KPM page. This should never happen on x86 4263 */ 4264 int 4265 hat_kpm_fault(hat_t *hat, caddr_t vaddr) 4266 { 4267 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p", 4268 (void *)hat, (void *)vaddr); 4269 4270 return (0); 4271 } 4272 4273 /*ARGSUSED*/ 4274 void 4275 hat_kpm_mseghash_clear(int nentries) 4276 {} 4277 4278 /*ARGSUSED*/ 4279 void 4280 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp) 4281 {} 4282 4283 #ifndef __xpv 4284 void 4285 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs, 4286 offset_t kpm_pages_off) 4287 { 4288 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off)); 4289 pfn_t base, end; 4290 4291 /* 4292 * kphysm_add_memory_dynamic() does not set nkpmpgs 4293 * when page_t memory is externally allocated. That 4294 * code must properly calculate nkpmpgs in all cases 4295 * if nkpmpgs needs to be used at some point. 4296 */ 4297 4298 /* 4299 * The meta (page_t) pages for dynamically added memory are allocated 4300 * either from the incoming memory itself or from existing memory. 4301 * In the former case the base of the incoming pages will be different 4302 * than the base of the dynamic segment so call memseg_get_start() to 4303 * get the actual base of the incoming memory for each case. 4304 */ 4305 4306 base = memseg_get_start(msp); 4307 end = msp->pages_end; 4308 4309 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base), 4310 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE, 4311 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST); 4312 } 4313 4314 void 4315 hat_kpm_addmem_mseg_insert(struct memseg *msp) 4316 { 4317 _NOTE(ARGUNUSED(msp)); 4318 } 4319 4320 void 4321 hat_kpm_addmem_memsegs_update(struct memseg *msp) 4322 { 4323 _NOTE(ARGUNUSED(msp)); 4324 } 4325 4326 /* 4327 * Return end of metadata for an already setup memseg. 4328 * X86 platforms don't need per-page meta data to support kpm. 
4329 */ 4330 caddr_t 4331 hat_kpm_mseg_reuse(struct memseg *msp) 4332 { 4333 return ((caddr_t)msp->epages); 4334 } 4335 4336 void 4337 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp) 4338 { 4339 _NOTE(ARGUNUSED(msp, mspp)); 4340 ASSERT(0); 4341 } 4342 4343 void 4344 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp, 4345 struct memseg *lo, struct memseg *mid, struct memseg *hi) 4346 { 4347 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi)); 4348 ASSERT(0); 4349 } 4350 4351 /* 4352 * Walk the memsegs chain, applying func to each memseg span. 4353 */ 4354 void 4355 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg) 4356 { 4357 pfn_t pbase, pend; 4358 void *base; 4359 size_t size; 4360 struct memseg *msp; 4361 4362 for (msp = memsegs; msp; msp = msp->next) { 4363 pbase = msp->pages_base; 4364 pend = msp->pages_end; 4365 base = ptob(pbase) + kpm_vbase; 4366 size = ptob(pend - pbase); 4367 func(arg, base, size); 4368 } 4369 } 4370 4371 #else /* __xpv */ 4372 4373 /* 4374 * There are specific Hypervisor calls to establish and remove mappings 4375 * to grant table references and the privcmd driver. We have to ensure 4376 * that a page table actually exists. 4377 */ 4378 void 4379 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma) 4380 { 4381 maddr_t base_ma; 4382 htable_t *ht; 4383 uint_t entry; 4384 4385 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4386 XPV_DISALLOW_MIGRATE(); 4387 ht = htable_create(hat, (uintptr_t)addr, 0, NULL); 4388 4389 /* 4390 * if an address for pte_ma is passed in, return the MA of the pte 4391 * for this specific address. This address is only valid as long 4392 * as the htable stays locked. 4393 */ 4394 if (pte_ma != NULL) { 4395 entry = htable_va2entry((uintptr_t)addr, ht); 4396 base_ma = pa_to_ma(ptob(ht->ht_pfn)); 4397 *pte_ma = base_ma + (entry << mmu.pte_size_shift); 4398 } 4399 XPV_ALLOW_MIGRATE(); 4400 } 4401 4402 void 4403 hat_release_mapping(hat_t *hat, caddr_t addr) 4404 { 4405 htable_t *ht; 4406 4407 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE)); 4408 XPV_DISALLOW_MIGRATE(); 4409 ht = htable_lookup(hat, (uintptr_t)addr, 0); 4410 ASSERT(ht != NULL); 4411 ASSERT(ht->ht_busy >= 2); 4412 htable_release(ht); 4413 htable_release(ht); 4414 XPV_ALLOW_MIGRATE(); 4415 } 4416 #endif /* __xpv */