1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved. 23 */ 24 /* 25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved. 26 */ 27 28 /* 29 * VM - Hardware Address Translation management for Spitfire MMU. 30 * 31 * This file implements the machine specific hardware translation 32 * needed by the VM system. The machine independent interface is 33 * described in <vm/hat.h> while the machine dependent interface 34 * and data structures are described in <vm/hat_sfmmu.h>. 35 * 36 * The hat layer manages the address translation hardware as a cache 37 * driven by calls from the higher levels in the VM system. 38 */ 39 40 #include <sys/types.h> 41 #include <sys/kstat.h> 42 #include <vm/hat.h> 43 #include <vm/hat_sfmmu.h> 44 #include <vm/page.h> 45 #include <sys/pte.h> 46 #include <sys/systm.h> 47 #include <sys/mman.h> 48 #include <sys/sysmacros.h> 49 #include <sys/machparam.h> 50 #include <sys/vtrace.h> 51 #include <sys/kmem.h> 52 #include <sys/mmu.h> 53 #include <sys/cmn_err.h> 54 #include <sys/cpu.h> 55 #include <sys/cpuvar.h> 56 #include <sys/debug.h> 57 #include <sys/lgrp.h> 58 #include <sys/archsystm.h> 59 #include <sys/machsystm.h> 60 #include <sys/vmsystm.h> 61 #include <vm/as.h> 62 #include <vm/seg.h> 63 #include <vm/seg_kp.h> 64 #include <vm/seg_kmem.h> 65 #include <vm/seg_kpm.h> 66 #include <vm/rm.h> 67 #include <sys/t_lock.h> 68 #include <sys/obpdefs.h> 69 #include <sys/vm_machparam.h> 70 #include <sys/var.h> 71 #include <sys/trap.h> 72 #include <sys/machtrap.h> 73 #include <sys/scb.h> 74 #include <sys/bitmap.h> 75 #include <sys/machlock.h> 76 #include <sys/membar.h> 77 #include <sys/atomic.h> 78 #include <sys/cpu_module.h> 79 #include <sys/prom_debug.h> 80 #include <sys/ksynch.h> 81 #include <sys/mem_config.h> 82 #include <sys/mem_cage.h> 83 #include <vm/vm_dep.h> 84 #include <sys/fpu/fpusystm.h> 85 #include <vm/mach_kpm.h> 86 #include <sys/callb.h> 87 88 #ifdef DEBUG 89 #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len) \ 90 if (SFMMU_IS_SHMERID_VALID(rid)) { \ 91 caddr_t _eaddr = (saddr) + (len); \ 92 sf_srd_t *_srdp; \ 93 sf_region_t *_rgnp; \ 94 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 95 ASSERT(SF_RGNMAP_TEST(hat->sfmmu_hmeregion_map, rid)); \ 96 ASSERT((hat) != ksfmmup); \ 97 _srdp = (hat)->sfmmu_srdp; \ 98 ASSERT(_srdp != NULL); \ 99 ASSERT(_srdp->srd_refcnt != 0); \ 100 _rgnp = _srdp->srd_hmergnp[(rid)]; \ 101 ASSERT(_rgnp != NULL && _rgnp->rgn_id == rid); \ 102 ASSERT(_rgnp->rgn_refcnt != 0); \ 103 ASSERT(!(_rgnp->rgn_flags & SFMMU_REGION_FREE)); \ 104 ASSERT((_rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 105 SFMMU_REGION_HME); \ 106 ASSERT((saddr) >= _rgnp->rgn_saddr); \ 107 ASSERT((saddr) < 
_rgnp->rgn_saddr + _rgnp->rgn_size); \ 108 ASSERT(_eaddr > _rgnp->rgn_saddr); \ 109 ASSERT(_eaddr <= _rgnp->rgn_saddr + _rgnp->rgn_size); \ 110 } 111 112 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) \ 113 { \ 114 caddr_t _hsva; \ 115 caddr_t _heva; \ 116 caddr_t _rsva; \ 117 caddr_t _reva; \ 118 int _ttesz = get_hblk_ttesz(hmeblkp); \ 119 int _flagtte; \ 120 ASSERT((srdp)->srd_refcnt != 0); \ 121 ASSERT((rid) < SFMMU_MAX_HME_REGIONS); \ 122 ASSERT((rgnp)->rgn_id == rid); \ 123 ASSERT(!((rgnp)->rgn_flags & SFMMU_REGION_FREE)); \ 124 ASSERT(((rgnp)->rgn_flags & SFMMU_REGION_TYPE_MASK) == \ 125 SFMMU_REGION_HME); \ 126 ASSERT(_ttesz <= (rgnp)->rgn_pgszc); \ 127 _hsva = (caddr_t)get_hblk_base(hmeblkp); \ 128 _heva = get_hblk_endaddr(hmeblkp); \ 129 _rsva = (caddr_t)P2ALIGN( \ 130 (uintptr_t)(rgnp)->rgn_saddr, HBLK_MIN_BYTES); \ 131 _reva = (caddr_t)P2ROUNDUP( \ 132 (uintptr_t)((rgnp)->rgn_saddr + (rgnp)->rgn_size), \ 133 HBLK_MIN_BYTES); \ 134 ASSERT(_hsva >= _rsva); \ 135 ASSERT(_hsva < _reva); \ 136 ASSERT(_heva > _rsva); \ 137 ASSERT(_heva <= _reva); \ 138 _flagtte = (_ttesz < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : \ 139 _ttesz; \ 140 ASSERT(rgnp->rgn_hmeflags & (0x1 << _flagtte)); \ 141 } 142 143 #else /* DEBUG */ 144 #define SFMMU_VALIDATE_HMERID(hat, rid, addr, len) 145 #define SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid) 146 #endif /* DEBUG */ 147 148 #if defined(SF_ERRATA_57) 149 extern caddr_t errata57_limit; 150 #endif 151 152 #define HME8BLK_SZ_RND ((roundup(HME8BLK_SZ, sizeof (int64_t))) / \ 153 (sizeof (int64_t))) 154 #define HBLK_RESERVE ((struct hme_blk *)hblk_reserve) 155 156 #define HBLK_RESERVE_CNT 128 157 #define HBLK_RESERVE_MIN 20 158 159 static struct hme_blk *freehblkp; 160 static kmutex_t freehblkp_lock; 161 static int freehblkcnt; 162 163 static int64_t hblk_reserve[HME8BLK_SZ_RND]; 164 static kmutex_t hblk_reserve_lock; 165 static kthread_t *hblk_reserve_thread; 166 167 static nucleus_hblk8_info_t nucleus_hblk8; 168 static nucleus_hblk1_info_t nucleus_hblk1; 169 170 /* 171 * Data to manage per-cpu hmeblk pending queues, hmeblks are queued here 172 * after the initial phase of removing an hmeblk from the hash chain, see 173 * the detailed comment in sfmmu_hblk_hash_rm() for further details. 174 */ 175 static cpu_hme_pend_t *cpu_hme_pend; 176 static uint_t cpu_hme_pend_thresh; 177 /* 178 * SFMMU specific hat functions 179 */ 180 void hat_pagecachectl(struct page *, int); 181 182 /* flags for hat_pagecachectl */ 183 #define HAT_CACHE 0x1 184 #define HAT_UNCACHE 0x2 185 #define HAT_TMPNC 0x4 186 187 /* 188 * Flag to allow the creation of non-cacheable translations 189 * to system memory. It is off by default. At the moment this 190 * flag is used by the ecache error injector. The error injector 191 * will turn it on when creating such a translation then shut it 192 * off when it's finished. 193 */ 194 195 int sfmmu_allow_nc_trans = 0; 196 197 /* 198 * Flag to disable large page support. 199 * value of 1 => disable all large pages. 200 * bits 1, 2, and 3 are to disable 64K, 512K and 4M pages respectively. 201 * 202 * For example, use the value 0x4 to disable 512K pages. 203 * 204 */ 205 #define LARGE_PAGES_OFF 0x1 206 207 /* 208 * The disable_large_pages and disable_ism_large_pages variables control 209 * hat_memload_array and the page sizes to be used by ISM and the kernel. 
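 * As an illustration (not part of the original comment): TTE512K is size
 * code 2, so an /etc/system setting of disable_large_pages = 0x4, i.e.
 * (1 << TTE512K), keeps 512K pages out of use while leaving the 64K and
 * 4M sizes available; disable_ism_large_pages is initialized this way
 * below because 512K pages are not supported for ISM/DISM.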
210 * 211 * The disable_auto_data_large_pages and disable_auto_text_large_pages variables 212 * are only used to control which OOB pages to use at upper VM segment creation 213 * time, and are set in hat_init_pagesizes and used in the map_pgsz* routines. 214 * Their values may come from platform or CPU specific code to disable page 215 * sizes that should not be used. 216 * 217 * WARNING: 512K pages are currently not supported for ISM/DISM. 218 */ 219 uint_t disable_large_pages = 0; 220 uint_t disable_ism_large_pages = (1 << TTE512K); 221 uint_t disable_auto_data_large_pages = 0; 222 uint_t disable_auto_text_large_pages = 0; 223 224 /* 225 * Private sfmmu data structures for hat management 226 */ 227 static struct kmem_cache *sfmmuid_cache; 228 static struct kmem_cache *mmuctxdom_cache; 229 230 /* 231 * Private sfmmu data structures for tsb management 232 */ 233 static struct kmem_cache *sfmmu_tsbinfo_cache; 234 static struct kmem_cache *sfmmu_tsb8k_cache; 235 static struct kmem_cache *sfmmu_tsb_cache[NLGRPS_MAX]; 236 static vmem_t *kmem_bigtsb_arena; 237 static vmem_t *kmem_tsb_arena; 238 239 /* 240 * sfmmu static variables for hmeblk resource management. 241 */ 242 static vmem_t *hat_memload1_arena; /* HAT translation arena for sfmmu1_cache */ 243 static struct kmem_cache *sfmmu8_cache; 244 static struct kmem_cache *sfmmu1_cache; 245 static struct kmem_cache *pa_hment_cache; 246 247 static kmutex_t ism_mlist_lock; /* mutex for ism mapping list */ 248 /* 249 * private data for ism 250 */ 251 static struct kmem_cache *ism_blk_cache; 252 static struct kmem_cache *ism_ment_cache; 253 #define ISMID_STARTADDR NULL 254 255 /* 256 * Region management data structures and function declarations. 257 */ 258 259 static void sfmmu_leave_srd(sfmmu_t *); 260 static int sfmmu_srdcache_constructor(void *, void *, int); 261 static void sfmmu_srdcache_destructor(void *, void *); 262 static int sfmmu_rgncache_constructor(void *, void *, int); 263 static void sfmmu_rgncache_destructor(void *, void *); 264 static int sfrgnmap_isnull(sf_region_map_t *); 265 static int sfhmergnmap_isnull(sf_hmeregion_map_t *); 266 static int sfmmu_scdcache_constructor(void *, void *, int); 267 static void sfmmu_scdcache_destructor(void *, void *); 268 static void sfmmu_rgn_cb_noop(caddr_t, caddr_t, caddr_t, 269 size_t, void *, u_offset_t); 270 271 static uint_t srd_hashmask = SFMMU_MAX_SRD_BUCKETS - 1; 272 static sf_srd_bucket_t *srd_buckets; 273 static struct kmem_cache *srd_cache; 274 static uint_t srd_rgn_hashmask = SFMMU_MAX_REGION_BUCKETS - 1; 275 static struct kmem_cache *region_cache; 276 static struct kmem_cache *scd_cache; 277 278 #ifdef sun4v 279 int use_bigtsb_arena = 1; 280 #else 281 int use_bigtsb_arena = 0; 282 #endif 283 284 /* External /etc/system tunable, for turning on&off the shctx support */ 285 int disable_shctx = 0; 286 /* Internal variable, set by MD if the HW supports shctx feature */ 287 int shctx_on = 0; 288 289 #ifdef DEBUG 290 static void check_scd_sfmmu_list(sfmmu_t **, sfmmu_t *, int); 291 #endif 292 static void sfmmu_to_scd_list(sfmmu_t **, sfmmu_t *); 293 static void sfmmu_from_scd_list(sfmmu_t **, sfmmu_t *); 294 295 static sf_scd_t *sfmmu_alloc_scd(sf_srd_t *, sf_region_map_t *); 296 static void sfmmu_find_scd(sfmmu_t *); 297 static void sfmmu_join_scd(sf_scd_t *, sfmmu_t *); 298 static void sfmmu_finish_join_scd(sfmmu_t *); 299 static void sfmmu_leave_scd(sfmmu_t *, uchar_t); 300 static void sfmmu_destroy_scd(sf_srd_t *, sf_scd_t *, sf_region_map_t *); 301 static int 
sfmmu_alloc_scd_tsbs(sf_srd_t *, sf_scd_t *); 302 static void sfmmu_free_scd_tsbs(sfmmu_t *); 303 static void sfmmu_tsb_inv_ctx(sfmmu_t *); 304 static int find_ism_rid(sfmmu_t *, sfmmu_t *, caddr_t, uint_t *); 305 static void sfmmu_ism_hatflags(sfmmu_t *, int); 306 static int sfmmu_srd_lock_held(sf_srd_t *); 307 static void sfmmu_remove_scd(sf_scd_t **, sf_scd_t *); 308 static void sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *); 309 static void sfmmu_link_scd_to_regions(sf_srd_t *, sf_scd_t *); 310 static void sfmmu_unlink_scd_from_regions(sf_srd_t *, sf_scd_t *); 311 static void sfmmu_link_to_hmeregion(sfmmu_t *, sf_region_t *); 312 static void sfmmu_unlink_from_hmeregion(sfmmu_t *, sf_region_t *); 313 314 /* 315 * ``hat_lock'' is a hashed mutex lock for protecting sfmmu TSB lists, 316 * HAT flags, synchronizing TLB/TSB coherency, and context management. 317 * The lock is hashed on the sfmmup since the case where we need to lock 318 * all processes is rare but does occur (e.g. we need to unload a shared 319 * mapping from all processes using the mapping). We have a lot of buckets, 320 * and each slab of sfmmu_t's can use about a quarter of them, giving us 321 * a fairly good distribution without wasting too much space and overhead 322 * when we have to grab them all. 323 */ 324 #define SFMMU_NUM_LOCK 128 /* must be power of two */ 325 hatlock_t hat_lock[SFMMU_NUM_LOCK]; 326 327 /* 328 * Hash algorithm optimized for a small number of slabs. 329 * 7 is (highbit((sizeof sfmmu_t)) - 1) 330 * This hash algorithm is based upon the knowledge that sfmmu_t's come from a 331 * kmem_cache, and thus they will be sequential within that cache. In 332 * addition, each new slab will have a different "color" up to cache_maxcolor 333 * which will skew the hashing for each successive slab which is allocated. 334 * If the size of sfmmu_t changed to a larger size, this algorithm may need 335 * to be revisited. 336 */ 337 #define TSB_HASH_SHIFT_BITS (7) 338 #define PTR_HASH(x) ((uintptr_t)x >> TSB_HASH_SHIFT_BITS) 339 340 #ifdef DEBUG 341 int tsb_hash_debug = 0; 342 #define TSB_HASH(sfmmup) \ 343 (tsb_hash_debug ? &hat_lock[0] : \ 344 &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)]) 345 #else /* DEBUG */ 346 #define TSB_HASH(sfmmup) &hat_lock[PTR_HASH(sfmmup) & (SFMMU_NUM_LOCK-1)] 347 #endif /* DEBUG */ 348 349 350 /* sfmmu_replace_tsb() return codes. */ 351 typedef enum tsb_replace_rc { 352 TSB_SUCCESS, 353 TSB_ALLOCFAIL, 354 TSB_LOSTRACE, 355 TSB_ALREADY_SWAPPED, 356 TSB_CANTGROW 357 } tsb_replace_rc_t; 358 359 /* 360 * Flags for TSB allocation routines. 361 */ 362 #define TSB_ALLOC 0x01 363 #define TSB_FORCEALLOC 0x02 364 #define TSB_GROW 0x04 365 #define TSB_SHRINK 0x08 366 #define TSB_SWAPIN 0x10 367 368 /* 369 * Support for HAT callbacks. 370 */ 371 #define SFMMU_MAX_RELOC_CALLBACKS 10 372 int sfmmu_max_cb_id = SFMMU_MAX_RELOC_CALLBACKS; 373 static id_t sfmmu_cb_nextid = 0; 374 static id_t sfmmu_tsb_cb_id; 375 struct sfmmu_callback *sfmmu_cb_table; 376 377 kmutex_t kpr_mutex; 378 kmutex_t kpr_suspendlock; 379 kthread_t *kreloc_thread; 380 381 /* 382 * Enable VA->PA translation sanity checking on DEBUG kernels. 383 * Disabled by default. This is incompatible with some 384 * drivers (error injector, RSM) so if it breaks you get 385 * to keep both pieces. 
386 */ 387 int hat_check_vtop = 0; 388 389 /* 390 * Private sfmmu routines (prototypes) 391 */ 392 static struct hme_blk *sfmmu_shadow_hcreate(sfmmu_t *, caddr_t, int, uint_t); 393 static struct hme_blk *sfmmu_hblk_alloc(sfmmu_t *, caddr_t, 394 struct hmehash_bucket *, uint_t, hmeblk_tag, uint_t, 395 uint_t); 396 static caddr_t sfmmu_hblk_unload(struct hat *, struct hme_blk *, caddr_t, 397 caddr_t, demap_range_t *, uint_t); 398 static caddr_t sfmmu_hblk_sync(struct hat *, struct hme_blk *, caddr_t, 399 caddr_t, int); 400 static void sfmmu_hblk_free(struct hme_blk **); 401 static void sfmmu_hblks_list_purge(struct hme_blk **, int); 402 static uint_t sfmmu_get_free_hblk(struct hme_blk **, uint_t); 403 static uint_t sfmmu_put_free_hblk(struct hme_blk *, uint_t); 404 static struct hme_blk *sfmmu_hblk_steal(int); 405 static int sfmmu_steal_this_hblk(struct hmehash_bucket *, 406 struct hme_blk *, uint64_t, struct hme_blk *); 407 static caddr_t sfmmu_hblk_unlock(struct hme_blk *, caddr_t, caddr_t); 408 409 static void hat_do_memload_array(struct hat *, caddr_t, size_t, 410 struct page **, uint_t, uint_t, uint_t); 411 static void hat_do_memload(struct hat *, caddr_t, struct page *, 412 uint_t, uint_t, uint_t); 413 static void sfmmu_memload_batchsmall(struct hat *, caddr_t, page_t **, 414 uint_t, uint_t, pgcnt_t, uint_t); 415 void sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, 416 uint_t); 417 static int sfmmu_tteload_array(sfmmu_t *, tte_t *, caddr_t, page_t **, 418 uint_t, uint_t); 419 static struct hmehash_bucket *sfmmu_tteload_acquire_hashbucket(sfmmu_t *, 420 caddr_t, int, uint_t); 421 static struct hme_blk *sfmmu_tteload_find_hmeblk(sfmmu_t *, 422 struct hmehash_bucket *, caddr_t, uint_t, uint_t, 423 uint_t); 424 static int sfmmu_tteload_addentry(sfmmu_t *, struct hme_blk *, tte_t *, 425 caddr_t, page_t **, uint_t, uint_t); 426 static void sfmmu_tteload_release_hashbucket(struct hmehash_bucket *); 427 428 static int sfmmu_pagearray_setup(caddr_t, page_t **, tte_t *, int); 429 static pfn_t sfmmu_uvatopfn(caddr_t, sfmmu_t *, tte_t *); 430 void sfmmu_memtte(tte_t *, pfn_t, uint_t, int); 431 #ifdef VAC 432 static void sfmmu_vac_conflict(struct hat *, caddr_t, page_t *); 433 static int sfmmu_vacconflict_array(caddr_t, page_t *, int *); 434 int tst_tnc(page_t *pp, pgcnt_t); 435 void conv_tnc(page_t *pp, int); 436 #endif 437 438 static void sfmmu_get_ctx(sfmmu_t *); 439 static void sfmmu_free_sfmmu(sfmmu_t *); 440 441 static void sfmmu_ttesync(struct hat *, caddr_t, tte_t *, page_t *); 442 static void sfmmu_chgattr(struct hat *, caddr_t, size_t, uint_t, int); 443 444 cpuset_t sfmmu_pageunload(page_t *, struct sf_hment *, int); 445 static void hat_pagereload(struct page *, struct page *); 446 static cpuset_t sfmmu_pagesync(page_t *, struct sf_hment *, uint_t); 447 #ifdef VAC 448 void sfmmu_page_cache_array(page_t *, int, int, pgcnt_t); 449 static void sfmmu_page_cache(page_t *, int, int, int); 450 #endif 451 452 cpuset_t sfmmu_rgntlb_demap(caddr_t, sf_region_t *, 453 struct hme_blk *, int); 454 static void sfmmu_tlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 455 pfn_t, int, int, int, int); 456 static void sfmmu_ismtlbcache_demap(caddr_t, sfmmu_t *, struct hme_blk *, 457 pfn_t, int); 458 static void sfmmu_tlb_demap(caddr_t, sfmmu_t *, struct hme_blk *, int, int); 459 static void sfmmu_tlb_range_demap(demap_range_t *); 460 static void sfmmu_invalidate_ctx(sfmmu_t *); 461 static void sfmmu_sync_mmustate(sfmmu_t *); 462 463 static void sfmmu_tsbinfo_setup_phys(struct tsb_info *, pfn_t); 
464 static int sfmmu_tsbinfo_alloc(struct tsb_info **, int, int, uint_t, 465 sfmmu_t *); 466 static void sfmmu_tsb_free(struct tsb_info *); 467 static void sfmmu_tsbinfo_free(struct tsb_info *); 468 static int sfmmu_init_tsbinfo(struct tsb_info *, int, int, uint_t, 469 sfmmu_t *); 470 static void sfmmu_tsb_chk_reloc(sfmmu_t *, hatlock_t *); 471 static void sfmmu_tsb_swapin(sfmmu_t *, hatlock_t *); 472 static int sfmmu_select_tsb_szc(pgcnt_t); 473 static void sfmmu_mod_tsb(sfmmu_t *, caddr_t, tte_t *, int); 474 #define sfmmu_load_tsb(sfmmup, vaddr, tte, szc) \ 475 sfmmu_mod_tsb(sfmmup, vaddr, tte, szc) 476 #define sfmmu_unload_tsb(sfmmup, vaddr, szc) \ 477 sfmmu_mod_tsb(sfmmup, vaddr, NULL, szc) 478 static void sfmmu_copy_tsb(struct tsb_info *, struct tsb_info *); 479 static tsb_replace_rc_t sfmmu_replace_tsb(sfmmu_t *, struct tsb_info *, uint_t, 480 hatlock_t *, uint_t); 481 static void sfmmu_size_tsb(sfmmu_t *, int, uint64_t, uint64_t, int); 482 483 #ifdef VAC 484 void sfmmu_cache_flush(pfn_t, int); 485 void sfmmu_cache_flushcolor(int, pfn_t); 486 #endif 487 static caddr_t sfmmu_hblk_chgattr(sfmmu_t *, struct hme_blk *, caddr_t, 488 caddr_t, demap_range_t *, uint_t, int); 489 490 static uint64_t sfmmu_vtop_attr(uint_t, int mode, tte_t *); 491 static uint_t sfmmu_ptov_attr(tte_t *); 492 static caddr_t sfmmu_hblk_chgprot(sfmmu_t *, struct hme_blk *, caddr_t, 493 caddr_t, demap_range_t *, uint_t); 494 static uint_t sfmmu_vtop_prot(uint_t, uint_t *); 495 static int sfmmu_idcache_constructor(void *, void *, int); 496 static void sfmmu_idcache_destructor(void *, void *); 497 static int sfmmu_hblkcache_constructor(void *, void *, int); 498 static void sfmmu_hblkcache_destructor(void *, void *); 499 static void sfmmu_hblkcache_reclaim(void *); 500 static void sfmmu_shadow_hcleanup(sfmmu_t *, struct hme_blk *, 501 struct hmehash_bucket *); 502 static void sfmmu_hblk_hash_rm(struct hmehash_bucket *, struct hme_blk *, 503 struct hme_blk *, struct hme_blk **, int); 504 static void sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *, 505 uint64_t); 506 static struct hme_blk *sfmmu_check_pending_hblks(int); 507 static void sfmmu_free_hblks(sfmmu_t *, caddr_t, caddr_t, int); 508 static void sfmmu_cleanup_rhblk(sf_srd_t *, caddr_t, uint_t, int); 509 static void sfmmu_unload_hmeregion_va(sf_srd_t *, uint_t, caddr_t, caddr_t, 510 int, caddr_t *); 511 static void sfmmu_unload_hmeregion(sf_srd_t *, sf_region_t *); 512 513 static void sfmmu_rm_large_mappings(page_t *, int); 514 515 static void hat_lock_init(void); 516 static void hat_kstat_init(void); 517 static int sfmmu_kstat_percpu_update(kstat_t *ksp, int rw); 518 static void sfmmu_set_scd_rttecnt(sf_srd_t *, sf_scd_t *); 519 static int sfmmu_is_rgnva(sf_srd_t *, caddr_t, ulong_t, ulong_t); 520 static void sfmmu_check_page_sizes(sfmmu_t *, int); 521 int fnd_mapping_sz(page_t *); 522 static void iment_add(struct ism_ment *, struct hat *); 523 static void iment_sub(struct ism_ment *, struct hat *); 524 static pgcnt_t ism_tsb_entries(sfmmu_t *, int szc); 525 extern void sfmmu_setup_tsbinfo(sfmmu_t *); 526 extern void sfmmu_clear_utsbinfo(void); 527 528 static void sfmmu_ctx_wrap_around(mmu_ctx_t *, boolean_t); 529 530 extern int vpm_enable; 531 532 /* kpm globals */ 533 #ifdef DEBUG 534 /* 535 * Enable trap level tsbmiss handling 536 */ 537 int kpm_tsbmtl = 1; 538 539 /* 540 * Flush the TLB on kpm mapout. Note: Xcalls are used (again) for the 541 * required TLB shootdowns in this case, so handle w/ care. Off by default. 
542 */ 543 int kpm_tlb_flush; 544 #endif /* DEBUG */ 545 546 static void *sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *, size_t, int); 547 548 #ifdef DEBUG 549 static void sfmmu_check_hblk_flist(); 550 #endif 551 552 /* 553 * Semi-private sfmmu data structures. Some of them are initialize in 554 * startup or in hat_init. Some of them are private but accessed by 555 * assembly code or mach_sfmmu.c 556 */ 557 struct hmehash_bucket *uhme_hash; /* user hmeblk hash table */ 558 struct hmehash_bucket *khme_hash; /* kernel hmeblk hash table */ 559 uint64_t uhme_hash_pa; /* PA of uhme_hash */ 560 uint64_t khme_hash_pa; /* PA of khme_hash */ 561 int uhmehash_num; /* # of buckets in user hash table */ 562 int khmehash_num; /* # of buckets in kernel hash table */ 563 564 uint_t max_mmu_ctxdoms = 0; /* max context domains in the system */ 565 mmu_ctx_t **mmu_ctxs_tbl; /* global array of context domains */ 566 uint64_t mmu_saved_gnum = 0; /* to init incoming MMUs' gnums */ 567 568 #define DEFAULT_NUM_CTXS_PER_MMU 8192 569 static uint_t nctxs = DEFAULT_NUM_CTXS_PER_MMU; 570 571 int cache; /* describes system cache */ 572 573 caddr_t ktsb_base; /* kernel 8k-indexed tsb base address */ 574 uint64_t ktsb_pbase; /* kernel 8k-indexed tsb phys address */ 575 int ktsb_szcode; /* kernel 8k-indexed tsb size code */ 576 int ktsb_sz; /* kernel 8k-indexed tsb size */ 577 578 caddr_t ktsb4m_base; /* kernel 4m-indexed tsb base address */ 579 uint64_t ktsb4m_pbase; /* kernel 4m-indexed tsb phys address */ 580 int ktsb4m_szcode; /* kernel 4m-indexed tsb size code */ 581 int ktsb4m_sz; /* kernel 4m-indexed tsb size */ 582 583 uint64_t kpm_tsbbase; /* kernel seg_kpm 4M TSB base address */ 584 int kpm_tsbsz; /* kernel seg_kpm 4M TSB size code */ 585 uint64_t kpmsm_tsbbase; /* kernel seg_kpm 8K TSB base address */ 586 int kpmsm_tsbsz; /* kernel seg_kpm 8K TSB size code */ 587 588 #ifndef sun4v 589 int utsb_dtlb_ttenum = -1; /* index in TLB for utsb locked TTE */ 590 int utsb4m_dtlb_ttenum = -1; /* index in TLB for 4M TSB TTE */ 591 int dtlb_resv_ttenum; /* index in TLB of first reserved TTE */ 592 caddr_t utsb_vabase; /* reserved kernel virtual memory */ 593 caddr_t utsb4m_vabase; /* for trap handler TSB accesses */ 594 #endif /* sun4v */ 595 uint64_t tsb_alloc_bytes = 0; /* bytes allocated to TSBs */ 596 vmem_t *kmem_tsb_default_arena[NLGRPS_MAX]; /* For dynamic TSBs */ 597 vmem_t *kmem_bigtsb_default_arena[NLGRPS_MAX]; /* dynamic 256M TSBs */ 598 599 /* 600 * Size to use for TSB slabs. Future platforms that support page sizes 601 * larger than 4M may wish to change these values, and provide their own 602 * assembly macros for building and decoding the TSB base register contents. 603 * Note disable_large_pages will override the value set here. 604 */ 605 static uint_t tsb_slab_ttesz = TTE4M; 606 size_t tsb_slab_size = MMU_PAGESIZE4M; 607 uint_t tsb_slab_shift = MMU_PAGESHIFT4M; 608 /* PFN mask for TTE */ 609 size_t tsb_slab_mask = MMU_PAGEOFFSET4M >> MMU_PAGESHIFT; 610 611 /* 612 * Size to use for TSB slabs. These are used only when 256M tsb arenas 613 * exist. 614 */ 615 static uint_t bigtsb_slab_ttesz = TTE256M; 616 static size_t bigtsb_slab_size = MMU_PAGESIZE256M; 617 static uint_t bigtsb_slab_shift = MMU_PAGESHIFT256M; 618 /* 256M page alignment for 8K pfn */ 619 static size_t bigtsb_slab_mask = MMU_PAGEOFFSET256M >> MMU_PAGESHIFT; 620 621 /* largest TSB size to grow to, will be smaller on smaller memory systems */ 622 static int tsb_max_growsize = 0; 623 624 /* 625 * Tunable parameters dealing with TSB policies. 
626 */ 627 628 /* 629 * This undocumented tunable forces all 8K TSBs to be allocated from 630 * the kernel heap rather than from the kmem_tsb_default_arena arenas. 631 */ 632 #ifdef DEBUG 633 int tsb_forceheap = 0; 634 #endif /* DEBUG */ 635 636 /* 637 * Decide whether to use per-lgroup arenas, or one global set of 638 * TSB arenas. The default is not to break up per-lgroup, since 639 * most platforms don't recognize any tangible benefit from it. 640 */ 641 int tsb_lgrp_affinity = 0; 642 643 /* 644 * Used for growing the TSB based on the process RSS. 645 * tsb_rss_factor is based on the smallest TSB, and is 646 * shifted by the TSB size to determine if we need to grow. 647 * The default will grow the TSB if the number of TTEs for 648 * this page size exceeds 75% of the number of TSB entries, 649 * which should _almost_ eliminate all conflict misses 650 * (at the expense of using up lots and lots of memory). 651 */ 652 #define TSB_RSS_FACTOR (TSB_ENTRIES(TSB_MIN_SZCODE) * 0.75) 653 #define SFMMU_RSS_TSBSIZE(tsbszc) (tsb_rss_factor << tsbszc) 654 #define SELECT_TSB_SIZECODE(pgcnt) ( \ 655 (enable_tsb_rss_sizing)? sfmmu_select_tsb_szc(pgcnt) : \ 656 default_tsb_size) 657 #define TSB_OK_SHRINK() \ 658 (tsb_alloc_bytes > tsb_alloc_hiwater || freemem < desfree) 659 #define TSB_OK_GROW() \ 660 (tsb_alloc_bytes < tsb_alloc_hiwater && freemem > desfree) 661 662 int enable_tsb_rss_sizing = 1; 663 int tsb_rss_factor = (int)TSB_RSS_FACTOR; 664 665 /* which TSB size code to use for new address spaces or if rss sizing off */ 666 int default_tsb_size = TSB_8K_SZCODE; 667 668 static uint64_t tsb_alloc_hiwater; /* limit TSB reserved memory */ 669 uint64_t tsb_alloc_hiwater_factor; /* tsb_alloc_hiwater = physmem / this */ 670 #define TSB_ALLOC_HIWATER_FACTOR_DEFAULT 32 671 672 #ifdef DEBUG 673 static int tsb_random_size = 0; /* set to 1 to test random tsb sizes on alloc */ 674 static int tsb_grow_stress = 0; /* if set to 1, keep replacing TSB w/ random */ 675 static int tsb_alloc_mtbf = 0; /* fail allocation every n attempts */ 676 static int tsb_alloc_fail_mtbf = 0; 677 static int tsb_alloc_count = 0; 678 #endif /* DEBUG */ 679 680 /* if set to 1, will remap valid TTEs when growing TSB. */ 681 int tsb_remap_ttes = 1; 682 683 /* 684 * If we have more than this many mappings, allocate a second TSB. 685 * This default is chosen because the I/D fully associative TLBs are 686 * assumed to have at least 8 available entries. Platforms with a 687 * larger fully-associative TLB could probably override the default. 
688 */ 689 690 #ifdef sun4v 691 int tsb_sectsb_threshold = 0; 692 #else 693 int tsb_sectsb_threshold = 8; 694 #endif 695 696 /* 697 * kstat data 698 */ 699 struct sfmmu_global_stat sfmmu_global_stat; 700 struct sfmmu_tsbsize_stat sfmmu_tsbsize_stat; 701 702 /* 703 * Global data 704 */ 705 sfmmu_t *ksfmmup; /* kernel's hat id */ 706 707 #ifdef DEBUG 708 static void chk_tte(tte_t *, tte_t *, tte_t *, struct hme_blk *); 709 #endif 710 711 /* sfmmu locking operations */ 712 static kmutex_t *sfmmu_mlspl_enter(struct page *, int); 713 static int sfmmu_mlspl_held(struct page *, int); 714 715 kmutex_t *sfmmu_page_enter(page_t *); 716 void sfmmu_page_exit(kmutex_t *); 717 int sfmmu_page_spl_held(struct page *); 718 719 /* sfmmu internal locking operations - accessed directly */ 720 static void sfmmu_mlist_reloc_enter(page_t *, page_t *, 721 kmutex_t **, kmutex_t **); 722 static void sfmmu_mlist_reloc_exit(kmutex_t *, kmutex_t *); 723 static hatlock_t * 724 sfmmu_hat_enter(sfmmu_t *); 725 static hatlock_t * 726 sfmmu_hat_tryenter(sfmmu_t *); 727 static void sfmmu_hat_exit(hatlock_t *); 728 static void sfmmu_hat_lock_all(void); 729 static void sfmmu_hat_unlock_all(void); 730 static void sfmmu_ismhat_enter(sfmmu_t *, int); 731 static void sfmmu_ismhat_exit(sfmmu_t *, int); 732 733 kpm_hlk_t *kpmp_table; 734 uint_t kpmp_table_sz; /* must be a power of 2 */ 735 uchar_t kpmp_shift; 736 737 kpm_shlk_t *kpmp_stable; 738 uint_t kpmp_stable_sz; /* must be a power of 2 */ 739 740 /* 741 * SPL_TABLE_SIZE is 2 * NCPU, but no smaller than 128. 742 * SPL_SHIFT is log2(SPL_TABLE_SIZE). 743 */ 744 #if ((2*NCPU_P2) > 128) 745 #define SPL_SHIFT ((unsigned)(NCPU_LOG2 + 1)) 746 #else 747 #define SPL_SHIFT 7U 748 #endif 749 #define SPL_TABLE_SIZE (1U << SPL_SHIFT) 750 #define SPL_MASK (SPL_TABLE_SIZE - 1) 751 752 /* 753 * We shift by PP_SHIFT to take care of the low-order 0 bits of a page_t 754 * and by multiples of SPL_SHIFT to get as many varied bits as we can. 755 */ 756 #define SPL_INDEX(pp) \ 757 ((((uintptr_t)(pp) >> PP_SHIFT) ^ \ 758 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT)) ^ \ 759 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 2)) ^ \ 760 ((uintptr_t)(pp) >> (PP_SHIFT + SPL_SHIFT * 3))) & \ 761 SPL_MASK) 762 763 #define SPL_HASH(pp) \ 764 (&sfmmu_page_lock[SPL_INDEX(pp)].pad_mutex) 765 766 static pad_mutex_t sfmmu_page_lock[SPL_TABLE_SIZE]; 767 768 /* Array of mutexes protecting a page's mapping list and p_nrm field. */ 769 770 #define MML_TABLE_SIZE SPL_TABLE_SIZE 771 #define MLIST_HASH(pp) (&mml_table[SPL_INDEX(pp)].pad_mutex) 772 773 static pad_mutex_t mml_table[MML_TABLE_SIZE]; 774 775 /* 776 * hat_unload_callback() will group together callbacks in order 777 * to avoid xt_sync() calls. This is the maximum size of the group. 778 */ 779 #define MAX_CB_ADDR 32 780 781 tte_t hw_tte; 782 static ulong_t sfmmu_dmr_maxbit = DMR_MAXBIT; 783 784 static char *mmu_ctx_kstat_names[] = { 785 "mmu_ctx_tsb_exceptions", 786 "mmu_ctx_tsb_raise_exception", 787 "mmu_ctx_wrap_around", 788 }; 789 790 /* 791 * Wrapper for vmem_xalloc since vmem_create only allows limited 792 * parameters for vm_source_alloc functions. This function allows us 793 * to specify alignment consistent with the size of the object being 794 * allocated. 795 */ 796 static void * 797 sfmmu_vmem_xalloc_aligned_wrapper(vmem_t *vmp, size_t size, int vmflag) 798 { 799 return (vmem_xalloc(vmp, size, size, 0, 0, NULL, NULL, vmflag)); 800 } 801 802 /* Common code for setting tsb_alloc_hiwater. 
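 * A worked example (illustrative only): with the default
 * tsb_alloc_hiwater_factor of 32, the macro below caps dynamically
 * allocated TSB memory at 1/32 of physical memory, e.g.
 *
 *	physmem = 0x80000 pages of 8K (4 GB)
 *	tsb_alloc_hiwater = ptob(0x80000) / 32 = 128 MB
 *
 * The cap is recomputed from physmem when memory is added or removed
 * (see sfmmu_update_post_add() / sfmmu_update_post_del()).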
*/ 803 #define SFMMU_SET_TSB_ALLOC_HIWATER(pages) tsb_alloc_hiwater = \ 804 ptob(pages) / tsb_alloc_hiwater_factor 805 806 /* 807 * Set tsb_max_growsize to allow at most all of physical memory to be mapped by 808 * a single TSB. physmem is the number of physical pages so we need physmem 8K 809 * TTEs to represent all those physical pages. We round this up by using 810 * 1<<highbit(). To figure out which size code to use, remember that the size 811 * code is just an amount to shift the smallest TSB size to get the size of 812 * this TSB. So we subtract that size, TSB_START_SIZE, from highbit() (or 813 * highbit() - 1) to get the size code for the smallest TSB that can represent 814 * all of physical memory, while erring on the side of too much. 815 * 816 * Restrict tsb_max_growsize to make sure that: 817 * 1) TSBs can't grow larger than the TSB slab size 818 * 2) TSBs can't grow larger than UTSB_MAX_SZCODE. 819 */ 820 #define SFMMU_SET_TSB_MAX_GROWSIZE(pages) { \ 821 int _i, _szc, _slabszc, _tsbszc; \ 822 \ 823 _i = highbit(pages); \ 824 if ((1 << (_i - 1)) == (pages)) \ 825 _i--; /* 2^n case, round down */ \ 826 _szc = _i - TSB_START_SIZE; \ 827 _slabszc = bigtsb_slab_shift - (TSB_START_SIZE + TSB_ENTRY_SHIFT); \ 828 _tsbszc = MIN(_szc, _slabszc); \ 829 tsb_max_growsize = MIN(_tsbszc, UTSB_MAX_SZCODE); \ 830 } 831 832 /* 833 * Given a pointer to an sfmmu and a TTE size code, return a pointer to the 834 * tsb_info which handles that TTE size. 835 */ 836 #define SFMMU_GET_TSBINFO(tsbinfop, sfmmup, tte_szc) { \ 837 (tsbinfop) = (sfmmup)->sfmmu_tsb; \ 838 ASSERT(((tsbinfop)->tsb_flags & TSB_SHAREDCTX) || \ 839 sfmmu_hat_lock_held(sfmmup)); \ 840 if ((tte_szc) >= TTE4M) { \ 841 ASSERT((tsbinfop) != NULL); \ 842 (tsbinfop) = (tsbinfop)->tsb_next; \ 843 } \ 844 } 845 846 /* 847 * Macro to use to unload entries from the TSB. 848 * It has knowledge of which page sizes get replicated in the TSB 849 * and will call the appropriate unload routine for the appropriate size. 850 */ 851 #define SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, ismhat) \ 852 { \ 853 int ttesz = get_hblk_ttesz(hmeblkp); \ 854 if (ttesz == TTE8K || ttesz == TTE4M) { \ 855 sfmmu_unload_tsb(sfmmup, addr, ttesz); \ 856 } else { \ 857 caddr_t sva = ismhat ? addr : \ 858 (caddr_t)get_hblk_base(hmeblkp); \ 859 caddr_t eva = sva + get_hblk_span(hmeblkp); \ 860 ASSERT(addr >= sva && addr < eva); \ 861 sfmmu_unload_tsb_range(sfmmup, sva, eva, ttesz); \ 862 } \ 863 } 864 865 866 /* Update tsb_alloc_hiwater after memory is configured. */ 867 /*ARGSUSED*/ 868 static void 869 sfmmu_update_post_add(void *arg, pgcnt_t delta_pages) 870 { 871 /* Assumes physmem has already been updated. */ 872 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 873 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 874 } 875 876 /* 877 * Update tsb_alloc_hiwater before memory is deleted. We'll do nothing here 878 * and update tsb_alloc_hiwater and tsb_max_growsize after the memory is 879 * deleted. 880 */ 881 /*ARGSUSED*/ 882 static int 883 sfmmu_update_pre_del(void *arg, pgcnt_t delta_pages) 884 { 885 return (0); 886 } 887 888 /* Update tsb_alloc_hiwater after memory fails to be unconfigured. */ 889 /*ARGSUSED*/ 890 static void 891 sfmmu_update_post_del(void *arg, pgcnt_t delta_pages, int cancelled) 892 { 893 /* 894 * Whether the delete was cancelled or not, just go ahead and update 895 * tsb_alloc_hiwater and tsb_max_growsize. 
 */
	SFMMU_SET_TSB_ALLOC_HIWATER(physmem);
	SFMMU_SET_TSB_MAX_GROWSIZE(physmem);
}

static kphysm_setup_vector_t sfmmu_update_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,	/* version */
	sfmmu_update_post_add,		/* post_add */
	sfmmu_update_pre_del,		/* pre_del */
	sfmmu_update_post_del		/* post_del */
};


/*
 * HME_BLK HASH PRIMITIVES
 */

/*
 * Enter a hme on the mapping list for page pp.
 * When large pages are more prevalent in the system we might want to
 * keep the mapping list in ascending order by the hment size. For now,
 * small pages are more frequent, so don't slow it down.
 */
#define	HME_ADD(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
								\
	hme->hme_prev = NULL;					\
	hme->hme_next = pp->p_mapping;				\
	hme->hme_page = pp;					\
	if (pp->p_mapping) {					\
		((struct sf_hment *)(pp->p_mapping))->hme_prev = hme;\
		ASSERT(pp->p_share > 0);			\
	} else {						\
		/* EMPTY */					\
		ASSERT(pp->p_share == 0);			\
	}							\
	pp->p_mapping = hme;					\
	pp->p_share++;						\
}

/*
 * Remove a hme from the mapping list for page pp.
 * If we are unmapping a large translation, we need to make sure that the
 * change is reflected in the corresponding bit of the p_index field.
 */
#define	HME_SUB(hme, pp)					\
{								\
	ASSERT(sfmmu_mlist_held(pp));				\
	ASSERT(hme->hme_page == pp || IS_PAHME(hme));		\
								\
	if (pp->p_mapping == NULL) {				\
		panic("hme_remove - no mappings");		\
	}							\
								\
	membar_stst();	/* ensure previous stores finish */	\
								\
	ASSERT(pp->p_share > 0);				\
	pp->p_share--;						\
								\
	if (hme->hme_prev) {					\
		ASSERT(pp->p_mapping != hme);			\
		ASSERT(hme->hme_prev->hme_page == pp ||		\
			IS_PAHME(hme->hme_prev));		\
		hme->hme_prev->hme_next = hme->hme_next;	\
	} else {						\
		ASSERT(pp->p_mapping == hme);			\
		pp->p_mapping = hme->hme_next;			\
		ASSERT((pp->p_mapping == NULL) ?		\
			(pp->p_share == 0) : 1);		\
	}							\
								\
	if (hme->hme_next) {					\
		ASSERT(hme->hme_next->hme_page == pp ||		\
			IS_PAHME(hme->hme_next));		\
		hme->hme_next->hme_prev = hme->hme_prev;	\
	}							\
								\
	/* zero out the entry */				\
	hme->hme_next = NULL;					\
	hme->hme_prev = NULL;					\
	hme->hme_page = NULL;					\
								\
	if (hme_size(hme) > TTE8K) {				\
		/* remove mappings for remainder of large pg */	\
		sfmmu_rm_large_mappings(pp, hme_size(hme));	\
	}							\
}

/*
 * This function returns the hment given the hme_blk and a vaddr.
 * It assumes addr has already been checked to belong to hme_blk's
 * range.
 */
#define	HBLKTOHME(hment, hmeblkp, addr)					\
{									\
	int index;							\
	HBLKTOHME_IDX(hment, hmeblkp, addr, index)			\
}

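/*
 * A quick sketch of the index math used by HBLKTOHME_IDX() below
 * (illustrative only): for an 8K hme_blk the hments map NHMENTS
 * consecutive 8K pages, so the hment for an address is selected by
 * its page offset within the block:
 *
 *	idx = (addr >> MMU_PAGESHIFT) & (NHMENTS - 1);
 *	hment = &hmeblkp->hblk_hme[idx];
 *
 * Blocks that map a single larger page have one hment, so idx is 0.
 */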
/*
 * Version of HBLKTOHME that also returns the index in hmeblkp
 * of the hment.
 */
#define	HBLKTOHME_IDX(hment, hmeblkp, addr, idx)			\
{									\
	ASSERT(in_hblk_range((hmeblkp), (addr)));			\
									\
	if (get_hblk_ttesz(hmeblkp) == TTE8K) {				\
		idx = (((uintptr_t)(addr) >> MMU_PAGESHIFT) & (NHMENTS-1)); \
	} else								\
		idx = 0;						\
									\
	(hment) = &(hmeblkp)->hblk_hme[idx];				\
}

/*
 * Disable any page sizes not supported by the CPU
 */
void
hat_init_pagesizes()
{
	int i;

	mmu_exported_page_sizes = 0;
	for (i = TTE8K; i < max_mmu_page_sizes; i++) {

		szc_2_userszc[i] = (uint_t)-1;
		userszc_2_szc[i] = (uint_t)-1;

		if ((mmu_exported_pagesize_mask & (1 << i)) == 0) {
			disable_large_pages |= (1 << i);
		} else {
			szc_2_userszc[i] = mmu_exported_page_sizes;
			userszc_2_szc[mmu_exported_page_sizes] = i;
			mmu_exported_page_sizes++;
		}
	}

	disable_ism_large_pages |= disable_large_pages;
	disable_auto_data_large_pages = disable_large_pages;
	disable_auto_text_large_pages = disable_large_pages;

	/*
	 * Initialize mmu-specific large page sizes.
	 */
	if (&mmu_large_pages_disabled) {
		disable_large_pages |= mmu_large_pages_disabled(HAT_LOAD);
		disable_ism_large_pages |=
		    mmu_large_pages_disabled(HAT_LOAD_SHARE);
		disable_auto_data_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_DATA);
		disable_auto_text_large_pages |=
		    mmu_large_pages_disabled(HAT_AUTO_TEXT);
	}
}

/*
 * Initialize the hardware address translation structures.
 */
void
hat_init(void)
{
	int i;
	uint_t sz;
	size_t size;

	hat_lock_init();
	hat_kstat_init();

	/*
	 * Hardware-only bits in a TTE
	 */
	MAKE_TTE_MASK(&hw_tte);

	hat_init_pagesizes();

	/* Initialize the hash locks */
	for (i = 0; i < khmehash_num; i++) {
		mutex_init(&khme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
		khme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
	}
	for (i = 0; i < uhmehash_num; i++) {
		mutex_init(&uhme_hash[i].hmehash_mutex, NULL,
		    MUTEX_DEFAULT, NULL);
		uhme_hash[i].hmeh_nextpa = HMEBLK_ENDPA;
	}
	khmehash_num--;		/* make sure counter starts from 0 */
	uhmehash_num--;		/* make sure counter starts from 0 */

	/*
	 * Allocate context domain structures.
	 *
	 * A platform may choose to modify max_mmu_ctxdoms in
	 * set_platform_defaults(). If a platform does not define
	 * a set_platform_defaults() or does not choose to modify
	 * max_mmu_ctxdoms, it gets one MMU context domain for every CPU.
	 *
	 * For all platforms that have CPUs sharing MMUs, this
	 * value must be defined.
	 */
	if (max_mmu_ctxdoms == 0)
		max_mmu_ctxdoms = max_ncpus;

	size = max_mmu_ctxdoms * sizeof (mmu_ctx_t *);
	mmu_ctxs_tbl = kmem_zalloc(size, KM_SLEEP);

	/* mmu_ctx_t is 64-byte aligned */
	mmuctxdom_cache = kmem_cache_create("mmuctxdom_cache",
	    sizeof (mmu_ctx_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
	/*
	 * MMU context domain initialization for the Boot CPU.
	 * This needs the context domains array allocated above.
	 */
	mutex_enter(&cpu_lock);
	sfmmu_cpu_init(CPU);
	mutex_exit(&cpu_lock);

	/*
	 * Initialize ism mapping list lock.
1117 */ 1118 1119 mutex_init(&ism_mlist_lock, NULL, MUTEX_DEFAULT, NULL); 1120 1121 /* 1122 * Each sfmmu structure carries an array of MMU context info 1123 * structures, one per context domain. The size of this array depends 1124 * on the maximum number of context domains. So, the size of the 1125 * sfmmu structure varies per platform. 1126 * 1127 * sfmmu is allocated from static arena, because trap 1128 * handler at TL > 0 is not allowed to touch kernel relocatable 1129 * memory. sfmmu's alignment is changed to 64 bytes from 1130 * default 8 bytes, as the lower 6 bits will be used to pass 1131 * pgcnt to vtag_flush_pgcnt_tl1. 1132 */ 1133 size = sizeof (sfmmu_t) + sizeof (sfmmu_ctx_t) * (max_mmu_ctxdoms - 1); 1134 1135 sfmmuid_cache = kmem_cache_create("sfmmuid_cache", size, 1136 64, sfmmu_idcache_constructor, sfmmu_idcache_destructor, 1137 NULL, NULL, static_arena, 0); 1138 1139 sfmmu_tsbinfo_cache = kmem_cache_create("sfmmu_tsbinfo_cache", 1140 sizeof (struct tsb_info), 0, NULL, NULL, NULL, NULL, NULL, 0); 1141 1142 /* 1143 * Since we only use the tsb8k cache to "borrow" pages for TSBs 1144 * from the heap when low on memory or when TSB_FORCEALLOC is 1145 * specified, don't use magazines to cache them--we want to return 1146 * them to the system as quickly as possible. 1147 */ 1148 sfmmu_tsb8k_cache = kmem_cache_create("sfmmu_tsb8k_cache", 1149 MMU_PAGESIZE, MMU_PAGESIZE, NULL, NULL, NULL, NULL, 1150 static_arena, KMC_NOMAGAZINE); 1151 1152 /* 1153 * Set tsb_alloc_hiwater to 1/tsb_alloc_hiwater_factor of physical 1154 * memory, which corresponds to the old static reserve for TSBs. 1155 * tsb_alloc_hiwater_factor defaults to 32. This caps the amount of 1156 * memory we'll allocate for TSB slabs; beyond this point TSB 1157 * allocations will be taken from the kernel heap (via 1158 * sfmmu_tsb8k_cache) and will be throttled as would any other kmem 1159 * consumer. 1160 */ 1161 if (tsb_alloc_hiwater_factor == 0) { 1162 tsb_alloc_hiwater_factor = TSB_ALLOC_HIWATER_FACTOR_DEFAULT; 1163 } 1164 SFMMU_SET_TSB_ALLOC_HIWATER(physmem); 1165 1166 for (sz = tsb_slab_ttesz; sz > 0; sz--) { 1167 if (!(disable_large_pages & (1 << sz))) 1168 break; 1169 } 1170 1171 if (sz < tsb_slab_ttesz) { 1172 tsb_slab_ttesz = sz; 1173 tsb_slab_shift = MMU_PAGESHIFT + (sz << 1) + sz; 1174 tsb_slab_size = 1 << tsb_slab_shift; 1175 tsb_slab_mask = (1 << (tsb_slab_shift - MMU_PAGESHIFT)) - 1; 1176 use_bigtsb_arena = 0; 1177 } else if (use_bigtsb_arena && 1178 (disable_large_pages & (1 << bigtsb_slab_ttesz))) { 1179 use_bigtsb_arena = 0; 1180 } 1181 1182 if (!use_bigtsb_arena) { 1183 bigtsb_slab_shift = tsb_slab_shift; 1184 } 1185 SFMMU_SET_TSB_MAX_GROWSIZE(physmem); 1186 1187 /* 1188 * On smaller memory systems, allocate TSB memory in smaller chunks 1189 * than the default 4M slab size. We also honor disable_large_pages 1190 * here. 1191 * 1192 * The trap handlers need to be patched with the final slab shift, 1193 * since they need to be able to construct the TSB pointer at runtime. 
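 *
 * For example (illustrative only): if tsb_max_growsize indicates that the
 * largest TSB this system will ever use fits in 512K, and 512K pages have
 * not been disabled, the code below drops the slab size from the default
 * 4M to 512K so that TSB slabs do not waste memory on small machines.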
1194 */ 1195 if ((tsb_max_growsize <= TSB_512K_SZCODE) && 1196 !(disable_large_pages & (1 << TTE512K))) { 1197 tsb_slab_ttesz = TTE512K; 1198 tsb_slab_shift = MMU_PAGESHIFT512K; 1199 tsb_slab_size = MMU_PAGESIZE512K; 1200 tsb_slab_mask = MMU_PAGEOFFSET512K >> MMU_PAGESHIFT; 1201 use_bigtsb_arena = 0; 1202 } 1203 1204 if (!use_bigtsb_arena) { 1205 bigtsb_slab_ttesz = tsb_slab_ttesz; 1206 bigtsb_slab_shift = tsb_slab_shift; 1207 bigtsb_slab_size = tsb_slab_size; 1208 bigtsb_slab_mask = tsb_slab_mask; 1209 } 1210 1211 1212 /* 1213 * Set up memory callback to update tsb_alloc_hiwater and 1214 * tsb_max_growsize. 1215 */ 1216 i = kphysm_setup_func_register(&sfmmu_update_vec, (void *) 0); 1217 ASSERT(i == 0); 1218 1219 /* 1220 * kmem_tsb_arena is the source from which large TSB slabs are 1221 * drawn. The quantum of this arena corresponds to the largest 1222 * TSB size we can dynamically allocate for user processes. 1223 * Currently it must also be a supported page size since we 1224 * use exactly one translation entry to map each slab page. 1225 * 1226 * The per-lgroup kmem_tsb_default_arena arenas are the arenas from 1227 * which most TSBs are allocated. Since most TSB allocations are 1228 * typically 8K we have a kmem cache we stack on top of each 1229 * kmem_tsb_default_arena to speed up those allocations. 1230 * 1231 * Note the two-level scheme of arenas is required only 1232 * because vmem_create doesn't allow us to specify alignment 1233 * requirements. If this ever changes the code could be 1234 * simplified to use only one level of arenas. 1235 * 1236 * If 256M page support exists on sun4v, 256MB kmem_bigtsb_arena 1237 * will be provided in addition to the 4M kmem_tsb_arena. 1238 */ 1239 if (use_bigtsb_arena) { 1240 kmem_bigtsb_arena = vmem_create("kmem_bigtsb", NULL, 0, 1241 bigtsb_slab_size, sfmmu_vmem_xalloc_aligned_wrapper, 1242 vmem_xfree, heap_arena, 0, VM_SLEEP); 1243 } 1244 1245 kmem_tsb_arena = vmem_create("kmem_tsb", NULL, 0, tsb_slab_size, 1246 sfmmu_vmem_xalloc_aligned_wrapper, 1247 vmem_xfree, heap_arena, 0, VM_SLEEP); 1248 1249 if (tsb_lgrp_affinity) { 1250 char s[50]; 1251 for (i = 0; i < NLGRPS_MAX; i++) { 1252 if (use_bigtsb_arena) { 1253 (void) sprintf(s, "kmem_bigtsb_lgrp%d", i); 1254 kmem_bigtsb_default_arena[i] = vmem_create(s, 1255 NULL, 0, 2 * tsb_slab_size, 1256 sfmmu_tsb_segkmem_alloc, 1257 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 1258 0, VM_SLEEP | VM_BESTFIT); 1259 } 1260 1261 (void) sprintf(s, "kmem_tsb_lgrp%d", i); 1262 kmem_tsb_default_arena[i] = vmem_create(s, 1263 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1264 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1265 VM_SLEEP | VM_BESTFIT); 1266 1267 (void) sprintf(s, "sfmmu_tsb_lgrp%d_cache", i); 1268 sfmmu_tsb_cache[i] = kmem_cache_create(s, 1269 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1270 kmem_tsb_default_arena[i], 0); 1271 } 1272 } else { 1273 if (use_bigtsb_arena) { 1274 kmem_bigtsb_default_arena[0] = 1275 vmem_create("kmem_bigtsb_default", NULL, 0, 1276 2 * tsb_slab_size, sfmmu_tsb_segkmem_alloc, 1277 sfmmu_tsb_segkmem_free, kmem_bigtsb_arena, 0, 1278 VM_SLEEP | VM_BESTFIT); 1279 } 1280 1281 kmem_tsb_default_arena[0] = vmem_create("kmem_tsb_default", 1282 NULL, 0, PAGESIZE, sfmmu_tsb_segkmem_alloc, 1283 sfmmu_tsb_segkmem_free, kmem_tsb_arena, 0, 1284 VM_SLEEP | VM_BESTFIT); 1285 sfmmu_tsb_cache[0] = kmem_cache_create("sfmmu_tsb_cache", 1286 PAGESIZE, PAGESIZE, NULL, NULL, NULL, NULL, 1287 kmem_tsb_default_arena[0], 0); 1288 } 1289 1290 sfmmu8_cache = kmem_cache_create("sfmmu8_cache", HME8BLK_SZ, 1291 
HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1292 sfmmu_hblkcache_destructor, 1293 sfmmu_hblkcache_reclaim, (void *)HME8BLK_SZ, 1294 hat_memload_arena, KMC_NOHASH); 1295 1296 hat_memload1_arena = vmem_create("hat_memload1", NULL, 0, PAGESIZE, 1297 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, 1298 VMC_DUMPSAFE | VM_SLEEP); 1299 1300 sfmmu1_cache = kmem_cache_create("sfmmu1_cache", HME1BLK_SZ, 1301 HMEBLK_ALIGN, sfmmu_hblkcache_constructor, 1302 sfmmu_hblkcache_destructor, 1303 NULL, (void *)HME1BLK_SZ, 1304 hat_memload1_arena, KMC_NOHASH); 1305 1306 pa_hment_cache = kmem_cache_create("pa_hment_cache", PAHME_SZ, 1307 0, NULL, NULL, NULL, NULL, static_arena, KMC_NOHASH); 1308 1309 ism_blk_cache = kmem_cache_create("ism_blk_cache", 1310 sizeof (ism_blk_t), ecache_alignsize, NULL, NULL, 1311 NULL, NULL, static_arena, KMC_NOHASH); 1312 1313 ism_ment_cache = kmem_cache_create("ism_ment_cache", 1314 sizeof (ism_ment_t), 0, NULL, NULL, 1315 NULL, NULL, NULL, 0); 1316 1317 /* 1318 * We grab the first hat for the kernel, 1319 */ 1320 AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER); 1321 kas.a_hat = hat_alloc(&kas); 1322 AS_LOCK_EXIT(&kas, &kas.a_lock); 1323 1324 /* 1325 * Initialize hblk_reserve. 1326 */ 1327 ((struct hme_blk *)hblk_reserve)->hblk_nextpa = 1328 va_to_pa((caddr_t)hblk_reserve); 1329 1330 #ifndef UTSB_PHYS 1331 /* 1332 * Reserve some kernel virtual address space for the locked TTEs 1333 * that allow us to probe the TSB from TL>0. 1334 */ 1335 utsb_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1336 0, 0, NULL, NULL, VM_SLEEP); 1337 utsb4m_vabase = vmem_xalloc(heap_arena, tsb_slab_size, tsb_slab_size, 1338 0, 0, NULL, NULL, VM_SLEEP); 1339 #endif 1340 1341 #ifdef VAC 1342 /* 1343 * The big page VAC handling code assumes VAC 1344 * will not be bigger than the smallest big 1345 * page- which is 64K. 1346 */ 1347 if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) { 1348 cmn_err(CE_PANIC, "VAC too big!"); 1349 } 1350 #endif 1351 1352 uhme_hash_pa = va_to_pa(uhme_hash); 1353 khme_hash_pa = va_to_pa(khme_hash); 1354 1355 /* 1356 * Initialize relocation locks. kpr_suspendlock is held 1357 * at PIL_MAX to prevent interrupts from pinning the holder 1358 * of a suspended TTE which may access it leading to a 1359 * deadlock condition. 1360 */ 1361 mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL); 1362 mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN, (void *)PIL_MAX); 1363 1364 /* 1365 * If Shared context support is disabled via /etc/system 1366 * set shctx_on to 0 here if it was set to 1 earlier in boot 1367 * sequence by cpu module initialization code. 1368 */ 1369 if (shctx_on && disable_shctx) { 1370 shctx_on = 0; 1371 } 1372 1373 if (shctx_on) { 1374 srd_buckets = kmem_zalloc(SFMMU_MAX_SRD_BUCKETS * 1375 sizeof (srd_buckets[0]), KM_SLEEP); 1376 for (i = 0; i < SFMMU_MAX_SRD_BUCKETS; i++) { 1377 mutex_init(&srd_buckets[i].srdb_lock, NULL, 1378 MUTEX_DEFAULT, NULL); 1379 } 1380 1381 srd_cache = kmem_cache_create("srd_cache", sizeof (sf_srd_t), 1382 0, sfmmu_srdcache_constructor, sfmmu_srdcache_destructor, 1383 NULL, NULL, NULL, 0); 1384 region_cache = kmem_cache_create("region_cache", 1385 sizeof (sf_region_t), 0, sfmmu_rgncache_constructor, 1386 sfmmu_rgncache_destructor, NULL, NULL, NULL, 0); 1387 scd_cache = kmem_cache_create("scd_cache", sizeof (sf_scd_t), 1388 0, sfmmu_scdcache_constructor, sfmmu_scdcache_destructor, 1389 NULL, NULL, NULL, 0); 1390 } 1391 1392 /* 1393 * Pre-allocate hrm_hashtab before enabling the collection of 1394 * refmod statistics. 
Allocating on the fly would mean us 1395 * running the risk of suffering recursive mutex enters or 1396 * deadlocks. 1397 */ 1398 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *), 1399 KM_SLEEP); 1400 1401 /* Allocate per-cpu pending freelist of hmeblks */ 1402 cpu_hme_pend = kmem_zalloc((NCPU * sizeof (cpu_hme_pend_t)) + 64, 1403 KM_SLEEP); 1404 cpu_hme_pend = (cpu_hme_pend_t *)P2ROUNDUP( 1405 (uintptr_t)cpu_hme_pend, 64); 1406 1407 for (i = 0; i < NCPU; i++) { 1408 mutex_init(&cpu_hme_pend[i].chp_mutex, NULL, MUTEX_DEFAULT, 1409 NULL); 1410 } 1411 1412 if (cpu_hme_pend_thresh == 0) { 1413 cpu_hme_pend_thresh = CPU_HME_PEND_THRESH; 1414 } 1415 } 1416 1417 /* 1418 * Initialize locking for the hat layer, called early during boot. 1419 */ 1420 static void 1421 hat_lock_init() 1422 { 1423 int i; 1424 1425 /* 1426 * initialize the array of mutexes protecting a page's mapping 1427 * list and p_nrm field. 1428 */ 1429 for (i = 0; i < MML_TABLE_SIZE; i++) 1430 mutex_init(&mml_table[i].pad_mutex, NULL, MUTEX_DEFAULT, NULL); 1431 1432 if (kpm_enable) { 1433 for (i = 0; i < kpmp_table_sz; i++) { 1434 mutex_init(&kpmp_table[i].khl_mutex, NULL, 1435 MUTEX_DEFAULT, NULL); 1436 } 1437 } 1438 1439 /* 1440 * Initialize array of mutex locks that protects sfmmu fields and 1441 * TSB lists. 1442 */ 1443 for (i = 0; i < SFMMU_NUM_LOCK; i++) 1444 mutex_init(HATLOCK_MUTEXP(&hat_lock[i]), NULL, MUTEX_DEFAULT, 1445 NULL); 1446 } 1447 1448 #define SFMMU_KERNEL_MAXVA \ 1449 (kmem64_base ? (uintptr_t)kmem64_end : (SYSLIMIT)) 1450 1451 /* 1452 * Allocate a hat structure. 1453 * Called when an address space first uses a hat. 1454 */ 1455 struct hat * 1456 hat_alloc(struct as *as) 1457 { 1458 sfmmu_t *sfmmup; 1459 int i; 1460 uint64_t cnum; 1461 extern uint_t get_color_start(struct as *); 1462 1463 ASSERT(AS_WRITE_HELD(as, &as->a_lock)); 1464 sfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 1465 sfmmup->sfmmu_as = as; 1466 sfmmup->sfmmu_flags = 0; 1467 sfmmup->sfmmu_tteflags = 0; 1468 sfmmup->sfmmu_rtteflags = 0; 1469 LOCK_INIT_CLEAR(&sfmmup->sfmmu_ctx_lock); 1470 1471 if (as == &kas) { 1472 ksfmmup = sfmmup; 1473 sfmmup->sfmmu_cext = 0; 1474 cnum = KCONTEXT; 1475 1476 sfmmup->sfmmu_clrstart = 0; 1477 sfmmup->sfmmu_tsb = NULL; 1478 /* 1479 * hat_kern_setup() will call sfmmu_init_ktsbinfo() 1480 * to setup tsb_info for ksfmmup. 1481 */ 1482 } else { 1483 1484 /* 1485 * Just set to invalid ctx. When it faults, it will 1486 * get a valid ctx. This would avoid the situation 1487 * where we get a ctx, but it gets stolen and then 1488 * we fault when we try to run and so have to get 1489 * another ctx. 
1490 */ 1491 sfmmup->sfmmu_cext = 0; 1492 cnum = INVALID_CONTEXT; 1493 1494 /* initialize original physical page coloring bin */ 1495 sfmmup->sfmmu_clrstart = get_color_start(as); 1496 #ifdef DEBUG 1497 if (tsb_random_size) { 1498 uint32_t randval = (uint32_t)gettick() >> 4; 1499 int size = randval % (tsb_max_growsize + 1); 1500 1501 /* chose a random tsb size for stress testing */ 1502 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, size, 1503 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1504 } else 1505 #endif /* DEBUG */ 1506 (void) sfmmu_tsbinfo_alloc(&sfmmup->sfmmu_tsb, 1507 default_tsb_size, 1508 TSB8K|TSB64K|TSB512K, 0, sfmmup); 1509 sfmmup->sfmmu_flags = HAT_SWAPPED | HAT_ALLCTX_INVALID; 1510 ASSERT(sfmmup->sfmmu_tsb != NULL); 1511 } 1512 1513 ASSERT(max_mmu_ctxdoms > 0); 1514 for (i = 0; i < max_mmu_ctxdoms; i++) { 1515 sfmmup->sfmmu_ctxs[i].cnum = cnum; 1516 sfmmup->sfmmu_ctxs[i].gnum = 0; 1517 } 1518 1519 for (i = 0; i < max_mmu_page_sizes; i++) { 1520 sfmmup->sfmmu_ttecnt[i] = 0; 1521 sfmmup->sfmmu_scdrttecnt[i] = 0; 1522 sfmmup->sfmmu_ismttecnt[i] = 0; 1523 sfmmup->sfmmu_scdismttecnt[i] = 0; 1524 sfmmup->sfmmu_pgsz[i] = TTE8K; 1525 } 1526 sfmmup->sfmmu_tsb0_4minflcnt = 0; 1527 sfmmup->sfmmu_iblk = NULL; 1528 sfmmup->sfmmu_ismhat = 0; 1529 sfmmup->sfmmu_scdhat = 0; 1530 sfmmup->sfmmu_ismblkpa = (uint64_t)-1; 1531 if (sfmmup == ksfmmup) { 1532 CPUSET_ALL(sfmmup->sfmmu_cpusran); 1533 } else { 1534 CPUSET_ZERO(sfmmup->sfmmu_cpusran); 1535 } 1536 sfmmup->sfmmu_free = 0; 1537 sfmmup->sfmmu_rmstat = 0; 1538 sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart; 1539 cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL); 1540 sfmmup->sfmmu_srdp = NULL; 1541 SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map); 1542 bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 1543 sfmmup->sfmmu_scdp = NULL; 1544 sfmmup->sfmmu_scd_link.next = NULL; 1545 sfmmup->sfmmu_scd_link.prev = NULL; 1546 return (sfmmup); 1547 } 1548 1549 /* 1550 * Create per-MMU context domain kstats for a given MMU ctx. 1551 */ 1552 static void 1553 sfmmu_mmu_kstat_create(mmu_ctx_t *mmu_ctxp) 1554 { 1555 mmu_ctx_stat_t stat; 1556 kstat_t *mmu_kstat; 1557 1558 ASSERT(MUTEX_HELD(&cpu_lock)); 1559 ASSERT(mmu_ctxp->mmu_kstat == NULL); 1560 1561 mmu_kstat = kstat_create("unix", mmu_ctxp->mmu_idx, "mmu_ctx", 1562 "hat", KSTAT_TYPE_NAMED, MMU_CTX_NUM_STATS, KSTAT_FLAG_VIRTUAL); 1563 1564 if (mmu_kstat == NULL) { 1565 cmn_err(CE_WARN, "kstat_create for MMU %d failed", 1566 mmu_ctxp->mmu_idx); 1567 } else { 1568 mmu_kstat->ks_data = mmu_ctxp->mmu_kstat_data; 1569 for (stat = 0; stat < MMU_CTX_NUM_STATS; stat++) 1570 kstat_named_init(&mmu_ctxp->mmu_kstat_data[stat], 1571 mmu_ctx_kstat_names[stat], KSTAT_DATA_INT64); 1572 mmu_ctxp->mmu_kstat = mmu_kstat; 1573 kstat_install(mmu_kstat); 1574 } 1575 } 1576 1577 /* 1578 * plat_cpuid_to_mmu_ctx_info() is a platform interface that returns MMU 1579 * context domain information for a given CPU. If a platform does not 1580 * specify that interface, then the function below is used instead to return 1581 * default information. The defaults are as follows: 1582 * 1583 * - The number of MMU context IDs supported on any CPU in the 1584 * system is 8K. 1585 * - There is one MMU context domain per CPU. 
 */
/*ARGSUSED*/
static void
sfmmu_cpuid_to_mmu_ctx_info(processorid_t cpuid, mmu_ctx_info_t *infop)
{
	infop->mmu_nctxs = nctxs;
	infop->mmu_idx = cpu[cpuid]->cpu_seqid;
}

/*
 * Called during CPU initialization to set the MMU context-related information
 * for a CPU.
 *
 * cpu_lock serializes accesses to mmu_ctxs and mmu_saved_gnum.
 */
void
sfmmu_cpu_init(cpu_t *cp)
{
	mmu_ctx_info_t	info;
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (&plat_cpuid_to_mmu_ctx_info == NULL)
		sfmmu_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);
	else
		plat_cpuid_to_mmu_ctx_info(cp->cpu_id, &info);

	ASSERT(info.mmu_idx < max_mmu_ctxdoms);

	if ((mmu_ctxp = mmu_ctxs_tbl[info.mmu_idx]) == NULL) {
		/* Each mmu_ctx is cacheline aligned. */
		mmu_ctxp = kmem_cache_alloc(mmuctxdom_cache, KM_SLEEP);
		bzero(mmu_ctxp, sizeof (mmu_ctx_t));

		mutex_init(&mmu_ctxp->mmu_lock, NULL, MUTEX_SPIN,
		    (void *)ipltospl(DISP_LEVEL));
		mmu_ctxp->mmu_idx = info.mmu_idx;
		mmu_ctxp->mmu_nctxs = info.mmu_nctxs;
		/*
		 * Globally for lifetime of a system,
		 * gnum must always increase.
		 * mmu_saved_gnum is protected by the cpu_lock.
		 */
		mmu_ctxp->mmu_gnum = mmu_saved_gnum + 1;
		mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS;

		sfmmu_mmu_kstat_create(mmu_ctxp);

		mmu_ctxs_tbl[info.mmu_idx] = mmu_ctxp;
	} else {
		ASSERT(mmu_ctxp->mmu_idx == info.mmu_idx);
		ASSERT(mmu_ctxp->mmu_nctxs <= info.mmu_nctxs);
	}

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);


	mmu_ctxp->mmu_ncpus++;
	CPUSET_ADD(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	CPU_MMU_IDX(cp) = info.mmu_idx;
	CPU_MMU_CTXP(cp) = mmu_ctxp;

	mutex_exit(&mmu_ctxp->mmu_lock);
}

static void
sfmmu_ctxdom_free(mmu_ctx_t *mmu_ctxp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(!MUTEX_HELD(&mmu_ctxp->mmu_lock));

	mutex_destroy(&mmu_ctxp->mmu_lock);

	if (mmu_ctxp->mmu_kstat)
		kstat_delete(mmu_ctxp->mmu_kstat);

	/* mmu_saved_gnum is protected by the cpu_lock. */
	if (mmu_saved_gnum < mmu_ctxp->mmu_gnum)
		mmu_saved_gnum = mmu_ctxp->mmu_gnum;

	kmem_cache_free(mmuctxdom_cache, mmu_ctxp);
}

/*
 * Called to perform MMU context-related cleanup for a CPU.
 */
void
sfmmu_cpu_cleanup(cpu_t *cp)
{
	mmu_ctx_t	*mmu_ctxp;

	ASSERT(MUTEX_HELD(&cpu_lock));

	mmu_ctxp = CPU_MMU_CTXP(cp);
	ASSERT(mmu_ctxp != NULL);

	/*
	 * The mmu_lock is acquired here to prevent races with
	 * the wrap-around code.
	 */
	mutex_enter(&mmu_ctxp->mmu_lock);

	CPU_MMU_CTXP(cp) = NULL;

	CPUSET_DEL(mmu_ctxp->mmu_cpuset, cp->cpu_id);
	if (--mmu_ctxp->mmu_ncpus == 0) {
		mmu_ctxs_tbl[mmu_ctxp->mmu_idx] = NULL;
		mutex_exit(&mmu_ctxp->mmu_lock);
		sfmmu_ctxdom_free(mmu_ctxp);
		return;
	}

	mutex_exit(&mmu_ctxp->mmu_lock);
}

uint_t
sfmmu_ctxdom_nctxs(int idx)
{
	return (mmu_ctxs_tbl[idx]->mmu_nctxs);
}

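/*
 * Lifecycle sketch for the routines above (illustrative only): on a
 * platform that does not provide plat_cpuid_to_mmu_ctx_info(), each CPU
 * ends up in its own context domain with the default of 8K context IDs:
 *
 *	sfmmu_cpu_init(cp);	- domain allocated on first use;
 *				  mmu_gnum = mmu_saved_gnum + 1 and
 *				  mmu_cnum starts at NUM_LOCKED_CTXS
 *	...
 *	sfmmu_cpu_cleanup(cp);	- CPU leaves the domain; when mmu_ncpus
 *				  drops to 0 the domain is freed and
 *				  mmu_saved_gnum is advanced
 */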
1717 * 1718 * sfmmu_ctxdoms_lock(void) locks all context domains and prevents new contexts 1719 * from being allocated. It acquires all hat_locks, which blocks most access to 1720 * context data, except for a few cases that are handled separately or are 1721 * harmless. It wraps each domain to increment gnum and invalidate on-CPU 1722 * contexts, and forces cnum to its max. As a result of this call all user 1723 * threads that are running on CPUs trap and try to perform wrap around but 1724 * can't because hat_locks are taken. Threads that were not on CPUs but started 1725 * by the scheduler go to sfmmu_alloc_ctx() to acquire a context without checking 1726 * hat_lock, but fail, because cnum == nctxs, and therefore also trap and block 1727 * on hat_lock trying to wrap. sfmmu_ctxdoms_lock() must be called before CPUs 1728 * are paused, else it could deadlock acquiring locks held by paused CPUs. 1729 * 1730 * sfmmu_ctxdoms_remove() removes the context domain from every CPU and records 1731 * the CPUs that had one. It must be called after CPUs have been paused. This 1732 * ensures that no threads are in sfmmu_alloc_ctx() accessing domain data, 1733 * because pause_cpus sends a mondo interrupt to every CPU, and sfmmu_alloc_ctx 1734 * runs with interrupts disabled. When CPUs are later resumed, they may enter 1735 * sfmmu_alloc_ctx, but it will check for CPU_MMU_CTXP == NULL and immediately 1736 * return failure. Or, they will be blocked trying to acquire hat_lock. Thus 1737 * after sfmmu_ctxdoms_remove returns, we are guaranteed that no one is 1738 * accessing the old context domains. 1739 * 1740 * sfmmu_ctxdoms_update(void) frees space used by old context domains and 1741 * allocates new context domains based on hardware layout. It initializes 1742 * every CPU that had a context domain before migration to have one again. 1743 * sfmmu_ctxdoms_update must be called after CPUs are resumed, else it 1744 * could deadlock acquiring locks held by paused CPUs. 1745 * 1746 * sfmmu_ctxdoms_unlock(void) releases all hat_locks, after which user threads 1747 * acquire new context ids and continue execution. 1748 * 1749 * Therefore these functions should be called in the following order: 1750 * suspend_routine() 1751 * sfmmu_ctxdoms_lock() 1752 * pause_cpus() 1753 * suspend() 1754 * if (suspend failed) 1755 * sfmmu_ctxdoms_unlock() 1756 * ... 1757 * sfmmu_ctxdoms_remove() 1758 * resume_cpus() 1759 * sfmmu_ctxdoms_update() 1760 * sfmmu_ctxdoms_unlock() 1761 */ 1762 static cpuset_t sfmmu_ctxdoms_pset; 1763 1764 void 1765 sfmmu_ctxdoms_remove() 1766 { 1767 processorid_t id; 1768 cpu_t *cp; 1769 1770 /* 1771 * Record the CPUs that have domains in sfmmu_ctxdoms_pset, so they can 1772 * be restored post-migration. A CPU may be powered off and not have a 1773 * domain, for example. 1774 */ 1775 CPUSET_ZERO(sfmmu_ctxdoms_pset); 1776 1777 for (id = 0; id < NCPU; id++) { 1778 if ((cp = cpu[id]) != NULL && CPU_MMU_CTXP(cp) != NULL) { 1779 CPUSET_ADD(sfmmu_ctxdoms_pset, id); 1780 CPU_MMU_CTXP(cp) = NULL; 1781 } 1782 } 1783 } 1784 1785 void 1786 sfmmu_ctxdoms_lock(void) 1787 { 1788 int idx; 1789 mmu_ctx_t *mmu_ctxp; 1790 1791 sfmmu_hat_lock_all(); 1792 1793 /* 1794 * At this point, no thread can be in sfmmu_ctx_wrap_around, because 1795 * hat_lock is always taken before calling it. 1796 * 1797 * For each domain, set mmu_cnum to max so no more contexts can be 1798 * allocated, and wrap to flush on-CPU contexts and force threads to 1799 * acquire a new context when we later drop hat_lock after migration.
1800 * Setting mmu_cnum may race with sfmmu_alloc_ctx which also sets cnum, 1801 * but the latter uses CAS and will miscompare and not overwrite it. 1802 */ 1803 kpreempt_disable(); /* required by sfmmu_ctx_wrap_around */ 1804 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1805 if ((mmu_ctxp = mmu_ctxs_tbl[idx]) != NULL) { 1806 mutex_enter(&mmu_ctxp->mmu_lock); 1807 mmu_ctxp->mmu_cnum = mmu_ctxp->mmu_nctxs; 1808 /* make sure updated cnum visible */ 1809 membar_enter(); 1810 mutex_exit(&mmu_ctxp->mmu_lock); 1811 sfmmu_ctx_wrap_around(mmu_ctxp, B_FALSE); 1812 } 1813 } 1814 kpreempt_enable(); 1815 } 1816 1817 void 1818 sfmmu_ctxdoms_unlock(void) 1819 { 1820 sfmmu_hat_unlock_all(); 1821 } 1822 1823 void 1824 sfmmu_ctxdoms_update(void) 1825 { 1826 processorid_t id; 1827 cpu_t *cp; 1828 uint_t idx; 1829 mmu_ctx_t *mmu_ctxp; 1830 1831 /* 1832 * Free all context domains. As side effect, this increases 1833 * mmu_saved_gnum to the maximum gnum over all domains, which is used to 1834 * init gnum in the new domains, which therefore will be larger than the 1835 * sfmmu gnum for any process, guaranteeing that every process will see 1836 * a new generation and allocate a new context regardless of what new 1837 * domain it runs in. 1838 */ 1839 mutex_enter(&cpu_lock); 1840 1841 for (idx = 0; idx < max_mmu_ctxdoms; idx++) { 1842 if (mmu_ctxs_tbl[idx] != NULL) { 1843 mmu_ctxp = mmu_ctxs_tbl[idx]; 1844 mmu_ctxs_tbl[idx] = NULL; 1845 sfmmu_ctxdom_free(mmu_ctxp); 1846 } 1847 } 1848 1849 for (id = 0; id < NCPU; id++) { 1850 if (CPU_IN_SET(sfmmu_ctxdoms_pset, id) && 1851 (cp = cpu[id]) != NULL) 1852 sfmmu_cpu_init(cp); 1853 } 1854 mutex_exit(&cpu_lock); 1855 } 1856 #endif 1857 1858 /* 1859 * Hat_setup, makes an address space context the current active one. 1860 * In sfmmu this translates to setting the secondary context with the 1861 * corresponding context. 1862 */ 1863 void 1864 hat_setup(struct hat *sfmmup, int allocflag) 1865 { 1866 hatlock_t *hatlockp; 1867 1868 /* Init needs some special treatment. */ 1869 if (allocflag == HAT_INIT) { 1870 /* 1871 * Make sure that we have 1872 * 1. a TSB 1873 * 2. a valid ctx that doesn't get stolen after this point. 1874 */ 1875 hatlockp = sfmmu_hat_enter(sfmmup); 1876 1877 /* 1878 * Swap in the TSB. hat_init() allocates tsbinfos without 1879 * TSBs, but we need one for init, since the kernel does some 1880 * special things to set up its stack and needs the TSB to 1881 * resolve page faults. 1882 */ 1883 sfmmu_tsb_swapin(sfmmup, hatlockp); 1884 1885 sfmmu_get_ctx(sfmmup); 1886 1887 sfmmu_hat_exit(hatlockp); 1888 } else { 1889 ASSERT(allocflag == HAT_ALLOC); 1890 1891 hatlockp = sfmmu_hat_enter(sfmmup); 1892 kpreempt_disable(); 1893 1894 CPUSET_ADD(sfmmup->sfmmu_cpusran, CPU->cpu_id); 1895 /* 1896 * sfmmu_setctx_sec takes <pgsz|cnum> as a parameter, 1897 * pagesize bits don't matter in this case since we are passing 1898 * INVALID_CONTEXT to it. 1899 * Compatibility Note: hw takes care of MMU_SCONTEXT1 1900 */ 1901 sfmmu_setctx_sec(INVALID_CONTEXT); 1902 sfmmu_clear_utsbinfo(); 1903 1904 kpreempt_enable(); 1905 sfmmu_hat_exit(hatlockp); 1906 } 1907 } 1908 1909 /* 1910 * Free all the translation resources for the specified address space. 1911 * Called from as_free when an address space is being destroyed. 
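 *
 * hat_free_start() itself only marks the hat as being freed and detaches it
 * from any SCD it has joined; the TSBs, the region links and the sfmmu
 * structure itself are released later, in hat_free_end().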
1912 */ 1913 void 1914 hat_free_start(struct hat *sfmmup) 1915 { 1916 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 1917 ASSERT(sfmmup != ksfmmup); 1918 1919 sfmmup->sfmmu_free = 1; 1920 if (sfmmup->sfmmu_scdp != NULL) { 1921 sfmmu_leave_scd(sfmmup, 0); 1922 } 1923 1924 ASSERT(sfmmup->sfmmu_scdp == NULL); 1925 } 1926 1927 void 1928 hat_free_end(struct hat *sfmmup) 1929 { 1930 int i; 1931 1932 ASSERT(sfmmup->sfmmu_free == 1); 1933 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 1934 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 1935 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 1936 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 1937 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 1938 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 1939 1940 if (sfmmup->sfmmu_rmstat) { 1941 hat_freestat(sfmmup->sfmmu_as, NULL); 1942 } 1943 1944 while (sfmmup->sfmmu_tsb != NULL) { 1945 struct tsb_info *next = sfmmup->sfmmu_tsb->tsb_next; 1946 sfmmu_tsbinfo_free(sfmmup->sfmmu_tsb); 1947 sfmmup->sfmmu_tsb = next; 1948 } 1949 1950 if (sfmmup->sfmmu_srdp != NULL) { 1951 sfmmu_leave_srd(sfmmup); 1952 ASSERT(sfmmup->sfmmu_srdp == NULL); 1953 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1954 if (sfmmup->sfmmu_hmeregion_links[i] != NULL) { 1955 kmem_free(sfmmup->sfmmu_hmeregion_links[i], 1956 SFMMU_L2_HMERLINKS_SIZE); 1957 sfmmup->sfmmu_hmeregion_links[i] = NULL; 1958 } 1959 } 1960 } 1961 sfmmu_free_sfmmu(sfmmup); 1962 1963 #ifdef DEBUG 1964 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 1965 ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL); 1966 } 1967 #endif 1968 1969 kmem_cache_free(sfmmuid_cache, sfmmup); 1970 } 1971 1972 /* 1973 * Set up any translation structures, for the specified address space, 1974 * that are needed or preferred when the process is being swapped in. 1975 */ 1976 /* ARGSUSED */ 1977 void 1978 hat_swapin(struct hat *hat) 1979 { 1980 } 1981 1982 /* 1983 * Free all of the translation resources, for the specified address space, 1984 * that can be freed while the process is swapped out. Called from as_swapout. 1985 * Also, free up the ctx that this process was using. 1986 */ 1987 void 1988 hat_swapout(struct hat *sfmmup) 1989 { 1990 struct hmehash_bucket *hmebp; 1991 struct hme_blk *hmeblkp; 1992 struct hme_blk *pr_hblk = NULL; 1993 struct hme_blk *nx_hblk; 1994 int i; 1995 struct hme_blk *list = NULL; 1996 hatlock_t *hatlockp; 1997 struct tsb_info *tsbinfop; 1998 struct free_tsb { 1999 struct free_tsb *next; 2000 struct tsb_info *tsbinfop; 2001 }; /* free list of TSBs */ 2002 struct free_tsb *freelist, *last, *next; 2003 2004 SFMMU_STAT(sf_swapout); 2005 2006 /* 2007 * There is no way to go from an as to all its translations in sfmmu. 2008 * Here is one of the times when we take the big hit and traverse 2009 * the hash looking for hme_blks to free up. Not only do we free up 2010 * this as hme_blks but all those that are free. We are obviously 2011 * swapping because we need memory so let's free up as much 2012 * as we can. 2013 * 2014 * Note that we don't flush TLB/TSB here -- it's not necessary 2015 * because: 2016 * 1) we free the ctx we're using and throw away the TSB(s); 2017 * 2) processes aren't runnable while being swapped out. 
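 *
 * The loop below therefore walks every bucket of uhme_hash, unloading this
 * hat's unlocked, non-shadow hmeblks and pruning any hmeblk (ours or not)
 * that has become completely empty.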
2018 */ 2019 ASSERT(sfmmup != KHATID); 2020 for (i = 0; i <= UHMEHASH_SZ; i++) { 2021 hmebp = &uhme_hash[i]; 2022 SFMMU_HASH_LOCK(hmebp); 2023 hmeblkp = hmebp->hmeblkp; 2024 pr_hblk = NULL; 2025 while (hmeblkp) { 2026 2027 if ((hmeblkp->hblk_tag.htag_id == sfmmup) && 2028 !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) { 2029 ASSERT(!hmeblkp->hblk_shared); 2030 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 2031 (caddr_t)get_hblk_base(hmeblkp), 2032 get_hblk_endaddr(hmeblkp), 2033 NULL, HAT_UNLOAD); 2034 } 2035 nx_hblk = hmeblkp->hblk_next; 2036 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 2037 ASSERT(!hmeblkp->hblk_lckcnt); 2038 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2039 &list, 0); 2040 } else { 2041 pr_hblk = hmeblkp; 2042 } 2043 hmeblkp = nx_hblk; 2044 } 2045 SFMMU_HASH_UNLOCK(hmebp); 2046 } 2047 2048 sfmmu_hblks_list_purge(&list, 0); 2049 2050 /* 2051 * Now free up the ctx so that others can reuse it. 2052 */ 2053 hatlockp = sfmmu_hat_enter(sfmmup); 2054 2055 sfmmu_invalidate_ctx(sfmmup); 2056 2057 /* 2058 * Free TSBs, but not tsbinfos, and set SWAPPED flag. 2059 * If TSBs were never swapped in, just return. 2060 * This implies that we don't support partial swapping 2061 * of TSBs -- either all are swapped out, or none are. 2062 * 2063 * We must hold the HAT lock here to prevent racing with another 2064 * thread trying to unmap TTEs from the TSB or running the post- 2065 * relocator after relocating the TSB's memory. Unfortunately, we 2066 * can't free memory while holding the HAT lock or we could 2067 * deadlock, so we build a list of TSBs to be freed after marking 2068 * the tsbinfos as swapped out and free them after dropping the 2069 * lock. 2070 */ 2071 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 2072 sfmmu_hat_exit(hatlockp); 2073 return; 2074 } 2075 2076 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED); 2077 last = freelist = NULL; 2078 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 2079 tsbinfop = tsbinfop->tsb_next) { 2080 ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0); 2081 2082 /* 2083 * Cast the TSB into a struct free_tsb and put it on the free 2084 * list. 2085 */ 2086 if (freelist == NULL) { 2087 last = freelist = (struct free_tsb *)tsbinfop->tsb_va; 2088 } else { 2089 last->next = (struct free_tsb *)tsbinfop->tsb_va; 2090 last = last->next; 2091 } 2092 last->next = NULL; 2093 last->tsbinfop = tsbinfop; 2094 tsbinfop->tsb_flags |= TSB_SWAPPED; 2095 /* 2096 * Zero out the TTE to clear the valid bit. 2097 * Note we can't use a value like 0xbad because we want to 2098 * ensure diagnostic bits are NEVER set on TTEs that might 2099 * be loaded. The intent is to catch any invalid access 2100 * to the swapped TSB, such as a thread running with a valid 2101 * context without first calling sfmmu_tsb_swapin() to 2102 * allocate TSB memory. 2103 */ 2104 tsbinfop->tsb_tte.ll = 0; 2105 } 2106 2107 /* Now we can drop the lock and free the TSB memory. 
*/ 2108 sfmmu_hat_exit(hatlockp); 2109 for (; freelist != NULL; freelist = next) { 2110 next = freelist->next; 2111 sfmmu_tsb_free(freelist->tsbinfop); 2112 } 2113 } 2114 2115 /* 2116 * Duplicate the translations of an as into another newas 2117 */ 2118 /* ARGSUSED */ 2119 int 2120 hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len, 2121 uint_t flag) 2122 { 2123 sf_srd_t *srdp; 2124 sf_scd_t *scdp; 2125 int i; 2126 extern uint_t get_color_start(struct as *); 2127 2128 ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) || 2129 (flag == HAT_DUP_SRD)); 2130 ASSERT(hat != ksfmmup); 2131 ASSERT(newhat != ksfmmup); 2132 ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp); 2133 2134 if (flag == HAT_DUP_COW) { 2135 panic("hat_dup: HAT_DUP_COW not supported"); 2136 } 2137 2138 if (flag == HAT_DUP_SRD && ((srdp = hat->sfmmu_srdp) != NULL)) { 2139 ASSERT(srdp->srd_evp != NULL); 2140 VN_HOLD(srdp->srd_evp); 2141 ASSERT(srdp->srd_refcnt > 0); 2142 newhat->sfmmu_srdp = srdp; 2143 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt); 2144 } 2145 2146 /* 2147 * HAT_DUP_ALL flag is used after as duplication is done. 2148 */ 2149 if (flag == HAT_DUP_ALL && ((srdp = newhat->sfmmu_srdp) != NULL)) { 2150 ASSERT(newhat->sfmmu_srdp->srd_refcnt >= 2); 2151 newhat->sfmmu_rtteflags = hat->sfmmu_rtteflags; 2152 if (hat->sfmmu_flags & HAT_4MTEXT_FLAG) { 2153 newhat->sfmmu_flags |= HAT_4MTEXT_FLAG; 2154 } 2155 2156 /* check if need to join scd */ 2157 if ((scdp = hat->sfmmu_scdp) != NULL && 2158 newhat->sfmmu_scdp != scdp) { 2159 int ret; 2160 SF_RGNMAP_IS_SUBSET(&newhat->sfmmu_region_map, 2161 &scdp->scd_region_map, ret); 2162 ASSERT(ret); 2163 sfmmu_join_scd(scdp, newhat); 2164 ASSERT(newhat->sfmmu_scdp == scdp && 2165 scdp->scd_refcnt >= 2); 2166 for (i = 0; i < max_mmu_page_sizes; i++) { 2167 newhat->sfmmu_ismttecnt[i] = 2168 hat->sfmmu_ismttecnt[i]; 2169 newhat->sfmmu_scdismttecnt[i] = 2170 hat->sfmmu_scdismttecnt[i]; 2171 } 2172 } 2173 2174 sfmmu_check_page_sizes(newhat, 1); 2175 } 2176 2177 if (flag == HAT_DUP_ALL && consistent_coloring == 0 && 2178 update_proc_pgcolorbase_after_fork != 0) { 2179 hat->sfmmu_clrbin = get_color_start(hat->sfmmu_as); 2180 } 2181 return (0); 2182 } 2183 2184 void 2185 hat_memload(struct hat *hat, caddr_t addr, struct page *pp, 2186 uint_t attr, uint_t flags) 2187 { 2188 hat_do_memload(hat, addr, pp, attr, flags, 2189 SFMMU_INVALID_SHMERID); 2190 } 2191 2192 void 2193 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp, 2194 uint_t attr, uint_t flags, hat_region_cookie_t rcookie) 2195 { 2196 uint_t rid; 2197 if (rcookie == HAT_INVALID_REGION_COOKIE) { 2198 hat_do_memload(hat, addr, pp, attr, flags, 2199 SFMMU_INVALID_SHMERID); 2200 return; 2201 } 2202 rid = (uint_t)((uint64_t)rcookie); 2203 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2204 hat_do_memload(hat, addr, pp, attr, flags, rid); 2205 } 2206 2207 /* 2208 * Set up addr to map to page pp with protection prot. 2209 * As an optimization we also load the TSB with the 2210 * corresponding tte but it is no big deal if the tte gets kicked out. 
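 *
 * For example, a fault handler might establish a writable 8K mapping with a
 * call of the following form (illustrative only; addr, pp and the attribute
 * choice are hypothetical):
 *
 *	hat_memload(as->a_hat, addr, pp, PROT_READ | PROT_WRITE, HAT_LOAD);
 *
 * where pp is the locked page backing addr.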
2211 */ 2212 static void 2213 hat_do_memload(struct hat *hat, caddr_t addr, struct page *pp, 2214 uint_t attr, uint_t flags, uint_t rid) 2215 { 2216 tte_t tte; 2217 2218 2219 ASSERT(hat != NULL); 2220 ASSERT(PAGE_LOCKED(pp)); 2221 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2222 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2223 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2224 SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE); 2225 2226 if (PP_ISFREE(pp)) { 2227 panic("hat_memload: loading a mapping to free page %p", 2228 (void *)pp); 2229 } 2230 2231 ASSERT((hat == ksfmmup) || 2232 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2233 2234 if (flags & ~SFMMU_LOAD_ALLFLAG) 2235 cmn_err(CE_NOTE, "hat_memload: unsupported flags %d", 2236 flags & ~SFMMU_LOAD_ALLFLAG); 2237 2238 if (hat->sfmmu_rmstat) 2239 hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr); 2240 2241 #if defined(SF_ERRATA_57) 2242 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2243 (addr < errata57_limit) && (attr & PROT_EXEC) && 2244 !(flags & HAT_LOAD_SHARE)) { 2245 cmn_err(CE_WARN, "hat_memload: illegal attempt to make user " 2246 " page executable"); 2247 attr &= ~PROT_EXEC; 2248 } 2249 #endif 2250 2251 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2252 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, flags, rid); 2253 2254 /* 2255 * Check TSB and TLB page sizes. 2256 */ 2257 if ((flags & HAT_LOAD_SHARE) == 0) { 2258 sfmmu_check_page_sizes(hat, 1); 2259 } 2260 } 2261 2262 /* 2263 * hat_devload can be called to map real memory (e.g. 2264 * /dev/kmem) and even though hat_devload will determine the pfn is 2265 * for memory, it will be unable to get a shared lock on the 2266 * page (because someone else has it exclusively) and will 2267 * pass pp == NULL. If tteload doesn't get a non-NULL 2268 * page pointer it can't cache memory.
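 *
 * A driver typically maps device or kernel physical space with a call of
 * the following form (illustrative only; va and pfn are hypothetical):
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_NOSYNC, HAT_LOAD_LOCK);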
2269 */ 2270 void 2271 hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn, 2272 uint_t attr, int flags) 2273 { 2274 tte_t tte; 2275 struct page *pp = NULL; 2276 int use_lgpg = 0; 2277 2278 ASSERT(hat != NULL); 2279 2280 ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG)); 2281 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2282 ASSERT((hat == ksfmmup) || 2283 AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock)); 2284 if (len == 0) 2285 panic("hat_devload: zero len"); 2286 if (flags & ~SFMMU_LOAD_ALLFLAG) 2287 cmn_err(CE_NOTE, "hat_devload: unsupported flags %d", 2288 flags & ~SFMMU_LOAD_ALLFLAG); 2289 2290 #if defined(SF_ERRATA_57) 2291 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2292 (addr < errata57_limit) && (attr & PROT_EXEC) && 2293 !(flags & HAT_LOAD_SHARE)) { 2294 cmn_err(CE_WARN, "hat_devload: illegal attempt to make user " 2295 " page executable"); 2296 attr &= ~PROT_EXEC; 2297 } 2298 #endif 2299 2300 /* 2301 * If it's a memory page find its pp 2302 */ 2303 if (!(flags & HAT_LOAD_NOCONSIST) && pf_is_memory(pfn)) { 2304 pp = page_numtopp_nolock(pfn); 2305 if (pp == NULL) { 2306 flags |= HAT_LOAD_NOCONSIST; 2307 } else { 2308 if (PP_ISFREE(pp)) { 2309 panic("hat_memload: loading " 2310 "a mapping to free page %p", 2311 (void *)pp); 2312 } 2313 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) { 2314 panic("hat_memload: loading a mapping " 2315 "to unlocked relocatable page %p", 2316 (void *)pp); 2317 } 2318 ASSERT(len == MMU_PAGESIZE); 2319 } 2320 } 2321 2322 if (hat->sfmmu_rmstat) 2323 hat_resvstat(len, hat->sfmmu_as, addr); 2324 2325 if (flags & HAT_LOAD_NOCONSIST) { 2326 attr |= SFMMU_UNCACHEVTTE; 2327 use_lgpg = 1; 2328 } 2329 if (!pf_is_memory(pfn)) { 2330 attr |= SFMMU_UNCACHEPTTE | HAT_NOSYNC; 2331 use_lgpg = 1; 2332 switch (attr & HAT_ORDER_MASK) { 2333 case HAT_STRICTORDER: 2334 case HAT_UNORDERED_OK: 2335 /* 2336 * we set the side effect bit for all non 2337 * memory mappings unless merging is ok 2338 */ 2339 attr |= SFMMU_SIDEFFECT; 2340 break; 2341 case HAT_MERGING_OK: 2342 case HAT_LOADCACHING_OK: 2343 case HAT_STORECACHING_OK: 2344 break; 2345 default: 2346 panic("hat_devload: bad attr"); 2347 break; 2348 } 2349 } 2350 while (len) { 2351 if (!use_lgpg) { 2352 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2353 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2354 flags, SFMMU_INVALID_SHMERID); 2355 len -= MMU_PAGESIZE; 2356 addr += MMU_PAGESIZE; 2357 pfn++; 2358 continue; 2359 } 2360 /* 2361 * try to use large pages, check va/pa alignments 2362 * Note that 32M/256M page sizes are not (yet) supported. 
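 * The checks below fall through from 4M to 512K to 64K and finally to a
 * single 8K page; a candidate size is used only when the remaining length
 * is at least that size, the virtual and physical addresses are both
 * aligned to it, and that size is not disabled via disable_large_pages.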
2363 */ 2364 if ((len >= MMU_PAGESIZE4M) && 2365 !((uintptr_t)addr & MMU_PAGEOFFSET4M) && 2366 !(disable_large_pages & (1 << TTE4M)) && 2367 !(mmu_ptob(pfn) & MMU_PAGEOFFSET4M)) { 2368 sfmmu_memtte(&tte, pfn, attr, TTE4M); 2369 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2370 flags, SFMMU_INVALID_SHMERID); 2371 len -= MMU_PAGESIZE4M; 2372 addr += MMU_PAGESIZE4M; 2373 pfn += MMU_PAGESIZE4M / MMU_PAGESIZE; 2374 } else if ((len >= MMU_PAGESIZE512K) && 2375 !((uintptr_t)addr & MMU_PAGEOFFSET512K) && 2376 !(disable_large_pages & (1 << TTE512K)) && 2377 !(mmu_ptob(pfn) & MMU_PAGEOFFSET512K)) { 2378 sfmmu_memtte(&tte, pfn, attr, TTE512K); 2379 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2380 flags, SFMMU_INVALID_SHMERID); 2381 len -= MMU_PAGESIZE512K; 2382 addr += MMU_PAGESIZE512K; 2383 pfn += MMU_PAGESIZE512K / MMU_PAGESIZE; 2384 } else if ((len >= MMU_PAGESIZE64K) && 2385 !((uintptr_t)addr & MMU_PAGEOFFSET64K) && 2386 !(disable_large_pages & (1 << TTE64K)) && 2387 !(mmu_ptob(pfn) & MMU_PAGEOFFSET64K)) { 2388 sfmmu_memtte(&tte, pfn, attr, TTE64K); 2389 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2390 flags, SFMMU_INVALID_SHMERID); 2391 len -= MMU_PAGESIZE64K; 2392 addr += MMU_PAGESIZE64K; 2393 pfn += MMU_PAGESIZE64K / MMU_PAGESIZE; 2394 } else { 2395 sfmmu_memtte(&tte, pfn, attr, TTE8K); 2396 (void) sfmmu_tteload_array(hat, &tte, addr, &pp, 2397 flags, SFMMU_INVALID_SHMERID); 2398 len -= MMU_PAGESIZE; 2399 addr += MMU_PAGESIZE; 2400 pfn++; 2401 } 2402 } 2403 2404 /* 2405 * Check TSB and TLB page sizes. 2406 */ 2407 if ((flags & HAT_LOAD_SHARE) == 0) { 2408 sfmmu_check_page_sizes(hat, 1); 2409 } 2410 } 2411 2412 void 2413 hat_memload_array(struct hat *hat, caddr_t addr, size_t len, 2414 struct page **pps, uint_t attr, uint_t flags) 2415 { 2416 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2417 SFMMU_INVALID_SHMERID); 2418 } 2419 2420 void 2421 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len, 2422 struct page **pps, uint_t attr, uint_t flags, 2423 hat_region_cookie_t rcookie) 2424 { 2425 uint_t rid; 2426 if (rcookie == HAT_INVALID_REGION_COOKIE) { 2427 hat_do_memload_array(hat, addr, len, pps, attr, flags, 2428 SFMMU_INVALID_SHMERID); 2429 return; 2430 } 2431 rid = (uint_t)((uint64_t)rcookie); 2432 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 2433 hat_do_memload_array(hat, addr, len, pps, attr, flags, rid); 2434 } 2435 2436 /* 2437 * Map the largest extend possible out of the page array. The array may NOT 2438 * be in order. The largest possible mapping a page can have 2439 * is specified in the p_szc field. The p_szc field 2440 * cannot change as long as there any mappings (large or small) 2441 * to any of the pages that make up the large page. (ie. any 2442 * promotion/demotion of page size is not up to the hat but up to 2443 * the page free list manager). The array 2444 * should consist of properly aligned contigous pages that are 2445 * part of a big page for a large mapping to be created. 
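 *
 * For example, a properly aligned request backed by an entire 4M large page
 * can be satisfied with a single TTE4M load, while a request that fails the
 * size, alignment or contiguity checks falls back to
 * sfmmu_memload_batchsmall(), which loads 8K ttes one hmeblk (NHMENTS
 * entries) at a time.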
2446 */ 2447 static void 2448 hat_do_memload_array(struct hat *hat, caddr_t addr, size_t len, 2449 struct page **pps, uint_t attr, uint_t flags, uint_t rid) 2450 { 2451 int ttesz; 2452 size_t mapsz; 2453 pgcnt_t numpg, npgs; 2454 tte_t tte; 2455 page_t *pp; 2456 uint_t large_pages_disable; 2457 2458 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 2459 SFMMU_VALIDATE_HMERID(hat, rid, addr, len); 2460 2461 if (hat->sfmmu_rmstat) 2462 hat_resvstat(len, hat->sfmmu_as, addr); 2463 2464 #if defined(SF_ERRATA_57) 2465 if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) && 2466 (addr < errata57_limit) && (attr & PROT_EXEC) && 2467 !(flags & HAT_LOAD_SHARE)) { 2468 cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make " 2469 "user page executable"); 2470 attr &= ~PROT_EXEC; 2471 } 2472 #endif 2473 2474 /* Get number of pages */ 2475 npgs = len >> MMU_PAGESHIFT; 2476 2477 if (flags & HAT_LOAD_SHARE) { 2478 large_pages_disable = disable_ism_large_pages; 2479 } else { 2480 large_pages_disable = disable_large_pages; 2481 } 2482 2483 if (npgs < NHMENTS || large_pages_disable == LARGE_PAGES_OFF) { 2484 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2485 rid); 2486 return; 2487 } 2488 2489 while (npgs >= NHMENTS) { 2490 pp = *pps; 2491 for (ttesz = pp->p_szc; ttesz != TTE8K; ttesz--) { 2492 /* 2493 * Check if this page size is disabled. 2494 */ 2495 if (large_pages_disable & (1 << ttesz)) 2496 continue; 2497 2498 numpg = TTEPAGES(ttesz); 2499 mapsz = numpg << MMU_PAGESHIFT; 2500 if ((npgs >= numpg) && 2501 IS_P2ALIGNED(addr, mapsz) && 2502 IS_P2ALIGNED(pp->p_pagenum, numpg)) { 2503 /* 2504 * At this point we have enough pages and 2505 * we know the virtual address and the pfn 2506 * are properly aligned. We still need 2507 * to check for physical contiguity but since 2508 * it is very likely that this is the case 2509 * we will assume they are so and undo 2510 * the request if necessary. It would 2511 * be great if we could get a hint flag 2512 * like HAT_CONTIG which would tell us 2513 * the pages are contigous for sure. 2514 */ 2515 sfmmu_memtte(&tte, (*pps)->p_pagenum, 2516 attr, ttesz); 2517 if (!sfmmu_tteload_array(hat, &tte, addr, 2518 pps, flags, rid)) { 2519 break; 2520 } 2521 } 2522 } 2523 if (ttesz == TTE8K) { 2524 /* 2525 * We were not able to map array using a large page 2526 * batch a hmeblk or fraction at a time. 2527 */ 2528 numpg = ((uintptr_t)addr >> MMU_PAGESHIFT) 2529 & (NHMENTS-1); 2530 numpg = NHMENTS - numpg; 2531 ASSERT(numpg <= npgs); 2532 mapsz = numpg * MMU_PAGESIZE; 2533 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, 2534 numpg, rid); 2535 } 2536 addr += mapsz; 2537 npgs -= numpg; 2538 pps += numpg; 2539 } 2540 2541 if (npgs) { 2542 sfmmu_memload_batchsmall(hat, addr, pps, attr, flags, npgs, 2543 rid); 2544 } 2545 2546 /* 2547 * Check TSB and TLB page sizes. 2548 */ 2549 if ((flags & HAT_LOAD_SHARE) == 0) { 2550 sfmmu_check_page_sizes(hat, 1); 2551 } 2552 } 2553 2554 /* 2555 * Function tries to batch 8K pages into the same hme blk. 2556 */ 2557 static void 2558 sfmmu_memload_batchsmall(struct hat *hat, caddr_t vaddr, page_t **pps, 2559 uint_t attr, uint_t flags, pgcnt_t npgs, uint_t rid) 2560 { 2561 tte_t tte; 2562 page_t *pp; 2563 struct hmehash_bucket *hmebp; 2564 struct hme_blk *hmeblkp; 2565 int index; 2566 2567 while (npgs) { 2568 /* 2569 * Acquire the hash bucket. 2570 */ 2571 hmebp = sfmmu_tteload_acquire_hashbucket(hat, vaddr, TTE8K, 2572 rid); 2573 ASSERT(hmebp); 2574 2575 /* 2576 * Find the hment block. 
2577 */ 2578 hmeblkp = sfmmu_tteload_find_hmeblk(hat, hmebp, vaddr, 2579 TTE8K, flags, rid); 2580 ASSERT(hmeblkp); 2581 2582 do { 2583 /* 2584 * Make the tte. 2585 */ 2586 pp = *pps; 2587 sfmmu_memtte(&tte, pp->p_pagenum, attr, TTE8K); 2588 2589 /* 2590 * Add the translation. 2591 */ 2592 (void) sfmmu_tteload_addentry(hat, hmeblkp, &tte, 2593 vaddr, pps, flags, rid); 2594 2595 /* 2596 * Goto next page. 2597 */ 2598 pps++; 2599 npgs--; 2600 2601 /* 2602 * Goto next address. 2603 */ 2604 vaddr += MMU_PAGESIZE; 2605 2606 /* 2607 * Don't crossover into a different hmentblk. 2608 */ 2609 index = (int)(((uintptr_t)vaddr >> MMU_PAGESHIFT) & 2610 (NHMENTS-1)); 2611 2612 } while (index != 0 && npgs != 0); 2613 2614 /* 2615 * Release the hash bucket. 2616 */ 2617 2618 sfmmu_tteload_release_hashbucket(hmebp); 2619 } 2620 } 2621 2622 /* 2623 * Construct a tte for a page: 2624 * 2625 * tte_valid = 1 2626 * tte_size2 = size & TTE_SZ2_BITS (Panther and Olympus-C only) 2627 * tte_size = size 2628 * tte_nfo = attr & HAT_NOFAULT 2629 * tte_ie = attr & HAT_STRUCTURE_LE 2630 * tte_hmenum = hmenum 2631 * tte_pahi = pp->p_pagenum >> TTE_PASHIFT; 2632 * tte_palo = pp->p_pagenum & TTE_PALOMASK; 2633 * tte_ref = 1 (optimization) 2634 * tte_wr_perm = attr & PROT_WRITE; 2635 * tte_no_sync = attr & HAT_NOSYNC 2636 * tte_lock = attr & SFMMU_LOCKTTE 2637 * tte_cp = !(attr & SFMMU_UNCACHEPTTE) 2638 * tte_cv = !(attr & SFMMU_UNCACHEVTTE) 2639 * tte_e = attr & SFMMU_SIDEFFECT 2640 * tte_priv = !(attr & PROT_USER) 2641 * tte_hwwr = if nosync is set and it is writable we set the mod bit (opt) 2642 * tte_glb = 0 2643 */ 2644 void 2645 sfmmu_memtte(tte_t *ttep, pfn_t pfn, uint_t attr, int tte_sz) 2646 { 2647 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 2648 2649 ttep->tte_inthi = MAKE_TTE_INTHI(pfn, attr, tte_sz, 0 /* hmenum */); 2650 ttep->tte_intlo = MAKE_TTE_INTLO(pfn, attr, tte_sz, 0 /* hmenum */); 2651 2652 if (TTE_IS_NOSYNC(ttep)) { 2653 TTE_SET_REF(ttep); 2654 if (TTE_IS_WRITABLE(ttep)) { 2655 TTE_SET_MOD(ttep); 2656 } 2657 } 2658 if (TTE_IS_NFO(ttep) && TTE_IS_EXECUTABLE(ttep)) { 2659 panic("sfmmu_memtte: can't set both NFO and EXEC bits"); 2660 } 2661 } 2662 2663 /* 2664 * This function will add a translation to the hme_blk and allocate the 2665 * hme_blk if one does not exist. 2666 * If a page structure is specified then it will add the 2667 * corresponding hment to the mapping list. 2668 * It will also update the hmenum field for the tte. 2669 * 2670 * Currently this function is only used for kernel mappings. 2671 * So pass invalid region to sfmmu_tteload_array(). 2672 */ 2673 void 2674 sfmmu_tteload(struct hat *sfmmup, tte_t *ttep, caddr_t vaddr, page_t *pp, 2675 uint_t flags) 2676 { 2677 ASSERT(sfmmup == ksfmmup); 2678 (void) sfmmu_tteload_array(sfmmup, ttep, vaddr, &pp, flags, 2679 SFMMU_INVALID_SHMERID); 2680 } 2681 2682 /* 2683 * Load (ttep != NULL) or unload (ttep == NULL) one entry in the TSB. 2684 * Assumes that a particular page size may only be resident in one TSB. 2685 */ 2686 static void 2687 sfmmu_mod_tsb(sfmmu_t *sfmmup, caddr_t vaddr, tte_t *ttep, int ttesz) 2688 { 2689 struct tsb_info *tsbinfop = NULL; 2690 uint64_t tag; 2691 struct tsbe *tsbe_addr; 2692 uint64_t tsb_base; 2693 uint_t tsb_size; 2694 int vpshift = MMU_PAGESHIFT; 2695 int phys = 0; 2696 2697 if (sfmmup == ksfmmup) { /* No support for 32/256M ksfmmu pages */ 2698 phys = ktsb_phys; 2699 if (ttesz >= TTE4M) { 2700 #ifndef sun4v 2701 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2702 #endif 2703 tsb_base = (phys)? 
ktsb4m_pbase : (uint64_t)ktsb4m_base; 2704 tsb_size = ktsb4m_szcode; 2705 } else { 2706 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2707 tsb_size = ktsb_szcode; 2708 } 2709 } else { 2710 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2711 2712 /* 2713 * If there isn't a TSB for this page size, or the TSB is 2714 * swapped out, there is nothing to do. Note that the latter 2715 * case seems impossible but can occur if hat_pageunload() 2716 * is called on an ISM mapping while the process is swapped 2717 * out. 2718 */ 2719 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2720 return; 2721 2722 /* 2723 * If another thread is in the middle of relocating a TSB 2724 * we can't unload the entry so set a flag so that the 2725 * TSB will be flushed before it can be accessed by the 2726 * process. 2727 */ 2728 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2729 if (ttep == NULL) 2730 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2731 return; 2732 } 2733 #if defined(UTSB_PHYS) 2734 phys = 1; 2735 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2736 #else 2737 tsb_base = (uint64_t)tsbinfop->tsb_va; 2738 #endif 2739 tsb_size = tsbinfop->tsb_szc; 2740 } 2741 if (ttesz >= TTE4M) 2742 vpshift = MMU_PAGESHIFT4M; 2743 2744 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2745 tag = sfmmu_make_tsbtag(vaddr); 2746 2747 if (ttep == NULL) { 2748 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2749 } else { 2750 if (ttesz >= TTE4M) { 2751 SFMMU_STAT(sf_tsb_load4m); 2752 } else { 2753 SFMMU_STAT(sf_tsb_load8k); 2754 } 2755 2756 sfmmu_load_tsbe(tsbe_addr, tag, ttep, phys); 2757 } 2758 } 2759 2760 /* 2761 * Unmap all entries from [start, end) matching the given page size. 2762 * 2763 * This function is used primarily to unmap replicated 64K or 512K entries 2764 * from the TSB that are inserted using the base page size TSB pointer, but 2765 * it may also be called to unmap a range of addresses from the TSB. 2766 */ 2767 void 2768 sfmmu_unload_tsb_range(sfmmu_t *sfmmup, caddr_t start, caddr_t end, int ttesz) 2769 { 2770 struct tsb_info *tsbinfop; 2771 uint64_t tag; 2772 struct tsbe *tsbe_addr; 2773 caddr_t vaddr; 2774 uint64_t tsb_base; 2775 int vpshift, vpgsz; 2776 uint_t tsb_size; 2777 int phys = 0; 2778 2779 /* 2780 * Assumptions: 2781 * If ttesz == 8K, 64K or 512K, we walk through the range 8K 2782 * at a time shooting down any valid entries we encounter. 2783 * 2784 * If ttesz >= 4M we walk the range 4M at a time shooting 2785 * down any valid mappings we find. 2786 */ 2787 if (sfmmup == ksfmmup) { 2788 phys = ktsb_phys; 2789 if (ttesz >= TTE4M) { 2790 #ifndef sun4v 2791 ASSERT((ttesz != TTE32M) && (ttesz != TTE256M)); 2792 #endif 2793 tsb_base = (phys)? ktsb4m_pbase : (uint64_t)ktsb4m_base; 2794 tsb_size = ktsb4m_szcode; 2795 } else { 2796 tsb_base = (phys)? ktsb_pbase : (uint64_t)ktsb_base; 2797 tsb_size = ktsb_szcode; 2798 } 2799 } else { 2800 SFMMU_GET_TSBINFO(tsbinfop, sfmmup, ttesz); 2801 2802 /* 2803 * If there isn't a TSB for this page size, or the TSB is 2804 * swapped out, there is nothing to do. Note that the latter 2805 * case seems impossible but can occur if hat_pageunload() 2806 * is called on an ISM mapping while the process is swapped 2807 * out. 2808 */ 2809 if (tsbinfop == NULL || (tsbinfop->tsb_flags & TSB_SWAPPED)) 2810 return; 2811 2812 /* 2813 * If another thread is in the middle of relocating a TSB 2814 * we can't unload the entry so set a flag so that the 2815 * TSB will be flushed before it can be accessed by the 2816 * process. 
2817 */ 2818 if ((tsbinfop->tsb_flags & TSB_RELOC_FLAG) != 0) { 2819 tsbinfop->tsb_flags |= TSB_FLUSH_NEEDED; 2820 return; 2821 } 2822 #if defined(UTSB_PHYS) 2823 phys = 1; 2824 tsb_base = (uint64_t)tsbinfop->tsb_pa; 2825 #else 2826 tsb_base = (uint64_t)tsbinfop->tsb_va; 2827 #endif 2828 tsb_size = tsbinfop->tsb_szc; 2829 } 2830 if (ttesz >= TTE4M) { 2831 vpshift = MMU_PAGESHIFT4M; 2832 vpgsz = MMU_PAGESIZE4M; 2833 } else { 2834 vpshift = MMU_PAGESHIFT; 2835 vpgsz = MMU_PAGESIZE; 2836 } 2837 2838 for (vaddr = start; vaddr < end; vaddr += vpgsz) { 2839 tag = sfmmu_make_tsbtag(vaddr); 2840 tsbe_addr = sfmmu_get_tsbe(tsb_base, vaddr, vpshift, tsb_size); 2841 sfmmu_unload_tsbe(tsbe_addr, tag, phys); 2842 } 2843 } 2844 2845 /* 2846 * Select the optimum TSB size given the number of mappings 2847 * that need to be cached. 2848 */ 2849 static int 2850 sfmmu_select_tsb_szc(pgcnt_t pgcnt) 2851 { 2852 int szc = 0; 2853 2854 #ifdef DEBUG 2855 if (tsb_grow_stress) { 2856 uint32_t randval = (uint32_t)gettick() >> 4; 2857 return (randval % (tsb_max_growsize + 1)); 2858 } 2859 #endif /* DEBUG */ 2860 2861 while ((szc < tsb_max_growsize) && (pgcnt > SFMMU_RSS_TSBSIZE(szc))) 2862 szc++; 2863 return (szc); 2864 } 2865 2866 /* 2867 * This function will add a translation to the hme_blk and allocate the 2868 * hme_blk if one does not exist. 2869 * If a page structure is specified then it will add the 2870 * corresponding hment to the mapping list. 2871 * It will also update the hmenum field for the tte. 2872 * Furthermore, it attempts to create a large page translation 2873 * for <addr,hat> at page array pps. It assumes addr and first 2874 * pp is correctly aligned. It returns 0 if successful and 1 otherwise. 2875 */ 2876 static int 2877 sfmmu_tteload_array(sfmmu_t *sfmmup, tte_t *ttep, caddr_t vaddr, 2878 page_t **pps, uint_t flags, uint_t rid) 2879 { 2880 struct hmehash_bucket *hmebp; 2881 struct hme_blk *hmeblkp; 2882 int ret; 2883 uint_t size; 2884 2885 /* 2886 * Get mapping size. 2887 */ 2888 size = TTE_CSZ(ttep); 2889 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 2890 2891 /* 2892 * Acquire the hash bucket. 2893 */ 2894 hmebp = sfmmu_tteload_acquire_hashbucket(sfmmup, vaddr, size, rid); 2895 ASSERT(hmebp); 2896 2897 /* 2898 * Find the hment block. 2899 */ 2900 hmeblkp = sfmmu_tteload_find_hmeblk(sfmmup, hmebp, vaddr, size, flags, 2901 rid); 2902 ASSERT(hmeblkp); 2903 2904 /* 2905 * Add the translation. 2906 */ 2907 ret = sfmmu_tteload_addentry(sfmmup, hmeblkp, ttep, vaddr, pps, flags, 2908 rid); 2909 2910 /* 2911 * Release the hash bucket. 2912 */ 2913 sfmmu_tteload_release_hashbucket(hmebp); 2914 2915 return (ret); 2916 } 2917 2918 /* 2919 * Function locks and returns a pointer to the hash bucket for vaddr and size. 2920 */ 2921 static struct hmehash_bucket * 2922 sfmmu_tteload_acquire_hashbucket(sfmmu_t *sfmmup, caddr_t vaddr, int size, 2923 uint_t rid) 2924 { 2925 struct hmehash_bucket *hmebp; 2926 int hmeshift; 2927 void *htagid = sfmmutohtagid(sfmmup, rid); 2928 2929 ASSERT(htagid != NULL); 2930 2931 hmeshift = HME_HASH_SHIFT(size); 2932 2933 hmebp = HME_HASH_FUNCTION(htagid, vaddr, hmeshift); 2934 2935 SFMMU_HASH_LOCK(hmebp); 2936 2937 return (hmebp); 2938 } 2939 2940 /* 2941 * Function returns a pointer to an hmeblk in the hash bucket, hmebp. If the 2942 * hmeblk doesn't exists for the [sfmmup, vaddr & size] signature, a hmeblk is 2943 * allocated. 
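 * If the search finds hblk_reserve (the static fallback hblk) still in use
 * by another thread, the hash lock is dropped and we wait on
 * hblk_reserve_lock before retrying. A lingering hblk of the wrong size
 * (8K/64K hash collisions) is unlinked and the lookup retried, and an hblk
 * previously used as a shadow hblk is converted back to a normal one.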
2944 */ 2945 static struct hme_blk * 2946 sfmmu_tteload_find_hmeblk(sfmmu_t *sfmmup, struct hmehash_bucket *hmebp, 2947 caddr_t vaddr, uint_t size, uint_t flags, uint_t rid) 2948 { 2949 hmeblk_tag hblktag; 2950 int hmeshift; 2951 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 2952 2953 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 2954 2955 hblktag.htag_id = sfmmutohtagid(sfmmup, rid); 2956 ASSERT(hblktag.htag_id != NULL); 2957 hmeshift = HME_HASH_SHIFT(size); 2958 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 2959 hblktag.htag_rehash = HME_HASH_REHASH(size); 2960 hblktag.htag_rid = rid; 2961 2962 ttearray_realloc: 2963 2964 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 2965 2966 /* 2967 * We block until hblk_reserve_lock is released; it's held by 2968 * the thread, temporarily using hblk_reserve, until hblk_reserve is 2969 * replaced by a hblk from sfmmu8_cache. 2970 */ 2971 if (hmeblkp == (struct hme_blk *)hblk_reserve && 2972 hblk_reserve_thread != curthread) { 2973 SFMMU_HASH_UNLOCK(hmebp); 2974 mutex_enter(&hblk_reserve_lock); 2975 mutex_exit(&hblk_reserve_lock); 2976 SFMMU_STAT(sf_hblk_reserve_hit); 2977 SFMMU_HASH_LOCK(hmebp); 2978 goto ttearray_realloc; 2979 } 2980 2981 if (hmeblkp == NULL) { 2982 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 2983 hblktag, flags, rid); 2984 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 2985 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 2986 } else { 2987 /* 2988 * It is possible for 8k and 64k hblks to collide since they 2989 * have the same rehash value. This is because we 2990 * lazily free hblks and 8K/64K blks could be lingering. 2991 * If we find size mismatch we free the block and & try again. 2992 */ 2993 if (get_hblk_ttesz(hmeblkp) != size) { 2994 ASSERT(!hmeblkp->hblk_vcnt); 2995 ASSERT(!hmeblkp->hblk_hmecnt); 2996 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 2997 &list, 0); 2998 goto ttearray_realloc; 2999 } 3000 if (hmeblkp->hblk_shw_bit) { 3001 /* 3002 * if the hblk was previously used as a shadow hblk then 3003 * we will change it to a normal hblk 3004 */ 3005 ASSERT(!hmeblkp->hblk_shared); 3006 if (hmeblkp->hblk_shw_mask) { 3007 sfmmu_shadow_hcleanup(sfmmup, hmeblkp, hmebp); 3008 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3009 goto ttearray_realloc; 3010 } else { 3011 hmeblkp->hblk_shw_bit = 0; 3012 } 3013 } 3014 SFMMU_STAT(sf_hblk_hit); 3015 } 3016 3017 /* 3018 * hat_memload() should never call kmem_cache_free() for kernel hmeblks; 3019 * see block comment showing the stacktrace in sfmmu_hblk_alloc(); 3020 * set the flag parameter to 1 so that sfmmu_hblks_list_purge() will 3021 * just add these hmeblks to the per-cpu pending queue. 3022 */ 3023 sfmmu_hblks_list_purge(&list, 1); 3024 3025 ASSERT(get_hblk_ttesz(hmeblkp) == size); 3026 ASSERT(!hmeblkp->hblk_shw_bit); 3027 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3028 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3029 ASSERT(hmeblkp->hblk_tag.htag_rid == rid); 3030 3031 return (hmeblkp); 3032 } 3033 3034 /* 3035 * Function adds a tte entry into the hmeblk. It returns 0 if successful and 1 3036 * otherwise. 
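 * In outline: take the page's mapping list lock so pageunload cannot race
 * with us, set up VAC and read-only state (or the whole page array for a
 * large tte), install the tte with sfmmu_modifytte_try(), update hblk_vcnt
 * and ttecnt accounting, demap the old translation if this is a remap that
 * changed hardware bits, and finally preload the TSB for 8K and 4M mappings
 * unless SFMMU_NO_TSBLOAD is set.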
3037 */ 3038 static int 3039 sfmmu_tteload_addentry(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, tte_t *ttep, 3040 caddr_t vaddr, page_t **pps, uint_t flags, uint_t rid) 3041 { 3042 page_t *pp = *pps; 3043 int hmenum, size, remap; 3044 tte_t tteold, flush_tte; 3045 #ifdef DEBUG 3046 tte_t orig_old; 3047 #endif /* DEBUG */ 3048 struct sf_hment *sfhme; 3049 kmutex_t *pml, *pmtx; 3050 hatlock_t *hatlockp; 3051 int myflt; 3052 3053 /* 3054 * remove this panic when we decide to let user virtual address 3055 * space be >= USERLIMIT. 3056 */ 3057 if (!TTE_IS_PRIVILEGED(ttep) && vaddr >= (caddr_t)USERLIMIT) 3058 panic("user addr %p in kernel space", (void *)vaddr); 3059 #if defined(TTE_IS_GLOBAL) 3060 if (TTE_IS_GLOBAL(ttep)) 3061 panic("sfmmu_tteload: creating global tte"); 3062 #endif 3063 3064 #ifdef DEBUG 3065 if (pf_is_memory(sfmmu_ttetopfn(ttep, vaddr)) && 3066 !TTE_IS_PCACHEABLE(ttep) && !sfmmu_allow_nc_trans) 3067 panic("sfmmu_tteload: non cacheable memory tte"); 3068 #endif /* DEBUG */ 3069 3070 /* don't simulate dirty bit for writeable ISM/DISM mappings */ 3071 if ((flags & HAT_LOAD_SHARE) && TTE_IS_WRITABLE(ttep)) { 3072 TTE_SET_REF(ttep); 3073 TTE_SET_MOD(ttep); 3074 } 3075 3076 if ((flags & HAT_LOAD_SHARE) || !TTE_IS_REF(ttep) || 3077 !TTE_IS_MOD(ttep)) { 3078 /* 3079 * Don't load TSB for dummy as in ISM. Also don't preload 3080 * the TSB if the TTE isn't writable since we're likely to 3081 * fault on it again -- preloading can be fairly expensive. 3082 */ 3083 flags |= SFMMU_NO_TSBLOAD; 3084 } 3085 3086 size = TTE_CSZ(ttep); 3087 switch (size) { 3088 case TTE8K: 3089 SFMMU_STAT(sf_tteload8k); 3090 break; 3091 case TTE64K: 3092 SFMMU_STAT(sf_tteload64k); 3093 break; 3094 case TTE512K: 3095 SFMMU_STAT(sf_tteload512k); 3096 break; 3097 case TTE4M: 3098 SFMMU_STAT(sf_tteload4m); 3099 break; 3100 case (TTE32M): 3101 SFMMU_STAT(sf_tteload32m); 3102 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3103 break; 3104 case (TTE256M): 3105 SFMMU_STAT(sf_tteload256m); 3106 ASSERT(mmu_page_sizes == max_mmu_page_sizes); 3107 break; 3108 } 3109 3110 ASSERT(!((uintptr_t)vaddr & TTE_PAGE_OFFSET(size))); 3111 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 3112 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || hmeblkp->hblk_shared); 3113 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || !hmeblkp->hblk_shared); 3114 3115 HBLKTOHME_IDX(sfhme, hmeblkp, vaddr, hmenum); 3116 3117 /* 3118 * Need to grab mlist lock here so that pageunload 3119 * will not change tte behind us. 3120 */ 3121 if (pp) { 3122 pml = sfmmu_mlist_enter(pp); 3123 } 3124 3125 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3126 /* 3127 * Look for corresponding hment and if valid verify 3128 * pfns are equal. 
3129 */ 3130 remap = TTE_IS_VALID(&tteold); 3131 if (remap) { 3132 pfn_t new_pfn, old_pfn; 3133 3134 old_pfn = TTE_TO_PFN(vaddr, &tteold); 3135 new_pfn = TTE_TO_PFN(vaddr, ttep); 3136 3137 if (flags & HAT_LOAD_REMAP) { 3138 /* make sure we are remapping same type of pages */ 3139 if (pf_is_memory(old_pfn) != pf_is_memory(new_pfn)) { 3140 panic("sfmmu_tteload - tte remap io<->memory"); 3141 } 3142 if (old_pfn != new_pfn && 3143 (pp != NULL || sfhme->hme_page != NULL)) { 3144 panic("sfmmu_tteload - tte remap pp != NULL"); 3145 } 3146 } else if (old_pfn != new_pfn) { 3147 panic("sfmmu_tteload - tte remap, hmeblkp 0x%p", 3148 (void *)hmeblkp); 3149 } 3150 ASSERT(TTE_CSZ(&tteold) == TTE_CSZ(ttep)); 3151 } 3152 3153 if (pp) { 3154 if (size == TTE8K) { 3155 #ifdef VAC 3156 /* 3157 * Handle VAC consistency 3158 */ 3159 if (!remap && (cache & CACHE_VAC) && !PP_ISNC(pp)) { 3160 sfmmu_vac_conflict(sfmmup, vaddr, pp); 3161 } 3162 #endif 3163 3164 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3165 pmtx = sfmmu_page_enter(pp); 3166 PP_CLRRO(pp); 3167 sfmmu_page_exit(pmtx); 3168 } else if (!PP_ISMAPPED(pp) && 3169 (!TTE_IS_WRITABLE(ttep)) && !(PP_ISMOD(pp))) { 3170 pmtx = sfmmu_page_enter(pp); 3171 if (!(PP_ISMOD(pp))) { 3172 PP_SETRO(pp); 3173 } 3174 sfmmu_page_exit(pmtx); 3175 } 3176 3177 } else if (sfmmu_pagearray_setup(vaddr, pps, ttep, remap)) { 3178 /* 3179 * sfmmu_pagearray_setup failed so return 3180 */ 3181 sfmmu_mlist_exit(pml); 3182 return (1); 3183 } 3184 } 3185 3186 /* 3187 * Make sure hment is not on a mapping list. 3188 */ 3189 ASSERT(remap || (sfhme->hme_page == NULL)); 3190 3191 /* if it is not a remap then hme->next better be NULL */ 3192 ASSERT((!remap) ? sfhme->hme_next == NULL : 1); 3193 3194 if (flags & HAT_LOAD_LOCK) { 3195 if ((hmeblkp->hblk_lckcnt + 1) >= MAX_HBLK_LCKCNT) { 3196 panic("too high lckcnt-hmeblk %p", 3197 (void *)hmeblkp); 3198 } 3199 atomic_inc_32(&hmeblkp->hblk_lckcnt); 3200 3201 HBLK_STACK_TRACE(hmeblkp, HBLK_LOCK); 3202 } 3203 3204 #ifdef VAC 3205 if (pp && PP_ISNC(pp)) { 3206 /* 3207 * If the physical page is marked to be uncacheable, like 3208 * by a vac conflict, make sure the new mapping is also 3209 * uncacheable. 3210 */ 3211 TTE_CLR_VCACHEABLE(ttep); 3212 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 3213 } 3214 #endif 3215 ttep->tte_hmenum = hmenum; 3216 3217 #ifdef DEBUG 3218 orig_old = tteold; 3219 #endif /* DEBUG */ 3220 3221 while (sfmmu_modifytte_try(&tteold, ttep, &sfhme->hme_tte) < 0) { 3222 if ((sfmmup == KHATID) && 3223 (flags & (HAT_LOAD_LOCK | HAT_LOAD_REMAP))) { 3224 sfmmu_copytte(&sfhme->hme_tte, &tteold); 3225 } 3226 #ifdef DEBUG 3227 chk_tte(&orig_old, &tteold, ttep, hmeblkp); 3228 #endif /* DEBUG */ 3229 } 3230 ASSERT(TTE_IS_VALID(&sfhme->hme_tte)); 3231 3232 if (!TTE_IS_VALID(&tteold)) { 3233 3234 atomic_inc_16(&hmeblkp->hblk_vcnt); 3235 if (rid == SFMMU_INVALID_SHMERID) { 3236 atomic_inc_ulong(&sfmmup->sfmmu_ttecnt[size]); 3237 } else { 3238 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 3239 sf_region_t *rgnp = srdp->srd_hmergnp[rid]; 3240 /* 3241 * We already accounted for region ttecnt's in sfmmu 3242 * during hat_join_region() processing. Here we 3243 * only update ttecnt's in region struture. 
3244 */ 3245 atomic_inc_ulong(&rgnp->rgn_ttecnt[size]); 3246 } 3247 } 3248 3249 myflt = (astosfmmu(curthread->t_procp->p_as) == sfmmup); 3250 if (size > TTE8K && (flags & HAT_LOAD_SHARE) == 0 && 3251 sfmmup != ksfmmup) { 3252 uchar_t tteflag = 1 << size; 3253 if (rid == SFMMU_INVALID_SHMERID) { 3254 if (!(sfmmup->sfmmu_tteflags & tteflag)) { 3255 hatlockp = sfmmu_hat_enter(sfmmup); 3256 sfmmup->sfmmu_tteflags |= tteflag; 3257 sfmmu_hat_exit(hatlockp); 3258 } 3259 } else if (!(sfmmup->sfmmu_rtteflags & tteflag)) { 3260 hatlockp = sfmmu_hat_enter(sfmmup); 3261 sfmmup->sfmmu_rtteflags |= tteflag; 3262 sfmmu_hat_exit(hatlockp); 3263 } 3264 /* 3265 * Update the current CPU tsbmiss area, so the current thread 3266 * won't need to take the tsbmiss for the new pagesize. 3267 * The other threads in the process will update their tsb 3268 * miss area lazily in sfmmu_tsbmiss_exception() when they 3269 * fail to find the translation for a newly added pagesize. 3270 */ 3271 if (size > TTE64K && myflt) { 3272 struct tsbmiss *tsbmp; 3273 kpreempt_disable(); 3274 tsbmp = &tsbmiss_area[CPU->cpu_id]; 3275 if (rid == SFMMU_INVALID_SHMERID) { 3276 if (!(tsbmp->uhat_tteflags & tteflag)) { 3277 tsbmp->uhat_tteflags |= tteflag; 3278 } 3279 } else { 3280 if (!(tsbmp->uhat_rtteflags & tteflag)) { 3281 tsbmp->uhat_rtteflags |= tteflag; 3282 } 3283 } 3284 kpreempt_enable(); 3285 } 3286 } 3287 3288 if (size >= TTE4M && (flags & HAT_LOAD_TEXT) && 3289 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 3290 hatlockp = sfmmu_hat_enter(sfmmup); 3291 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 3292 sfmmu_hat_exit(hatlockp); 3293 } 3294 3295 flush_tte.tte_intlo = (tteold.tte_intlo ^ ttep->tte_intlo) & 3296 hw_tte.tte_intlo; 3297 flush_tte.tte_inthi = (tteold.tte_inthi ^ ttep->tte_inthi) & 3298 hw_tte.tte_inthi; 3299 3300 if (remap && (flush_tte.tte_inthi || flush_tte.tte_intlo)) { 3301 /* 3302 * If remap and new tte differs from old tte we need 3303 * to sync the mod bit and flush TLB/TSB. We don't 3304 * need to sync ref bit because we currently always set 3305 * ref bit in tteload. 3306 */ 3307 ASSERT(TTE_IS_REF(ttep)); 3308 if (TTE_IS_MOD(&tteold)) { 3309 sfmmu_ttesync(sfmmup, vaddr, &tteold, pp); 3310 } 3311 /* 3312 * hwtte bits shouldn't change for SRD hmeblks as long as SRD 3313 * hmes are only used for read only text. Adding this code for 3314 * completeness and future use of shared hmeblks with writable 3315 * mappings of VMODSORT vnodes. 3316 */ 3317 if (hmeblkp->hblk_shared) { 3318 cpuset_t cpuset = sfmmu_rgntlb_demap(vaddr, 3319 sfmmup->sfmmu_srdp->srd_hmergnp[rid], hmeblkp, 1); 3320 xt_sync(cpuset); 3321 SFMMU_STAT_ADD(sf_region_remap_demap, 1); 3322 } else { 3323 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 0); 3324 xt_sync(sfmmup->sfmmu_cpusran); 3325 } 3326 } 3327 3328 if ((flags & SFMMU_NO_TSBLOAD) == 0) { 3329 /* 3330 * We only preload 8K and 4M mappings into the TSB, since 3331 * 64K and 512K mappings are replicated and hence don't 3332 * have a single, unique TSB entry. Ditto for 32M/256M. 3333 */ 3334 if (size == TTE8K || size == TTE4M) { 3335 sf_scd_t *scdp; 3336 hatlockp = sfmmu_hat_enter(sfmmup); 3337 /* 3338 * Don't preload private TSB if the mapping is used 3339 * by the shctx in the SCD. 
3340 */ 3341 scdp = sfmmup->sfmmu_scdp; 3342 if (rid == SFMMU_INVALID_SHMERID || scdp == NULL || 3343 !SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 3344 sfmmu_load_tsb(sfmmup, vaddr, &sfhme->hme_tte, 3345 size); 3346 } 3347 sfmmu_hat_exit(hatlockp); 3348 } 3349 } 3350 if (pp) { 3351 if (!remap) { 3352 HME_ADD(sfhme, pp); 3353 atomic_inc_16(&hmeblkp->hblk_hmecnt); 3354 ASSERT(hmeblkp->hblk_hmecnt > 0); 3355 3356 /* 3357 * Cannot ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 3358 * see pageunload() for comment. 3359 */ 3360 } 3361 sfmmu_mlist_exit(pml); 3362 } 3363 3364 return (0); 3365 } 3366 /* 3367 * Function unlocks hash bucket. 3368 */ 3369 static void 3370 sfmmu_tteload_release_hashbucket(struct hmehash_bucket *hmebp) 3371 { 3372 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3373 SFMMU_HASH_UNLOCK(hmebp); 3374 } 3375 3376 /* 3377 * function which checks and sets up page array for a large 3378 * translation. Will set p_vcolor, p_index, p_ro fields. 3379 * Assumes addr and pfnum of first page are properly aligned. 3380 * Will check for physical contiguity. If check fails it return 3381 * non null. 3382 */ 3383 static int 3384 sfmmu_pagearray_setup(caddr_t addr, page_t **pps, tte_t *ttep, int remap) 3385 { 3386 int i, index, ttesz; 3387 pfn_t pfnum; 3388 pgcnt_t npgs; 3389 page_t *pp, *pp1; 3390 kmutex_t *pmtx; 3391 #ifdef VAC 3392 int osz; 3393 int cflags = 0; 3394 int vac_err = 0; 3395 #endif 3396 int newidx = 0; 3397 3398 ttesz = TTE_CSZ(ttep); 3399 3400 ASSERT(ttesz > TTE8K); 3401 3402 npgs = TTEPAGES(ttesz); 3403 index = PAGESZ_TO_INDEX(ttesz); 3404 3405 pfnum = (*pps)->p_pagenum; 3406 ASSERT(IS_P2ALIGNED(pfnum, npgs)); 3407 3408 /* 3409 * Save the first pp so we can do HAT_TMPNC at the end. 3410 */ 3411 pp1 = *pps; 3412 #ifdef VAC 3413 osz = fnd_mapping_sz(pp1); 3414 #endif 3415 3416 for (i = 0; i < npgs; i++, pps++) { 3417 pp = *pps; 3418 ASSERT(PAGE_LOCKED(pp)); 3419 ASSERT(pp->p_szc >= ttesz); 3420 ASSERT(pp->p_szc == pp1->p_szc); 3421 ASSERT(sfmmu_mlist_held(pp)); 3422 3423 /* 3424 * XXX is it possible to maintain P_RO on the root only? 3425 */ 3426 if (TTE_IS_WRITABLE(ttep) && PP_ISRO(pp)) { 3427 pmtx = sfmmu_page_enter(pp); 3428 PP_CLRRO(pp); 3429 sfmmu_page_exit(pmtx); 3430 } else if (!PP_ISMAPPED(pp) && !TTE_IS_WRITABLE(ttep) && 3431 !PP_ISMOD(pp)) { 3432 pmtx = sfmmu_page_enter(pp); 3433 if (!(PP_ISMOD(pp))) { 3434 PP_SETRO(pp); 3435 } 3436 sfmmu_page_exit(pmtx); 3437 } 3438 3439 /* 3440 * If this is a remap we skip vac & contiguity checks. 3441 */ 3442 if (remap) 3443 continue; 3444 3445 /* 3446 * set p_vcolor and detect any vac conflicts. 3447 */ 3448 #ifdef VAC 3449 if (vac_err == 0) { 3450 vac_err = sfmmu_vacconflict_array(addr, pp, &cflags); 3451 3452 } 3453 #endif 3454 3455 /* 3456 * Save current index in case we need to undo it. 3457 * Note: "PAGESZ_TO_INDEX(sz) (1 << (sz))" 3458 * "SFMMU_INDEX_SHIFT 6" 3459 * "SFMMU_INDEX_MASK ((1 << SFMMU_INDEX_SHIFT) - 1)" 3460 * "PP_MAPINDEX(p_index) (p_index & SFMMU_INDEX_MASK)" 3461 * 3462 * So: index = PAGESZ_TO_INDEX(ttesz); 3463 * if ttesz == 1 then index = 0x2 3464 * 2 then index = 0x4 3465 * 3 then index = 0x8 3466 * 4 then index = 0x10 3467 * 5 then index = 0x20 3468 * The code below checks if it's a new pagesize (ie, newidx) 3469 * in case we need to take it back out of p_index, 3470 * and then or's the new index into the existing index. 
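 * For example, a page that already has a 64K mapping (index 0x2) and now
 * gains a 4M mapping (index 0x8) ends up with PP_MAPINDEX(pp) == 0xa.
 * newidx is set only when this pagesize's bit was not already present, so
 * the bit can be taken back out of p_index if the contiguity check further
 * down fails.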
3471 */ 3472 if ((PP_MAPINDEX(pp) & index) == 0) 3473 newidx = 1; 3474 pp->p_index = (PP_MAPINDEX(pp) | index); 3475 3476 /* 3477 * contiguity check 3478 */ 3479 if (pp->p_pagenum != pfnum) { 3480 /* 3481 * If we fail the contiguity test then 3482 * the only thing we need to fix is the p_index field. 3483 * We might get a few extra flushes but since this 3484 * path is rare that is ok. The p_ro field will 3485 * get automatically fixed on the next tteload to 3486 * the page. NO TNC bit is set yet. 3487 */ 3488 while (i >= 0) { 3489 pp = *pps; 3490 if (newidx) 3491 pp->p_index = (PP_MAPINDEX(pp) & 3492 ~index); 3493 pps--; 3494 i--; 3495 } 3496 return (1); 3497 } 3498 pfnum++; 3499 addr += MMU_PAGESIZE; 3500 } 3501 3502 #ifdef VAC 3503 if (vac_err) { 3504 if (ttesz > osz) { 3505 /* 3506 * There are some smaller mappings that causes vac 3507 * conflicts. Convert all existing small mappings to 3508 * TNC. 3509 */ 3510 SFMMU_STAT_ADD(sf_uncache_conflict, npgs); 3511 sfmmu_page_cache_array(pp1, HAT_TMPNC, CACHE_FLUSH, 3512 npgs); 3513 } else { 3514 /* EMPTY */ 3515 /* 3516 * If there exists an big page mapping, 3517 * that means the whole existing big page 3518 * has TNC setting already. No need to covert to 3519 * TNC again. 3520 */ 3521 ASSERT(PP_ISTNC(pp1)); 3522 } 3523 } 3524 #endif /* VAC */ 3525 3526 return (0); 3527 } 3528 3529 #ifdef VAC 3530 /* 3531 * Routine that detects vac consistency for a large page. It also 3532 * sets virtual color for all pp's for this big mapping. 3533 */ 3534 static int 3535 sfmmu_vacconflict_array(caddr_t addr, page_t *pp, int *cflags) 3536 { 3537 int vcolor, ocolor; 3538 3539 ASSERT(sfmmu_mlist_held(pp)); 3540 3541 if (PP_ISNC(pp)) { 3542 return (HAT_TMPNC); 3543 } 3544 3545 vcolor = addr_to_vcolor(addr); 3546 if (PP_NEWPAGE(pp)) { 3547 PP_SET_VCOLOR(pp, vcolor); 3548 return (0); 3549 } 3550 3551 ocolor = PP_GET_VCOLOR(pp); 3552 if (ocolor == vcolor) { 3553 return (0); 3554 } 3555 3556 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 3557 /* 3558 * Previous user of page had a differnet color 3559 * but since there are no current users 3560 * we just flush the cache and change the color. 3561 * As an optimization for large pages we flush the 3562 * entire cache of that color and set a flag. 3563 */ 3564 SFMMU_STAT(sf_pgcolor_conflict); 3565 if (!CacheColor_IsFlushed(*cflags, ocolor)) { 3566 CacheColor_SetFlushed(*cflags, ocolor); 3567 sfmmu_cache_flushcolor(ocolor, pp->p_pagenum); 3568 } 3569 PP_SET_VCOLOR(pp, vcolor); 3570 return (0); 3571 } 3572 3573 /* 3574 * We got a real conflict with a current mapping. 3575 * set flags to start unencaching all mappings 3576 * and return failure so we restart looping 3577 * the pp array from the beginning. 3578 */ 3579 return (HAT_TMPNC); 3580 } 3581 #endif /* VAC */ 3582 3583 /* 3584 * creates a large page shadow hmeblk for a tte. 3585 * The purpose of this routine is to allow us to do quick unloads because 3586 * the vm layer can easily pass a very large but sparsely populated range. 
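 * The shadow hblk is hashed at the next larger size (512K for 8K ttes,
 * otherwise ttesz + 1) and records in hblk_shw_mask which of its (at most
 * eight) constituent sub-ranges may have smaller hmeblks beneath it, so the
 * unload path can limit its search to sub-ranges whose bit is set.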
3587 */ 3588 static struct hme_blk * 3589 sfmmu_shadow_hcreate(sfmmu_t *sfmmup, caddr_t vaddr, int ttesz, uint_t flags) 3590 { 3591 struct hmehash_bucket *hmebp; 3592 hmeblk_tag hblktag; 3593 int hmeshift, size, vshift; 3594 uint_t shw_mask, newshw_mask; 3595 struct hme_blk *hmeblkp; 3596 3597 ASSERT(sfmmup != KHATID); 3598 if (mmu_page_sizes == max_mmu_page_sizes) { 3599 ASSERT(ttesz < TTE256M); 3600 } else { 3601 ASSERT(ttesz < TTE4M); 3602 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 3603 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 3604 } 3605 3606 if (ttesz == TTE8K) { 3607 size = TTE512K; 3608 } else { 3609 size = ++ttesz; 3610 } 3611 3612 hblktag.htag_id = sfmmup; 3613 hmeshift = HME_HASH_SHIFT(size); 3614 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 3615 hblktag.htag_rehash = HME_HASH_REHASH(size); 3616 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3617 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 3618 3619 SFMMU_HASH_LOCK(hmebp); 3620 3621 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 3622 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 3623 if (hmeblkp == NULL) { 3624 hmeblkp = sfmmu_hblk_alloc(sfmmup, vaddr, hmebp, size, 3625 hblktag, flags, SFMMU_INVALID_SHMERID); 3626 } 3627 ASSERT(hmeblkp); 3628 if (!hmeblkp->hblk_shw_mask) { 3629 /* 3630 * if this is a unused hblk it was just allocated or could 3631 * potentially be a previous large page hblk so we need to 3632 * set the shadow bit. 3633 */ 3634 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3635 hmeblkp->hblk_shw_bit = 1; 3636 } else if (hmeblkp->hblk_shw_bit == 0) { 3637 panic("sfmmu_shadow_hcreate: shw bit not set in hmeblkp 0x%p", 3638 (void *)hmeblkp); 3639 } 3640 ASSERT(hmeblkp->hblk_shw_bit == 1); 3641 ASSERT(!hmeblkp->hblk_shared); 3642 vshift = vaddr_to_vshift(hblktag, vaddr, size); 3643 ASSERT(vshift < 8); 3644 /* 3645 * Atomically set shw mask bit 3646 */ 3647 do { 3648 shw_mask = hmeblkp->hblk_shw_mask; 3649 newshw_mask = shw_mask | (1 << vshift); 3650 newshw_mask = atomic_cas_32(&hmeblkp->hblk_shw_mask, shw_mask, 3651 newshw_mask); 3652 } while (newshw_mask != shw_mask); 3653 3654 SFMMU_HASH_UNLOCK(hmebp); 3655 3656 return (hmeblkp); 3657 } 3658 3659 /* 3660 * This routine cleanup a previous shadow hmeblk and changes it to 3661 * a regular hblk. This happens rarely but it is possible 3662 * when a process wants to use large pages and there are hblks still 3663 * lying around from the previous as that used these hmeblks. 3664 * The alternative was to cleanup the shadow hblks at unload time 3665 * but since so few user processes actually use large pages, it is 3666 * better to be lazy and cleanup at this time. 
3667 */ 3668 static void 3669 sfmmu_shadow_hcleanup(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 3670 struct hmehash_bucket *hmebp) 3671 { 3672 caddr_t addr, endaddr; 3673 int hashno, size; 3674 3675 ASSERT(hmeblkp->hblk_shw_bit); 3676 ASSERT(!hmeblkp->hblk_shared); 3677 3678 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 3679 3680 if (!hmeblkp->hblk_shw_mask) { 3681 hmeblkp->hblk_shw_bit = 0; 3682 return; 3683 } 3684 addr = (caddr_t)get_hblk_base(hmeblkp); 3685 endaddr = get_hblk_endaddr(hmeblkp); 3686 size = get_hblk_ttesz(hmeblkp); 3687 hashno = size - 1; 3688 ASSERT(hashno > 0); 3689 SFMMU_HASH_UNLOCK(hmebp); 3690 3691 sfmmu_free_hblks(sfmmup, addr, endaddr, hashno); 3692 3693 SFMMU_HASH_LOCK(hmebp); 3694 } 3695 3696 static void 3697 sfmmu_free_hblks(sfmmu_t *sfmmup, caddr_t addr, caddr_t endaddr, 3698 int hashno) 3699 { 3700 int hmeshift, shadow = 0; 3701 hmeblk_tag hblktag; 3702 struct hmehash_bucket *hmebp; 3703 struct hme_blk *hmeblkp; 3704 struct hme_blk *nx_hblk, *pr_hblk, *list = NULL; 3705 3706 ASSERT(hashno > 0); 3707 hblktag.htag_id = sfmmup; 3708 hblktag.htag_rehash = hashno; 3709 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3710 3711 hmeshift = HME_HASH_SHIFT(hashno); 3712 3713 while (addr < endaddr) { 3714 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3715 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3716 SFMMU_HASH_LOCK(hmebp); 3717 /* inline HME_HASH_SEARCH */ 3718 hmeblkp = hmebp->hmeblkp; 3719 pr_hblk = NULL; 3720 while (hmeblkp) { 3721 if (HTAGS_EQ(hmeblkp->hblk_tag, hblktag)) { 3722 /* found hme_blk */ 3723 ASSERT(!hmeblkp->hblk_shared); 3724 if (hmeblkp->hblk_shw_bit) { 3725 if (hmeblkp->hblk_shw_mask) { 3726 shadow = 1; 3727 sfmmu_shadow_hcleanup(sfmmup, 3728 hmeblkp, hmebp); 3729 break; 3730 } else { 3731 hmeblkp->hblk_shw_bit = 0; 3732 } 3733 } 3734 3735 /* 3736 * Hblk_hmecnt and hblk_vcnt could be non zero 3737 * since hblk_unload() does not gurantee that. 3738 * 3739 * XXX - this could cause tteload() to spin 3740 * where sfmmu_shadow_hcleanup() is called. 3741 */ 3742 } 3743 3744 nx_hblk = hmeblkp->hblk_next; 3745 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 3746 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3747 &list, 0); 3748 } else { 3749 pr_hblk = hmeblkp; 3750 } 3751 hmeblkp = nx_hblk; 3752 } 3753 3754 SFMMU_HASH_UNLOCK(hmebp); 3755 3756 if (shadow) { 3757 /* 3758 * We found another shadow hblk so cleaned its 3759 * children. We need to go back and cleanup 3760 * the original hblk so we don't change the 3761 * addr. 3762 */ 3763 shadow = 0; 3764 } else { 3765 addr = (caddr_t)roundup((uintptr_t)addr + 1, 3766 (1 << hmeshift)); 3767 } 3768 } 3769 sfmmu_hblks_list_purge(&list, 0); 3770 } 3771 3772 /* 3773 * This routine's job is to delete stale invalid shared hmeregions hmeblks that 3774 * may still linger on after pageunload. 
3775 */ 3776 static void 3777 sfmmu_cleanup_rhblk(sf_srd_t *srdp, caddr_t addr, uint_t rid, int ttesz) 3778 { 3779 int hmeshift; 3780 hmeblk_tag hblktag; 3781 struct hmehash_bucket *hmebp; 3782 struct hme_blk *hmeblkp; 3783 struct hme_blk *pr_hblk; 3784 struct hme_blk *list = NULL; 3785 3786 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3787 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3788 3789 hmeshift = HME_HASH_SHIFT(ttesz); 3790 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3791 hblktag.htag_rehash = ttesz; 3792 hblktag.htag_rid = rid; 3793 hblktag.htag_id = srdp; 3794 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3795 3796 SFMMU_HASH_LOCK(hmebp); 3797 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3798 if (hmeblkp != NULL) { 3799 ASSERT(hmeblkp->hblk_shared); 3800 ASSERT(!hmeblkp->hblk_shw_bit); 3801 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3802 panic("sfmmu_cleanup_rhblk: valid hmeblk"); 3803 } 3804 ASSERT(!hmeblkp->hblk_lckcnt); 3805 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3806 &list, 0); 3807 } 3808 SFMMU_HASH_UNLOCK(hmebp); 3809 sfmmu_hblks_list_purge(&list, 0); 3810 } 3811 3812 /* ARGSUSED */ 3813 static void 3814 sfmmu_rgn_cb_noop(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 3815 size_t r_size, void *r_obj, u_offset_t r_objoff) 3816 { 3817 } 3818 3819 /* 3820 * Searches for an hmeblk which maps addr, then unloads this mapping 3821 * and updates *eaddrp, if the hmeblk is found. 3822 */ 3823 static void 3824 sfmmu_unload_hmeregion_va(sf_srd_t *srdp, uint_t rid, caddr_t addr, 3825 caddr_t eaddr, int ttesz, caddr_t *eaddrp) 3826 { 3827 int hmeshift; 3828 hmeblk_tag hblktag; 3829 struct hmehash_bucket *hmebp; 3830 struct hme_blk *hmeblkp; 3831 struct hme_blk *pr_hblk; 3832 struct hme_blk *list = NULL; 3833 3834 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3835 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3836 ASSERT(ttesz >= HBLK_MIN_TTESZ); 3837 3838 hmeshift = HME_HASH_SHIFT(ttesz); 3839 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3840 hblktag.htag_rehash = ttesz; 3841 hblktag.htag_rid = rid; 3842 hblktag.htag_id = srdp; 3843 hmebp = HME_HASH_FUNCTION(srdp, addr, hmeshift); 3844 3845 SFMMU_HASH_LOCK(hmebp); 3846 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 3847 if (hmeblkp != NULL) { 3848 ASSERT(hmeblkp->hblk_shared); 3849 ASSERT(!hmeblkp->hblk_lckcnt); 3850 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 3851 *eaddrp = sfmmu_hblk_unload(NULL, hmeblkp, addr, 3852 eaddr, NULL, HAT_UNLOAD); 3853 ASSERT(*eaddrp > addr); 3854 } 3855 ASSERT(!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt); 3856 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 3857 &list, 0); 3858 } 3859 SFMMU_HASH_UNLOCK(hmebp); 3860 sfmmu_hblks_list_purge(&list, 0); 3861 } 3862 3863 static void 3864 sfmmu_unload_hmeregion(sf_srd_t *srdp, sf_region_t *rgnp) 3865 { 3866 int ttesz = rgnp->rgn_pgszc; 3867 size_t rsz = rgnp->rgn_size; 3868 caddr_t rsaddr = rgnp->rgn_saddr; 3869 caddr_t readdr = rsaddr + rsz; 3870 caddr_t rhsaddr; 3871 caddr_t va; 3872 uint_t rid = rgnp->rgn_id; 3873 caddr_t cbsaddr; 3874 caddr_t cbeaddr; 3875 hat_rgn_cb_func_t rcbfunc; 3876 ulong_t cnt; 3877 3878 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 3879 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 3880 3881 ASSERT(IS_P2ALIGNED(rsaddr, TTEBYTES(ttesz))); 3882 ASSERT(IS_P2ALIGNED(rsz, TTEBYTES(ttesz))); 3883 if (ttesz < HBLK_MIN_TTESZ) { 3884 ttesz = HBLK_MIN_TTESZ; 3885 rhsaddr = (caddr_t)P2ALIGN((uintptr_t)rsaddr, HBLK_MIN_BYTES); 3886 } else { 3887 rhsaddr = rsaddr; 3888 } 3889 3890 if ((rcbfunc = rgnp->rgn_cb_function) == NULL) 
{ 3891 rcbfunc = sfmmu_rgn_cb_noop; 3892 } 3893 3894 while (ttesz >= HBLK_MIN_TTESZ) { 3895 cbsaddr = rsaddr; 3896 cbeaddr = rsaddr; 3897 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 3898 ttesz--; 3899 continue; 3900 } 3901 cnt = 0; 3902 va = rsaddr; 3903 while (va < readdr) { 3904 ASSERT(va >= rhsaddr); 3905 if (va != cbeaddr) { 3906 if (cbeaddr != cbsaddr) { 3907 ASSERT(cbeaddr > cbsaddr); 3908 (*rcbfunc)(cbsaddr, cbeaddr, 3909 rsaddr, rsz, rgnp->rgn_obj, 3910 rgnp->rgn_objoff); 3911 } 3912 cbsaddr = va; 3913 cbeaddr = va; 3914 } 3915 sfmmu_unload_hmeregion_va(srdp, rid, va, readdr, 3916 ttesz, &cbeaddr); 3917 cnt++; 3918 va = rhsaddr + (cnt << TTE_PAGE_SHIFT(ttesz)); 3919 } 3920 if (cbeaddr != cbsaddr) { 3921 ASSERT(cbeaddr > cbsaddr); 3922 (*rcbfunc)(cbsaddr, cbeaddr, rsaddr, 3923 rsz, rgnp->rgn_obj, 3924 rgnp->rgn_objoff); 3925 } 3926 ttesz--; 3927 } 3928 } 3929 3930 /* 3931 * Release one hardware address translation lock on the given address range. 3932 */ 3933 void 3934 hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len) 3935 { 3936 struct hmehash_bucket *hmebp; 3937 hmeblk_tag hblktag; 3938 int hmeshift, hashno = 1; 3939 struct hme_blk *hmeblkp, *list = NULL; 3940 caddr_t endaddr; 3941 3942 ASSERT(sfmmup != NULL); 3943 3944 ASSERT((sfmmup == ksfmmup) || 3945 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 3946 ASSERT((len & MMU_PAGEOFFSET) == 0); 3947 endaddr = addr + len; 3948 hblktag.htag_id = sfmmup; 3949 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 3950 3951 /* 3952 * Spitfire supports 4 page sizes. 3953 * Most pages are expected to be of the smallest page size (8K) and 3954 * these will not need to be rehashed. 64K pages also don't need to be 3955 * rehashed because an hmeblk spans 64K of address space. 512K pages 3956 * might need 1 rehash and and 4M pages might need 2 rehashes. 3957 */ 3958 while (addr < endaddr) { 3959 hmeshift = HME_HASH_SHIFT(hashno); 3960 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 3961 hblktag.htag_rehash = hashno; 3962 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 3963 3964 SFMMU_HASH_LOCK(hmebp); 3965 3966 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 3967 if (hmeblkp != NULL) { 3968 ASSERT(!hmeblkp->hblk_shared); 3969 /* 3970 * If we encounter a shadow hmeblk then 3971 * we know there are no valid hmeblks mapping 3972 * this address at this size or larger. 3973 * Just increment address by the smallest 3974 * page size. 3975 */ 3976 if (hmeblkp->hblk_shw_bit) { 3977 addr += MMU_PAGESIZE; 3978 } else { 3979 addr = sfmmu_hblk_unlock(hmeblkp, addr, 3980 endaddr); 3981 } 3982 SFMMU_HASH_UNLOCK(hmebp); 3983 hashno = 1; 3984 continue; 3985 } 3986 SFMMU_HASH_UNLOCK(hmebp); 3987 3988 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 3989 /* 3990 * We have traversed the whole list and rehashed 3991 * if necessary without finding the address to unlock 3992 * which should never happen. 3993 */ 3994 panic("sfmmu_unlock: addr not found. 
" 3995 "addr %p hat %p", (void *)addr, (void *)sfmmup); 3996 } else { 3997 hashno++; 3998 } 3999 } 4000 4001 sfmmu_hblks_list_purge(&list, 0); 4002 } 4003 4004 void 4005 hat_unlock_region(struct hat *sfmmup, caddr_t addr, size_t len, 4006 hat_region_cookie_t rcookie) 4007 { 4008 sf_srd_t *srdp; 4009 sf_region_t *rgnp; 4010 int ttesz; 4011 uint_t rid; 4012 caddr_t eaddr; 4013 caddr_t va; 4014 int hmeshift; 4015 hmeblk_tag hblktag; 4016 struct hmehash_bucket *hmebp; 4017 struct hme_blk *hmeblkp; 4018 struct hme_blk *pr_hblk; 4019 struct hme_blk *list; 4020 4021 if (rcookie == HAT_INVALID_REGION_COOKIE) { 4022 hat_unlock(sfmmup, addr, len); 4023 return; 4024 } 4025 4026 ASSERT(sfmmup != NULL); 4027 ASSERT(sfmmup != ksfmmup); 4028 4029 srdp = sfmmup->sfmmu_srdp; 4030 rid = (uint_t)((uint64_t)rcookie); 4031 VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS); 4032 eaddr = addr + len; 4033 va = addr; 4034 list = NULL; 4035 rgnp = srdp->srd_hmergnp[rid]; 4036 SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len); 4037 4038 ASSERT(IS_P2ALIGNED(addr, TTEBYTES(rgnp->rgn_pgszc))); 4039 ASSERT(IS_P2ALIGNED(len, TTEBYTES(rgnp->rgn_pgszc))); 4040 if (rgnp->rgn_pgszc < HBLK_MIN_TTESZ) { 4041 ttesz = HBLK_MIN_TTESZ; 4042 } else { 4043 ttesz = rgnp->rgn_pgszc; 4044 } 4045 while (va < eaddr) { 4046 while (ttesz < rgnp->rgn_pgszc && 4047 IS_P2ALIGNED(va, TTEBYTES(ttesz + 1))) { 4048 ttesz++; 4049 } 4050 while (ttesz >= HBLK_MIN_TTESZ) { 4051 if (!(rgnp->rgn_hmeflags & (1 << ttesz))) { 4052 ttesz--; 4053 continue; 4054 } 4055 hmeshift = HME_HASH_SHIFT(ttesz); 4056 hblktag.htag_bspage = HME_HASH_BSPAGE(va, hmeshift); 4057 hblktag.htag_rehash = ttesz; 4058 hblktag.htag_rid = rid; 4059 hblktag.htag_id = srdp; 4060 hmebp = HME_HASH_FUNCTION(srdp, va, hmeshift); 4061 SFMMU_HASH_LOCK(hmebp); 4062 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, 4063 &list); 4064 if (hmeblkp == NULL) { 4065 SFMMU_HASH_UNLOCK(hmebp); 4066 ttesz--; 4067 continue; 4068 } 4069 ASSERT(hmeblkp->hblk_shared); 4070 va = sfmmu_hblk_unlock(hmeblkp, va, eaddr); 4071 ASSERT(va >= eaddr || 4072 IS_P2ALIGNED((uintptr_t)va, TTEBYTES(ttesz))); 4073 SFMMU_HASH_UNLOCK(hmebp); 4074 break; 4075 } 4076 if (ttesz < HBLK_MIN_TTESZ) { 4077 panic("hat_unlock_region: addr not found " 4078 "addr %p hat %p", (void *)va, (void *)sfmmup); 4079 } 4080 } 4081 sfmmu_hblks_list_purge(&list, 0); 4082 } 4083 4084 /* 4085 * Function to unlock a range of addresses in an hmeblk. It returns the 4086 * next address that needs to be unlocked. 4087 * Should be called with the hash lock held. 
4088 */ 4089 static caddr_t 4090 sfmmu_hblk_unlock(struct hme_blk *hmeblkp, caddr_t addr, caddr_t endaddr) 4091 { 4092 struct sf_hment *sfhme; 4093 tte_t tteold, ttemod; 4094 int ttesz, ret; 4095 4096 ASSERT(in_hblk_range(hmeblkp, addr)); 4097 ASSERT(hmeblkp->hblk_shw_bit == 0); 4098 4099 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4100 ttesz = get_hblk_ttesz(hmeblkp); 4101 4102 HBLKTOHME(sfhme, hmeblkp, addr); 4103 while (addr < endaddr) { 4104 readtte: 4105 sfmmu_copytte(&sfhme->hme_tte, &tteold); 4106 if (TTE_IS_VALID(&tteold)) { 4107 4108 ttemod = tteold; 4109 4110 ret = sfmmu_modifytte_try(&tteold, &ttemod, 4111 &sfhme->hme_tte); 4112 4113 if (ret < 0) 4114 goto readtte; 4115 4116 if (hmeblkp->hblk_lckcnt == 0) 4117 panic("zero hblk lckcnt"); 4118 4119 if (((uintptr_t)addr + TTEBYTES(ttesz)) > 4120 (uintptr_t)endaddr) 4121 panic("can't unlock large tte"); 4122 4123 ASSERT(hmeblkp->hblk_lckcnt > 0); 4124 atomic_dec_32(&hmeblkp->hblk_lckcnt); 4125 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 4126 } else { 4127 panic("sfmmu_hblk_unlock: invalid tte"); 4128 } 4129 addr += TTEBYTES(ttesz); 4130 sfhme++; 4131 } 4132 return (addr); 4133 } 4134 4135 /* 4136 * Physical Address Mapping Framework 4137 * 4138 * General rules: 4139 * 4140 * (1) Applies only to seg_kmem memory pages. To make things easier, 4141 * seg_kpm addresses are also accepted by the routines, but nothing 4142 * is done with them since by definition their PA mappings are static. 4143 * (2) hat_add_callback() may only be called while holding the page lock 4144 * SE_SHARED or SE_EXCL of the underlying page (e.g., as_pagelock()), 4145 * or passing HAC_PAGELOCK flag. 4146 * (3) prehandler() and posthandler() may not call hat_add_callback() or 4147 * hat_delete_callback(), nor should they allocate memory. Post quiesce 4148 * callbacks may not sleep or acquire adaptive mutex locks. 4149 * (4) Either prehandler() or posthandler() (but not both) may be specified 4150 * as being NULL. Specifying an errhandler() is optional. 4151 * 4152 * Details of using the framework: 4153 * 4154 * registering a callback (hat_register_callback()) 4155 * 4156 * Pass prehandler, posthandler, errhandler addresses 4157 * as described below. If capture_cpus argument is nonzero, 4158 * suspend callback to the prehandler will occur with CPUs 4159 * captured and executing xc_loop() and CPUs will remain 4160 * captured until after the posthandler suspend callback 4161 * occurs. 4162 * 4163 * adding a callback (hat_add_callback()) 4164 * 4165 * as_pagelock(); 4166 * hat_add_callback(); 4167 * save returned pfn in private data structures or program registers; 4168 * as_pageunlock(); 4169 * 4170 * prehandler() 4171 * 4172 * Stop all accesses by physical address to this memory page. 4173 * Called twice: the first, PRESUSPEND, is a context safe to acquire 4174 * adaptive locks. The second, SUSPEND, is called at high PIL with 4175 * CPUs captured so adaptive locks may NOT be acquired (and all spin 4176 * locks must be XCALL_PIL or higher locks). 4177 * 4178 * May return the following errors: 4179 * EIO: A fatal error has occurred. This will result in panic. 4180 * EAGAIN: The page cannot be suspended. This will fail the 4181 * relocation. 4182 * 0: Success. 4183 * 4184 * posthandler() 4185 * 4186 * Save new pfn in private data structures or program registers; 4187 * not allowed to fail (non-zero return values will result in panic). 4188 * 4189 * errhandler() 4190 * 4191 * called when an error occurs related to the callback. 
Currently 4192 * the only such error is HAT_CB_ERR_LEAKED which indicates that 4193 * a page is being freed, but there are still outstanding callback(s) 4194 * registered on the page. 4195 * 4196 * removing a callback (hat_delete_callback(); e.g., prior to freeing memory) 4197 * 4198 * stop using physical address 4199 * hat_delete_callback(); 4200 * 4201 */ 4202 4203 /* 4204 * Register a callback class. Each subsystem should do this once and 4205 * cache the id_t returned for use in setting up and tearing down callbacks. 4206 * 4207 * There is no facility for removing callback IDs once they are created; 4208 * the "key" should be unique for each module, so in case a module is unloaded 4209 * and subsequently re-loaded, we can recycle the module's previous entry. 4210 */ 4211 id_t 4212 hat_register_callback(int key, 4213 int (*prehandler)(caddr_t, uint_t, uint_t, void *), 4214 int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t), 4215 int (*errhandler)(caddr_t, uint_t, uint_t, void *), 4216 int capture_cpus) 4217 { 4218 id_t id; 4219 4220 /* 4221 * Search the table for a pre-existing callback associated with 4222 * the identifier "key". If one exists, we re-use that entry in 4223 * the table for this instance, otherwise we assign the next 4224 * available table slot. 4225 */ 4226 for (id = 0; id < sfmmu_max_cb_id; id++) { 4227 if (sfmmu_cb_table[id].key == key) 4228 break; 4229 } 4230 4231 if (id == sfmmu_max_cb_id) { 4232 id = sfmmu_cb_nextid++; 4233 if (id >= sfmmu_max_cb_id) 4234 panic("hat_register_callback: out of callback IDs"); 4235 } 4236 4237 ASSERT(prehandler != NULL || posthandler != NULL); 4238 4239 sfmmu_cb_table[id].key = key; 4240 sfmmu_cb_table[id].prehandler = prehandler; 4241 sfmmu_cb_table[id].posthandler = posthandler; 4242 sfmmu_cb_table[id].errhandler = errhandler; 4243 sfmmu_cb_table[id].capture_cpus = capture_cpus; 4244 4245 return (id); 4246 } 4247 4248 #define HAC_COOKIE_NONE (void *)-1 4249 4250 /* 4251 * Add relocation callbacks to the specified addr/len which will be called 4252 * when relocating the associated page. See the description of pre and 4253 * posthandler above for more details. 4254 * 4255 * If HAC_PAGELOCK is included in flags, the underlying memory page is 4256 * locked internally so the caller must be able to deal with the callback 4257 * running even before this function has returned. If HAC_PAGELOCK is not 4258 * set, it is assumed that the underlying memory pages are locked. 4259 * 4260 * Since the caller must track the individual page boundaries anyway, 4261 * we only allow a callback to be added to a single page (large 4262 * or small). Thus [addr, addr + len) MUST be contained within a single 4263 * page. 4264 * 4265 * Registering multiple callbacks on the same [addr, addr+len) is supported, 4266 * _provided_that_ a unique parameter is specified for each callback. 4267 * If multiple callbacks are registered on the same range the callback will 4268 * be invoked with each unique parameter. Registering the same callback with 4269 * the same argument more than once will result in corrupted kernel state. 4270 * 4271 * Returns the pfn of the underlying kernel page in *rpfn 4272 * on success, or PFN_INVALID on failure. 4273 * 4274 * cookiep (if passed) provides storage space for an opaque cookie 4275 * to return later to hat_delete_callback(). This cookie makes the callback 4276 * deletion significantly quicker by avoiding a potentially lengthy hash 4277 * search. 
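 *
 * A hedged sketch of the registration side, tying the framework pieces
 * together (hypothetical client; XX_KEY, xx_pre and xx_post are illustrative
 * names, and the handler parameter names are assumptions read off the
 * prototypes in hat_register_callback() above):
 *
 *	static int
 *	xx_pre(caddr_t va, uint_t len, uint_t flag, void *pvt)
 *	{
 *		stop physical-address access to the page;
 *		called once for PRESUSPEND and once for SUSPEND
 *		return (0);
 *	}
 *
 *	static int
 *	xx_post(caddr_t va, uint_t len, uint_t flag, void *pvt, pfn_t newpfn)
 *	{
 *		record newpfn in place of the old one;
 *		return (0);
 *	}
 *
 *	id = hat_register_callback(XX_KEY, xx_pre, xx_post, NULL, 1);
 *
 * The returned id is what gets passed to hat_add_callback() below.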
4278 * 4279 * Returns values: 4280 * 0: success 4281 * ENOMEM: memory allocation failure (e.g. flags was passed as HAC_NOSLEEP) 4282 * EINVAL: callback ID is not valid 4283 * ENXIO: ["vaddr", "vaddr" + len) is not mapped in the kernel's address 4284 * space 4285 * ERANGE: ["vaddr", "vaddr" + len) crosses a page boundary 4286 */ 4287 int 4288 hat_add_callback(id_t callback_id, caddr_t vaddr, uint_t len, uint_t flags, 4289 void *pvt, pfn_t *rpfn, void **cookiep) 4290 { 4291 struct hmehash_bucket *hmebp; 4292 hmeblk_tag hblktag; 4293 struct hme_blk *hmeblkp; 4294 int hmeshift, hashno; 4295 caddr_t saddr, eaddr, baseaddr; 4296 struct pa_hment *pahmep; 4297 struct sf_hment *sfhmep, *osfhmep; 4298 kmutex_t *pml; 4299 tte_t tte; 4300 page_t *pp; 4301 vnode_t *vp; 4302 u_offset_t off; 4303 pfn_t pfn; 4304 int kmflags = (flags & HAC_SLEEP)? KM_SLEEP : KM_NOSLEEP; 4305 int locked = 0; 4306 4307 /* 4308 * For KPM mappings, just return the physical address since we 4309 * don't need to register any callbacks. 4310 */ 4311 if (IS_KPM_ADDR(vaddr)) { 4312 uint64_t paddr; 4313 SFMMU_KPM_VTOP(vaddr, paddr); 4314 *rpfn = btop(paddr); 4315 if (cookiep != NULL) 4316 *cookiep = HAC_COOKIE_NONE; 4317 return (0); 4318 } 4319 4320 if (callback_id < (id_t)0 || callback_id >= sfmmu_cb_nextid) { 4321 *rpfn = PFN_INVALID; 4322 return (EINVAL); 4323 } 4324 4325 if ((pahmep = kmem_cache_alloc(pa_hment_cache, kmflags)) == NULL) { 4326 *rpfn = PFN_INVALID; 4327 return (ENOMEM); 4328 } 4329 4330 sfhmep = &pahmep->sfment; 4331 4332 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4333 eaddr = saddr + len; 4334 4335 rehash: 4336 /* Find the mapping(s) for this page */ 4337 for (hashno = TTE64K, hmeblkp = NULL; 4338 hmeblkp == NULL && hashno <= mmu_hashcnt; 4339 hashno++) { 4340 hmeshift = HME_HASH_SHIFT(hashno); 4341 hblktag.htag_id = ksfmmup; 4342 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4343 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4344 hblktag.htag_rehash = hashno; 4345 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4346 4347 SFMMU_HASH_LOCK(hmebp); 4348 4349 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4350 4351 if (hmeblkp == NULL) 4352 SFMMU_HASH_UNLOCK(hmebp); 4353 } 4354 4355 if (hmeblkp == NULL) { 4356 kmem_cache_free(pa_hment_cache, pahmep); 4357 *rpfn = PFN_INVALID; 4358 return (ENXIO); 4359 } 4360 4361 ASSERT(!hmeblkp->hblk_shared); 4362 4363 HBLKTOHME(osfhmep, hmeblkp, saddr); 4364 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4365 4366 if (!TTE_IS_VALID(&tte)) { 4367 SFMMU_HASH_UNLOCK(hmebp); 4368 kmem_cache_free(pa_hment_cache, pahmep); 4369 *rpfn = PFN_INVALID; 4370 return (ENXIO); 4371 } 4372 4373 /* 4374 * Make sure the boundaries for the callback fall within this 4375 * single mapping. 4376 */ 4377 baseaddr = (caddr_t)get_hblk_base(hmeblkp); 4378 ASSERT(saddr >= baseaddr); 4379 if (eaddr > saddr + TTEBYTES(TTE_CSZ(&tte))) { 4380 SFMMU_HASH_UNLOCK(hmebp); 4381 kmem_cache_free(pa_hment_cache, pahmep); 4382 *rpfn = PFN_INVALID; 4383 return (ERANGE); 4384 } 4385 4386 pfn = sfmmu_ttetopfn(&tte, vaddr); 4387 4388 /* 4389 * The pfn may not have a page_t underneath in which case we 4390 * just return it. This can happen if we are doing I/O to a 4391 * static portion of the kernel's address space, for instance. 
4392 */ 4393 pp = osfhmep->hme_page; 4394 if (pp == NULL) { 4395 SFMMU_HASH_UNLOCK(hmebp); 4396 kmem_cache_free(pa_hment_cache, pahmep); 4397 *rpfn = pfn; 4398 if (cookiep) 4399 *cookiep = HAC_COOKIE_NONE; 4400 return (0); 4401 } 4402 ASSERT(pp == PP_PAGEROOT(pp)); 4403 4404 vp = pp->p_vnode; 4405 off = pp->p_offset; 4406 4407 pml = sfmmu_mlist_enter(pp); 4408 4409 if (flags & HAC_PAGELOCK) { 4410 if (!page_trylock(pp, SE_SHARED)) { 4411 /* 4412 * Somebody is holding SE_EXCL lock. Might 4413 * even be hat_page_relocate(). Drop all 4414 * our locks, lookup the page in &kvp, and 4415 * retry. If it doesn't exist in &kvp and &zvp, 4416 * then we must be dealing with a kernel mapped 4417 * page which doesn't actually belong to 4418 * segkmem so we punt. 4419 */ 4420 sfmmu_mlist_exit(pml); 4421 SFMMU_HASH_UNLOCK(hmebp); 4422 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4423 4424 /* check zvp before giving up */ 4425 if (pp == NULL) 4426 pp = page_lookup(&zvp, (u_offset_t)saddr, 4427 SE_SHARED); 4428 4429 /* Okay, we didn't find it, give up */ 4430 if (pp == NULL) { 4431 kmem_cache_free(pa_hment_cache, pahmep); 4432 *rpfn = pfn; 4433 if (cookiep) 4434 *cookiep = HAC_COOKIE_NONE; 4435 return (0); 4436 } 4437 page_unlock(pp); 4438 goto rehash; 4439 } 4440 locked = 1; 4441 } 4442 4443 if (!PAGE_LOCKED(pp) && !panicstr) 4444 panic("hat_add_callback: page 0x%p not locked", (void *)pp); 4445 4446 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4447 pp->p_offset != off) { 4448 /* 4449 * The page moved before we got our hands on it. Drop 4450 * all the locks and try again. 4451 */ 4452 ASSERT((flags & HAC_PAGELOCK) != 0); 4453 sfmmu_mlist_exit(pml); 4454 SFMMU_HASH_UNLOCK(hmebp); 4455 page_unlock(pp); 4456 locked = 0; 4457 goto rehash; 4458 } 4459 4460 if (!VN_ISKAS(vp)) { 4461 /* 4462 * This is not a segkmem page but another page which 4463 * has been kernel mapped. It had better have at least 4464 * a share lock on it. Return the pfn. 4465 */ 4466 sfmmu_mlist_exit(pml); 4467 SFMMU_HASH_UNLOCK(hmebp); 4468 if (locked) 4469 page_unlock(pp); 4470 kmem_cache_free(pa_hment_cache, pahmep); 4471 ASSERT(PAGE_LOCKED(pp)); 4472 *rpfn = pfn; 4473 if (cookiep) 4474 *cookiep = HAC_COOKIE_NONE; 4475 return (0); 4476 } 4477 4478 /* 4479 * Setup this pa_hment and link its embedded dummy sf_hment into 4480 * the mapping list. 4481 */ 4482 pp->p_share++; 4483 pahmep->cb_id = callback_id; 4484 pahmep->addr = vaddr; 4485 pahmep->len = len; 4486 pahmep->refcnt = 1; 4487 pahmep->flags = 0; 4488 pahmep->pvt = pvt; 4489 4490 sfhmep->hme_tte.ll = 0; 4491 sfhmep->hme_data = pahmep; 4492 sfhmep->hme_prev = osfhmep; 4493 sfhmep->hme_next = osfhmep->hme_next; 4494 4495 if (osfhmep->hme_next) 4496 osfhmep->hme_next->hme_prev = sfhmep; 4497 4498 osfhmep->hme_next = sfhmep; 4499 4500 sfmmu_mlist_exit(pml); 4501 SFMMU_HASH_UNLOCK(hmebp); 4502 4503 if (locked) 4504 page_unlock(pp); 4505 4506 *rpfn = pfn; 4507 if (cookiep) 4508 *cookiep = (void *)pahmep; 4509 4510 return (0); 4511 } 4512 4513 /* 4514 * Remove the relocation callbacks from the specified addr/len. 
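 *
 * A hedged sketch of the add/delete pairing from a client's point of view
 * (hypothetical caller; id, my_pvt and cookie are illustrative names):
 *
 *	pfn_t pfn;
 *	void *cookie;
 *
 *	if (hat_add_callback(id, va, MMU_PAGESIZE, HAC_SLEEP | HAC_PAGELOCK,
 *	    my_pvt, &pfn, &cookie) != 0)
 *		bail out, pfn is PFN_INVALID;
 *	do physical-address I/O against pfn;
 *	hat_delete_callback(va, MMU_PAGESIZE, my_pvt, HAC_PAGELOCK, cookie);
 *
 * When no cookie was saved, the (pvt, vaddr, len) triple is what identifies
 * the pa_hment to delete, which is why pvt must be unique per registration.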
4515 */ 4516 void 4517 hat_delete_callback(caddr_t vaddr, uint_t len, void *pvt, uint_t flags, 4518 void *cookie) 4519 { 4520 struct hmehash_bucket *hmebp; 4521 hmeblk_tag hblktag; 4522 struct hme_blk *hmeblkp; 4523 int hmeshift, hashno; 4524 caddr_t saddr; 4525 struct pa_hment *pahmep; 4526 struct sf_hment *sfhmep, *osfhmep; 4527 kmutex_t *pml; 4528 tte_t tte; 4529 page_t *pp; 4530 vnode_t *vp; 4531 u_offset_t off; 4532 int locked = 0; 4533 4534 /* 4535 * If the cookie is HAC_COOKIE_NONE then there is no pa_hment to 4536 * remove so just return. 4537 */ 4538 if (cookie == HAC_COOKIE_NONE || IS_KPM_ADDR(vaddr)) 4539 return; 4540 4541 saddr = (caddr_t)((uintptr_t)vaddr & MMU_PAGEMASK); 4542 4543 rehash: 4544 /* Find the mapping(s) for this page */ 4545 for (hashno = TTE64K, hmeblkp = NULL; 4546 hmeblkp == NULL && hashno <= mmu_hashcnt; 4547 hashno++) { 4548 hmeshift = HME_HASH_SHIFT(hashno); 4549 hblktag.htag_id = ksfmmup; 4550 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4551 hblktag.htag_bspage = HME_HASH_BSPAGE(saddr, hmeshift); 4552 hblktag.htag_rehash = hashno; 4553 hmebp = HME_HASH_FUNCTION(ksfmmup, saddr, hmeshift); 4554 4555 SFMMU_HASH_LOCK(hmebp); 4556 4557 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 4558 4559 if (hmeblkp == NULL) 4560 SFMMU_HASH_UNLOCK(hmebp); 4561 } 4562 4563 if (hmeblkp == NULL) 4564 return; 4565 4566 ASSERT(!hmeblkp->hblk_shared); 4567 4568 HBLKTOHME(osfhmep, hmeblkp, saddr); 4569 4570 sfmmu_copytte(&osfhmep->hme_tte, &tte); 4571 if (!TTE_IS_VALID(&tte)) { 4572 SFMMU_HASH_UNLOCK(hmebp); 4573 return; 4574 } 4575 4576 pp = osfhmep->hme_page; 4577 if (pp == NULL) { 4578 SFMMU_HASH_UNLOCK(hmebp); 4579 ASSERT(cookie == NULL); 4580 return; 4581 } 4582 4583 vp = pp->p_vnode; 4584 off = pp->p_offset; 4585 4586 pml = sfmmu_mlist_enter(pp); 4587 4588 if (flags & HAC_PAGELOCK) { 4589 if (!page_trylock(pp, SE_SHARED)) { 4590 /* 4591 * Somebody is holding SE_EXCL lock. Might 4592 * even be hat_page_relocate(). Drop all 4593 * our locks, lookup the page in &kvp, and 4594 * retry. If it doesn't exist in &kvp and &zvp, 4595 * then we must be dealing with a kernel mapped 4596 * page which doesn't actually belong to 4597 * segkmem so we punt. 4598 */ 4599 sfmmu_mlist_exit(pml); 4600 SFMMU_HASH_UNLOCK(hmebp); 4601 pp = page_lookup(&kvp, (u_offset_t)saddr, SE_SHARED); 4602 /* check zvp before giving up */ 4603 if (pp == NULL) 4604 pp = page_lookup(&zvp, (u_offset_t)saddr, 4605 SE_SHARED); 4606 4607 if (pp == NULL) { 4608 ASSERT(cookie == NULL); 4609 return; 4610 } 4611 page_unlock(pp); 4612 goto rehash; 4613 } 4614 locked = 1; 4615 } 4616 4617 ASSERT(PAGE_LOCKED(pp)); 4618 4619 if (osfhmep->hme_page != pp || pp->p_vnode != vp || 4620 pp->p_offset != off) { 4621 /* 4622 * The page moved before we got our hands on it. Drop 4623 * all the locks and try again. 4624 */ 4625 ASSERT((flags & HAC_PAGELOCK) != 0); 4626 sfmmu_mlist_exit(pml); 4627 SFMMU_HASH_UNLOCK(hmebp); 4628 page_unlock(pp); 4629 locked = 0; 4630 goto rehash; 4631 } 4632 4633 if (!VN_ISKAS(vp)) { 4634 /* 4635 * This is not a segkmem page but another page which 4636 * has been kernel mapped. 
4637 */ 4638 sfmmu_mlist_exit(pml); 4639 SFMMU_HASH_UNLOCK(hmebp); 4640 if (locked) 4641 page_unlock(pp); 4642 ASSERT(cookie == NULL); 4643 return; 4644 } 4645 4646 if (cookie != NULL) { 4647 pahmep = (struct pa_hment *)cookie; 4648 sfhmep = &pahmep->sfment; 4649 } else { 4650 for (sfhmep = pp->p_mapping; sfhmep != NULL; 4651 sfhmep = sfhmep->hme_next) { 4652 4653 /* 4654 * skip va<->pa mappings 4655 */ 4656 if (!IS_PAHME(sfhmep)) 4657 continue; 4658 4659 pahmep = sfhmep->hme_data; 4660 ASSERT(pahmep != NULL); 4661 4662 /* 4663 * if pa_hment matches, remove it 4664 */ 4665 if ((pahmep->pvt == pvt) && 4666 (pahmep->addr == vaddr) && 4667 (pahmep->len == len)) { 4668 break; 4669 } 4670 } 4671 } 4672 4673 if (sfhmep == NULL) { 4674 if (!panicstr) { 4675 panic("hat_delete_callback: pa_hment not found, pp %p", 4676 (void *)pp); 4677 } 4678 return; 4679 } 4680 4681 /* 4682 * Note: at this point a valid kernel mapping must still be 4683 * present on this page. 4684 */ 4685 pp->p_share--; 4686 if (pp->p_share <= 0) 4687 panic("hat_delete_callback: zero p_share"); 4688 4689 if (--pahmep->refcnt == 0) { 4690 if (pahmep->flags != 0) 4691 panic("hat_delete_callback: pa_hment is busy"); 4692 4693 /* 4694 * Remove sfhmep from the mapping list for the page. 4695 */ 4696 if (sfhmep->hme_prev) { 4697 sfhmep->hme_prev->hme_next = sfhmep->hme_next; 4698 } else { 4699 pp->p_mapping = sfhmep->hme_next; 4700 } 4701 4702 if (sfhmep->hme_next) 4703 sfhmep->hme_next->hme_prev = sfhmep->hme_prev; 4704 4705 sfmmu_mlist_exit(pml); 4706 SFMMU_HASH_UNLOCK(hmebp); 4707 4708 if (locked) 4709 page_unlock(pp); 4710 4711 kmem_cache_free(pa_hment_cache, pahmep); 4712 return; 4713 } 4714 4715 sfmmu_mlist_exit(pml); 4716 SFMMU_HASH_UNLOCK(hmebp); 4717 if (locked) 4718 page_unlock(pp); 4719 } 4720 4721 /* 4722 * hat_probe returns 1 if the translation for the address 'addr' is 4723 * loaded, zero otherwise. 4724 * 4725 * hat_probe should be used only for advisorary purposes because it may 4726 * occasionally return the wrong value. The implementation must guarantee that 4727 * returning the wrong value is a very rare event. hat_probe is used 4728 * to implement optimizations in the segment drivers. 
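 *
 * A hedged illustration of such an optimization (hypothetical segment
 * driver fragment, not from this file): only take the reload path when no
 * translation appears to be present,
 *
 *	if (hat_probe(as->a_hat, addr) == 0)
 *		hat_memload(as->a_hat, addr, pp, prot, hat_flags);
 *
 * Because the answer is advisory, a rare wrong value costs at most a
 * redundant reload or one extra fault, not incorrect behavior.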
4729 * 4730 */ 4731 int 4732 hat_probe(struct hat *sfmmup, caddr_t addr) 4733 { 4734 pfn_t pfn; 4735 tte_t tte; 4736 4737 ASSERT(sfmmup != NULL); 4738 4739 ASSERT((sfmmup == ksfmmup) || 4740 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4741 4742 if (sfmmup == ksfmmup) { 4743 while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte)) 4744 == PFN_SUSPENDED) { 4745 sfmmu_vatopfn_suspended(addr, sfmmup, &tte); 4746 } 4747 } else { 4748 pfn = sfmmu_uvatopfn(addr, sfmmup, NULL); 4749 } 4750 4751 if (pfn != PFN_INVALID) 4752 return (1); 4753 else 4754 return (0); 4755 } 4756 4757 ssize_t 4758 hat_getpagesize(struct hat *sfmmup, caddr_t addr) 4759 { 4760 tte_t tte; 4761 4762 if (sfmmup == ksfmmup) { 4763 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4764 return (-1); 4765 } 4766 } else { 4767 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4768 return (-1); 4769 } 4770 } 4771 4772 ASSERT(TTE_IS_VALID(&tte)); 4773 return (TTEBYTES(TTE_CSZ(&tte))); 4774 } 4775 4776 uint_t 4777 hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr) 4778 { 4779 tte_t tte; 4780 4781 if (sfmmup == ksfmmup) { 4782 if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4783 tte.ll = 0; 4784 } 4785 } else { 4786 if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) { 4787 tte.ll = 0; 4788 } 4789 } 4790 if (TTE_IS_VALID(&tte)) { 4791 *attr = sfmmu_ptov_attr(&tte); 4792 return (0); 4793 } 4794 *attr = 0; 4795 return ((uint_t)0xffffffff); 4796 } 4797 4798 /* 4799 * Enables more attributes on specified address range (ie. logical OR) 4800 */ 4801 void 4802 hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4803 { 4804 ASSERT(hat->sfmmu_as != NULL); 4805 4806 sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR); 4807 } 4808 4809 /* 4810 * Assigns attributes to the specified address range. All the attributes 4811 * are specified. 4812 */ 4813 void 4814 hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4815 { 4816 ASSERT(hat->sfmmu_as != NULL); 4817 4818 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR); 4819 } 4820 4821 /* 4822 * Remove attributes on the specified address range (ie. loginal NAND) 4823 */ 4824 void 4825 hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr) 4826 { 4827 ASSERT(hat->sfmmu_as != NULL); 4828 4829 sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR); 4830 } 4831 4832 /* 4833 * Change attributes on an address range to that specified by attr and mode. 
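 *
 * For illustration (hypothetical caller), starting from a mapping loaded
 * with PROT_READ:
 *
 *	hat_setattr(hat, addr, len, PROT_WRITE);	OR in write permission
 *	hat_clrattr(hat, addr, len, PROT_WRITE);	NAND it back out
 *	hat_chgattr(hat, addr, len, PROT_READ | PROT_USER);  assign exactly these
 *
 * All three wrappers above funnel into this routine, differing only in the
 * SFMMU_SETATTR, SFMMU_CLRATTR or SFMMU_CHGATTR mode argument.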
4834 */ 4835 static void 4836 sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr, 4837 int mode) 4838 { 4839 struct hmehash_bucket *hmebp; 4840 hmeblk_tag hblktag; 4841 int hmeshift, hashno = 1; 4842 struct hme_blk *hmeblkp, *list = NULL; 4843 caddr_t endaddr; 4844 cpuset_t cpuset; 4845 demap_range_t dmr; 4846 4847 CPUSET_ZERO(cpuset); 4848 4849 ASSERT((sfmmup == ksfmmup) || 4850 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 4851 ASSERT((len & MMU_PAGEOFFSET) == 0); 4852 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 4853 4854 if ((attr & PROT_USER) && (mode != SFMMU_CLRATTR) && 4855 ((addr + len) > (caddr_t)USERLIMIT)) { 4856 panic("user addr %p in kernel space", 4857 (void *)addr); 4858 } 4859 4860 endaddr = addr + len; 4861 hblktag.htag_id = sfmmup; 4862 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 4863 DEMAP_RANGE_INIT(sfmmup, &dmr); 4864 4865 while (addr < endaddr) { 4866 hmeshift = HME_HASH_SHIFT(hashno); 4867 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 4868 hblktag.htag_rehash = hashno; 4869 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 4870 4871 SFMMU_HASH_LOCK(hmebp); 4872 4873 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 4874 if (hmeblkp != NULL) { 4875 ASSERT(!hmeblkp->hblk_shared); 4876 /* 4877 * We've encountered a shadow hmeblk so skip the range 4878 * of the next smaller mapping size. 4879 */ 4880 if (hmeblkp->hblk_shw_bit) { 4881 ASSERT(sfmmup != ksfmmup); 4882 ASSERT(hashno > 1); 4883 addr = (caddr_t)P2END((uintptr_t)addr, 4884 TTEBYTES(hashno - 1)); 4885 } else { 4886 addr = sfmmu_hblk_chgattr(sfmmup, 4887 hmeblkp, addr, endaddr, &dmr, attr, mode); 4888 } 4889 SFMMU_HASH_UNLOCK(hmebp); 4890 hashno = 1; 4891 continue; 4892 } 4893 SFMMU_HASH_UNLOCK(hmebp); 4894 4895 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 4896 /* 4897 * We have traversed the whole list and rehashed 4898 * if necessary without finding the address to chgattr. 4899 * This is ok, so we increment the address by the 4900 * smallest hmeblk range for kernel mappings or for 4901 * user mappings with no large pages, and the largest 4902 * hmeblk range, to account for shadow hmeblks, for 4903 * user mappings with large pages and continue. 4904 */ 4905 if (sfmmup == ksfmmup) 4906 addr = (caddr_t)P2END((uintptr_t)addr, 4907 TTEBYTES(1)); 4908 else 4909 addr = (caddr_t)P2END((uintptr_t)addr, 4910 TTEBYTES(hashno)); 4911 hashno = 1; 4912 } else { 4913 hashno++; 4914 } 4915 } 4916 4917 sfmmu_hblks_list_purge(&list, 0); 4918 DEMAP_RANGE_FLUSH(&dmr); 4919 cpuset = sfmmup->sfmmu_cpusran; 4920 xt_sync(cpuset); 4921 } 4922 4923 /* 4924 * This function chgattr on a range of addresses in an hmeblk. It returns the 4925 * next addres that needs to be chgattr. 4926 * It should be called with the hash lock held. 4927 * XXX It should be possible to optimize chgattr by not flushing every time but 4928 * on the other hand: 4929 * 1. do one flush crosscall. 4930 * 2. 
only flush if we are increasing permissions (make sure this will work) 4931 */ 4932 static caddr_t 4933 sfmmu_hblk_chgattr(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 4934 caddr_t endaddr, demap_range_t *dmrp, uint_t attr, int mode) 4935 { 4936 tte_t tte, tteattr, tteflags, ttemod; 4937 struct sf_hment *sfhmep; 4938 int ttesz; 4939 struct page *pp = NULL; 4940 kmutex_t *pml, *pmtx; 4941 int ret; 4942 int use_demap_range; 4943 #if defined(SF_ERRATA_57) 4944 int check_exec; 4945 #endif 4946 4947 ASSERT(in_hblk_range(hmeblkp, addr)); 4948 ASSERT(hmeblkp->hblk_shw_bit == 0); 4949 ASSERT(!hmeblkp->hblk_shared); 4950 4951 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 4952 ttesz = get_hblk_ttesz(hmeblkp); 4953 4954 /* 4955 * Flush the current demap region if addresses have been 4956 * skipped or the page size doesn't match. 4957 */ 4958 use_demap_range = (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp)); 4959 if (use_demap_range) { 4960 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 4961 } else if (dmrp != NULL) { 4962 DEMAP_RANGE_FLUSH(dmrp); 4963 } 4964 4965 tteattr.ll = sfmmu_vtop_attr(attr, mode, &tteflags); 4966 #if defined(SF_ERRATA_57) 4967 check_exec = (sfmmup != ksfmmup) && 4968 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 4969 TTE_IS_EXECUTABLE(&tteattr); 4970 #endif 4971 HBLKTOHME(sfhmep, hmeblkp, addr); 4972 while (addr < endaddr) { 4973 sfmmu_copytte(&sfhmep->hme_tte, &tte); 4974 if (TTE_IS_VALID(&tte)) { 4975 if ((tte.ll & tteflags.ll) == tteattr.ll) { 4976 /* 4977 * if the new attr is the same as old 4978 * continue 4979 */ 4980 goto next_addr; 4981 } 4982 if (!TTE_IS_WRITABLE(&tteattr)) { 4983 /* 4984 * make sure we clear hw modify bit if we 4985 * removing write protections 4986 */ 4987 tteflags.tte_intlo |= TTE_HWWR_INT; 4988 } 4989 4990 pml = NULL; 4991 pp = sfhmep->hme_page; 4992 if (pp) { 4993 pml = sfmmu_mlist_enter(pp); 4994 } 4995 4996 if (pp != sfhmep->hme_page) { 4997 /* 4998 * tte must have been unloaded. 4999 */ 5000 ASSERT(pml); 5001 sfmmu_mlist_exit(pml); 5002 continue; 5003 } 5004 5005 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5006 5007 ttemod = tte; 5008 ttemod.ll = (ttemod.ll & ~tteflags.ll) | tteattr.ll; 5009 ASSERT(TTE_TO_TTEPFN(&ttemod) == TTE_TO_TTEPFN(&tte)); 5010 5011 #if defined(SF_ERRATA_57) 5012 if (check_exec && addr < errata57_limit) 5013 ttemod.tte_exec_perm = 0; 5014 #endif 5015 ret = sfmmu_modifytte_try(&tte, &ttemod, 5016 &sfhmep->hme_tte); 5017 5018 if (ret < 0) { 5019 /* tte changed underneath us */ 5020 if (pml) { 5021 sfmmu_mlist_exit(pml); 5022 } 5023 continue; 5024 } 5025 5026 if (tteflags.tte_intlo & TTE_HWWR_INT) { 5027 /* 5028 * need to sync if we are clearing modify bit. 5029 */ 5030 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5031 } 5032 5033 if (pp && PP_ISRO(pp)) { 5034 if (tteattr.tte_intlo & TTE_WRPRM_INT) { 5035 pmtx = sfmmu_page_enter(pp); 5036 PP_CLRRO(pp); 5037 sfmmu_page_exit(pmtx); 5038 } 5039 } 5040 5041 if (ret > 0 && use_demap_range) { 5042 DEMAP_RANGE_MARKPG(dmrp, addr); 5043 } else if (ret > 0) { 5044 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5045 } 5046 5047 if (pml) { 5048 sfmmu_mlist_exit(pml); 5049 } 5050 } 5051 next_addr: 5052 addr += TTEBYTES(ttesz); 5053 sfhmep++; 5054 DEMAP_RANGE_NEXTPG(dmrp); 5055 } 5056 return (addr); 5057 } 5058 5059 /* 5060 * This routine converts virtual attributes to physical ones. It will 5061 * update the tteflags field with the tte mask corresponding to the attributes 5062 * affected and it returns the new attributes. 
It will also clear the modify 5063 * bit if we are taking away write permission. This is necessary since the 5064 * modify bit is the hardware permission bit and we need to clear it in order 5065 * to detect write faults. 5066 */ 5067 static uint64_t 5068 sfmmu_vtop_attr(uint_t attr, int mode, tte_t *ttemaskp) 5069 { 5070 tte_t ttevalue; 5071 5072 ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR)); 5073 5074 switch (mode) { 5075 case SFMMU_CHGATTR: 5076 /* all attributes specified */ 5077 ttevalue.tte_inthi = MAKE_TTEATTR_INTHI(attr); 5078 ttevalue.tte_intlo = MAKE_TTEATTR_INTLO(attr); 5079 ttemaskp->tte_inthi = TTEINTHI_ATTR; 5080 ttemaskp->tte_intlo = TTEINTLO_ATTR; 5081 break; 5082 case SFMMU_SETATTR: 5083 ASSERT(!(attr & ~HAT_PROT_MASK)); 5084 ttemaskp->ll = 0; 5085 ttevalue.ll = 0; 5086 /* 5087 * a valid tte implies exec and read for sfmmu 5088 * so no need to do anything about them. 5089 * since priviledged access implies user access 5090 * PROT_USER doesn't make sense either. 5091 */ 5092 if (attr & PROT_WRITE) { 5093 ttemaskp->tte_intlo |= TTE_WRPRM_INT; 5094 ttevalue.tte_intlo |= TTE_WRPRM_INT; 5095 } 5096 break; 5097 case SFMMU_CLRATTR: 5098 /* attributes will be nand with current ones */ 5099 if (attr & ~(PROT_WRITE | PROT_USER)) { 5100 panic("sfmmu: attr %x not supported", attr); 5101 } 5102 ttemaskp->ll = 0; 5103 ttevalue.ll = 0; 5104 if (attr & PROT_WRITE) { 5105 /* clear both writable and modify bit */ 5106 ttemaskp->tte_intlo |= TTE_WRPRM_INT | TTE_HWWR_INT; 5107 } 5108 if (attr & PROT_USER) { 5109 ttemaskp->tte_intlo |= TTE_PRIV_INT; 5110 ttevalue.tte_intlo |= TTE_PRIV_INT; 5111 } 5112 break; 5113 default: 5114 panic("sfmmu_vtop_attr: bad mode %x", mode); 5115 } 5116 ASSERT(TTE_TO_TTEPFN(&ttevalue) == 0); 5117 return (ttevalue.ll); 5118 } 5119 5120 static uint_t 5121 sfmmu_ptov_attr(tte_t *ttep) 5122 { 5123 uint_t attr; 5124 5125 ASSERT(TTE_IS_VALID(ttep)); 5126 5127 attr = PROT_READ; 5128 5129 if (TTE_IS_WRITABLE(ttep)) { 5130 attr |= PROT_WRITE; 5131 } 5132 if (TTE_IS_EXECUTABLE(ttep)) { 5133 attr |= PROT_EXEC; 5134 } 5135 if (!TTE_IS_PRIVILEGED(ttep)) { 5136 attr |= PROT_USER; 5137 } 5138 if (TTE_IS_NFO(ttep)) { 5139 attr |= HAT_NOFAULT; 5140 } 5141 if (TTE_IS_NOSYNC(ttep)) { 5142 attr |= HAT_NOSYNC; 5143 } 5144 if (TTE_IS_SIDEFFECT(ttep)) { 5145 attr |= SFMMU_SIDEFFECT; 5146 } 5147 if (!TTE_IS_VCACHEABLE(ttep)) { 5148 attr |= SFMMU_UNCACHEVTTE; 5149 } 5150 if (!TTE_IS_PCACHEABLE(ttep)) { 5151 attr |= SFMMU_UNCACHEPTTE; 5152 } 5153 return (attr); 5154 } 5155 5156 /* 5157 * hat_chgprot is a deprecated hat call. New segment drivers 5158 * should store all attributes and use hat_*attr calls. 5159 * 5160 * Change the protections in the virtual address range 5161 * given to the specified virtual protection. If vprot is ~PROT_WRITE, 5162 * then remove write permission, leaving the other 5163 * permissions unchanged. If vprot is ~PROT_USER, remove user permissions. 
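 *
 * For example (illustrative only):
 *
 *	hat_chgprot(hat, addr, len, PROT_READ | PROT_EXEC);	set these exactly
 *	hat_chgprot(hat, addr, len, (uint_t)~PROT_WRITE);	only revoke write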
5164 * 5165 */ 5166 void 5167 hat_chgprot(struct hat *sfmmup, caddr_t addr, size_t len, uint_t vprot) 5168 { 5169 struct hmehash_bucket *hmebp; 5170 hmeblk_tag hblktag; 5171 int hmeshift, hashno = 1; 5172 struct hme_blk *hmeblkp, *list = NULL; 5173 caddr_t endaddr; 5174 cpuset_t cpuset; 5175 demap_range_t dmr; 5176 5177 ASSERT((len & MMU_PAGEOFFSET) == 0); 5178 ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0); 5179 5180 ASSERT(sfmmup->sfmmu_as != NULL); 5181 5182 CPUSET_ZERO(cpuset); 5183 5184 if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) && 5185 ((addr + len) > (caddr_t)USERLIMIT)) { 5186 panic("user addr %p vprot %x in kernel space", 5187 (void *)addr, vprot); 5188 } 5189 endaddr = addr + len; 5190 hblktag.htag_id = sfmmup; 5191 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5192 DEMAP_RANGE_INIT(sfmmup, &dmr); 5193 5194 while (addr < endaddr) { 5195 hmeshift = HME_HASH_SHIFT(hashno); 5196 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5197 hblktag.htag_rehash = hashno; 5198 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5199 5200 SFMMU_HASH_LOCK(hmebp); 5201 5202 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 5203 if (hmeblkp != NULL) { 5204 ASSERT(!hmeblkp->hblk_shared); 5205 /* 5206 * We've encountered a shadow hmeblk so skip the range 5207 * of the next smaller mapping size. 5208 */ 5209 if (hmeblkp->hblk_shw_bit) { 5210 ASSERT(sfmmup != ksfmmup); 5211 ASSERT(hashno > 1); 5212 addr = (caddr_t)P2END((uintptr_t)addr, 5213 TTEBYTES(hashno - 1)); 5214 } else { 5215 addr = sfmmu_hblk_chgprot(sfmmup, hmeblkp, 5216 addr, endaddr, &dmr, vprot); 5217 } 5218 SFMMU_HASH_UNLOCK(hmebp); 5219 hashno = 1; 5220 continue; 5221 } 5222 SFMMU_HASH_UNLOCK(hmebp); 5223 5224 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 5225 /* 5226 * We have traversed the whole list and rehashed 5227 * if necessary without finding the address to chgprot. 5228 * This is ok so we increment the address by the 5229 * smallest hmeblk range for kernel mappings and the 5230 * largest hmeblk range, to account for shadow hmeblks, 5231 * for user mappings and continue. 5232 */ 5233 if (sfmmup == ksfmmup) 5234 addr = (caddr_t)P2END((uintptr_t)addr, 5235 TTEBYTES(1)); 5236 else 5237 addr = (caddr_t)P2END((uintptr_t)addr, 5238 TTEBYTES(hashno)); 5239 hashno = 1; 5240 } else { 5241 hashno++; 5242 } 5243 } 5244 5245 sfmmu_hblks_list_purge(&list, 0); 5246 DEMAP_RANGE_FLUSH(&dmr); 5247 cpuset = sfmmup->sfmmu_cpusran; 5248 xt_sync(cpuset); 5249 } 5250 5251 /* 5252 * This function chgprots a range of addresses in an hmeblk. It returns the 5253 * next addres that needs to be chgprot. 5254 * It should be called with the hash lock held. 5255 * XXX It shold be possible to optimize chgprot by not flushing every time but 5256 * on the other hand: 5257 * 1. do one flush crosscall. 5258 * 2. 
only flush if we are increasing permissions (make sure this will work) 5259 */ 5260 static caddr_t 5261 sfmmu_hblk_chgprot(sfmmu_t *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5262 caddr_t endaddr, demap_range_t *dmrp, uint_t vprot) 5263 { 5264 uint_t pprot; 5265 tte_t tte, ttemod; 5266 struct sf_hment *sfhmep; 5267 uint_t tteflags; 5268 int ttesz; 5269 struct page *pp = NULL; 5270 kmutex_t *pml, *pmtx; 5271 int ret; 5272 int use_demap_range; 5273 #if defined(SF_ERRATA_57) 5274 int check_exec; 5275 #endif 5276 5277 ASSERT(in_hblk_range(hmeblkp, addr)); 5278 ASSERT(hmeblkp->hblk_shw_bit == 0); 5279 ASSERT(!hmeblkp->hblk_shared); 5280 5281 #ifdef DEBUG 5282 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5283 (endaddr < get_hblk_endaddr(hmeblkp))) { 5284 panic("sfmmu_hblk_chgprot: partial chgprot of large page"); 5285 } 5286 #endif /* DEBUG */ 5287 5288 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5289 ttesz = get_hblk_ttesz(hmeblkp); 5290 5291 pprot = sfmmu_vtop_prot(vprot, &tteflags); 5292 #if defined(SF_ERRATA_57) 5293 check_exec = (sfmmup != ksfmmup) && 5294 AS_TYPE_64BIT(sfmmup->sfmmu_as) && 5295 ((vprot & PROT_EXEC) == PROT_EXEC); 5296 #endif 5297 HBLKTOHME(sfhmep, hmeblkp, addr); 5298 5299 /* 5300 * Flush the current demap region if addresses have been 5301 * skipped or the page size doesn't match. 5302 */ 5303 use_demap_range = (TTEBYTES(ttesz) == MMU_PAGESIZE); 5304 if (use_demap_range) { 5305 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5306 } else if (dmrp != NULL) { 5307 DEMAP_RANGE_FLUSH(dmrp); 5308 } 5309 5310 while (addr < endaddr) { 5311 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5312 if (TTE_IS_VALID(&tte)) { 5313 if (TTE_GET_LOFLAGS(&tte, tteflags) == pprot) { 5314 /* 5315 * if the new protection is the same as old 5316 * continue 5317 */ 5318 goto next_addr; 5319 } 5320 pml = NULL; 5321 pp = sfhmep->hme_page; 5322 if (pp) { 5323 pml = sfmmu_mlist_enter(pp); 5324 } 5325 if (pp != sfhmep->hme_page) { 5326 /* 5327 * tte most have been unloaded 5328 * underneath us. Recheck 5329 */ 5330 ASSERT(pml); 5331 sfmmu_mlist_exit(pml); 5332 continue; 5333 } 5334 5335 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5336 5337 ttemod = tte; 5338 TTE_SET_LOFLAGS(&ttemod, tteflags, pprot); 5339 #if defined(SF_ERRATA_57) 5340 if (check_exec && addr < errata57_limit) 5341 ttemod.tte_exec_perm = 0; 5342 #endif 5343 ret = sfmmu_modifytte_try(&tte, &ttemod, 5344 &sfhmep->hme_tte); 5345 5346 if (ret < 0) { 5347 /* tte changed underneath us */ 5348 if (pml) { 5349 sfmmu_mlist_exit(pml); 5350 } 5351 continue; 5352 } 5353 5354 if (tteflags & TTE_HWWR_INT) { 5355 /* 5356 * need to sync if we are clearing modify bit. 5357 */ 5358 sfmmu_ttesync(sfmmup, addr, &tte, pp); 5359 } 5360 5361 if (pp && PP_ISRO(pp)) { 5362 if (pprot & TTE_WRPRM_INT) { 5363 pmtx = sfmmu_page_enter(pp); 5364 PP_CLRRO(pp); 5365 sfmmu_page_exit(pmtx); 5366 } 5367 } 5368 5369 if (ret > 0 && use_demap_range) { 5370 DEMAP_RANGE_MARKPG(dmrp, addr); 5371 } else if (ret > 0) { 5372 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 5373 } 5374 5375 if (pml) { 5376 sfmmu_mlist_exit(pml); 5377 } 5378 } 5379 next_addr: 5380 addr += TTEBYTES(ttesz); 5381 sfhmep++; 5382 DEMAP_RANGE_NEXTPG(dmrp); 5383 } 5384 return (addr); 5385 } 5386 5387 /* 5388 * This routine is deprecated and should only be used by hat_chgprot. 5389 * The correct routine is sfmmu_vtop_attr. 5390 * This routine converts virtual page protections to physical ones. 
It will 5391 * update the tteflags field with the tte mask corresponding to the protections 5392 * affected and it returns the new protections. It will also clear the modify 5393 * bit if we are taking away write permission. This is necessary since the 5394 * modify bit is the hardware permission bit and we need to clear it in order 5395 * to detect write faults. 5396 * It accepts the following special protections: 5397 * ~PROT_WRITE = remove write permissions. 5398 * ~PROT_USER = remove user permissions. 5399 */ 5400 static uint_t 5401 sfmmu_vtop_prot(uint_t vprot, uint_t *tteflagsp) 5402 { 5403 if (vprot == (uint_t)~PROT_WRITE) { 5404 *tteflagsp = TTE_WRPRM_INT | TTE_HWWR_INT; 5405 return (0); /* will cause wrprm to be cleared */ 5406 } 5407 if (vprot == (uint_t)~PROT_USER) { 5408 *tteflagsp = TTE_PRIV_INT; 5409 return (0); /* will cause privprm to be cleared */ 5410 } 5411 if ((vprot == 0) || (vprot == PROT_USER) || 5412 ((vprot & PROT_ALL) != vprot)) { 5413 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5414 } 5415 5416 switch (vprot) { 5417 case (PROT_READ): 5418 case (PROT_EXEC): 5419 case (PROT_EXEC | PROT_READ): 5420 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5421 return (TTE_PRIV_INT); /* set prv and clr wrt */ 5422 case (PROT_WRITE): 5423 case (PROT_WRITE | PROT_READ): 5424 case (PROT_EXEC | PROT_WRITE): 5425 case (PROT_EXEC | PROT_WRITE | PROT_READ): 5426 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5427 return (TTE_PRIV_INT | TTE_WRPRM_INT); /* set prv and wrt */ 5428 case (PROT_USER | PROT_READ): 5429 case (PROT_USER | PROT_EXEC): 5430 case (PROT_USER | PROT_EXEC | PROT_READ): 5431 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT | TTE_HWWR_INT; 5432 return (0); /* clr prv and wrt */ 5433 case (PROT_USER | PROT_WRITE): 5434 case (PROT_USER | PROT_WRITE | PROT_READ): 5435 case (PROT_USER | PROT_EXEC | PROT_WRITE): 5436 case (PROT_USER | PROT_EXEC | PROT_WRITE | PROT_READ): 5437 *tteflagsp = TTE_PRIV_INT | TTE_WRPRM_INT; 5438 return (TTE_WRPRM_INT); /* clr prv and set wrt */ 5439 default: 5440 panic("sfmmu_vtop_prot -- bad prot %x", vprot); 5441 } 5442 return (0); 5443 } 5444 5445 /* 5446 * Alternate unload for very large virtual ranges. With a true 64 bit VA, 5447 * the normal algorithm would take too long for a very large VA range with 5448 * few real mappings. This routine just walks thru all HMEs in the global 5449 * hash table to find and remove mappings. 5450 */ 5451 static void 5452 hat_unload_large_virtual( 5453 struct hat *sfmmup, 5454 caddr_t startaddr, 5455 size_t len, 5456 uint_t flags, 5457 hat_callback_t *callback) 5458 { 5459 struct hmehash_bucket *hmebp; 5460 struct hme_blk *hmeblkp; 5461 struct hme_blk *pr_hblk = NULL; 5462 struct hme_blk *nx_hblk; 5463 struct hme_blk *list = NULL; 5464 int i; 5465 demap_range_t dmr, *dmrp; 5466 cpuset_t cpuset; 5467 caddr_t endaddr = startaddr + len; 5468 caddr_t sa; 5469 caddr_t ea; 5470 caddr_t cb_sa[MAX_CB_ADDR]; 5471 caddr_t cb_ea[MAX_CB_ADDR]; 5472 int addr_cnt = 0; 5473 int a = 0; 5474 5475 if (sfmmup->sfmmu_free) { 5476 dmrp = NULL; 5477 } else { 5478 dmrp = &dmr; 5479 DEMAP_RANGE_INIT(sfmmup, dmrp); 5480 } 5481 5482 /* 5483 * Loop through all the hash buckets of HME blocks looking for matches. 
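 *
 * Illustrative reasoning (not a measurement): probing a sparse range the
 * normal way would cost on the order of len >> TTE_PAGE_SHIFT(TTE4M) hash
 * lookups, while this walk is bounded by UHMEHASH_SZ bucket scans however
 * large len is; hat_unload_callback() applies exactly that comparison
 * before deciding to come here.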
5484 */ 5485 for (i = 0; i <= UHMEHASH_SZ; i++) { 5486 hmebp = &uhme_hash[i]; 5487 SFMMU_HASH_LOCK(hmebp); 5488 hmeblkp = hmebp->hmeblkp; 5489 pr_hblk = NULL; 5490 while (hmeblkp) { 5491 nx_hblk = hmeblkp->hblk_next; 5492 5493 /* 5494 * skip if not this context, if a shadow block or 5495 * if the mapping is not in the requested range 5496 */ 5497 if (hmeblkp->hblk_tag.htag_id != sfmmup || 5498 hmeblkp->hblk_shw_bit || 5499 (sa = (caddr_t)get_hblk_base(hmeblkp)) >= endaddr || 5500 (ea = get_hblk_endaddr(hmeblkp)) <= startaddr) { 5501 pr_hblk = hmeblkp; 5502 goto next_block; 5503 } 5504 5505 ASSERT(!hmeblkp->hblk_shared); 5506 /* 5507 * unload if there are any current valid mappings 5508 */ 5509 if (hmeblkp->hblk_vcnt != 0 || 5510 hmeblkp->hblk_hmecnt != 0) 5511 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 5512 sa, ea, dmrp, flags); 5513 5514 /* 5515 * on unmap we also release the HME block itself, once 5516 * all mappings are gone. 5517 */ 5518 if ((flags & HAT_UNLOAD_UNMAP) != 0 && 5519 !hmeblkp->hblk_vcnt && 5520 !hmeblkp->hblk_hmecnt) { 5521 ASSERT(!hmeblkp->hblk_lckcnt); 5522 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5523 &list, 0); 5524 } else { 5525 pr_hblk = hmeblkp; 5526 } 5527 5528 if (callback == NULL) 5529 goto next_block; 5530 5531 /* 5532 * HME blocks may span more than one page, but we may be 5533 * unmapping only one page, so check for a smaller range 5534 * for the callback 5535 */ 5536 if (sa < startaddr) 5537 sa = startaddr; 5538 if (--ea > endaddr) 5539 ea = endaddr - 1; 5540 5541 cb_sa[addr_cnt] = sa; 5542 cb_ea[addr_cnt] = ea; 5543 if (++addr_cnt == MAX_CB_ADDR) { 5544 if (dmrp != NULL) { 5545 DEMAP_RANGE_FLUSH(dmrp); 5546 cpuset = sfmmup->sfmmu_cpusran; 5547 xt_sync(cpuset); 5548 } 5549 5550 for (a = 0; a < MAX_CB_ADDR; ++a) { 5551 callback->hcb_start_addr = cb_sa[a]; 5552 callback->hcb_end_addr = cb_ea[a]; 5553 callback->hcb_function(callback); 5554 } 5555 addr_cnt = 0; 5556 } 5557 5558 next_block: 5559 hmeblkp = nx_hblk; 5560 } 5561 SFMMU_HASH_UNLOCK(hmebp); 5562 } 5563 5564 sfmmu_hblks_list_purge(&list, 0); 5565 if (dmrp != NULL) { 5566 DEMAP_RANGE_FLUSH(dmrp); 5567 cpuset = sfmmup->sfmmu_cpusran; 5568 xt_sync(cpuset); 5569 } 5570 5571 for (a = 0; a < addr_cnt; ++a) { 5572 callback->hcb_start_addr = cb_sa[a]; 5573 callback->hcb_end_addr = cb_ea[a]; 5574 callback->hcb_function(callback); 5575 } 5576 5577 /* 5578 * Check TSB and TLB page sizes if the process isn't exiting. 5579 */ 5580 if (!sfmmup->sfmmu_free) 5581 sfmmu_check_page_sizes(sfmmup, 0); 5582 } 5583 5584 /* 5585 * Unload all the mappings in the range [addr..addr+len). addr and len must 5586 * be MMU_PAGESIZE aligned. 
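 *
 * A hedged sketch of a caller that wants per-range notification
 * (hypothetical; xx_unloaded is an illustrative name and hat_callback_t may
 * have more fields than are touched here):
 *
 *	static void
 *	xx_unloaded(hat_callback_t *cb)
 *	{
 *		the range cb->hcb_start_addr .. cb->hcb_end_addr was unloaded
 *	}
 *
 *	hat_callback_t cb;
 *
 *	bzero(&cb, sizeof (cb));
 *	cb.hcb_function = xx_unloaded;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNMAP, &cb);
 *
 * Passing callback == NULL (as hat_unload() below does) skips the
 * notification machinery entirely.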
5587 */ 5588 5589 extern struct seg *segkmap; 5590 #define ISSEGKMAP(sfmmup, addr) (sfmmup == ksfmmup && \ 5591 segkmap->s_base <= (addr) && (addr) < (segkmap->s_base + segkmap->s_size)) 5592 5593 5594 void 5595 hat_unload_callback( 5596 struct hat *sfmmup, 5597 caddr_t addr, 5598 size_t len, 5599 uint_t flags, 5600 hat_callback_t *callback) 5601 { 5602 struct hmehash_bucket *hmebp; 5603 hmeblk_tag hblktag; 5604 int hmeshift, hashno, iskernel; 5605 struct hme_blk *hmeblkp, *pr_hblk, *list = NULL; 5606 caddr_t endaddr; 5607 cpuset_t cpuset; 5608 int addr_count = 0; 5609 int a; 5610 caddr_t cb_start_addr[MAX_CB_ADDR]; 5611 caddr_t cb_end_addr[MAX_CB_ADDR]; 5612 int issegkmap = ISSEGKMAP(sfmmup, addr); 5613 demap_range_t dmr, *dmrp; 5614 5615 ASSERT(sfmmup->sfmmu_as != NULL); 5616 5617 ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \ 5618 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 5619 5620 ASSERT(sfmmup != NULL); 5621 ASSERT((len & MMU_PAGEOFFSET) == 0); 5622 ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET)); 5623 5624 /* 5625 * Probing through a large VA range (say 63 bits) will be slow, even 5626 * at 4 Meg steps between the probes. So, when the virtual address range 5627 * is very large, search the HME entries for what to unload. 5628 * 5629 * len >> TTE_PAGE_SHIFT(TTE4M) is the # of 4Meg probes we'd need 5630 * 5631 * UHMEHASH_SZ is number of hash buckets to examine 5632 * 5633 */ 5634 if (sfmmup != KHATID && (len >> TTE_PAGE_SHIFT(TTE4M)) > UHMEHASH_SZ) { 5635 hat_unload_large_virtual(sfmmup, addr, len, flags, callback); 5636 return; 5637 } 5638 5639 CPUSET_ZERO(cpuset); 5640 5641 /* 5642 * If the process is exiting, we can save a lot of fuss since 5643 * we'll flush the TLB when we free the ctx anyway. 5644 */ 5645 if (sfmmup->sfmmu_free) { 5646 dmrp = NULL; 5647 } else { 5648 dmrp = &dmr; 5649 DEMAP_RANGE_INIT(sfmmup, dmrp); 5650 } 5651 5652 endaddr = addr + len; 5653 hblktag.htag_id = sfmmup; 5654 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 5655 5656 /* 5657 * It is likely for the vm to call unload over a wide range of 5658 * addresses that are actually very sparsely populated by 5659 * translations. In order to speed this up the sfmmu hat supports 5660 * the concept of shadow hmeblks. Dummy large page hmeblks that 5661 * correspond to actual small translations are allocated at tteload 5662 * time and are referred to as shadow hmeblks. Now, during unload 5663 * time, we first check if we have a shadow hmeblk for that 5664 * translation. The absence of one means the corresponding address 5665 * range is empty and can be skipped. 5666 * 5667 * The kernel is an exception to above statement and that is why 5668 * we don't use shadow hmeblks and hash starting from the smallest 5669 * page size. 5670 */ 5671 if (sfmmup == KHATID) { 5672 iskernel = 1; 5673 hashno = TTE64K; 5674 } else { 5675 iskernel = 0; 5676 if (mmu_page_sizes == max_mmu_page_sizes) { 5677 hashno = TTE256M; 5678 } else { 5679 hashno = TTE4M; 5680 } 5681 } 5682 while (addr < endaddr) { 5683 hmeshift = HME_HASH_SHIFT(hashno); 5684 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 5685 hblktag.htag_rehash = hashno; 5686 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 5687 5688 SFMMU_HASH_LOCK(hmebp); 5689 5690 HME_HASH_SEARCH_PREV(hmebp, hblktag, hmeblkp, pr_hblk, &list); 5691 if (hmeblkp == NULL) { 5692 /* 5693 * didn't find an hmeblk. skip the appropiate 5694 * address range. 
5695 */ 5696 SFMMU_HASH_UNLOCK(hmebp); 5697 if (iskernel) { 5698 if (hashno < mmu_hashcnt) { 5699 hashno++; 5700 continue; 5701 } else { 5702 hashno = TTE64K; 5703 addr = (caddr_t)roundup((uintptr_t)addr 5704 + 1, MMU_PAGESIZE64K); 5705 continue; 5706 } 5707 } 5708 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5709 (1 << hmeshift)); 5710 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5711 ASSERT(hashno == TTE64K); 5712 continue; 5713 } 5714 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5715 hashno = TTE512K; 5716 continue; 5717 } 5718 if (mmu_page_sizes == max_mmu_page_sizes) { 5719 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5720 hashno = TTE4M; 5721 continue; 5722 } 5723 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5724 hashno = TTE32M; 5725 continue; 5726 } 5727 hashno = TTE256M; 5728 continue; 5729 } else { 5730 hashno = TTE4M; 5731 continue; 5732 } 5733 } 5734 ASSERT(hmeblkp); 5735 ASSERT(!hmeblkp->hblk_shared); 5736 if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5737 /* 5738 * If the valid count is zero we can skip the range 5739 * mapped by this hmeblk. 5740 * We free hblks in the case of HAT_UNMAP. HAT_UNMAP 5741 * is used by segment drivers as a hint 5742 * that the mapping resource won't be used any longer. 5743 * The best example of this is during exit(). 5744 */ 5745 addr = (caddr_t)roundup((uintptr_t)addr + 1, 5746 get_hblk_span(hmeblkp)); 5747 if ((flags & HAT_UNLOAD_UNMAP) || 5748 (iskernel && !issegkmap)) { 5749 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, 5750 &list, 0); 5751 } 5752 SFMMU_HASH_UNLOCK(hmebp); 5753 5754 if (iskernel) { 5755 hashno = TTE64K; 5756 continue; 5757 } 5758 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5759 ASSERT(hashno == TTE64K); 5760 continue; 5761 } 5762 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5763 hashno = TTE512K; 5764 continue; 5765 } 5766 if (mmu_page_sizes == max_mmu_page_sizes) { 5767 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5768 hashno = TTE4M; 5769 continue; 5770 } 5771 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5772 hashno = TTE32M; 5773 continue; 5774 } 5775 hashno = TTE256M; 5776 continue; 5777 } else { 5778 hashno = TTE4M; 5779 continue; 5780 } 5781 } 5782 if (hmeblkp->hblk_shw_bit) { 5783 /* 5784 * If we encounter a shadow hmeblk we know there is 5785 * smaller sized hmeblks mapping the same address space. 5786 * Decrement the hash size and rehash. 5787 */ 5788 ASSERT(sfmmup != KHATID); 5789 hashno--; 5790 SFMMU_HASH_UNLOCK(hmebp); 5791 continue; 5792 } 5793 5794 /* 5795 * track callback address ranges. 5796 * only start a new range when it's not contiguous 5797 */ 5798 if (callback != NULL) { 5799 if (addr_count > 0 && 5800 addr == cb_end_addr[addr_count - 1]) 5801 --addr_count; 5802 else 5803 cb_start_addr[addr_count] = addr; 5804 } 5805 5806 addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr, 5807 dmrp, flags); 5808 5809 if (callback != NULL) 5810 cb_end_addr[addr_count++] = addr; 5811 5812 if (((flags & HAT_UNLOAD_UNMAP) || (iskernel && !issegkmap)) && 5813 !hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) { 5814 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 0); 5815 } 5816 SFMMU_HASH_UNLOCK(hmebp); 5817 5818 /* 5819 * Notify our caller as to exactly which pages 5820 * have been unloaded. We do these in clumps, 5821 * to minimize the number of xt_sync()s that need to occur. 
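		 *
		 * In other words, a densely populated range costs one demap
		 * flush and xt_sync() per MAX_CB_ADDR completed ranges rather
		 * than one per range; whatever remains in cb_start_addr[] and
		 * cb_end_addr[] when the main loop exits (addr_count less
		 * than MAX_CB_ADDR) is drained after the final flush below.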
5822 */ 5823 if (callback != NULL && addr_count == MAX_CB_ADDR) { 5824 if (dmrp != NULL) { 5825 DEMAP_RANGE_FLUSH(dmrp); 5826 cpuset = sfmmup->sfmmu_cpusran; 5827 xt_sync(cpuset); 5828 } 5829 5830 for (a = 0; a < MAX_CB_ADDR; ++a) { 5831 callback->hcb_start_addr = cb_start_addr[a]; 5832 callback->hcb_end_addr = cb_end_addr[a]; 5833 callback->hcb_function(callback); 5834 } 5835 addr_count = 0; 5836 } 5837 if (iskernel) { 5838 hashno = TTE64K; 5839 continue; 5840 } 5841 if ((uintptr_t)addr & MMU_PAGEOFFSET512K) { 5842 ASSERT(hashno == TTE64K); 5843 continue; 5844 } 5845 if ((uintptr_t)addr & MMU_PAGEOFFSET4M) { 5846 hashno = TTE512K; 5847 continue; 5848 } 5849 if (mmu_page_sizes == max_mmu_page_sizes) { 5850 if ((uintptr_t)addr & MMU_PAGEOFFSET32M) { 5851 hashno = TTE4M; 5852 continue; 5853 } 5854 if ((uintptr_t)addr & MMU_PAGEOFFSET256M) { 5855 hashno = TTE32M; 5856 continue; 5857 } 5858 hashno = TTE256M; 5859 } else { 5860 hashno = TTE4M; 5861 } 5862 } 5863 5864 sfmmu_hblks_list_purge(&list, 0); 5865 if (dmrp != NULL) { 5866 DEMAP_RANGE_FLUSH(dmrp); 5867 cpuset = sfmmup->sfmmu_cpusran; 5868 xt_sync(cpuset); 5869 } 5870 if (callback && addr_count != 0) { 5871 for (a = 0; a < addr_count; ++a) { 5872 callback->hcb_start_addr = cb_start_addr[a]; 5873 callback->hcb_end_addr = cb_end_addr[a]; 5874 callback->hcb_function(callback); 5875 } 5876 } 5877 5878 /* 5879 * Check TSB and TLB page sizes if the process isn't exiting. 5880 */ 5881 if (!sfmmup->sfmmu_free) 5882 sfmmu_check_page_sizes(sfmmup, 0); 5883 } 5884 5885 /* 5886 * Unload all the mappings in the range [addr..addr+len). addr and len must 5887 * be MMU_PAGESIZE aligned. 5888 */ 5889 void 5890 hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags) 5891 { 5892 hat_unload_callback(sfmmup, addr, len, flags, NULL); 5893 } 5894 5895 5896 /* 5897 * Find the largest mapping size for this page. 5898 */ 5899 int 5900 fnd_mapping_sz(page_t *pp) 5901 { 5902 int sz; 5903 int p_index; 5904 5905 p_index = PP_MAPINDEX(pp); 5906 5907 sz = 0; 5908 p_index >>= 1; /* don't care about 8K bit */ 5909 for (; p_index; p_index >>= 1) { 5910 sz++; 5911 } 5912 5913 return (sz); 5914 } 5915 5916 /* 5917 * This function unloads a range of addresses for an hmeblk. 5918 * It returns the next address to be unloaded. 5919 * It should be called with the hash lock held. 
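 *
 * Simplified caller pattern (an illustration of how hat_unload_callback()
 * above drives this routine, not additional code):
 *
 *	SFMMU_HASH_LOCK(hmebp);
 *	...find the hmeblk covering addr...
 *	addr = sfmmu_hblk_unload(sfmmup, hmeblkp, addr, endaddr,
 *	    dmrp, flags);
 *	SFMMU_HASH_UNLOCK(hmebp);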
5920 */ 5921 static caddr_t 5922 sfmmu_hblk_unload(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 5923 caddr_t endaddr, demap_range_t *dmrp, uint_t flags) 5924 { 5925 tte_t tte, ttemod; 5926 struct sf_hment *sfhmep; 5927 int ttesz; 5928 long ttecnt; 5929 page_t *pp; 5930 kmutex_t *pml; 5931 int ret; 5932 int use_demap_range; 5933 5934 ASSERT(in_hblk_range(hmeblkp, addr)); 5935 ASSERT(!hmeblkp->hblk_shw_bit); 5936 ASSERT(sfmmup != NULL || hmeblkp->hblk_shared); 5937 ASSERT(sfmmup == NULL || !hmeblkp->hblk_shared); 5938 ASSERT(dmrp == NULL || !hmeblkp->hblk_shared); 5939 5940 #ifdef DEBUG 5941 if (get_hblk_ttesz(hmeblkp) != TTE8K && 5942 (endaddr < get_hblk_endaddr(hmeblkp))) { 5943 panic("sfmmu_hblk_unload: partial unload of large page"); 5944 } 5945 #endif /* DEBUG */ 5946 5947 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 5948 ttesz = get_hblk_ttesz(hmeblkp); 5949 5950 use_demap_range = ((dmrp == NULL) || 5951 (TTEBYTES(ttesz) == DEMAP_RANGE_PGSZ(dmrp))); 5952 5953 if (use_demap_range) { 5954 DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr); 5955 } else if (dmrp != NULL) { 5956 DEMAP_RANGE_FLUSH(dmrp); 5957 } 5958 ttecnt = 0; 5959 HBLKTOHME(sfhmep, hmeblkp, addr); 5960 5961 while (addr < endaddr) { 5962 pml = NULL; 5963 sfmmu_copytte(&sfhmep->hme_tte, &tte); 5964 if (TTE_IS_VALID(&tte)) { 5965 pp = sfhmep->hme_page; 5966 if (pp != NULL) { 5967 pml = sfmmu_mlist_enter(pp); 5968 } 5969 5970 /* 5971 * Verify if hme still points to 'pp' now that 5972 * we have p_mapping lock. 5973 */ 5974 if (sfhmep->hme_page != pp) { 5975 if (pp != NULL && sfhmep->hme_page != NULL) { 5976 ASSERT(pml != NULL); 5977 sfmmu_mlist_exit(pml); 5978 /* Re-start this iteration. */ 5979 continue; 5980 } 5981 ASSERT((pp != NULL) && 5982 (sfhmep->hme_page == NULL)); 5983 goto tte_unloaded; 5984 } 5985 5986 /* 5987 * This point on we have both HASH and p_mapping 5988 * lock. 5989 */ 5990 ASSERT(pp == sfhmep->hme_page); 5991 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 5992 5993 /* 5994 * We need to loop on modify tte because it is 5995 * possible for pagesync to come along and 5996 * change the software bits beneath us. 5997 * 5998 * Page_unload can also invalidate the tte after 5999 * we read tte outside of p_mapping lock. 6000 */ 6001 again: 6002 ttemod = tte; 6003 6004 TTE_SET_INVALID(&ttemod); 6005 ret = sfmmu_modifytte_try(&tte, &ttemod, 6006 &sfhmep->hme_tte); 6007 6008 if (ret <= 0) { 6009 if (TTE_IS_VALID(&tte)) { 6010 ASSERT(ret < 0); 6011 goto again; 6012 } 6013 if (pp != NULL) { 6014 panic("sfmmu_hblk_unload: pp = 0x%p " 6015 "tte became invalid under mlist" 6016 " lock = 0x%p", (void *)pp, 6017 (void *)pml); 6018 } 6019 continue; 6020 } 6021 6022 if (!(flags & HAT_UNLOAD_NOSYNC)) { 6023 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6024 } 6025 6026 /* 6027 * Ok- we invalidated the tte. Do the rest of the job. 6028 */ 6029 ttecnt++; 6030 6031 if (flags & HAT_UNLOAD_UNLOCK) { 6032 ASSERT(hmeblkp->hblk_lckcnt > 0); 6033 atomic_dec_32(&hmeblkp->hblk_lckcnt); 6034 HBLK_STACK_TRACE(hmeblkp, HBLK_UNLOCK); 6035 } 6036 6037 /* 6038 * Normally we would need to flush the page 6039 * from the virtual cache at this point in 6040 * order to prevent a potential cache alias 6041 * inconsistency. 6042 * The particular scenario we need to worry 6043 * about is: 6044 * Given: va1 and va2 are two virtual address 6045 * that alias and map the same physical 6046 * address. 6047 * 1. mapping exists from va1 to pa and data 6048 * has been read into the cache. 6049 * 2. unload va1. 6050 * 3. load va2 and modify data using va2. 
6051 * 4 unload va2. 6052 * 5. load va1 and reference data. Unless we 6053 * flush the data cache when we unload we will 6054 * get stale data. 6055 * Fortunately, page coloring eliminates the 6056 * above scenario by remembering the color a 6057 * physical page was last or is currently 6058 * mapped to. Now, we delay the flush until 6059 * the loading of translations. Only when the 6060 * new translation is of a different color 6061 * are we forced to flush. 6062 */ 6063 if (use_demap_range) { 6064 /* 6065 * Mark this page as needing a demap. 6066 */ 6067 DEMAP_RANGE_MARKPG(dmrp, addr); 6068 } else { 6069 ASSERT(sfmmup != NULL); 6070 ASSERT(!hmeblkp->hblk_shared); 6071 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 6072 sfmmup->sfmmu_free, 0); 6073 } 6074 6075 if (pp) { 6076 /* 6077 * Remove the hment from the mapping list 6078 */ 6079 ASSERT(hmeblkp->hblk_hmecnt > 0); 6080 6081 /* 6082 * Again, we cannot 6083 * ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS); 6084 */ 6085 HME_SUB(sfhmep, pp); 6086 membar_stst(); 6087 atomic_dec_16(&hmeblkp->hblk_hmecnt); 6088 } 6089 6090 ASSERT(hmeblkp->hblk_vcnt > 0); 6091 atomic_dec_16(&hmeblkp->hblk_vcnt); 6092 6093 ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 6094 !hmeblkp->hblk_lckcnt); 6095 6096 #ifdef VAC 6097 if (pp && (pp->p_nrm & (P_KPMC | P_KPMS | P_TNC))) { 6098 if (PP_ISTNC(pp)) { 6099 /* 6100 * If page was temporary 6101 * uncached, try to recache 6102 * it. Note that HME_SUB() was 6103 * called above so p_index and 6104 * mlist had been updated. 6105 */ 6106 conv_tnc(pp, ttesz); 6107 } else if (pp->p_mapping == NULL) { 6108 ASSERT(kpm_enable); 6109 /* 6110 * Page is marked to be in VAC conflict 6111 * to an existing kpm mapping and/or is 6112 * kpm mapped using only the regular 6113 * pagesize. 6114 */ 6115 sfmmu_kpm_hme_unload(pp); 6116 } 6117 } 6118 #endif /* VAC */ 6119 } else if ((pp = sfhmep->hme_page) != NULL) { 6120 /* 6121 * TTE is invalid but the hme 6122 * still exists. let pageunload 6123 * complete its job. 6124 */ 6125 ASSERT(pml == NULL); 6126 pml = sfmmu_mlist_enter(pp); 6127 if (sfhmep->hme_page != NULL) { 6128 sfmmu_mlist_exit(pml); 6129 continue; 6130 } 6131 ASSERT(sfhmep->hme_page == NULL); 6132 } else if (hmeblkp->hblk_hmecnt != 0) { 6133 /* 6134 * pageunload may have not finished decrementing 6135 * hblk_vcnt and hblk_hmecnt. Find page_t if any and 6136 * wait for pageunload to finish. Rely on pageunload 6137 * to decrement hblk_hmecnt after hblk_vcnt. 6138 */ 6139 pfn_t pfn = TTE_TO_TTEPFN(&tte); 6140 ASSERT(pml == NULL); 6141 if (pf_is_memory(pfn)) { 6142 pp = page_numtopp_nolock(pfn); 6143 if (pp != NULL) { 6144 pml = sfmmu_mlist_enter(pp); 6145 sfmmu_mlist_exit(pml); 6146 pml = NULL; 6147 } 6148 } 6149 } 6150 6151 tte_unloaded: 6152 /* 6153 * At this point, the tte we are looking at 6154 * should be unloaded, and hme has been unlinked 6155 * from page too. This is important because in 6156 * pageunload, it does ttesync() then HME_SUB. 6157 * We need to make sure HME_SUB has been completed 6158 * so we know ttesync() has been completed. Otherwise, 6159 * at exit time, after return from hat layer, VM will 6160 * release as structure which hat_setstat() (called 6161 * by ttesync()) needs. 
6162 */ 6163 #ifdef DEBUG 6164 { 6165 tte_t dtte; 6166 6167 ASSERT(sfhmep->hme_page == NULL); 6168 6169 sfmmu_copytte(&sfhmep->hme_tte, &dtte); 6170 ASSERT(!TTE_IS_VALID(&dtte)); 6171 } 6172 #endif 6173 6174 if (pml) { 6175 sfmmu_mlist_exit(pml); 6176 } 6177 6178 addr += TTEBYTES(ttesz); 6179 sfhmep++; 6180 DEMAP_RANGE_NEXTPG(dmrp); 6181 } 6182 /* 6183 * For shared hmeblks this routine is only called when region is freed 6184 * and no longer referenced. So no need to decrement ttecnt 6185 * in the region structure here. 6186 */ 6187 if (ttecnt > 0 && sfmmup != NULL) { 6188 atomic_add_long(&sfmmup->sfmmu_ttecnt[ttesz], -ttecnt); 6189 } 6190 return (addr); 6191 } 6192 6193 /* 6194 * Invalidate a virtual address range for the local CPU. 6195 * For best performance ensure that the va range is completely 6196 * mapped, otherwise the entire TLB will be flushed. 6197 */ 6198 void 6199 hat_flush_range(struct hat *sfmmup, caddr_t va, size_t size) 6200 { 6201 ssize_t sz; 6202 caddr_t endva = va + size; 6203 6204 while (va < endva) { 6205 sz = hat_getpagesize(sfmmup, va); 6206 if (sz < 0) { 6207 vtag_flushall(); 6208 break; 6209 } 6210 vtag_flushpage(va, (uint64_t)sfmmup); 6211 va += sz; 6212 } 6213 } 6214 6215 /* 6216 * Synchronize all the mappings in the range [addr..addr+len). 6217 * Can be called with clearflag having two states: 6218 * HAT_SYNC_DONTZERO means just return the rm stats 6219 * HAT_SYNC_ZERORM means zero rm bits in the tte and return the stats 6220 */ 6221 void 6222 hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag) 6223 { 6224 struct hmehash_bucket *hmebp; 6225 hmeblk_tag hblktag; 6226 int hmeshift, hashno = 1; 6227 struct hme_blk *hmeblkp, *list = NULL; 6228 caddr_t endaddr; 6229 cpuset_t cpuset; 6230 6231 ASSERT((sfmmup == ksfmmup) || 6232 AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 6233 ASSERT((len & MMU_PAGEOFFSET) == 0); 6234 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 6235 (clearflag == HAT_SYNC_ZERORM)); 6236 6237 CPUSET_ZERO(cpuset); 6238 6239 endaddr = addr + len; 6240 hblktag.htag_id = sfmmup; 6241 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 6242 6243 /* 6244 * Spitfire supports 4 page sizes. 6245 * Most pages are expected to be of the smallest page 6246 * size (8K) and these will not need to be rehashed. 64K 6247 * pages also don't need to be rehashed because the an hmeblk 6248 * spans 64K of address space. 512K pages might need 1 rehash and 6249 * and 4M pages 2 rehashes. 6250 */ 6251 while (addr < endaddr) { 6252 hmeshift = HME_HASH_SHIFT(hashno); 6253 hblktag.htag_bspage = HME_HASH_BSPAGE(addr, hmeshift); 6254 hblktag.htag_rehash = hashno; 6255 hmebp = HME_HASH_FUNCTION(sfmmup, addr, hmeshift); 6256 6257 SFMMU_HASH_LOCK(hmebp); 6258 6259 HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list); 6260 if (hmeblkp != NULL) { 6261 ASSERT(!hmeblkp->hblk_shared); 6262 /* 6263 * We've encountered a shadow hmeblk so skip the range 6264 * of the next smaller mapping size. 6265 */ 6266 if (hmeblkp->hblk_shw_bit) { 6267 ASSERT(sfmmup != ksfmmup); 6268 ASSERT(hashno > 1); 6269 addr = (caddr_t)P2END((uintptr_t)addr, 6270 TTEBYTES(hashno - 1)); 6271 } else { 6272 addr = sfmmu_hblk_sync(sfmmup, hmeblkp, 6273 addr, endaddr, clearflag); 6274 } 6275 SFMMU_HASH_UNLOCK(hmebp); 6276 hashno = 1; 6277 continue; 6278 } 6279 SFMMU_HASH_UNLOCK(hmebp); 6280 6281 if (!HME_REHASH(sfmmup) || (hashno >= mmu_hashcnt)) { 6282 /* 6283 * We have traversed the whole list and rehashed 6284 * if necessary without finding the address to sync. 
6285 * This is ok so we increment the address by the 6286 * smallest hmeblk range for kernel mappings and the 6287 * largest hmeblk range, to account for shadow hmeblks, 6288 * for user mappings and continue. 6289 */ 6290 if (sfmmup == ksfmmup) 6291 addr = (caddr_t)P2END((uintptr_t)addr, 6292 TTEBYTES(1)); 6293 else 6294 addr = (caddr_t)P2END((uintptr_t)addr, 6295 TTEBYTES(hashno)); 6296 hashno = 1; 6297 } else { 6298 hashno++; 6299 } 6300 } 6301 sfmmu_hblks_list_purge(&list, 0); 6302 cpuset = sfmmup->sfmmu_cpusran; 6303 xt_sync(cpuset); 6304 } 6305 6306 static caddr_t 6307 sfmmu_hblk_sync(struct hat *sfmmup, struct hme_blk *hmeblkp, caddr_t addr, 6308 caddr_t endaddr, int clearflag) 6309 { 6310 tte_t tte, ttemod; 6311 struct sf_hment *sfhmep; 6312 int ttesz; 6313 struct page *pp; 6314 kmutex_t *pml; 6315 int ret; 6316 6317 ASSERT(hmeblkp->hblk_shw_bit == 0); 6318 ASSERT(!hmeblkp->hblk_shared); 6319 6320 endaddr = MIN(endaddr, get_hblk_endaddr(hmeblkp)); 6321 6322 ttesz = get_hblk_ttesz(hmeblkp); 6323 HBLKTOHME(sfhmep, hmeblkp, addr); 6324 6325 while (addr < endaddr) { 6326 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6327 if (TTE_IS_VALID(&tte)) { 6328 pml = NULL; 6329 pp = sfhmep->hme_page; 6330 if (pp) { 6331 pml = sfmmu_mlist_enter(pp); 6332 } 6333 if (pp != sfhmep->hme_page) { 6334 /* 6335 * tte most have been unloaded 6336 * underneath us. Recheck 6337 */ 6338 ASSERT(pml); 6339 sfmmu_mlist_exit(pml); 6340 continue; 6341 } 6342 6343 ASSERT(pp == NULL || sfmmu_mlist_held(pp)); 6344 6345 if (clearflag == HAT_SYNC_ZERORM) { 6346 ttemod = tte; 6347 TTE_CLR_RM(&ttemod); 6348 ret = sfmmu_modifytte_try(&tte, &ttemod, 6349 &sfhmep->hme_tte); 6350 if (ret < 0) { 6351 if (pml) { 6352 sfmmu_mlist_exit(pml); 6353 } 6354 continue; 6355 } 6356 6357 if (ret > 0) { 6358 sfmmu_tlb_demap(addr, sfmmup, 6359 hmeblkp, 0, 0); 6360 } 6361 } 6362 sfmmu_ttesync(sfmmup, addr, &tte, pp); 6363 if (pml) { 6364 sfmmu_mlist_exit(pml); 6365 } 6366 } 6367 addr += TTEBYTES(ttesz); 6368 sfhmep++; 6369 } 6370 return (addr); 6371 } 6372 6373 /* 6374 * This function will sync a tte to the page struct and it will 6375 * update the hat stats. Currently it allows us to pass a NULL pp 6376 * and we will simply update the stats. We may want to change this 6377 * so we only keep stats for pages backed by pp's. 6378 */ 6379 static void 6380 sfmmu_ttesync(struct hat *sfmmup, caddr_t addr, tte_t *ttep, page_t *pp) 6381 { 6382 uint_t rm = 0; 6383 int sz; 6384 pgcnt_t npgs; 6385 6386 ASSERT(TTE_IS_VALID(ttep)); 6387 6388 if (TTE_IS_NOSYNC(ttep)) { 6389 return; 6390 } 6391 6392 if (TTE_IS_REF(ttep)) { 6393 rm = P_REF; 6394 } 6395 if (TTE_IS_MOD(ttep)) { 6396 rm |= P_MOD; 6397 } 6398 6399 if (rm == 0) { 6400 return; 6401 } 6402 6403 sz = TTE_CSZ(ttep); 6404 if (sfmmup != NULL && sfmmup->sfmmu_rmstat) { 6405 int i; 6406 caddr_t vaddr = addr; 6407 6408 for (i = 0; i < TTEPAGES(sz); i++, vaddr += MMU_PAGESIZE) { 6409 hat_setstat(sfmmup->sfmmu_as, vaddr, MMU_PAGESIZE, rm); 6410 } 6411 6412 } 6413 6414 /* 6415 * XXX I want to use cas to update nrm bits but they 6416 * currently belong in common/vm and not in hat where 6417 * they should be. 6418 * The nrm bits are protected by the same mutex as 6419 * the one that protects the page's mapping list. 6420 */ 6421 if (!pp) 6422 return; 6423 ASSERT(sfmmu_mlist_held(pp)); 6424 /* 6425 * If the tte is for a large page, we need to sync all the 6426 * pages covered by the tte. 
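	 * For example, a 512K tte (sz == TTE512K) covers
	 * TTEPAGES(TTE512K) == 64 8K constituent pages: we move to the
	 * group leader and then walk the remaining 63 pages via
	 * PP_PAGENEXT() in the loop below.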
6427 */ 6428 if (sz != TTE8K) { 6429 ASSERT(pp->p_szc != 0); 6430 pp = PP_GROUPLEADER(pp, sz); 6431 ASSERT(sfmmu_mlist_held(pp)); 6432 } 6433 6434 /* Get number of pages from tte size. */ 6435 npgs = TTEPAGES(sz); 6436 6437 do { 6438 ASSERT(pp); 6439 ASSERT(sfmmu_mlist_held(pp)); 6440 if (((rm & P_REF) != 0 && !PP_ISREF(pp)) || 6441 ((rm & P_MOD) != 0 && !PP_ISMOD(pp))) 6442 hat_page_setattr(pp, rm); 6443 6444 /* 6445 * Are we done? If not, we must have a large mapping. 6446 * For large mappings we need to sync the rest of the pages 6447 * covered by this tte; goto the next page. 6448 */ 6449 } while (--npgs > 0 && (pp = PP_PAGENEXT(pp))); 6450 } 6451 6452 /* 6453 * Execute pre-callback handler of each pa_hment linked to pp 6454 * 6455 * Inputs: 6456 * flag: either HAT_PRESUSPEND or HAT_SUSPEND. 6457 * capture_cpus: pointer to return value (below) 6458 * 6459 * Returns: 6460 * Propagates the subsystem callback return values back to the caller; 6461 * returns 0 on success. If capture_cpus is non-NULL, the value returned 6462 * is zero if all of the pa_hments are of a type that do not require 6463 * capturing CPUs prior to suspending the mapping, else it is 1. 6464 */ 6465 static int 6466 hat_pageprocess_precallbacks(struct page *pp, uint_t flag, int *capture_cpus) 6467 { 6468 struct sf_hment *sfhmep; 6469 struct pa_hment *pahmep; 6470 int (*f)(caddr_t, uint_t, uint_t, void *); 6471 int ret; 6472 id_t id; 6473 int locked = 0; 6474 kmutex_t *pml; 6475 6476 ASSERT(PAGE_EXCL(pp)); 6477 if (!sfmmu_mlist_held(pp)) { 6478 pml = sfmmu_mlist_enter(pp); 6479 locked = 1; 6480 } 6481 6482 if (capture_cpus) 6483 *capture_cpus = 0; 6484 6485 top: 6486 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6487 /* 6488 * skip sf_hments corresponding to VA<->PA mappings; 6489 * for pa_hment's, hme_tte.ll is zero 6490 */ 6491 if (!IS_PAHME(sfhmep)) 6492 continue; 6493 6494 pahmep = sfhmep->hme_data; 6495 ASSERT(pahmep != NULL); 6496 6497 /* 6498 * skip if pre-handler has been called earlier in this loop 6499 */ 6500 if (pahmep->flags & flag) 6501 continue; 6502 6503 id = pahmep->cb_id; 6504 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6505 if (capture_cpus && sfmmu_cb_table[id].capture_cpus != 0) 6506 *capture_cpus = 1; 6507 if ((f = sfmmu_cb_table[id].prehandler) == NULL) { 6508 pahmep->flags |= flag; 6509 continue; 6510 } 6511 6512 /* 6513 * Drop the mapping list lock to avoid locking order issues. 6514 */ 6515 if (locked) 6516 sfmmu_mlist_exit(pml); 6517 6518 ret = f(pahmep->addr, pahmep->len, flag, pahmep->pvt); 6519 if (ret != 0) 6520 return (ret); /* caller must do the cleanup */ 6521 6522 if (locked) { 6523 pml = sfmmu_mlist_enter(pp); 6524 pahmep->flags |= flag; 6525 goto top; 6526 } 6527 6528 pahmep->flags |= flag; 6529 } 6530 6531 if (locked) 6532 sfmmu_mlist_exit(pml); 6533 6534 return (0); 6535 } 6536 6537 /* 6538 * Execute post-callback handler of each pa_hment linked to pp 6539 * 6540 * Same overall assumptions and restrictions apply as for 6541 * hat_pageprocess_precallbacks(). 
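 *
 * The pfn handed to each post-handler is the constituent page frame,
 * computed below as pgpfn | (btop(pahmep->addr) & pgmask).  Illustrative
 * arithmetic, assuming 8K base pages and a VA congruent with the PA: for
 * a 4M root page pgmask is 511, so a pa_hment whose addr falls in the
 * third 8K constituent is passed pgpfn | 2.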
6542 */ 6543 static void 6544 hat_pageprocess_postcallbacks(struct page *pp, uint_t flag) 6545 { 6546 pfn_t pgpfn = pp->p_pagenum; 6547 pfn_t pgmask = btop(page_get_pagesize(pp->p_szc)) - 1; 6548 pfn_t newpfn; 6549 struct sf_hment *sfhmep; 6550 struct pa_hment *pahmep; 6551 int (*f)(caddr_t, uint_t, uint_t, void *, pfn_t); 6552 id_t id; 6553 int locked = 0; 6554 kmutex_t *pml; 6555 6556 ASSERT(PAGE_EXCL(pp)); 6557 if (!sfmmu_mlist_held(pp)) { 6558 pml = sfmmu_mlist_enter(pp); 6559 locked = 1; 6560 } 6561 6562 top: 6563 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6564 /* 6565 * skip sf_hments corresponding to VA<->PA mappings; 6566 * for pa_hment's, hme_tte.ll is zero 6567 */ 6568 if (!IS_PAHME(sfhmep)) 6569 continue; 6570 6571 pahmep = sfhmep->hme_data; 6572 ASSERT(pahmep != NULL); 6573 6574 if ((pahmep->flags & flag) == 0) 6575 continue; 6576 6577 pahmep->flags &= ~flag; 6578 6579 id = pahmep->cb_id; 6580 ASSERT(id >= (id_t)0 && id < sfmmu_cb_nextid); 6581 if ((f = sfmmu_cb_table[id].posthandler) == NULL) 6582 continue; 6583 6584 /* 6585 * Convert the base page PFN into the constituent PFN 6586 * which is needed by the callback handler. 6587 */ 6588 newpfn = pgpfn | (btop((uintptr_t)pahmep->addr) & pgmask); 6589 6590 /* 6591 * Drop the mapping list lock to avoid locking order issues. 6592 */ 6593 if (locked) 6594 sfmmu_mlist_exit(pml); 6595 6596 if (f(pahmep->addr, pahmep->len, flag, pahmep->pvt, newpfn) 6597 != 0) 6598 panic("sfmmu: posthandler failed"); 6599 6600 if (locked) { 6601 pml = sfmmu_mlist_enter(pp); 6602 goto top; 6603 } 6604 } 6605 6606 if (locked) 6607 sfmmu_mlist_exit(pml); 6608 } 6609 6610 /* 6611 * Suspend locked kernel mapping 6612 */ 6613 void 6614 hat_pagesuspend(struct page *pp) 6615 { 6616 struct sf_hment *sfhmep; 6617 sfmmu_t *sfmmup; 6618 tte_t tte, ttemod; 6619 struct hme_blk *hmeblkp; 6620 caddr_t addr; 6621 int index, cons; 6622 cpuset_t cpuset; 6623 6624 ASSERT(PAGE_EXCL(pp)); 6625 ASSERT(sfmmu_mlist_held(pp)); 6626 6627 mutex_enter(&kpr_suspendlock); 6628 6629 /* 6630 * We're about to suspend a kernel mapping so mark this thread as 6631 * non-traceable by DTrace. This prevents us from running into issues 6632 * with probe context trying to touch a suspended page 6633 * in the relocation codepath itself. 6634 */ 6635 curthread->t_flag |= T_DONTDTRACE; 6636 6637 index = PP_MAPINDEX(pp); 6638 cons = TTE8K; 6639 6640 retry: 6641 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 6642 6643 if (IS_PAHME(sfhmep)) 6644 continue; 6645 6646 if (get_hblk_ttesz(sfmmu_hmetohblk(sfhmep)) != cons) 6647 continue; 6648 6649 /* 6650 * Loop until we successfully set the suspend bit in 6651 * the TTE. 6652 */ 6653 again: 6654 sfmmu_copytte(&sfhmep->hme_tte, &tte); 6655 ASSERT(TTE_IS_VALID(&tte)); 6656 6657 ttemod = tte; 6658 TTE_SET_SUSPEND(&ttemod); 6659 if (sfmmu_modifytte_try(&tte, &ttemod, 6660 &sfhmep->hme_tte) < 0) 6661 goto again; 6662 6663 /* 6664 * Invalidate TSB entry 6665 */ 6666 hmeblkp = sfmmu_hmetohblk(sfhmep); 6667 6668 sfmmup = hblktosfmmu(hmeblkp); 6669 ASSERT(sfmmup == ksfmmup); 6670 ASSERT(!hmeblkp->hblk_shared); 6671 6672 addr = tte_to_vaddr(hmeblkp, tte); 6673 6674 /* 6675 * No need to make sure that the TSB for this sfmmu is 6676 * not being relocated since it is ksfmmup and thus it 6677 * will never be relocated. 
6678 */ 6679 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 6680 6681 /* 6682 * Update xcall stats 6683 */ 6684 cpuset = cpu_ready_set; 6685 CPUSET_DEL(cpuset, CPU->cpu_id); 6686 6687 /* LINTED: constant in conditional context */ 6688 SFMMU_XCALL_STATS(ksfmmup); 6689 6690 /* 6691 * Flush TLB entry on remote CPU's 6692 */ 6693 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 6694 (uint64_t)ksfmmup); 6695 xt_sync(cpuset); 6696 6697 /* 6698 * Flush TLB entry on local CPU 6699 */ 6700 vtag_flushpage(addr, (uint64_t)ksfmmup); 6701 } 6702 6703 while (index != 0) { 6704 index = index >> 1; 6705 if (index != 0) 6706 cons++; 6707 if (index & 0x1) { 6708 pp = PP_GROUPLEADER(pp, cons); 6709 goto retry; 6710 } 6711 } 6712 } 6713 6714 #ifdef DEBUG 6715 6716 #define N_PRLE 1024 6717 struct prle { 6718 page_t *targ; 6719 page_t *repl; 6720 int status; 6721 int pausecpus; 6722 hrtime_t whence; 6723 }; 6724 6725 static struct prle page_relocate_log[N_PRLE]; 6726 static int prl_entry; 6727 static kmutex_t prl_mutex; 6728 6729 #define PAGE_RELOCATE_LOG(t, r, s, p) \ 6730 mutex_enter(&prl_mutex); \ 6731 page_relocate_log[prl_entry].targ = *(t); \ 6732 page_relocate_log[prl_entry].repl = *(r); \ 6733 page_relocate_log[prl_entry].status = (s); \ 6734 page_relocate_log[prl_entry].pausecpus = (p); \ 6735 page_relocate_log[prl_entry].whence = gethrtime(); \ 6736 prl_entry = (prl_entry == (N_PRLE - 1))? 0 : prl_entry + 1; \ 6737 mutex_exit(&prl_mutex); 6738 6739 #else /* !DEBUG */ 6740 #define PAGE_RELOCATE_LOG(t, r, s, p) 6741 #endif 6742 6743 /* 6744 * Core Kernel Page Relocation Algorithm 6745 * 6746 * Input: 6747 * 6748 * target : constituent pages are SE_EXCL locked. 6749 * replacement: constituent pages are SE_EXCL locked. 6750 * 6751 * Output: 6752 * 6753 * nrelocp: number of pages relocated 6754 */ 6755 int 6756 hat_page_relocate(page_t **target, page_t **replacement, spgcnt_t *nrelocp) 6757 { 6758 page_t *targ, *repl; 6759 page_t *tpp, *rpp; 6760 kmutex_t *low, *high; 6761 spgcnt_t npages, i; 6762 page_t *pl = NULL; 6763 int old_pil; 6764 cpuset_t cpuset; 6765 int cap_cpus; 6766 int ret; 6767 #ifdef VAC 6768 int cflags = 0; 6769 #endif 6770 6771 if (!kcage_on || PP_ISNORELOC(*target)) { 6772 PAGE_RELOCATE_LOG(target, replacement, EAGAIN, -1); 6773 return (EAGAIN); 6774 } 6775 6776 mutex_enter(&kpr_mutex); 6777 kreloc_thread = curthread; 6778 6779 targ = *target; 6780 repl = *replacement; 6781 ASSERT(repl != NULL); 6782 ASSERT(targ->p_szc == repl->p_szc); 6783 6784 npages = page_get_pagecnt(targ->p_szc); 6785 6786 /* 6787 * unload VA<->PA mappings that are not locked 6788 */ 6789 tpp = targ; 6790 for (i = 0; i < npages; i++) { 6791 (void) hat_pageunload(tpp, SFMMU_KERNEL_RELOC); 6792 tpp++; 6793 } 6794 6795 /* 6796 * Do "presuspend" callbacks, in a context from which we can still 6797 * block as needed. Note that we don't hold the mapping list lock 6798 * of "targ" at this point due to potential locking order issues; 6799 * we assume that between the hat_pageunload() above and holding 6800 * the SE_EXCL lock that the mapping list *cannot* change at this 6801 * point. 6802 */ 6803 ret = hat_pageprocess_precallbacks(targ, HAT_PRESUSPEND, &cap_cpus); 6804 if (ret != 0) { 6805 /* 6806 * EIO translates to fatal error, for all others cleanup 6807 * and return EAGAIN. 
6808 */ 6809 ASSERT(ret != EIO); 6810 hat_pageprocess_postcallbacks(targ, HAT_POSTUNSUSPEND); 6811 PAGE_RELOCATE_LOG(target, replacement, ret, -1); 6812 kreloc_thread = NULL; 6813 mutex_exit(&kpr_mutex); 6814 return (EAGAIN); 6815 } 6816 6817 /* 6818 * acquire p_mapping list lock for both the target and replacement 6819 * root pages. 6820 * 6821 * low and high refer to the need to grab the mlist locks in a 6822 * specific order in order to prevent race conditions. Thus the 6823 * lower lock must be grabbed before the higher lock. 6824 * 6825 * This will block hat_unload's accessing p_mapping list. Since 6826 * we have SE_EXCL lock, hat_memload and hat_pageunload will be 6827 * blocked. Thus, no one else will be accessing the p_mapping list 6828 * while we suspend and reload the locked mapping below. 6829 */ 6830 tpp = targ; 6831 rpp = repl; 6832 sfmmu_mlist_reloc_enter(tpp, rpp, &low, &high); 6833 6834 kpreempt_disable(); 6835 6836 /* 6837 * We raise our PIL to 13 so that we don't get captured by 6838 * another CPU or pinned by an interrupt thread. We can't go to 6839 * PIL 14 since the nexus driver(s) may need to interrupt at 6840 * that level in the case of IOMMU pseudo mappings. 6841 */ 6842 cpuset = cpu_ready_set; 6843 CPUSET_DEL(cpuset, CPU->cpu_id); 6844 if (!cap_cpus || CPUSET_ISNULL(cpuset)) { 6845 old_pil = splr(XCALL_PIL); 6846 } else { 6847 old_pil = -1; 6848 xc_attention(cpuset); 6849 } 6850 ASSERT(getpil() == XCALL_PIL); 6851 6852 /* 6853 * Now do suspend callbacks. In the case of an IOMMU mapping 6854 * this will suspend all DMA activity to the page while it is 6855 * being relocated. Since we are well above LOCK_LEVEL and CPUs 6856 * may be captured at this point we should have acquired any needed 6857 * locks in the presuspend callback. 6858 */ 6859 ret = hat_pageprocess_precallbacks(targ, HAT_SUSPEND, NULL); 6860 if (ret != 0) { 6861 repl = targ; 6862 goto suspend_fail; 6863 } 6864 6865 /* 6866 * Raise the PIL yet again, this time to block all high-level 6867 * interrupts on this CPU. This is necessary to prevent an 6868 * interrupt routine from pinning the thread which holds the 6869 * mapping suspended and then touching the suspended page. 6870 * 6871 * Once the page is suspended we also need to be careful to 6872 * avoid calling any functions which touch any seg_kmem memory 6873 * since that memory may be backed by the very page we are 6874 * relocating in here! 6875 */ 6876 hat_pagesuspend(targ); 6877 6878 /* 6879 * Now that we are confident everybody has stopped using this page, 6880 * copy the page contents. Note we use a physical copy to prevent 6881 * locking issues and to avoid fpRAS because we can't handle it in 6882 * this context. 6883 */ 6884 for (i = 0; i < npages; i++, tpp++, rpp++) { 6885 #ifdef VAC 6886 /* 6887 * If the replacement has a different vcolor than 6888 * the one being replacd, we need to handle VAC 6889 * consistency for it just as we were setting up 6890 * a new mapping to it. 6891 */ 6892 if ((PP_GET_VCOLOR(rpp) != NO_VCOLOR) && 6893 (tpp->p_vcolor != rpp->p_vcolor) && 6894 !CacheColor_IsFlushed(cflags, PP_GET_VCOLOR(rpp))) { 6895 CacheColor_SetFlushed(cflags, PP_GET_VCOLOR(rpp)); 6896 sfmmu_cache_flushcolor(PP_GET_VCOLOR(rpp), 6897 rpp->p_pagenum); 6898 } 6899 #endif 6900 /* 6901 * Copy the contents of the page. 6902 */ 6903 ppcopy_kernel(tpp, rpp); 6904 } 6905 6906 tpp = targ; 6907 rpp = repl; 6908 for (i = 0; i < npages; i++, tpp++, rpp++) { 6909 /* 6910 * Copy attributes. VAC consistency was handled above, 6911 * if required. 
6912 */ 6913 rpp->p_nrm = tpp->p_nrm; 6914 tpp->p_nrm = 0; 6915 rpp->p_index = tpp->p_index; 6916 tpp->p_index = 0; 6917 #ifdef VAC 6918 rpp->p_vcolor = tpp->p_vcolor; 6919 #endif 6920 } 6921 6922 /* 6923 * First, unsuspend the page, if we set the suspend bit, and transfer 6924 * the mapping list from the target page to the replacement page. 6925 * Next process postcallbacks; since pa_hment's are linked only to the 6926 * p_mapping list of root page, we don't iterate over the constituent 6927 * pages. 6928 */ 6929 hat_pagereload(targ, repl); 6930 6931 suspend_fail: 6932 hat_pageprocess_postcallbacks(repl, HAT_UNSUSPEND); 6933 6934 /* 6935 * Now lower our PIL and release any captured CPUs since we 6936 * are out of the "danger zone". After this it will again be 6937 * safe to acquire adaptive mutex locks, or to drop them... 6938 */ 6939 if (old_pil != -1) { 6940 splx(old_pil); 6941 } else { 6942 xc_dismissed(cpuset); 6943 } 6944 6945 kpreempt_enable(); 6946 6947 sfmmu_mlist_reloc_exit(low, high); 6948 6949 /* 6950 * Postsuspend callbacks should drop any locks held across 6951 * the suspend callbacks. As before, we don't hold the mapping 6952 * list lock at this point.. our assumption is that the mapping 6953 * list still can't change due to our holding SE_EXCL lock and 6954 * there being no unlocked mappings left. Hence the restriction 6955 * on calling context to hat_delete_callback() 6956 */ 6957 hat_pageprocess_postcallbacks(repl, HAT_POSTUNSUSPEND); 6958 if (ret != 0) { 6959 /* 6960 * The second presuspend call failed: we got here through 6961 * the suspend_fail label above. 6962 */ 6963 ASSERT(ret != EIO); 6964 PAGE_RELOCATE_LOG(target, replacement, ret, cap_cpus); 6965 kreloc_thread = NULL; 6966 mutex_exit(&kpr_mutex); 6967 return (EAGAIN); 6968 } 6969 6970 /* 6971 * Now that we're out of the performance critical section we can 6972 * take care of updating the hash table, since we still 6973 * hold all the pages locked SE_EXCL at this point we 6974 * needn't worry about things changing out from under us. 6975 */ 6976 tpp = targ; 6977 rpp = repl; 6978 for (i = 0; i < npages; i++, tpp++, rpp++) { 6979 6980 /* 6981 * replace targ with replacement in page_hash table 6982 */ 6983 targ = tpp; 6984 page_relocate_hash(rpp, targ); 6985 6986 /* 6987 * concatenate target; caller of platform_page_relocate() 6988 * expects target to be concatenated after returning. 6989 */ 6990 ASSERT(targ->p_next == targ); 6991 ASSERT(targ->p_prev == targ); 6992 page_list_concat(&pl, &targ); 6993 } 6994 6995 ASSERT(*target == pl); 6996 *nrelocp = npages; 6997 PAGE_RELOCATE_LOG(target, replacement, 0, cap_cpus); 6998 kreloc_thread = NULL; 6999 mutex_exit(&kpr_mutex); 7000 return (0); 7001 } 7002 7003 /* 7004 * Called when stray pa_hments are found attached to a page which is 7005 * being freed. Notify the subsystem which attached the pa_hment of 7006 * the error if it registered a suitable handler, else panic. 7007 */ 7008 static void 7009 sfmmu_pahment_leaked(struct pa_hment *pahmep) 7010 { 7011 id_t cb_id = pahmep->cb_id; 7012 7013 ASSERT(cb_id >= (id_t)0 && cb_id < sfmmu_cb_nextid); 7014 if (sfmmu_cb_table[cb_id].errhandler != NULL) { 7015 if (sfmmu_cb_table[cb_id].errhandler(pahmep->addr, pahmep->len, 7016 HAT_CB_ERR_LEAKED, pahmep->pvt) == 0) 7017 return; /* non-fatal */ 7018 } 7019 panic("pa_hment leaked: 0x%p", (void *)pahmep); 7020 } 7021 7022 /* 7023 * Remove all mappings to page 'pp'. 
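 * The page must be held SE_EXCL (asserted below).  When forceflag is
 * SFMMU_KERNEL_RELOC, as passed by hat_page_relocate() above, locked
 * kernel mappings are left in place so that they can be suspended by
 * hat_pagesuspend() instead of being torn down.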
7024 */ 7025 int 7026 hat_pageunload(struct page *pp, uint_t forceflag) 7027 { 7028 struct page *origpp = pp; 7029 struct sf_hment *sfhme, *tmphme; 7030 struct hme_blk *hmeblkp; 7031 kmutex_t *pml; 7032 #ifdef VAC 7033 kmutex_t *pmtx; 7034 #endif 7035 cpuset_t cpuset, tset; 7036 int index, cons; 7037 int pa_hments; 7038 7039 ASSERT(PAGE_EXCL(pp)); 7040 7041 tmphme = NULL; 7042 pa_hments = 0; 7043 CPUSET_ZERO(cpuset); 7044 7045 pml = sfmmu_mlist_enter(pp); 7046 7047 #ifdef VAC 7048 if (pp->p_kpmref) 7049 sfmmu_kpm_pageunload(pp); 7050 ASSERT(!PP_ISMAPPED_KPM(pp)); 7051 #endif 7052 /* 7053 * Clear vpm reference. Since the page is exclusively locked 7054 * vpm cannot be referencing it. 7055 */ 7056 if (vpm_enable) { 7057 pp->p_vpmref = 0; 7058 } 7059 7060 index = PP_MAPINDEX(pp); 7061 cons = TTE8K; 7062 retry: 7063 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7064 tmphme = sfhme->hme_next; 7065 7066 if (IS_PAHME(sfhme)) { 7067 ASSERT(sfhme->hme_data != NULL); 7068 pa_hments++; 7069 continue; 7070 } 7071 7072 hmeblkp = sfmmu_hmetohblk(sfhme); 7073 7074 /* 7075 * If there are kernel mappings don't unload them, they will 7076 * be suspended. 7077 */ 7078 if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt && 7079 hmeblkp->hblk_tag.htag_id == ksfmmup) 7080 continue; 7081 7082 tset = sfmmu_pageunload(pp, sfhme, cons); 7083 CPUSET_OR(cpuset, tset); 7084 } 7085 7086 while (index != 0) { 7087 index = index >> 1; 7088 if (index != 0) 7089 cons++; 7090 if (index & 0x1) { 7091 /* Go to leading page */ 7092 pp = PP_GROUPLEADER(pp, cons); 7093 ASSERT(sfmmu_mlist_held(pp)); 7094 goto retry; 7095 } 7096 } 7097 7098 /* 7099 * cpuset may be empty if the page was only mapped by segkpm, 7100 * in which case we won't actually cross-trap. 7101 */ 7102 xt_sync(cpuset); 7103 7104 /* 7105 * The page should have no mappings at this point, unless 7106 * we were called from hat_page_relocate() in which case we 7107 * leave the locked mappings which will be suspended later. 7108 */ 7109 ASSERT(!PP_ISMAPPED(origpp) || pa_hments || 7110 (forceflag == SFMMU_KERNEL_RELOC)); 7111 7112 #ifdef VAC 7113 if (PP_ISTNC(pp)) { 7114 if (cons == TTE8K) { 7115 pmtx = sfmmu_page_enter(pp); 7116 PP_CLRTNC(pp); 7117 sfmmu_page_exit(pmtx); 7118 } else { 7119 conv_tnc(pp, cons); 7120 } 7121 } 7122 #endif /* VAC */ 7123 7124 if (pa_hments && forceflag != SFMMU_KERNEL_RELOC) { 7125 /* 7126 * Unlink any pa_hments and free them, calling back 7127 * the responsible subsystem to notify it of the error. 7128 * This can occur in situations such as drivers leaking 7129 * DMA handles: naughty, but common enough that we'd like 7130 * to keep the system running rather than bringing it 7131 * down with an obscure error like "pa_hment leaked" 7132 * which doesn't aid the user in debugging their driver. 
7133 */ 7134 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7135 tmphme = sfhme->hme_next; 7136 if (IS_PAHME(sfhme)) { 7137 struct pa_hment *pahmep = sfhme->hme_data; 7138 sfmmu_pahment_leaked(pahmep); 7139 HME_SUB(sfhme, pp); 7140 kmem_cache_free(pa_hment_cache, pahmep); 7141 } 7142 } 7143 7144 ASSERT(!PP_ISMAPPED(origpp)); 7145 } 7146 7147 sfmmu_mlist_exit(pml); 7148 7149 return (0); 7150 } 7151 7152 cpuset_t 7153 sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons) 7154 { 7155 struct hme_blk *hmeblkp; 7156 sfmmu_t *sfmmup; 7157 tte_t tte, ttemod; 7158 #ifdef DEBUG 7159 tte_t orig_old; 7160 #endif /* DEBUG */ 7161 caddr_t addr; 7162 int ttesz; 7163 int ret; 7164 cpuset_t cpuset; 7165 7166 ASSERT(pp != NULL); 7167 ASSERT(sfmmu_mlist_held(pp)); 7168 ASSERT(!PP_ISKAS(pp)); 7169 7170 CPUSET_ZERO(cpuset); 7171 7172 hmeblkp = sfmmu_hmetohblk(sfhme); 7173 7174 readtte: 7175 sfmmu_copytte(&sfhme->hme_tte, &tte); 7176 if (TTE_IS_VALID(&tte)) { 7177 sfmmup = hblktosfmmu(hmeblkp); 7178 ttesz = get_hblk_ttesz(hmeblkp); 7179 /* 7180 * Only unload mappings of 'cons' size. 7181 */ 7182 if (ttesz != cons) 7183 return (cpuset); 7184 7185 /* 7186 * Note that we have p_mapping lock, but no hash lock here. 7187 * hblk_unload() has to have both hash lock AND p_mapping 7188 * lock before it tries to modify tte. So, the tte could 7189 * not become invalid in the sfmmu_modifytte_try() below. 7190 */ 7191 ttemod = tte; 7192 #ifdef DEBUG 7193 orig_old = tte; 7194 #endif /* DEBUG */ 7195 7196 TTE_SET_INVALID(&ttemod); 7197 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7198 if (ret < 0) { 7199 #ifdef DEBUG 7200 /* only R/M bits can change. */ 7201 chk_tte(&orig_old, &tte, &ttemod, hmeblkp); 7202 #endif /* DEBUG */ 7203 goto readtte; 7204 } 7205 7206 if (ret == 0) { 7207 panic("pageunload: cas failed?"); 7208 } 7209 7210 addr = tte_to_vaddr(hmeblkp, tte); 7211 7212 if (hmeblkp->hblk_shared) { 7213 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7214 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7215 sf_region_t *rgnp; 7216 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7217 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7218 ASSERT(srdp != NULL); 7219 rgnp = srdp->srd_hmergnp[rid]; 7220 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7221 cpuset = sfmmu_rgntlb_demap(addr, rgnp, hmeblkp, 1); 7222 sfmmu_ttesync(NULL, addr, &tte, pp); 7223 ASSERT(rgnp->rgn_ttecnt[ttesz] > 0); 7224 atomic_dec_ulong(&rgnp->rgn_ttecnt[ttesz]); 7225 } else { 7226 sfmmu_ttesync(sfmmup, addr, &tte, pp); 7227 atomic_dec_ulong(&sfmmup->sfmmu_ttecnt[ttesz]); 7228 7229 /* 7230 * We need to flush the page from the virtual cache 7231 * in order to prevent a virtual cache alias 7232 * inconsistency. The particular scenario we need 7233 * to worry about is: 7234 * Given: va1 and va2 are two virtual address that 7235 * alias and will map the same physical address. 7236 * 1. mapping exists from va1 to pa and data has 7237 * been read into the cache. 7238 * 2. unload va1. 7239 * 3. load va2 and modify data using va2. 7240 * 4 unload va2. 7241 * 5. load va1 and reference data. Unless we flush 7242 * the data cache when we unload we will get 7243 * stale data. 7244 * This scenario is taken care of by using virtual 7245 * page coloring. 7246 */ 7247 if (sfmmup->sfmmu_ismhat) { 7248 /* 7249 * Flush TSBs, TLBs and caches 7250 * of every process 7251 * sharing this ism segment. 
7252 */ 7253 sfmmu_hat_lock_all(); 7254 mutex_enter(&ism_mlist_lock); 7255 kpreempt_disable(); 7256 sfmmu_ismtlbcache_demap(addr, sfmmup, hmeblkp, 7257 pp->p_pagenum, CACHE_NO_FLUSH); 7258 kpreempt_enable(); 7259 mutex_exit(&ism_mlist_lock); 7260 sfmmu_hat_unlock_all(); 7261 cpuset = cpu_ready_set; 7262 } else { 7263 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7264 cpuset = sfmmup->sfmmu_cpusran; 7265 } 7266 } 7267 7268 /* 7269 * Hme_sub has to run after ttesync() and a_rss update. 7270 * See hblk_unload(). 7271 */ 7272 HME_SUB(sfhme, pp); 7273 membar_stst(); 7274 7275 /* 7276 * We can not make ASSERT(hmeblkp->hblk_hmecnt <= NHMENTS) 7277 * since pteload may have done a HME_ADD() right after 7278 * we did the HME_SUB() above. Hmecnt is now maintained 7279 * by cas only. no lock guranteed its value. The only 7280 * gurantee we have is the hmecnt should not be less than 7281 * what it should be so the hblk will not be taken away. 7282 * It's also important that we decremented the hmecnt after 7283 * we are done with hmeblkp so that this hmeblk won't be 7284 * stolen. 7285 */ 7286 ASSERT(hmeblkp->hblk_hmecnt > 0); 7287 ASSERT(hmeblkp->hblk_vcnt > 0); 7288 atomic_dec_16(&hmeblkp->hblk_vcnt); 7289 atomic_dec_16(&hmeblkp->hblk_hmecnt); 7290 /* 7291 * This is bug 4063182. 7292 * XXX: fixme 7293 * ASSERT(hmeblkp->hblk_hmecnt || hmeblkp->hblk_vcnt || 7294 * !hmeblkp->hblk_lckcnt); 7295 */ 7296 } else { 7297 panic("invalid tte? pp %p &tte %p", 7298 (void *)pp, (void *)&tte); 7299 } 7300 7301 return (cpuset); 7302 } 7303 7304 /* 7305 * While relocating a kernel page, this function will move the mappings 7306 * from tpp to dpp and modify any associated data with these mappings. 7307 * It also unsuspends the suspended kernel mapping. 7308 */ 7309 static void 7310 hat_pagereload(struct page *tpp, struct page *dpp) 7311 { 7312 struct sf_hment *sfhme; 7313 tte_t tte, ttemod; 7314 int index, cons; 7315 7316 ASSERT(getpil() == PIL_MAX); 7317 ASSERT(sfmmu_mlist_held(tpp)); 7318 ASSERT(sfmmu_mlist_held(dpp)); 7319 7320 index = PP_MAPINDEX(tpp); 7321 cons = TTE8K; 7322 7323 /* Update real mappings to the page */ 7324 retry: 7325 for (sfhme = tpp->p_mapping; sfhme != NULL; sfhme = sfhme->hme_next) { 7326 if (IS_PAHME(sfhme)) 7327 continue; 7328 sfmmu_copytte(&sfhme->hme_tte, &tte); 7329 ttemod = tte; 7330 7331 /* 7332 * replace old pfn with new pfn in TTE 7333 */ 7334 PFN_TO_TTE(ttemod, dpp->p_pagenum); 7335 7336 /* 7337 * clear suspend bit 7338 */ 7339 ASSERT(TTE_IS_SUSPEND(&ttemod)); 7340 TTE_CLR_SUSPEND(&ttemod); 7341 7342 if (sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte) < 0) 7343 panic("hat_pagereload(): sfmmu_modifytte_try() failed"); 7344 7345 /* 7346 * set hme_page point to new page 7347 */ 7348 sfhme->hme_page = dpp; 7349 } 7350 7351 /* 7352 * move p_mapping list from old page to new page 7353 */ 7354 dpp->p_mapping = tpp->p_mapping; 7355 tpp->p_mapping = NULL; 7356 dpp->p_share = tpp->p_share; 7357 tpp->p_share = 0; 7358 7359 while (index != 0) { 7360 index = index >> 1; 7361 if (index != 0) 7362 cons++; 7363 if (index & 0x1) { 7364 tpp = PP_GROUPLEADER(tpp, cons); 7365 dpp = PP_GROUPLEADER(dpp, cons); 7366 goto retry; 7367 } 7368 } 7369 7370 curthread->t_flag &= ~T_DONTDTRACE; 7371 mutex_exit(&kpr_suspendlock); 7372 } 7373 7374 uint_t 7375 hat_pagesync(struct page *pp, uint_t clearflag) 7376 { 7377 struct sf_hment *sfhme, *tmphme = NULL; 7378 struct hme_blk *hmeblkp; 7379 kmutex_t *pml; 7380 cpuset_t cpuset, tset; 7381 int index, cons; 7382 extern ulong_t po_share; 7383 page_t *save_pp = pp; 
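	/*
	 * stop_on_sh and shcnt (declared below) implement
	 * HAT_SYNC_STOPON_SHARED: while walking the mapping list we
	 * accumulate an approximate share count, where a shared hmeblk
	 * contributes its region's refcnt, and once it exceeds po_share
	 * we simply mark the page referenced and stop.
	 */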
7384 int stop_on_sh = 0; 7385 uint_t shcnt; 7386 7387 CPUSET_ZERO(cpuset); 7388 7389 if (PP_ISRO(pp) && (clearflag & HAT_SYNC_STOPON_MOD)) { 7390 return (PP_GENERIC_ATTR(pp)); 7391 } 7392 7393 if ((clearflag & HAT_SYNC_ZERORM) == 0) { 7394 if ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(pp)) { 7395 return (PP_GENERIC_ATTR(pp)); 7396 } 7397 if ((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(pp)) { 7398 return (PP_GENERIC_ATTR(pp)); 7399 } 7400 if (clearflag & HAT_SYNC_STOPON_SHARED) { 7401 if (pp->p_share > po_share) { 7402 hat_page_setattr(pp, P_REF); 7403 return (PP_GENERIC_ATTR(pp)); 7404 } 7405 stop_on_sh = 1; 7406 shcnt = 0; 7407 } 7408 } 7409 7410 clearflag &= ~HAT_SYNC_STOPON_SHARED; 7411 pml = sfmmu_mlist_enter(pp); 7412 index = PP_MAPINDEX(pp); 7413 cons = TTE8K; 7414 retry: 7415 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7416 /* 7417 * We need to save the next hment on the list since 7418 * it is possible for pagesync to remove an invalid hment 7419 * from the list. 7420 */ 7421 tmphme = sfhme->hme_next; 7422 if (IS_PAHME(sfhme)) 7423 continue; 7424 /* 7425 * If we are looking for large mappings and this hme doesn't 7426 * reach the range we are seeking, just ignore it. 7427 */ 7428 hmeblkp = sfmmu_hmetohblk(sfhme); 7429 7430 if (hme_size(sfhme) < cons) 7431 continue; 7432 7433 if (stop_on_sh) { 7434 if (hmeblkp->hblk_shared) { 7435 sf_srd_t *srdp = hblktosrd(hmeblkp); 7436 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7437 sf_region_t *rgnp; 7438 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7439 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7440 ASSERT(srdp != NULL); 7441 rgnp = srdp->srd_hmergnp[rid]; 7442 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 7443 rgnp, rid); 7444 shcnt += rgnp->rgn_refcnt; 7445 } else { 7446 shcnt++; 7447 } 7448 if (shcnt > po_share) { 7449 /* 7450 * tell the pager to spare the page this time 7451 * around. 7452 */ 7453 hat_page_setattr(save_pp, P_REF); 7454 index = 0; 7455 break; 7456 } 7457 } 7458 tset = sfmmu_pagesync(pp, sfhme, 7459 clearflag & ~HAT_SYNC_STOPON_RM); 7460 CPUSET_OR(cpuset, tset); 7461 7462 /* 7463 * If clearflag is HAT_SYNC_DONTZERO, break out as soon 7464 * as the "ref" or "mod" is set or share cnt exceeds po_share. 
7465 */ 7466 if ((clearflag & ~HAT_SYNC_STOPON_RM) == HAT_SYNC_DONTZERO && 7467 (((clearflag & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) || 7468 ((clearflag & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)))) { 7469 index = 0; 7470 break; 7471 } 7472 } 7473 7474 while (index) { 7475 index = index >> 1; 7476 cons++; 7477 if (index & 0x1) { 7478 /* Go to leading page */ 7479 pp = PP_GROUPLEADER(pp, cons); 7480 goto retry; 7481 } 7482 } 7483 7484 xt_sync(cpuset); 7485 sfmmu_mlist_exit(pml); 7486 return (PP_GENERIC_ATTR(save_pp)); 7487 } 7488 7489 /* 7490 * Get all the hardware dependent attributes for a page struct 7491 */ 7492 static cpuset_t 7493 sfmmu_pagesync(struct page *pp, struct sf_hment *sfhme, 7494 uint_t clearflag) 7495 { 7496 caddr_t addr; 7497 tte_t tte, ttemod; 7498 struct hme_blk *hmeblkp; 7499 int ret; 7500 sfmmu_t *sfmmup; 7501 cpuset_t cpuset; 7502 7503 ASSERT(pp != NULL); 7504 ASSERT(sfmmu_mlist_held(pp)); 7505 ASSERT((clearflag == HAT_SYNC_DONTZERO) || 7506 (clearflag == HAT_SYNC_ZERORM)); 7507 7508 SFMMU_STAT(sf_pagesync); 7509 7510 CPUSET_ZERO(cpuset); 7511 7512 sfmmu_pagesync_retry: 7513 7514 sfmmu_copytte(&sfhme->hme_tte, &tte); 7515 if (TTE_IS_VALID(&tte)) { 7516 hmeblkp = sfmmu_hmetohblk(sfhme); 7517 sfmmup = hblktosfmmu(hmeblkp); 7518 addr = tte_to_vaddr(hmeblkp, tte); 7519 if (clearflag == HAT_SYNC_ZERORM) { 7520 ttemod = tte; 7521 TTE_CLR_RM(&ttemod); 7522 ret = sfmmu_modifytte_try(&tte, &ttemod, 7523 &sfhme->hme_tte); 7524 if (ret < 0) { 7525 /* 7526 * cas failed and the new value is not what 7527 * we want. 7528 */ 7529 goto sfmmu_pagesync_retry; 7530 } 7531 7532 if (ret > 0) { 7533 /* we win the cas */ 7534 if (hmeblkp->hblk_shared) { 7535 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7536 uint_t rid = 7537 hmeblkp->hblk_tag.htag_rid; 7538 sf_region_t *rgnp; 7539 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7540 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7541 ASSERT(srdp != NULL); 7542 rgnp = srdp->srd_hmergnp[rid]; 7543 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7544 srdp, rgnp, rid); 7545 cpuset = sfmmu_rgntlb_demap(addr, 7546 rgnp, hmeblkp, 1); 7547 } else { 7548 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 7549 0, 0); 7550 cpuset = sfmmup->sfmmu_cpusran; 7551 } 7552 } 7553 } 7554 sfmmu_ttesync(hmeblkp->hblk_shared ? NULL : sfmmup, addr, 7555 &tte, pp); 7556 } 7557 return (cpuset); 7558 } 7559 7560 /* 7561 * Remove write permission from a mappings to a page, so that 7562 * we can detect the next modification of it. This requires modifying 7563 * the TTE then invalidating (demap) any TLB entry using that TTE. 7564 * This code is similar to sfmmu_pagesync(). 
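 * Once this has run for every hment on the page (the walk is driven by
 * hat_page_clrwrt() below), the next store through any mapping of the
 * page takes a protection fault, which is how VMODSORT file systems
 * detect that a clean page has been dirtied again.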
7565 */ 7566 static cpuset_t 7567 sfmmu_pageclrwrt(struct page *pp, struct sf_hment *sfhme) 7568 { 7569 caddr_t addr; 7570 tte_t tte; 7571 tte_t ttemod; 7572 struct hme_blk *hmeblkp; 7573 int ret; 7574 sfmmu_t *sfmmup; 7575 cpuset_t cpuset; 7576 7577 ASSERT(pp != NULL); 7578 ASSERT(sfmmu_mlist_held(pp)); 7579 7580 CPUSET_ZERO(cpuset); 7581 SFMMU_STAT(sf_clrwrt); 7582 7583 retry: 7584 7585 sfmmu_copytte(&sfhme->hme_tte, &tte); 7586 if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) { 7587 hmeblkp = sfmmu_hmetohblk(sfhme); 7588 sfmmup = hblktosfmmu(hmeblkp); 7589 addr = tte_to_vaddr(hmeblkp, tte); 7590 7591 ttemod = tte; 7592 TTE_CLR_WRT(&ttemod); 7593 TTE_CLR_MOD(&ttemod); 7594 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 7595 7596 /* 7597 * if cas failed and the new value is not what 7598 * we want retry 7599 */ 7600 if (ret < 0) 7601 goto retry; 7602 7603 /* we win the cas */ 7604 if (ret > 0) { 7605 if (hmeblkp->hblk_shared) { 7606 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 7607 uint_t rid = hmeblkp->hblk_tag.htag_rid; 7608 sf_region_t *rgnp; 7609 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7610 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7611 ASSERT(srdp != NULL); 7612 rgnp = srdp->srd_hmergnp[rid]; 7613 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 7614 srdp, rgnp, rid); 7615 cpuset = sfmmu_rgntlb_demap(addr, 7616 rgnp, hmeblkp, 1); 7617 } else { 7618 sfmmu_tlb_demap(addr, sfmmup, hmeblkp, 0, 0); 7619 cpuset = sfmmup->sfmmu_cpusran; 7620 } 7621 } 7622 } 7623 7624 return (cpuset); 7625 } 7626 7627 /* 7628 * Walk all mappings of a page, removing write permission and clearing the 7629 * ref/mod bits. This code is similar to hat_pagesync() 7630 */ 7631 static void 7632 hat_page_clrwrt(page_t *pp) 7633 { 7634 struct sf_hment *sfhme; 7635 struct sf_hment *tmphme = NULL; 7636 kmutex_t *pml; 7637 cpuset_t cpuset; 7638 cpuset_t tset; 7639 int index; 7640 int cons; 7641 7642 CPUSET_ZERO(cpuset); 7643 7644 pml = sfmmu_mlist_enter(pp); 7645 index = PP_MAPINDEX(pp); 7646 cons = TTE8K; 7647 retry: 7648 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 7649 tmphme = sfhme->hme_next; 7650 7651 /* 7652 * If we are looking for large mappings and this hme doesn't 7653 * reach the range we are seeking, just ignore its. 7654 */ 7655 7656 if (hme_size(sfhme) < cons) 7657 continue; 7658 7659 tset = sfmmu_pageclrwrt(pp, sfhme); 7660 CPUSET_OR(cpuset, tset); 7661 } 7662 7663 while (index) { 7664 index = index >> 1; 7665 cons++; 7666 if (index & 0x1) { 7667 /* Go to leading page */ 7668 pp = PP_GROUPLEADER(pp, cons); 7669 goto retry; 7670 } 7671 } 7672 7673 xt_sync(cpuset); 7674 sfmmu_mlist_exit(pml); 7675 } 7676 7677 /* 7678 * Set the given REF/MOD/RO bits for the given page. 7679 * For a vnode with a sorted v_pages list, we need to change 7680 * the attributes and the v_pages list together under page_vnode_mutex. 
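 *
 * Illustrative use (hypothetical caller): after completing a write to
 * the page a caller would typically do
 *
 *	hat_page_setattr(pp, P_REF | P_MOD);
 *
 * P_NSH may be or'ed into flag to request that a VMODSORT vnode's
 * v_pages list not be reshuffled by this call.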
7681 */ 7682 void 7683 hat_page_setattr(page_t *pp, uint_t flag) 7684 { 7685 vnode_t *vp = pp->p_vnode; 7686 page_t **listp; 7687 kmutex_t *pmtx; 7688 kmutex_t *vphm = NULL; 7689 int noshuffle; 7690 7691 noshuffle = flag & P_NSH; 7692 flag &= ~P_NSH; 7693 7694 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7695 7696 /* 7697 * nothing to do if attribute already set 7698 */ 7699 if ((pp->p_nrm & flag) == flag) 7700 return; 7701 7702 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) && 7703 !noshuffle) { 7704 vphm = page_vnode_mutex(vp); 7705 mutex_enter(vphm); 7706 } 7707 7708 pmtx = sfmmu_page_enter(pp); 7709 pp->p_nrm |= flag; 7710 sfmmu_page_exit(pmtx); 7711 7712 if (vphm != NULL) { 7713 /* 7714 * Some File Systems examine v_pages for NULL w/o 7715 * grabbing the vphm mutex. Must not let it become NULL when 7716 * pp is the only page on the list. 7717 */ 7718 if (pp->p_vpnext != pp) { 7719 page_vpsub(&vp->v_pages, pp); 7720 if (vp->v_pages != NULL) 7721 listp = &vp->v_pages->p_vpprev->p_vpnext; 7722 else 7723 listp = &vp->v_pages; 7724 page_vpadd(listp, pp); 7725 } 7726 mutex_exit(vphm); 7727 } 7728 } 7729 7730 void 7731 hat_page_clrattr(page_t *pp, uint_t flag) 7732 { 7733 vnode_t *vp = pp->p_vnode; 7734 kmutex_t *pmtx; 7735 7736 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7737 7738 pmtx = sfmmu_page_enter(pp); 7739 7740 /* 7741 * Caller is expected to hold page's io lock for VMODSORT to work 7742 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod 7743 * bit is cleared. 7744 * We don't have assert to avoid tripping some existing third party 7745 * code. The dirty page is moved back to top of the v_page list 7746 * after IO is done in pvn_write_done(). 7747 */ 7748 pp->p_nrm &= ~flag; 7749 sfmmu_page_exit(pmtx); 7750 7751 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) { 7752 7753 /* 7754 * VMODSORT works by removing write permissions and getting 7755 * a fault when a page is made dirty. At this point 7756 * we need to remove write permission from all mappings 7757 * to this page. 7758 */ 7759 hat_page_clrwrt(pp); 7760 } 7761 } 7762 7763 uint_t 7764 hat_page_getattr(page_t *pp, uint_t flag) 7765 { 7766 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO))); 7767 return ((uint_t)(pp->p_nrm & flag)); 7768 } 7769 7770 /* 7771 * DEBUG kernels: verify that a kernel va<->pa translation 7772 * is safe by checking the underlying page_t is in a page 7773 * relocation-safe state. 7774 */ 7775 #ifdef DEBUG 7776 void 7777 sfmmu_check_kpfn(pfn_t pfn) 7778 { 7779 page_t *pp; 7780 int index, cons; 7781 7782 if (hat_check_vtop == 0) 7783 return; 7784 7785 if (kvseg.s_base == NULL || panicstr) 7786 return; 7787 7788 pp = page_numtopp_nolock(pfn); 7789 if (!pp) 7790 return; 7791 7792 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7793 return; 7794 7795 /* 7796 * Handed a large kernel page, we dig up the root page since we 7797 * know the root page might have the lock also. 7798 */ 7799 if (pp->p_szc != 0) { 7800 index = PP_MAPINDEX(pp); 7801 cons = TTE8K; 7802 again: 7803 while (index != 0) { 7804 index >>= 1; 7805 if (index != 0) 7806 cons++; 7807 if (index & 0x1) { 7808 pp = PP_GROUPLEADER(pp, cons); 7809 goto again; 7810 } 7811 } 7812 } 7813 7814 if (PAGE_LOCKED(pp) || PP_ISNORELOC(pp)) 7815 return; 7816 7817 /* 7818 * Pages need to be locked or allocated "permanent" (either from 7819 * static_arena arena or explicitly setting PG_NORELOC when calling 7820 * page_create_va()) for VA->PA translations to be valid. 
7821 */ 7822 if (!PP_ISNORELOC(pp)) 7823 panic("Illegal VA->PA translation, pp 0x%p not permanent", 7824 (void *)pp); 7825 else 7826 panic("Illegal VA->PA translation, pp 0x%p not locked", 7827 (void *)pp); 7828 } 7829 #endif /* DEBUG */ 7830 7831 /* 7832 * Returns a page frame number for a given virtual address. 7833 * Returns PFN_INVALID to indicate an invalid mapping 7834 */ 7835 pfn_t 7836 hat_getpfnum(struct hat *hat, caddr_t addr) 7837 { 7838 pfn_t pfn; 7839 tte_t tte; 7840 7841 /* 7842 * We would like to 7843 * ASSERT(AS_LOCK_HELD(as, &as->a_lock)); 7844 * but we can't because the iommu driver will call this 7845 * routine at interrupt time and it can't grab the as lock 7846 * or it will deadlock: A thread could have the as lock 7847 * and be waiting for io. The io can't complete 7848 * because the interrupt thread is blocked trying to grab 7849 * the as lock. 7850 */ 7851 7852 if (hat == ksfmmup) { 7853 if (IS_KMEM_VA_LARGEPAGE(addr)) { 7854 ASSERT(segkmem_lpszc > 0); 7855 pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc); 7856 if (pfn != PFN_INVALID) { 7857 sfmmu_check_kpfn(pfn); 7858 return (pfn); 7859 } 7860 } else if (segkpm && IS_KPM_ADDR(addr)) { 7861 return (sfmmu_kpm_vatopfn(addr)); 7862 } 7863 while ((pfn = sfmmu_vatopfn(addr, ksfmmup, &tte)) 7864 == PFN_SUSPENDED) { 7865 sfmmu_vatopfn_suspended(addr, ksfmmup, &tte); 7866 } 7867 sfmmu_check_kpfn(pfn); 7868 return (pfn); 7869 } else { 7870 return (sfmmu_uvatopfn(addr, hat, NULL)); 7871 } 7872 } 7873 7874 /* 7875 * This routine will return both pfn and tte for the vaddr. 7876 */ 7877 static pfn_t 7878 sfmmu_uvatopfn(caddr_t vaddr, struct hat *sfmmup, tte_t *ttep) 7879 { 7880 struct hmehash_bucket *hmebp; 7881 hmeblk_tag hblktag; 7882 int hmeshift, hashno = 1; 7883 struct hme_blk *hmeblkp = NULL; 7884 tte_t tte; 7885 7886 struct sf_hment *sfhmep; 7887 pfn_t pfn; 7888 7889 /* support for ISM */ 7890 ism_map_t *ism_map; 7891 ism_blk_t *ism_blkp; 7892 int i; 7893 sfmmu_t *ism_hatid = NULL; 7894 sfmmu_t *locked_hatid = NULL; 7895 sfmmu_t *sv_sfmmup = sfmmup; 7896 caddr_t sv_vaddr = vaddr; 7897 sf_srd_t *srdp; 7898 7899 if (ttep == NULL) { 7900 ttep = &tte; 7901 } else { 7902 ttep->ll = 0; 7903 } 7904 7905 ASSERT(sfmmup != ksfmmup); 7906 SFMMU_STAT(sf_user_vtop); 7907 /* 7908 * Set ism_hatid if vaddr falls in a ISM segment. 
7909 */ 7910 ism_blkp = sfmmup->sfmmu_iblk; 7911 if (ism_blkp != NULL) { 7912 sfmmu_ismhat_enter(sfmmup, 0); 7913 locked_hatid = sfmmup; 7914 } 7915 while (ism_blkp != NULL && ism_hatid == NULL) { 7916 ism_map = ism_blkp->iblk_maps; 7917 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 7918 if (vaddr >= ism_start(ism_map[i]) && 7919 vaddr < ism_end(ism_map[i])) { 7920 sfmmup = ism_hatid = ism_map[i].imap_ismhat; 7921 vaddr = (caddr_t)(vaddr - 7922 ism_start(ism_map[i])); 7923 break; 7924 } 7925 } 7926 ism_blkp = ism_blkp->iblk_next; 7927 } 7928 if (locked_hatid) { 7929 sfmmu_ismhat_exit(locked_hatid, 0); 7930 } 7931 7932 hblktag.htag_id = sfmmup; 7933 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 7934 do { 7935 hmeshift = HME_HASH_SHIFT(hashno); 7936 hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift); 7937 hblktag.htag_rehash = hashno; 7938 hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift); 7939 7940 SFMMU_HASH_LOCK(hmebp); 7941 7942 HME_HASH_FAST_SEARCH(hmebp, hblktag, hmeblkp); 7943 if (hmeblkp != NULL) { 7944 ASSERT(!hmeblkp->hblk_shared); 7945 HBLKTOHME(sfhmep, hmeblkp, vaddr); 7946 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7947 SFMMU_HASH_UNLOCK(hmebp); 7948 if (TTE_IS_VALID(ttep)) { 7949 pfn = TTE_TO_PFN(vaddr, ttep); 7950 return (pfn); 7951 } 7952 break; 7953 } 7954 SFMMU_HASH_UNLOCK(hmebp); 7955 hashno++; 7956 } while (HME_REHASH(sfmmup) && (hashno <= mmu_hashcnt)); 7957 7958 if (SF_HMERGNMAP_ISNULL(sv_sfmmup)) { 7959 return (PFN_INVALID); 7960 } 7961 srdp = sv_sfmmup->sfmmu_srdp; 7962 ASSERT(srdp != NULL); 7963 ASSERT(srdp->srd_refcnt != 0); 7964 hblktag.htag_id = srdp; 7965 hashno = 1; 7966 do { 7967 hmeshift = HME_HASH_SHIFT(hashno); 7968 hblktag.htag_bspage = HME_HASH_BSPAGE(sv_vaddr, hmeshift); 7969 hblktag.htag_rehash = hashno; 7970 hmebp = HME_HASH_FUNCTION(srdp, sv_vaddr, hmeshift); 7971 7972 SFMMU_HASH_LOCK(hmebp); 7973 for (hmeblkp = hmebp->hmeblkp; hmeblkp != NULL; 7974 hmeblkp = hmeblkp->hblk_next) { 7975 uint_t rid; 7976 sf_region_t *rgnp; 7977 caddr_t rsaddr; 7978 caddr_t readdr; 7979 7980 if (!HTAGS_EQ_SHME(hmeblkp->hblk_tag, hblktag, 7981 sv_sfmmup->sfmmu_hmeregion_map)) { 7982 continue; 7983 } 7984 ASSERT(hmeblkp->hblk_shared); 7985 rid = hmeblkp->hblk_tag.htag_rid; 7986 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 7987 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 7988 rgnp = srdp->srd_hmergnp[rid]; 7989 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 7990 HBLKTOHME(sfhmep, hmeblkp, sv_vaddr); 7991 sfmmu_copytte(&sfhmep->hme_tte, ttep); 7992 rsaddr = rgnp->rgn_saddr; 7993 readdr = rsaddr + rgnp->rgn_size; 7994 #ifdef DEBUG 7995 if (TTE_IS_VALID(ttep) || 7996 get_hblk_ttesz(hmeblkp) > TTE8K) { 7997 caddr_t eva = tte_to_evaddr(hmeblkp, ttep); 7998 ASSERT(eva > sv_vaddr); 7999 ASSERT(sv_vaddr >= rsaddr); 8000 ASSERT(sv_vaddr < readdr); 8001 ASSERT(eva <= readdr); 8002 } 8003 #endif /* DEBUG */ 8004 /* 8005 * Continue the search if we 8006 * found an invalid 8K tte outside of the area 8007 * covered by this hmeblk's region. 
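 * Conversely, if the tte is invalid but the hmeblk maps a size larger
 * than 8K, or the address does fall inside this region, no other shared
 * hmeblk can hold a valid mapping for it, so the code below gives up and
 * returns PFN_INVALID instead of rehashing.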
8008 */ 8009 if (TTE_IS_VALID(ttep)) { 8010 SFMMU_HASH_UNLOCK(hmebp); 8011 pfn = TTE_TO_PFN(sv_vaddr, ttep); 8012 return (pfn); 8013 } else if (get_hblk_ttesz(hmeblkp) > TTE8K || 8014 (sv_vaddr >= rsaddr && sv_vaddr < readdr)) { 8015 SFMMU_HASH_UNLOCK(hmebp); 8016 pfn = PFN_INVALID; 8017 return (pfn); 8018 } 8019 } 8020 SFMMU_HASH_UNLOCK(hmebp); 8021 hashno++; 8022 } while (hashno <= mmu_hashcnt); 8023 return (PFN_INVALID); 8024 } 8025 8026 8027 /* 8028 * For compatability with AT&T and later optimizations 8029 */ 8030 /* ARGSUSED */ 8031 void 8032 hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags) 8033 { 8034 ASSERT(hat != NULL); 8035 } 8036 8037 /* 8038 * Return the number of mappings to a particular page. This number is an 8039 * approximation of the number of people sharing the page. 8040 * 8041 * shared hmeblks or ism hmeblks are counted as 1 mapping here. 8042 * hat_page_checkshare() can be used to compare threshold to share 8043 * count that reflects the number of region sharers albeit at higher cost. 8044 */ 8045 ulong_t 8046 hat_page_getshare(page_t *pp) 8047 { 8048 page_t *spp = pp; /* start page */ 8049 kmutex_t *pml; 8050 ulong_t cnt; 8051 int index, sz = TTE64K; 8052 8053 /* 8054 * We need to grab the mlist lock to make sure any outstanding 8055 * load/unloads complete. Otherwise we could return zero 8056 * even though the unload(s) hasn't finished yet. 8057 */ 8058 pml = sfmmu_mlist_enter(spp); 8059 cnt = spp->p_share; 8060 8061 #ifdef VAC 8062 if (kpm_enable) 8063 cnt += spp->p_kpmref; 8064 #endif 8065 if (vpm_enable && pp->p_vpmref) { 8066 cnt += 1; 8067 } 8068 8069 /* 8070 * If we have any large mappings, we count the number of 8071 * mappings that this large page is part of. 8072 */ 8073 index = PP_MAPINDEX(spp); 8074 index >>= 1; 8075 while (index) { 8076 pp = PP_GROUPLEADER(spp, sz); 8077 if ((index & 0x1) && pp != spp) { 8078 cnt += pp->p_share; 8079 spp = pp; 8080 } 8081 index >>= 1; 8082 sz++; 8083 } 8084 sfmmu_mlist_exit(pml); 8085 return (cnt); 8086 } 8087 8088 /* 8089 * Return 1 if the number of mappings exceeds sh_thresh. Return 0 8090 * otherwise. Count shared hmeblks by region's refcnt. 
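 *
 * Illustrative caller pattern only (po_share here is just an example
 * threshold, not something defined in this file): a pageout-style scan
 * might skip pages that are shared too widely with
 *
 *	if (hat_page_checkshare(pp, po_share))
 *		continue;	// too many sharers, leave the page alone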
8091 */ 8092 int 8093 hat_page_checkshare(page_t *pp, ulong_t sh_thresh) 8094 { 8095 kmutex_t *pml; 8096 ulong_t cnt = 0; 8097 int index, sz = TTE8K; 8098 struct sf_hment *sfhme, *tmphme = NULL; 8099 struct hme_blk *hmeblkp; 8100 8101 pml = sfmmu_mlist_enter(pp); 8102 8103 #ifdef VAC 8104 if (kpm_enable) 8105 cnt = pp->p_kpmref; 8106 #endif 8107 8108 if (vpm_enable && pp->p_vpmref) { 8109 cnt += 1; 8110 } 8111 8112 if (pp->p_share + cnt > sh_thresh) { 8113 sfmmu_mlist_exit(pml); 8114 return (1); 8115 } 8116 8117 index = PP_MAPINDEX(pp); 8118 8119 again: 8120 for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) { 8121 tmphme = sfhme->hme_next; 8122 if (IS_PAHME(sfhme)) { 8123 continue; 8124 } 8125 8126 hmeblkp = sfmmu_hmetohblk(sfhme); 8127 if (hme_size(sfhme) != sz) { 8128 continue; 8129 } 8130 8131 if (hmeblkp->hblk_shared) { 8132 sf_srd_t *srdp = hblktosrd(hmeblkp); 8133 uint_t rid = hmeblkp->hblk_tag.htag_rid; 8134 sf_region_t *rgnp; 8135 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 8136 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 8137 ASSERT(srdp != NULL); 8138 rgnp = srdp->srd_hmergnp[rid]; 8139 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, 8140 rgnp, rid); 8141 cnt += rgnp->rgn_refcnt; 8142 } else { 8143 cnt++; 8144 } 8145 if (cnt > sh_thresh) { 8146 sfmmu_mlist_exit(pml); 8147 return (1); 8148 } 8149 } 8150 8151 index >>= 1; 8152 sz++; 8153 while (index) { 8154 pp = PP_GROUPLEADER(pp, sz); 8155 ASSERT(sfmmu_mlist_held(pp)); 8156 if (index & 0x1) { 8157 goto again; 8158 } 8159 index >>= 1; 8160 sz++; 8161 } 8162 sfmmu_mlist_exit(pml); 8163 return (0); 8164 } 8165 8166 /* 8167 * Unload all large mappings to the pp and reset the p_szc field of every 8168 * constituent page according to the remaining mappings. 8169 * 8170 * pp must be locked SE_EXCL. Even though no other constituent pages are 8171 * locked it's legal to unload the large mappings to the pp because all 8172 * constituent pages of large locked mappings have to be locked SE_SHARED. 8173 * This means if we have SE_EXCL lock on one of constituent pages none of the 8174 * large mappings to pp are locked. 8175 * 8176 * Decrease p_szc field starting from the last constituent page and ending 8177 * with the root page. This method is used because other threads rely on the 8178 * root's p_szc to find the lock to syncronize on. After a root page_t's p_szc 8179 * is demoted then other threads will succeed in sfmmu_mlspl_enter(). This 8180 * ensures that p_szc changes of the constituent pages appears atomic for all 8181 * threads that use sfmmu_mlspl_enter() to examine p_szc field. 8182 * 8183 * This mechanism is only used for file system pages where it's not always 8184 * possible to get SE_EXCL locks on all constituent pages to demote the size 8185 * code (as is done for anonymous or kernel large pages). 8186 * 8187 * See more comments in front of sfmmu_mlspl_enter(). 
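 *
 * As a sketch of the ordering: demoting a 64K page made up of eight 8K
 * constituents writes p_szc on constituents 7 down to 1 first, issues
 * membar_producer(), and only then lowers the root's p_szc. A thread in
 * sfmmu_mlspl_enter() that still sees the old root p_szc keeps using the
 * old (larger) lock; once it sees the new value, every constituent p_szc
 * it can observe has already been updated.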
8188 */ 8189 void 8190 hat_page_demote(page_t *pp) 8191 { 8192 int index; 8193 int sz; 8194 cpuset_t cpuset; 8195 int sync = 0; 8196 page_t *rootpp; 8197 struct sf_hment *sfhme; 8198 struct sf_hment *tmphme = NULL; 8199 struct hme_blk *hmeblkp; 8200 uint_t pszc; 8201 page_t *lastpp; 8202 cpuset_t tset; 8203 pgcnt_t npgs; 8204 kmutex_t *pml; 8205 kmutex_t *pmtx = NULL; 8206 8207 ASSERT(PAGE_EXCL(pp)); 8208 ASSERT(!PP_ISFREE(pp)); 8209 ASSERT(!PP_ISKAS(pp)); 8210 ASSERT(page_szc_lock_assert(pp)); 8211 pml = sfmmu_mlist_enter(pp); 8212 8213 pszc = pp->p_szc; 8214 if (pszc == 0) { 8215 goto out; 8216 } 8217 8218 index = PP_MAPINDEX(pp) >> 1; 8219 8220 if (index) { 8221 CPUSET_ZERO(cpuset); 8222 sz = TTE64K; 8223 sync = 1; 8224 } 8225 8226 while (index) { 8227 if (!(index & 0x1)) { 8228 index >>= 1; 8229 sz++; 8230 continue; 8231 } 8232 ASSERT(sz <= pszc); 8233 rootpp = PP_GROUPLEADER(pp, sz); 8234 for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) { 8235 tmphme = sfhme->hme_next; 8236 ASSERT(!IS_PAHME(sfhme)); 8237 hmeblkp = sfmmu_hmetohblk(sfhme); 8238 if (hme_size(sfhme) != sz) { 8239 continue; 8240 } 8241 tset = sfmmu_pageunload(rootpp, sfhme, sz); 8242 CPUSET_OR(cpuset, tset); 8243 } 8244 if (index >>= 1) { 8245 sz++; 8246 } 8247 } 8248 8249 ASSERT(!PP_ISMAPPED_LARGE(pp)); 8250 8251 if (sync) { 8252 xt_sync(cpuset); 8253 #ifdef VAC 8254 if (PP_ISTNC(pp)) { 8255 conv_tnc(rootpp, sz); 8256 } 8257 #endif /* VAC */ 8258 } 8259 8260 pmtx = sfmmu_page_enter(pp); 8261 8262 ASSERT(pp->p_szc == pszc); 8263 rootpp = PP_PAGEROOT(pp); 8264 ASSERT(rootpp->p_szc == pszc); 8265 lastpp = PP_PAGENEXT_N(rootpp, TTEPAGES(pszc) - 1); 8266 8267 while (lastpp != rootpp) { 8268 sz = PP_MAPINDEX(lastpp) ? fnd_mapping_sz(lastpp) : 0; 8269 ASSERT(sz < pszc); 8270 npgs = (sz == 0) ? 1 : TTEPAGES(sz); 8271 ASSERT(P2PHASE(lastpp->p_pagenum, npgs) == npgs - 1); 8272 while (--npgs > 0) { 8273 lastpp->p_szc = (uchar_t)sz; 8274 lastpp = PP_PAGEPREV(lastpp); 8275 } 8276 if (sz) { 8277 /* 8278 * make sure before current root's pszc 8279 * is updated all updates to constituent pages pszc 8280 * fields are globally visible. 8281 */ 8282 membar_producer(); 8283 } 8284 lastpp->p_szc = sz; 8285 ASSERT(IS_P2ALIGNED(lastpp->p_pagenum, TTEPAGES(sz))); 8286 if (lastpp != rootpp) { 8287 lastpp = PP_PAGEPREV(lastpp); 8288 } 8289 } 8290 if (sz == 0) { 8291 /* the loop above doesn't cover this case */ 8292 rootpp->p_szc = 0; 8293 } 8294 out: 8295 ASSERT(pp->p_szc == 0); 8296 if (pmtx != NULL) { 8297 sfmmu_page_exit(pmtx); 8298 } 8299 sfmmu_mlist_exit(pml); 8300 } 8301 8302 /* 8303 * Refresh the HAT ismttecnt[] element for size szc. 8304 * Caller must have set ISM busy flag to prevent mapping 8305 * lists from changing while we're traversing them. 
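 * hat_share() and hat_unshare() below refresh the counts for every
 * enabled page size after editing the ISM maps, i.e.
 *
 *	for (i = 0; i <= ismszc; i++)
 *		if (!(disable_ism_large_pages & (1 << i)))
 *			(void) ism_tsb_entries(sfmmup, i);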
8306 */ 8307 pgcnt_t 8308 ism_tsb_entries(sfmmu_t *sfmmup, int szc) 8309 { 8310 ism_blk_t *ism_blkp = sfmmup->sfmmu_iblk; 8311 ism_map_t *ism_map; 8312 pgcnt_t npgs = 0; 8313 pgcnt_t npgs_scd = 0; 8314 int j; 8315 sf_scd_t *scdp; 8316 uchar_t rid; 8317 8318 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 8319 scdp = sfmmup->sfmmu_scdp; 8320 8321 for (; ism_blkp != NULL; ism_blkp = ism_blkp->iblk_next) { 8322 ism_map = ism_blkp->iblk_maps; 8323 for (j = 0; ism_map[j].imap_ismhat && j < ISM_MAP_SLOTS; j++) { 8324 rid = ism_map[j].imap_rid; 8325 ASSERT(rid == SFMMU_INVALID_ISMRID || 8326 rid < sfmmup->sfmmu_srdp->srd_next_ismrid); 8327 8328 if (scdp != NULL && rid != SFMMU_INVALID_ISMRID && 8329 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 8330 /* ISM is in sfmmup's SCD */ 8331 npgs_scd += 8332 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8333 } else { 8334 /* ISMs is not in SCD */ 8335 npgs += 8336 ism_map[j].imap_ismhat->sfmmu_ttecnt[szc]; 8337 } 8338 } 8339 } 8340 sfmmup->sfmmu_ismttecnt[szc] = npgs; 8341 sfmmup->sfmmu_scdismttecnt[szc] = npgs_scd; 8342 return (npgs); 8343 } 8344 8345 /* 8346 * Yield the memory claim requirement for an address space. 8347 * 8348 * This is currently implemented as the number of bytes that have active 8349 * hardware translations that have page structures. Therefore, it can 8350 * underestimate the traditional resident set size, eg, if the 8351 * physical page is present and the hardware translation is missing; 8352 * and it can overestimate the rss, eg, if there are active 8353 * translations to a frame buffer with page structs. 8354 * Also, it does not take sharing into account. 8355 * 8356 * Note that we don't acquire locks here since this function is most often 8357 * called from the clock thread. 8358 */ 8359 size_t 8360 hat_get_mapped_size(struct hat *hat) 8361 { 8362 size_t assize = 0; 8363 int i; 8364 8365 if (hat == NULL) 8366 return (0); 8367 8368 for (i = 0; i < mmu_page_sizes; i++) 8369 assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] + 8370 (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i); 8371 8372 if (hat->sfmmu_iblk == NULL) 8373 return (assize); 8374 8375 for (i = 0; i < mmu_page_sizes; i++) 8376 assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] + 8377 (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i); 8378 8379 return (assize); 8380 } 8381 8382 int 8383 hat_stats_enable(struct hat *hat) 8384 { 8385 hatlock_t *hatlockp; 8386 8387 hatlockp = sfmmu_hat_enter(hat); 8388 hat->sfmmu_rmstat++; 8389 sfmmu_hat_exit(hatlockp); 8390 return (1); 8391 } 8392 8393 void 8394 hat_stats_disable(struct hat *hat) 8395 { 8396 hatlock_t *hatlockp; 8397 8398 hatlockp = sfmmu_hat_enter(hat); 8399 hat->sfmmu_rmstat--; 8400 sfmmu_hat_exit(hatlockp); 8401 } 8402 8403 /* 8404 * Routines for entering or removing ourselves from the 8405 * ism_hat's mapping list. This is used for both private and 8406 * SCD hats. 
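 * Callers must hold ism_mlist_lock across these calls, as the ASSERTs
 * below require; the usual pattern (see hat_share()/hat_unshare()) is
 *
 *	mutex_enter(&ism_mlist_lock);
 *	iment_add(ism_ment, ism_hatid);
 *	mutex_exit(&ism_mlist_lock);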
8407 */ 8408 static void 8409 iment_add(struct ism_ment *iment, struct hat *ism_hat) 8410 { 8411 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8412 8413 iment->iment_prev = NULL; 8414 iment->iment_next = ism_hat->sfmmu_iment; 8415 if (ism_hat->sfmmu_iment) { 8416 ism_hat->sfmmu_iment->iment_prev = iment; 8417 } 8418 ism_hat->sfmmu_iment = iment; 8419 } 8420 8421 static void 8422 iment_sub(struct ism_ment *iment, struct hat *ism_hat) 8423 { 8424 ASSERT(MUTEX_HELD(&ism_mlist_lock)); 8425 8426 if (ism_hat->sfmmu_iment == NULL) { 8427 panic("ism map entry remove - no entries"); 8428 } 8429 8430 if (iment->iment_prev) { 8431 ASSERT(ism_hat->sfmmu_iment != iment); 8432 iment->iment_prev->iment_next = iment->iment_next; 8433 } else { 8434 ASSERT(ism_hat->sfmmu_iment == iment); 8435 ism_hat->sfmmu_iment = iment->iment_next; 8436 } 8437 8438 if (iment->iment_next) { 8439 iment->iment_next->iment_prev = iment->iment_prev; 8440 } 8441 8442 /* 8443 * zero out the entry 8444 */ 8445 iment->iment_next = NULL; 8446 iment->iment_prev = NULL; 8447 iment->iment_hat = NULL; 8448 iment->iment_base_va = 0; 8449 } 8450 8451 /* 8452 * Hat_share()/unshare() return an (non-zero) error 8453 * when saddr and daddr are not properly aligned. 8454 * 8455 * The top level mapping element determines the alignment 8456 * requirement for saddr and daddr, depending on different 8457 * architectures. 8458 * 8459 * When hat_share()/unshare() are not supported, 8460 * HATOP_SHARE()/UNSHARE() return 0 8461 */ 8462 int 8463 hat_share(struct hat *sfmmup, caddr_t addr, 8464 struct hat *ism_hatid, caddr_t sptaddr, size_t len, uint_t ismszc) 8465 { 8466 ism_blk_t *ism_blkp; 8467 ism_blk_t *new_iblk; 8468 ism_map_t *ism_map; 8469 ism_ment_t *ism_ment; 8470 int i, added; 8471 hatlock_t *hatlockp; 8472 int reload_mmu = 0; 8473 uint_t ismshift = page_get_shift(ismszc); 8474 size_t ismpgsz = page_get_pagesize(ismszc); 8475 uint_t ismmask = (uint_t)ismpgsz - 1; 8476 size_t sh_size = ISM_SHIFT(ismshift, len); 8477 ushort_t ismhatflag; 8478 hat_region_cookie_t rcookie; 8479 sf_scd_t *old_scdp; 8480 8481 #ifdef DEBUG 8482 caddr_t eaddr = addr + len; 8483 #endif /* DEBUG */ 8484 8485 ASSERT(ism_hatid != NULL && sfmmup != NULL); 8486 ASSERT(sptaddr == ISMID_STARTADDR); 8487 /* 8488 * Check the alignment. 8489 */ 8490 if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr)) 8491 return (EINVAL); 8492 8493 /* 8494 * Check size alignment. 8495 */ 8496 if (!ISM_ALIGNED(ismshift, len)) 8497 return (EINVAL); 8498 8499 /* 8500 * Allocate ism_ment for the ism_hat's mapping list, and an 8501 * ism map blk in case we need one. We must do our 8502 * allocations before acquiring locks to prevent a deadlock 8503 * in the kmem allocator on the mapping list lock. 8504 */ 8505 new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP); 8506 ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP); 8507 8508 /* 8509 * Serialize ISM mappings with the ISM busy flag, and also the 8510 * trap handlers. 8511 */ 8512 sfmmu_ismhat_enter(sfmmup, 0); 8513 8514 /* 8515 * Allocate an ism map blk if necessary. 8516 */ 8517 if (sfmmup->sfmmu_iblk == NULL) { 8518 sfmmup->sfmmu_iblk = new_iblk; 8519 bzero(new_iblk, sizeof (*new_iblk)); 8520 new_iblk->iblk_nextpa = (uint64_t)-1; 8521 membar_stst(); /* make sure next ptr visible to all CPUs */ 8522 sfmmup->sfmmu_ismblkpa = va_to_pa((caddr_t)new_iblk); 8523 reload_mmu = 1; 8524 new_iblk = NULL; 8525 } 8526 8527 #ifdef DEBUG 8528 /* 8529 * Make sure mapping does not already exist. 
8530 */ 8531 ism_blkp = sfmmup->sfmmu_iblk; 8532 while (ism_blkp != NULL) { 8533 ism_map = ism_blkp->iblk_maps; 8534 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) { 8535 if ((addr >= ism_start(ism_map[i]) && 8536 addr < ism_end(ism_map[i])) || 8537 eaddr > ism_start(ism_map[i]) && 8538 eaddr <= ism_end(ism_map[i])) { 8539 panic("sfmmu_share: Already mapped!"); 8540 } 8541 } 8542 ism_blkp = ism_blkp->iblk_next; 8543 } 8544 #endif /* DEBUG */ 8545 8546 ASSERT(ismszc >= TTE4M); 8547 if (ismszc == TTE4M) { 8548 ismhatflag = HAT_4M_FLAG; 8549 } else if (ismszc == TTE32M) { 8550 ismhatflag = HAT_32M_FLAG; 8551 } else if (ismszc == TTE256M) { 8552 ismhatflag = HAT_256M_FLAG; 8553 } 8554 /* 8555 * Add mapping to first available mapping slot. 8556 */ 8557 ism_blkp = sfmmup->sfmmu_iblk; 8558 added = 0; 8559 while (!added) { 8560 ism_map = ism_blkp->iblk_maps; 8561 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8562 if (ism_map[i].imap_ismhat == NULL) { 8563 8564 ism_map[i].imap_ismhat = ism_hatid; 8565 ism_map[i].imap_vb_shift = (uchar_t)ismshift; 8566 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8567 ism_map[i].imap_hatflags = ismhatflag; 8568 ism_map[i].imap_sz_mask = ismmask; 8569 /* 8570 * imap_seg is checked in ISM_CHECK to see if 8571 * non-NULL, then other info assumed valid. 8572 */ 8573 membar_stst(); 8574 ism_map[i].imap_seg = (uintptr_t)addr | sh_size; 8575 ism_map[i].imap_ment = ism_ment; 8576 8577 /* 8578 * Now add ourselves to the ism_hat's 8579 * mapping list. 8580 */ 8581 ism_ment->iment_hat = sfmmup; 8582 ism_ment->iment_base_va = addr; 8583 ism_hatid->sfmmu_ismhat = 1; 8584 mutex_enter(&ism_mlist_lock); 8585 iment_add(ism_ment, ism_hatid); 8586 mutex_exit(&ism_mlist_lock); 8587 added = 1; 8588 break; 8589 } 8590 } 8591 if (!added && ism_blkp->iblk_next == NULL) { 8592 ism_blkp->iblk_next = new_iblk; 8593 new_iblk = NULL; 8594 bzero(ism_blkp->iblk_next, 8595 sizeof (*ism_blkp->iblk_next)); 8596 ism_blkp->iblk_next->iblk_nextpa = (uint64_t)-1; 8597 membar_stst(); 8598 ism_blkp->iblk_nextpa = 8599 va_to_pa((caddr_t)ism_blkp->iblk_next); 8600 } 8601 ism_blkp = ism_blkp->iblk_next; 8602 } 8603 8604 /* 8605 * After calling hat_join_region, sfmmup may join a new SCD or 8606 * move from the old scd to a new scd, in which case, we want to 8607 * shrink the sfmmup's private tsb size, i.e., pass shrink to 8608 * sfmmu_check_page_sizes at the end of this routine. 8609 */ 8610 old_scdp = sfmmup->sfmmu_scdp; 8611 8612 rcookie = hat_join_region(sfmmup, addr, len, (void *)ism_hatid, 0, 8613 PROT_ALL, ismszc, NULL, HAT_REGION_ISM); 8614 if (rcookie != HAT_INVALID_REGION_COOKIE) { 8615 ism_map[i].imap_rid = (uchar_t)((uint64_t)rcookie); 8616 } 8617 /* 8618 * Update our counters for this sfmmup's ism mappings. 8619 */ 8620 for (i = 0; i <= ismszc; i++) { 8621 if (!(disable_ism_large_pages & (1 << i))) 8622 (void) ism_tsb_entries(sfmmup, i); 8623 } 8624 8625 /* 8626 * For ISM and DISM we do not support 512K pages, so we only only 8627 * search the 4M and 8K/64K hashes for 4 pagesize cpus, and search the 8628 * 256M or 32M, and 4M and 8K/64K hashes for 6 pagesize cpus. 8629 * 8630 * Need to set 32M/256M ISM flags to make sure 8631 * sfmmu_check_page_sizes() enables them on Panther. 
8632 */ 8633 ASSERT((disable_ism_large_pages & (1 << TTE512K)) != 0); 8634 8635 switch (ismszc) { 8636 case TTE256M: 8637 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_256M_ISM)) { 8638 hatlockp = sfmmu_hat_enter(sfmmup); 8639 SFMMU_FLAGS_SET(sfmmup, HAT_256M_ISM); 8640 sfmmu_hat_exit(hatlockp); 8641 } 8642 break; 8643 case TTE32M: 8644 if (!SFMMU_FLAGS_ISSET(sfmmup, HAT_32M_ISM)) { 8645 hatlockp = sfmmu_hat_enter(sfmmup); 8646 SFMMU_FLAGS_SET(sfmmup, HAT_32M_ISM); 8647 sfmmu_hat_exit(hatlockp); 8648 } 8649 break; 8650 default: 8651 break; 8652 } 8653 8654 /* 8655 * If we updated the ismblkpa for this HAT we must make 8656 * sure all CPUs running this process reload their tsbmiss area. 8657 * Otherwise they will fail to load the mappings in the tsbmiss 8658 * handler and will loop calling pagefault(). 8659 */ 8660 if (reload_mmu) { 8661 hatlockp = sfmmu_hat_enter(sfmmup); 8662 sfmmu_sync_mmustate(sfmmup); 8663 sfmmu_hat_exit(hatlockp); 8664 } 8665 8666 sfmmu_ismhat_exit(sfmmup, 0); 8667 8668 /* 8669 * Free up ismblk if we didn't use it. 8670 */ 8671 if (new_iblk != NULL) 8672 kmem_cache_free(ism_blk_cache, new_iblk); 8673 8674 /* 8675 * Check TSB and TLB page sizes. 8676 */ 8677 if (sfmmup->sfmmu_scdp != NULL && old_scdp != sfmmup->sfmmu_scdp) { 8678 sfmmu_check_page_sizes(sfmmup, 0); 8679 } else { 8680 sfmmu_check_page_sizes(sfmmup, 1); 8681 } 8682 return (0); 8683 } 8684 8685 /* 8686 * hat_unshare removes exactly one ism_map from 8687 * this process's as. It expects multiple calls 8688 * to hat_unshare for multiple shm segments. 8689 */ 8690 void 8691 hat_unshare(struct hat *sfmmup, caddr_t addr, size_t len, uint_t ismszc) 8692 { 8693 ism_map_t *ism_map; 8694 ism_ment_t *free_ment = NULL; 8695 ism_blk_t *ism_blkp; 8696 struct hat *ism_hatid; 8697 int found, i; 8698 hatlock_t *hatlockp; 8699 struct tsb_info *tsbinfo; 8700 uint_t ismshift = page_get_shift(ismszc); 8701 size_t sh_size = ISM_SHIFT(ismshift, len); 8702 uchar_t ism_rid; 8703 sf_scd_t *old_scdp; 8704 8705 ASSERT(ISM_ALIGNED(ismshift, addr)); 8706 ASSERT(ISM_ALIGNED(ismshift, len)); 8707 ASSERT(sfmmup != NULL); 8708 ASSERT(sfmmup != ksfmmup); 8709 8710 ASSERT(sfmmup->sfmmu_as != NULL); 8711 8712 /* 8713 * Make sure that during the entire time ISM mappings are removed, 8714 * the trap handlers serialize behind us, and that no one else 8715 * can be mucking with ISM mappings. This also lets us get away 8716 * with not doing expensive cross calls to flush the TLB -- we 8717 * just discard the context, flush the entire TSB, and call it 8718 * a day. 8719 */ 8720 sfmmu_ismhat_enter(sfmmup, 0); 8721 8722 /* 8723 * Remove the mapping. 8724 * 8725 * We can't have any holes in the ism map. 8726 * The tsb miss code while searching the ism map will 8727 * stop on an empty map slot. So we must move 8728 * everyone past the hole up 1 if any. 8729 * 8730 * Also empty ism map blks are not freed until the 8731 * process exits. This is to prevent a MT race condition 8732 * between sfmmu_unshare() and sfmmu_tsbmiss_exception(). 
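 *
 * For example, if a blk's slots hold [A, B, C, empty, ...] and B is the
 * mapping being removed, the copy loop below leaves [A, C, empty, ...];
 * when a blk is full, its last slot is refilled from slot 0 of the next
 * blk and the shift continues there.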
8733 */ 8734 found = 0; 8735 ism_blkp = sfmmup->sfmmu_iblk; 8736 while (!found && ism_blkp != NULL) { 8737 ism_map = ism_blkp->iblk_maps; 8738 for (i = 0; i < ISM_MAP_SLOTS; i++) { 8739 if (addr == ism_start(ism_map[i]) && 8740 sh_size == (size_t)(ism_size(ism_map[i]))) { 8741 found = 1; 8742 break; 8743 } 8744 } 8745 if (!found) 8746 ism_blkp = ism_blkp->iblk_next; 8747 } 8748 8749 if (found) { 8750 ism_hatid = ism_map[i].imap_ismhat; 8751 ism_rid = ism_map[i].imap_rid; 8752 ASSERT(ism_hatid != NULL); 8753 ASSERT(ism_hatid->sfmmu_ismhat == 1); 8754 8755 /* 8756 * After hat_leave_region, the sfmmup may leave SCD, 8757 * in which case, we want to grow the private tsb size when 8758 * calling sfmmu_check_page_sizes at the end of the routine. 8759 */ 8760 old_scdp = sfmmup->sfmmu_scdp; 8761 /* 8762 * Then remove ourselves from the region. 8763 */ 8764 if (ism_rid != SFMMU_INVALID_ISMRID) { 8765 hat_leave_region(sfmmup, (void *)((uint64_t)ism_rid), 8766 HAT_REGION_ISM); 8767 } 8768 8769 /* 8770 * And now guarantee that any other cpu 8771 * that tries to process an ISM miss 8772 * will go to tl=0. 8773 */ 8774 hatlockp = sfmmu_hat_enter(sfmmup); 8775 sfmmu_invalidate_ctx(sfmmup); 8776 sfmmu_hat_exit(hatlockp); 8777 8778 /* 8779 * Remove ourselves from the ism mapping list. 8780 */ 8781 mutex_enter(&ism_mlist_lock); 8782 iment_sub(ism_map[i].imap_ment, ism_hatid); 8783 mutex_exit(&ism_mlist_lock); 8784 free_ment = ism_map[i].imap_ment; 8785 8786 /* 8787 * We delete the ism map by copying 8788 * the next map over the current one. 8789 * We will take the next one in the maps 8790 * array or from the next ism_blk. 8791 */ 8792 while (ism_blkp != NULL) { 8793 ism_map = ism_blkp->iblk_maps; 8794 while (i < (ISM_MAP_SLOTS - 1)) { 8795 ism_map[i] = ism_map[i + 1]; 8796 i++; 8797 } 8798 /* i == (ISM_MAP_SLOTS - 1) */ 8799 ism_blkp = ism_blkp->iblk_next; 8800 if (ism_blkp != NULL) { 8801 ism_map[i] = ism_blkp->iblk_maps[0]; 8802 i = 0; 8803 } else { 8804 ism_map[i].imap_seg = 0; 8805 ism_map[i].imap_vb_shift = 0; 8806 ism_map[i].imap_rid = SFMMU_INVALID_ISMRID; 8807 ism_map[i].imap_hatflags = 0; 8808 ism_map[i].imap_sz_mask = 0; 8809 ism_map[i].imap_ismhat = NULL; 8810 ism_map[i].imap_ment = NULL; 8811 } 8812 } 8813 8814 /* 8815 * Now flush entire TSB for the process, since 8816 * demapping page by page can be too expensive. 8817 * We don't have to flush the TLB here anymore 8818 * since we switch to a new TLB ctx instead. 8819 * Also, there is no need to flush if the process 8820 * is exiting since the TSB will be freed later. 8821 */ 8822 if (!sfmmup->sfmmu_free) { 8823 hatlockp = sfmmu_hat_enter(sfmmup); 8824 for (tsbinfo = sfmmup->sfmmu_tsb; tsbinfo != NULL; 8825 tsbinfo = tsbinfo->tsb_next) { 8826 if (tsbinfo->tsb_flags & TSB_SWAPPED) 8827 continue; 8828 if (tsbinfo->tsb_flags & TSB_RELOC_FLAG) { 8829 tsbinfo->tsb_flags |= 8830 TSB_FLUSH_NEEDED; 8831 continue; 8832 } 8833 8834 sfmmu_inv_tsb(tsbinfo->tsb_va, 8835 TSB_BYTES(tsbinfo->tsb_szc)); 8836 } 8837 sfmmu_hat_exit(hatlockp); 8838 } 8839 } 8840 8841 /* 8842 * Update our counters for this sfmmup's ism mappings. 8843 */ 8844 for (i = 0; i <= ismszc; i++) { 8845 if (!(disable_ism_large_pages & (1 << i))) 8846 (void) ism_tsb_entries(sfmmup, i); 8847 } 8848 8849 sfmmu_ismhat_exit(sfmmup, 0); 8850 8851 /* 8852 * We must do our freeing here after dropping locks 8853 * to prevent a deadlock in the kmem allocator on the 8854 * mapping list lock. 
8855 */ 8856 if (free_ment != NULL) 8857 kmem_cache_free(ism_ment_cache, free_ment); 8858 8859 /* 8860 * Check TSB and TLB page sizes if the process isn't exiting. 8861 */ 8862 if (!sfmmup->sfmmu_free) { 8863 if (found && old_scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 8864 sfmmu_check_page_sizes(sfmmup, 1); 8865 } else { 8866 sfmmu_check_page_sizes(sfmmup, 0); 8867 } 8868 } 8869 } 8870 8871 /* ARGSUSED */ 8872 static int 8873 sfmmu_idcache_constructor(void *buf, void *cdrarg, int kmflags) 8874 { 8875 /* void *buf is sfmmu_t pointer */ 8876 bzero(buf, sizeof (sfmmu_t)); 8877 8878 return (0); 8879 } 8880 8881 /* ARGSUSED */ 8882 static void 8883 sfmmu_idcache_destructor(void *buf, void *cdrarg) 8884 { 8885 /* void *buf is sfmmu_t pointer */ 8886 } 8887 8888 /* 8889 * setup kmem hmeblks by bzeroing all members and initializing the nextpa 8890 * field to be the pa of this hmeblk 8891 */ 8892 /* ARGSUSED */ 8893 static int 8894 sfmmu_hblkcache_constructor(void *buf, void *cdrarg, int kmflags) 8895 { 8896 struct hme_blk *hmeblkp; 8897 8898 bzero(buf, (size_t)cdrarg); 8899 hmeblkp = (struct hme_blk *)buf; 8900 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 8901 8902 #ifdef HBLK_TRACE 8903 mutex_init(&hmeblkp->hblk_audit_lock, NULL, MUTEX_DEFAULT, NULL); 8904 #endif /* HBLK_TRACE */ 8905 8906 return (0); 8907 } 8908 8909 /* ARGSUSED */ 8910 static void 8911 sfmmu_hblkcache_destructor(void *buf, void *cdrarg) 8912 { 8913 8914 #ifdef HBLK_TRACE 8915 8916 struct hme_blk *hmeblkp; 8917 8918 hmeblkp = (struct hme_blk *)buf; 8919 mutex_destroy(&hmeblkp->hblk_audit_lock); 8920 8921 #endif /* HBLK_TRACE */ 8922 } 8923 8924 #define SFMMU_CACHE_RECLAIM_SCAN_RATIO 8 8925 static int sfmmu_cache_reclaim_scan_ratio = SFMMU_CACHE_RECLAIM_SCAN_RATIO; 8926 /* 8927 * The kmem allocator will callback into our reclaim routine when the system 8928 * is running low in memory. We traverse the hash and free up all unused but 8929 * still cached hme_blks. We also traverse the free list and free them up 8930 * as well. 
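 * The reclaim hook is presumably registered where the hme_blk kmem
 * caches are created, i.e. something along the lines of
 *
 *	kmem_cache_create("sfmmu8_cache", hme8blk_sz, align,
 *	    sfmmu_hblkcache_constructor, sfmmu_hblkcache_destructor,
 *	    sfmmu_hblkcache_reclaim, NULL, static_arena, 0);
 *
 * ("sfmmu8_cache", hme8blk_sz and align are assumptions here; only the
 * constructor, destructor and reclaim routines are defined in this
 * section).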
8931 */ 8932 /*ARGSUSED*/ 8933 static void 8934 sfmmu_hblkcache_reclaim(void *cdrarg) 8935 { 8936 int i; 8937 struct hmehash_bucket *hmebp; 8938 struct hme_blk *hmeblkp, *nx_hblk, *pr_hblk = NULL; 8939 static struct hmehash_bucket *uhmehash_reclaim_hand; 8940 static struct hmehash_bucket *khmehash_reclaim_hand; 8941 struct hme_blk *list = NULL, *last_hmeblkp; 8942 cpuset_t cpuset = cpu_ready_set; 8943 cpu_hme_pend_t *cpuhp; 8944 8945 /* Free up hmeblks on the cpu pending lists */ 8946 for (i = 0; i < NCPU; i++) { 8947 cpuhp = &cpu_hme_pend[i]; 8948 if (cpuhp->chp_listp != NULL) { 8949 mutex_enter(&cpuhp->chp_mutex); 8950 if (cpuhp->chp_listp == NULL) { 8951 mutex_exit(&cpuhp->chp_mutex); 8952 continue; 8953 } 8954 for (last_hmeblkp = cpuhp->chp_listp; 8955 last_hmeblkp->hblk_next != NULL; 8956 last_hmeblkp = last_hmeblkp->hblk_next) 8957 ; 8958 last_hmeblkp->hblk_next = list; 8959 list = cpuhp->chp_listp; 8960 cpuhp->chp_listp = NULL; 8961 cpuhp->chp_count = 0; 8962 mutex_exit(&cpuhp->chp_mutex); 8963 } 8964 8965 } 8966 8967 if (list != NULL) { 8968 kpreempt_disable(); 8969 CPUSET_DEL(cpuset, CPU->cpu_id); 8970 xt_sync(cpuset); 8971 xt_sync(cpuset); 8972 kpreempt_enable(); 8973 sfmmu_hblk_free(&list); 8974 list = NULL; 8975 } 8976 8977 hmebp = uhmehash_reclaim_hand; 8978 if (hmebp == NULL || hmebp > &uhme_hash[UHMEHASH_SZ]) 8979 uhmehash_reclaim_hand = hmebp = uhme_hash; 8980 uhmehash_reclaim_hand += UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 8981 8982 for (i = UHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 8983 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 8984 hmeblkp = hmebp->hmeblkp; 8985 pr_hblk = NULL; 8986 while (hmeblkp) { 8987 nx_hblk = hmeblkp->hblk_next; 8988 if (!hmeblkp->hblk_vcnt && 8989 !hmeblkp->hblk_hmecnt) { 8990 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 8991 pr_hblk, &list, 0); 8992 } else { 8993 pr_hblk = hmeblkp; 8994 } 8995 hmeblkp = nx_hblk; 8996 } 8997 SFMMU_HASH_UNLOCK(hmebp); 8998 } 8999 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 9000 hmebp = uhme_hash; 9001 } 9002 9003 hmebp = khmehash_reclaim_hand; 9004 if (hmebp == NULL || hmebp > &khme_hash[KHMEHASH_SZ]) 9005 khmehash_reclaim_hand = hmebp = khme_hash; 9006 khmehash_reclaim_hand += KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; 9007 9008 for (i = KHMEHASH_SZ / sfmmu_cache_reclaim_scan_ratio; i; i--) { 9009 if (SFMMU_HASH_LOCK_TRYENTER(hmebp) != 0) { 9010 hmeblkp = hmebp->hmeblkp; 9011 pr_hblk = NULL; 9012 while (hmeblkp) { 9013 nx_hblk = hmeblkp->hblk_next; 9014 if (!hmeblkp->hblk_vcnt && 9015 !hmeblkp->hblk_hmecnt) { 9016 sfmmu_hblk_hash_rm(hmebp, hmeblkp, 9017 pr_hblk, &list, 0); 9018 } else { 9019 pr_hblk = hmeblkp; 9020 } 9021 hmeblkp = nx_hblk; 9022 } 9023 SFMMU_HASH_UNLOCK(hmebp); 9024 } 9025 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 9026 hmebp = khme_hash; 9027 } 9028 sfmmu_hblks_list_purge(&list, 0); 9029 } 9030 9031 /* 9032 * sfmmu_get_ppvcolor should become a vm_machdep or hatop interface. 9033 * same goes for sfmmu_get_addrvcolor(). 9034 * 9035 * This function will return the virtual color for the specified page. The 9036 * virtual color corresponds to this page current mapping or its last mapping. 9037 * It is used by memory allocators to choose addresses with the correct 9038 * alignment so vac consistency is automatically maintained. If the page 9039 * has no color it returns -1. 
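 * A minimal sketch of such a caller: pick a mapping address va so that
 *
 *	addr_to_vcolor(va) == sfmmu_get_ppvcolor(pp)
 *
 * before loading the new translation, so the new mapping indexes the
 * same cache lines as the page's current or previous mapping.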
9040 */ 9041 /*ARGSUSED*/ 9042 int 9043 sfmmu_get_ppvcolor(struct page *pp) 9044 { 9045 #ifdef VAC 9046 int color; 9047 9048 if (!(cache & CACHE_VAC) || PP_NEWPAGE(pp)) { 9049 return (-1); 9050 } 9051 color = PP_GET_VCOLOR(pp); 9052 ASSERT(color < mmu_btop(shm_alignment)); 9053 return (color); 9054 #else 9055 return (-1); 9056 #endif /* VAC */ 9057 } 9058 9059 /* 9060 * This function will return the desired alignment for vac consistency 9061 * (vac color) given a virtual address. If no vac is present it returns -1. 9062 */ 9063 /*ARGSUSED*/ 9064 int 9065 sfmmu_get_addrvcolor(caddr_t vaddr) 9066 { 9067 #ifdef VAC 9068 if (cache & CACHE_VAC) { 9069 return (addr_to_vcolor(vaddr)); 9070 } else { 9071 return (-1); 9072 } 9073 #else 9074 return (-1); 9075 #endif /* VAC */ 9076 } 9077 9078 #ifdef VAC 9079 /* 9080 * Check for conflicts. 9081 * A conflict exists if the new and existent mappings do not match in 9082 * their "shm_alignment fields. If conflicts exist, the existant mappings 9083 * are flushed unless one of them is locked. If one of them is locked, then 9084 * the mappings are flushed and converted to non-cacheable mappings. 9085 */ 9086 static void 9087 sfmmu_vac_conflict(struct hat *hat, caddr_t addr, page_t *pp) 9088 { 9089 struct hat *tmphat; 9090 struct sf_hment *sfhmep, *tmphme = NULL; 9091 struct hme_blk *hmeblkp; 9092 int vcolor; 9093 tte_t tte; 9094 9095 ASSERT(sfmmu_mlist_held(pp)); 9096 ASSERT(!PP_ISNC(pp)); /* page better be cacheable */ 9097 9098 vcolor = addr_to_vcolor(addr); 9099 if (PP_NEWPAGE(pp)) { 9100 PP_SET_VCOLOR(pp, vcolor); 9101 return; 9102 } 9103 9104 if (PP_GET_VCOLOR(pp) == vcolor) { 9105 return; 9106 } 9107 9108 if (!PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp)) { 9109 /* 9110 * Previous user of page had a different color 9111 * but since there are no current users 9112 * we just flush the cache and change the color. 9113 */ 9114 SFMMU_STAT(sf_pgcolor_conflict); 9115 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9116 PP_SET_VCOLOR(pp, vcolor); 9117 return; 9118 } 9119 9120 /* 9121 * If we get here we have a vac conflict with a current 9122 * mapping. VAC conflict policy is as follows. 9123 * - The default is to unload the other mappings unless: 9124 * - If we have a large mapping we uncache the page. 9125 * We need to uncache the rest of the large page too. 9126 * - If any of the mappings are locked we uncache the page. 9127 * - If the requested mapping is inconsistent 9128 * with another mapping and that mapping 9129 * is in the same address space we have to 9130 * make it non-cached. The default thing 9131 * to do is unload the inconsistent mapping 9132 * but if they are in the same address space 9133 * we run the risk of unmapping the pc or the 9134 * stack which we will use as we return to the user, 9135 * in which case we can then fault on the thing 9136 * we just unloaded and get into an infinite loop. 9137 */ 9138 if (PP_ISMAPPED_LARGE(pp)) { 9139 int sz; 9140 9141 /* 9142 * Existing mapping is for big pages. We don't unload 9143 * existing big mappings to satisfy new mappings. 9144 * Always convert all mappings to TNC. 9145 */ 9146 sz = fnd_mapping_sz(pp); 9147 pp = PP_GROUPLEADER(pp, sz); 9148 SFMMU_STAT_ADD(sf_uncache_conflict, TTEPAGES(sz)); 9149 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 9150 TTEPAGES(sz)); 9151 9152 return; 9153 } 9154 9155 /* 9156 * check if any mapping is in same as or if it is locked 9157 * since in that case we need to uncache. 
9158 */ 9159 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9160 tmphme = sfhmep->hme_next; 9161 if (IS_PAHME(sfhmep)) 9162 continue; 9163 hmeblkp = sfmmu_hmetohblk(sfhmep); 9164 tmphat = hblktosfmmu(hmeblkp); 9165 sfmmu_copytte(&sfhmep->hme_tte, &tte); 9166 ASSERT(TTE_IS_VALID(&tte)); 9167 if (hmeblkp->hblk_shared || tmphat == hat || 9168 hmeblkp->hblk_lckcnt) { 9169 /* 9170 * We have an uncache conflict 9171 */ 9172 SFMMU_STAT(sf_uncache_conflict); 9173 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1); 9174 return; 9175 } 9176 } 9177 9178 /* 9179 * We have an unload conflict 9180 * We have already checked for LARGE mappings, therefore 9181 * the remaining mapping(s) must be TTE8K. 9182 */ 9183 SFMMU_STAT(sf_unload_conflict); 9184 9185 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) { 9186 tmphme = sfhmep->hme_next; 9187 if (IS_PAHME(sfhmep)) 9188 continue; 9189 hmeblkp = sfmmu_hmetohblk(sfhmep); 9190 ASSERT(!hmeblkp->hblk_shared); 9191 (void) sfmmu_pageunload(pp, sfhmep, TTE8K); 9192 } 9193 9194 if (PP_ISMAPPED_KPM(pp)) 9195 sfmmu_kpm_vac_unload(pp, addr); 9196 9197 /* 9198 * Unloads only do TLB flushes so we need to flush the 9199 * cache here. 9200 */ 9201 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp)); 9202 PP_SET_VCOLOR(pp, vcolor); 9203 } 9204 9205 /* 9206 * Whenever a mapping is unloaded and the page is in TNC state, 9207 * we see if the page can be made cacheable again. 'pp' is 9208 * the page that we just unloaded a mapping from, the size 9209 * of mapping that was unloaded is 'ottesz'. 9210 * Remark: 9211 * The recache policy for mpss pages can leave a performance problem 9212 * under the following circumstances: 9213 * . A large page in uncached mode has just been unmapped. 9214 * . All constituent pages are TNC due to a conflicting small mapping. 9215 * . There are many other, non conflicting, small mappings around for 9216 * a lot of the constituent pages. 9217 * . We're called w/ the "old" groupleader page and the old ottesz, 9218 * but this is irrelevant, since we're no more "PP_ISMAPPED_LARGE", so 9219 * we end up w/ TTE8K or npages == 1. 9220 * . We call tst_tnc w/ the old groupleader only, and if there is no 9221 * conflict, we re-cache only this page. 9222 * . All other small mappings are not checked and will be left in TNC mode. 9223 * The problem is not very serious because: 9224 * . mpss is actually only defined for heap and stack, so the probability 9225 * is not very high that a large page mapping exists in parallel to a small 9226 * one (this is possible, but seems to be bad programming style in the 9227 * appl). 9228 * . The problem gets a little bit more serious, when those TNC pages 9229 * have to be mapped into kernel space, e.g. for networking. 9230 * . When VAC alias conflicts occur in applications, this is regarded 9231 * as an application bug. So if kstat's show them, the appl should 9232 * be changed anyway. 9233 */ 9234 void 9235 conv_tnc(page_t *pp, int ottesz) 9236 { 9237 int cursz, dosz; 9238 pgcnt_t curnpgs, dopgs; 9239 pgcnt_t pg64k; 9240 page_t *pp2; 9241 9242 /* 9243 * Determine how big a range we check for TNC and find 9244 * leader page. cursz is the size of the biggest 9245 * mapping that still exist on 'pp'. 
9246 */ 9247 if (PP_ISMAPPED_LARGE(pp)) { 9248 cursz = fnd_mapping_sz(pp); 9249 } else { 9250 cursz = TTE8K; 9251 } 9252 9253 if (ottesz >= cursz) { 9254 dosz = ottesz; 9255 pp2 = pp; 9256 } else { 9257 dosz = cursz; 9258 pp2 = PP_GROUPLEADER(pp, dosz); 9259 } 9260 9261 pg64k = TTEPAGES(TTE64K); 9262 dopgs = TTEPAGES(dosz); 9263 9264 ASSERT(dopgs == 1 || ((dopgs & (pg64k - 1)) == 0)); 9265 9266 while (dopgs != 0) { 9267 curnpgs = TTEPAGES(cursz); 9268 if (tst_tnc(pp2, curnpgs)) { 9269 SFMMU_STAT_ADD(sf_recache, curnpgs); 9270 sfmmu_page_cache_array(pp2, HAT_CACHE, CACHE_NO_FLUSH, 9271 curnpgs); 9272 } 9273 9274 ASSERT(dopgs >= curnpgs); 9275 dopgs -= curnpgs; 9276 9277 if (dopgs == 0) { 9278 break; 9279 } 9280 9281 pp2 = PP_PAGENEXT_N(pp2, curnpgs); 9282 if (((dopgs & (pg64k - 1)) == 0) && PP_ISMAPPED_LARGE(pp2)) { 9283 cursz = fnd_mapping_sz(pp2); 9284 } else { 9285 cursz = TTE8K; 9286 } 9287 } 9288 } 9289 9290 /* 9291 * Returns 1 if page(s) can be converted from TNC to cacheable setting, 9292 * returns 0 otherwise. Note that oaddr argument is valid for only 9293 * 8k pages. 9294 */ 9295 int 9296 tst_tnc(page_t *pp, pgcnt_t npages) 9297 { 9298 struct sf_hment *sfhme; 9299 struct hme_blk *hmeblkp; 9300 tte_t tte; 9301 caddr_t vaddr; 9302 int clr_valid = 0; 9303 int color, color1, bcolor; 9304 int i, ncolors; 9305 9306 ASSERT(pp != NULL); 9307 ASSERT(!(cache & CACHE_WRITEBACK)); 9308 9309 if (npages > 1) { 9310 ncolors = CACHE_NUM_COLOR; 9311 } 9312 9313 for (i = 0; i < npages; i++) { 9314 ASSERT(sfmmu_mlist_held(pp)); 9315 ASSERT(PP_ISTNC(pp)); 9316 ASSERT(PP_GET_VCOLOR(pp) == NO_VCOLOR); 9317 9318 if (PP_ISPNC(pp)) { 9319 return (0); 9320 } 9321 9322 clr_valid = 0; 9323 if (PP_ISMAPPED_KPM(pp)) { 9324 caddr_t kpmvaddr; 9325 9326 ASSERT(kpm_enable); 9327 kpmvaddr = hat_kpm_page2va(pp, 1); 9328 ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr))); 9329 color1 = addr_to_vcolor(kpmvaddr); 9330 clr_valid = 1; 9331 } 9332 9333 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9334 if (IS_PAHME(sfhme)) 9335 continue; 9336 hmeblkp = sfmmu_hmetohblk(sfhme); 9337 9338 sfmmu_copytte(&sfhme->hme_tte, &tte); 9339 ASSERT(TTE_IS_VALID(&tte)); 9340 9341 vaddr = tte_to_vaddr(hmeblkp, tte); 9342 color = addr_to_vcolor(vaddr); 9343 9344 if (npages > 1) { 9345 /* 9346 * If there is a big mapping, make sure 9347 * 8K mapping is consistent with the big 9348 * mapping. 
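 * Because a large mapping is aligned to its own page size, which is at
 * least shm_alignment whenever the VAC matters, its first constituent
 * sits at virtual color 0 and constituent i sits at color i % ncolors;
 * that is the bcolor the 8K mapping is checked against below.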
9349 */ 9350 bcolor = i % ncolors; 9351 if (color != bcolor) { 9352 return (0); 9353 } 9354 } 9355 if (!clr_valid) { 9356 clr_valid = 1; 9357 color1 = color; 9358 } 9359 9360 if (color1 != color) { 9361 return (0); 9362 } 9363 } 9364 9365 pp = PP_PAGENEXT(pp); 9366 } 9367 9368 return (1); 9369 } 9370 9371 void 9372 sfmmu_page_cache_array(page_t *pp, int flags, int cache_flush_flag, 9373 pgcnt_t npages) 9374 { 9375 kmutex_t *pmtx; 9376 int i, ncolors, bcolor; 9377 kpm_hlk_t *kpmp; 9378 cpuset_t cpuset; 9379 9380 ASSERT(pp != NULL); 9381 ASSERT(!(cache & CACHE_WRITEBACK)); 9382 9383 kpmp = sfmmu_kpm_kpmp_enter(pp, npages); 9384 pmtx = sfmmu_page_enter(pp); 9385 9386 /* 9387 * Fast path caching single unmapped page 9388 */ 9389 if (npages == 1 && !PP_ISMAPPED(pp) && !PP_ISMAPPED_KPM(pp) && 9390 flags == HAT_CACHE) { 9391 PP_CLRTNC(pp); 9392 PP_CLRPNC(pp); 9393 sfmmu_page_exit(pmtx); 9394 sfmmu_kpm_kpmp_exit(kpmp); 9395 return; 9396 } 9397 9398 /* 9399 * We need to capture all cpus in order to change cacheability 9400 * because we can't allow one cpu to access the same physical 9401 * page using a cacheable and a non-cacheable mapping at the same 9402 * time. Since we may end up walking the ism mapping list we 9403 * have to grab its lock now since we can't after all the 9404 * cpus have been captured. 9405 */ 9406 sfmmu_hat_lock_all(); 9407 mutex_enter(&ism_mlist_lock); 9408 kpreempt_disable(); 9409 cpuset = cpu_ready_set; 9410 xc_attention(cpuset); 9411 9412 if (npages > 1) { 9413 /* 9414 * Make sure all colors are flushed since the 9415 * sfmmu_page_cache() only flushes one color - 9416 * it does not know about big pages. 9417 */ 9418 ncolors = CACHE_NUM_COLOR; 9419 if (flags & HAT_TMPNC) { 9420 for (i = 0; i < ncolors; i++) { 9421 sfmmu_cache_flushcolor(i, pp->p_pagenum); 9422 } 9423 cache_flush_flag = CACHE_NO_FLUSH; 9424 } 9425 } 9426 9427 for (i = 0; i < npages; i++) { 9428 9429 ASSERT(sfmmu_mlist_held(pp)); 9430 9431 if (!(flags == HAT_TMPNC && PP_ISTNC(pp))) { 9432 9433 if (npages > 1) { 9434 bcolor = i % ncolors; 9435 } else { 9436 bcolor = NO_VCOLOR; 9437 } 9438 9439 sfmmu_page_cache(pp, flags, cache_flush_flag, 9440 bcolor); 9441 } 9442 9443 pp = PP_PAGENEXT(pp); 9444 } 9445 9446 xt_sync(cpuset); 9447 xc_dismissed(cpuset); 9448 mutex_exit(&ism_mlist_lock); 9449 sfmmu_hat_unlock_all(); 9450 sfmmu_page_exit(pmtx); 9451 sfmmu_kpm_kpmp_exit(kpmp); 9452 kpreempt_enable(); 9453 } 9454 9455 /* 9456 * This function changes the virtual cacheability of all mappings to a 9457 * particular page. When changing from uncache to cacheable the mappings will 9458 * only be changed if all of them have the same virtual color. 9459 * We need to flush the cache in all cpus. It is possible that 9460 * a process referenced a page as cacheable but has since exited 9461 * and cleared the mapping list. We still need to flush it but have no 9462 * state, so flushing on all cpus is the only alternative.
9463 */ 9464 static void 9465 sfmmu_page_cache(page_t *pp, int flags, int cache_flush_flag, int bcolor) 9466 { 9467 struct sf_hment *sfhme; 9468 struct hme_blk *hmeblkp; 9469 sfmmu_t *sfmmup; 9470 tte_t tte, ttemod; 9471 caddr_t vaddr; 9472 int ret, color; 9473 pfn_t pfn; 9474 9475 color = bcolor; 9476 pfn = pp->p_pagenum; 9477 9478 for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) { 9479 9480 if (IS_PAHME(sfhme)) 9481 continue; 9482 hmeblkp = sfmmu_hmetohblk(sfhme); 9483 9484 sfmmu_copytte(&sfhme->hme_tte, &tte); 9485 ASSERT(TTE_IS_VALID(&tte)); 9486 vaddr = tte_to_vaddr(hmeblkp, tte); 9487 color = addr_to_vcolor(vaddr); 9488 9489 #ifdef DEBUG 9490 if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) { 9491 ASSERT(color == bcolor); 9492 } 9493 #endif 9494 9495 ASSERT(flags != HAT_TMPNC || color == PP_GET_VCOLOR(pp)); 9496 9497 ttemod = tte; 9498 if (flags & (HAT_UNCACHE | HAT_TMPNC)) { 9499 TTE_CLR_VCACHEABLE(&ttemod); 9500 } else { /* flags & HAT_CACHE */ 9501 TTE_SET_VCACHEABLE(&ttemod); 9502 } 9503 ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte); 9504 if (ret < 0) { 9505 /* 9506 * Since all cpus are captured modifytte should not 9507 * fail. 9508 */ 9509 panic("sfmmu_page_cache: write to tte failed"); 9510 } 9511 9512 sfmmup = hblktosfmmu(hmeblkp); 9513 if (cache_flush_flag == CACHE_FLUSH) { 9514 /* 9515 * Flush TSBs, TLBs and caches 9516 */ 9517 if (hmeblkp->hblk_shared) { 9518 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9519 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9520 sf_region_t *rgnp; 9521 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9522 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9523 ASSERT(srdp != NULL); 9524 rgnp = srdp->srd_hmergnp[rid]; 9525 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9526 srdp, rgnp, rid); 9527 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9528 hmeblkp, 0); 9529 sfmmu_cache_flush(pfn, addr_to_vcolor(vaddr)); 9530 } else if (sfmmup->sfmmu_ismhat) { 9531 if (flags & HAT_CACHE) { 9532 SFMMU_STAT(sf_ism_recache); 9533 } else { 9534 SFMMU_STAT(sf_ism_uncache); 9535 } 9536 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9537 pfn, CACHE_FLUSH); 9538 } else { 9539 sfmmu_tlbcache_demap(vaddr, sfmmup, hmeblkp, 9540 pfn, 0, FLUSH_ALL_CPUS, CACHE_FLUSH, 1); 9541 } 9542 9543 /* 9544 * all cache entries belonging to this pfn are 9545 * now flushed. 9546 */ 9547 cache_flush_flag = CACHE_NO_FLUSH; 9548 } else { 9549 /* 9550 * Flush only TSBs and TLBs. 
9551 */ 9552 if (hmeblkp->hblk_shared) { 9553 sf_srd_t *srdp = (sf_srd_t *)sfmmup; 9554 uint_t rid = hmeblkp->hblk_tag.htag_rid; 9555 sf_region_t *rgnp; 9556 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 9557 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 9558 ASSERT(srdp != NULL); 9559 rgnp = srdp->srd_hmergnp[rid]; 9560 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, 9561 srdp, rgnp, rid); 9562 (void) sfmmu_rgntlb_demap(vaddr, rgnp, 9563 hmeblkp, 0); 9564 } else if (sfmmup->sfmmu_ismhat) { 9565 if (flags & HAT_CACHE) { 9566 SFMMU_STAT(sf_ism_recache); 9567 } else { 9568 SFMMU_STAT(sf_ism_uncache); 9569 } 9570 sfmmu_ismtlbcache_demap(vaddr, sfmmup, hmeblkp, 9571 pfn, CACHE_NO_FLUSH); 9572 } else { 9573 sfmmu_tlb_demap(vaddr, sfmmup, hmeblkp, 0, 1); 9574 } 9575 } 9576 } 9577 9578 if (PP_ISMAPPED_KPM(pp)) 9579 sfmmu_kpm_page_cache(pp, flags, cache_flush_flag); 9580 9581 switch (flags) { 9582 9583 default: 9584 panic("sfmmu_pagecache: unknown flags"); 9585 break; 9586 9587 case HAT_CACHE: 9588 PP_CLRTNC(pp); 9589 PP_CLRPNC(pp); 9590 PP_SET_VCOLOR(pp, color); 9591 break; 9592 9593 case HAT_TMPNC: 9594 PP_SETTNC(pp); 9595 PP_SET_VCOLOR(pp, NO_VCOLOR); 9596 break; 9597 9598 case HAT_UNCACHE: 9599 PP_SETPNC(pp); 9600 PP_CLRTNC(pp); 9601 PP_SET_VCOLOR(pp, NO_VCOLOR); 9602 break; 9603 } 9604 } 9605 #endif /* VAC */ 9606 9607 9608 /* 9609 * Wrapper routine used to return a context. 9610 * 9611 * It's the responsibility of the caller to guarantee that the 9612 * process serializes on calls here by taking the HAT lock for 9613 * the hat. 9614 * 9615 */ 9616 static void 9617 sfmmu_get_ctx(sfmmu_t *sfmmup) 9618 { 9619 mmu_ctx_t *mmu_ctxp; 9620 uint_t pstate_save; 9621 int ret; 9622 9623 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9624 ASSERT(sfmmup != ksfmmup); 9625 9626 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)) { 9627 sfmmu_setup_tsbinfo(sfmmup); 9628 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ALLCTX_INVALID); 9629 } 9630 9631 kpreempt_disable(); 9632 9633 mmu_ctxp = CPU_MMU_CTXP(CPU); 9634 ASSERT(mmu_ctxp); 9635 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms); 9636 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]); 9637 9638 /* 9639 * Do a wrap-around if cnum reaches the max # cnum supported by a MMU. 9640 */ 9641 if (mmu_ctxp->mmu_cnum == mmu_ctxp->mmu_nctxs) 9642 sfmmu_ctx_wrap_around(mmu_ctxp, B_TRUE); 9643 9644 /* 9645 * Let the MMU set up the page sizes to use for 9646 * this context in the TLB. Don't program 2nd dtlb for ism hat. 9647 */ 9648 if ((&mmu_set_ctx_page_sizes) && (sfmmup->sfmmu_ismhat == 0)) { 9649 mmu_set_ctx_page_sizes(sfmmup); 9650 } 9651 9652 /* 9653 * sfmmu_alloc_ctx and sfmmu_load_mmustate will be performed with 9654 * interrupts disabled to prevent race condition with wrap-around 9655 * ctx invalidatation. In sun4v, ctx invalidation also involves 9656 * a HV call to set the number of TSBs to 0. If interrupts are not 9657 * disabled until after sfmmu_load_mmustate is complete TSBs may 9658 * become assigned to INVALID_CONTEXT. This is not allowed. 
9659 */ 9660 pstate_save = sfmmu_disable_intrs(); 9661 9662 if (sfmmu_alloc_ctx(sfmmup, 1, CPU, SFMMU_PRIVATE) && 9663 sfmmup->sfmmu_scdp != NULL) { 9664 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 9665 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 9666 ret = sfmmu_alloc_ctx(scsfmmup, 1, CPU, SFMMU_SHARED); 9667 /* debug purpose only */ 9668 ASSERT(!ret || scsfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 9669 != INVALID_CONTEXT); 9670 } 9671 sfmmu_load_mmustate(sfmmup); 9672 9673 sfmmu_enable_intrs(pstate_save); 9674 9675 kpreempt_enable(); 9676 } 9677 9678 /* 9679 * When all cnums are used up in a MMU, cnum will wrap around to the 9680 * next generation and start from 2. 9681 */ 9682 static void 9683 sfmmu_ctx_wrap_around(mmu_ctx_t *mmu_ctxp, boolean_t reset_cnum) 9684 { 9685 9686 /* caller must have disabled the preemption */ 9687 ASSERT(curthread->t_preempt >= 1); 9688 ASSERT(mmu_ctxp != NULL); 9689 9690 /* acquire Per-MMU (PM) spin lock */ 9691 mutex_enter(&mmu_ctxp->mmu_lock); 9692 9693 /* re-check to see if wrap-around is needed */ 9694 if (mmu_ctxp->mmu_cnum < mmu_ctxp->mmu_nctxs) 9695 goto done; 9696 9697 SFMMU_MMU_STAT(mmu_wrap_around); 9698 9699 /* update gnum */ 9700 ASSERT(mmu_ctxp->mmu_gnum != 0); 9701 mmu_ctxp->mmu_gnum++; 9702 if (mmu_ctxp->mmu_gnum == 0 || 9703 mmu_ctxp->mmu_gnum > MAX_SFMMU_GNUM_VAL) { 9704 cmn_err(CE_PANIC, "mmu_gnum of mmu_ctx 0x%p is out of bound.", 9705 (void *)mmu_ctxp); 9706 } 9707 9708 if (mmu_ctxp->mmu_ncpus > 1) { 9709 cpuset_t cpuset; 9710 9711 membar_enter(); /* make sure updated gnum visible */ 9712 9713 SFMMU_XCALL_STATS(NULL); 9714 9715 /* xcall to others on the same MMU to invalidate ctx */ 9716 cpuset = mmu_ctxp->mmu_cpuset; 9717 ASSERT(CPU_IN_SET(cpuset, CPU->cpu_id) || !reset_cnum); 9718 CPUSET_DEL(cpuset, CPU->cpu_id); 9719 CPUSET_AND(cpuset, cpu_ready_set); 9720 9721 /* 9722 * Pass in INVALID_CONTEXT as the first parameter to 9723 * sfmmu_raise_tsb_exception, which invalidates the context 9724 * of any process running on the CPUs in the MMU. 9725 */ 9726 xt_some(cpuset, sfmmu_raise_tsb_exception, 9727 INVALID_CONTEXT, INVALID_CONTEXT); 9728 xt_sync(cpuset); 9729 9730 SFMMU_MMU_STAT(mmu_tsb_raise_exception); 9731 } 9732 9733 if (sfmmu_getctx_sec() != INVALID_CONTEXT) { 9734 sfmmu_setctx_sec(INVALID_CONTEXT); 9735 sfmmu_clear_utsbinfo(); 9736 } 9737 9738 /* 9739 * No xcall is needed here. For sun4u systems all CPUs in context 9740 * domain share a single physical MMU therefore it's enough to flush 9741 * TLB on local CPU. On sun4v systems we use 1 global context 9742 * domain and flush all remote TLBs in sfmmu_raise_tsb_exception 9743 * handler. Note that vtag_flushall_uctxs() is called 9744 * for Ultra II machine, where the equivalent flushall functionality 9745 * is implemented in SW, and only user ctx TLB entries are flushed. 9746 */ 9747 if (&vtag_flushall_uctxs != NULL) { 9748 vtag_flushall_uctxs(); 9749 } else { 9750 vtag_flushall(); 9751 } 9752 9753 /* reset mmu cnum, skips cnum 0 and 1 */ 9754 if (reset_cnum == B_TRUE) 9755 mmu_ctxp->mmu_cnum = NUM_LOCKED_CTXS; 9756 9757 done: 9758 mutex_exit(&mmu_ctxp->mmu_lock); 9759 } 9760 9761 9762 /* 9763 * For multi-threaded process, set the process context to INVALID_CONTEXT 9764 * so that it faults and reloads the MMU state from TL=0. For single-threaded 9765 * process, we can just load the MMU state directly without having to 9766 * set context invalid. Caller must hold the hat lock since we don't 9767 * acquire it here. 
9768 */ 9769 static void 9770 sfmmu_sync_mmustate(sfmmu_t *sfmmup) 9771 { 9772 uint_t cnum; 9773 uint_t pstate_save; 9774 9775 ASSERT(sfmmup != ksfmmup); 9776 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9777 9778 kpreempt_disable(); 9779 9780 /* 9781 * We check whether the passed-in sfmmup is the same as the 9782 * current running proc. This is to make sure the current proc 9783 * stays single-threaded if it already is. 9784 */ 9785 if ((sfmmup == curthread->t_procp->p_as->a_hat) && 9786 (curthread->t_procp->p_lwpcnt == 1)) { 9787 /* single-thread */ 9788 cnum = sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum; 9789 if (cnum != INVALID_CONTEXT) { 9790 uint_t curcnum; 9791 /* 9792 * Disable interrupts to prevent race condition 9793 * with sfmmu_ctx_wrap_around ctx invalidation. 9794 * In sun4v, ctx invalidation involves setting 9795 * TSB to NULL, hence, interrupts should be disabled 9796 * until after sfmmu_load_mmustate is completed. 9797 */ 9798 pstate_save = sfmmu_disable_intrs(); 9799 curcnum = sfmmu_getctx_sec(); 9800 if (curcnum == cnum) 9801 sfmmu_load_mmustate(sfmmup); 9802 sfmmu_enable_intrs(pstate_save); 9803 ASSERT(curcnum == cnum || curcnum == INVALID_CONTEXT); 9804 } 9805 } else { 9806 /* 9807 * multi-thread 9808 * or when sfmmup is not the same as the curproc. 9809 */ 9810 sfmmu_invalidate_ctx(sfmmup); 9811 } 9812 9813 kpreempt_enable(); 9814 } 9815 9816 9817 /* 9818 * Replace the specified TSB with a new TSB. This function gets called when 9819 * we grow, shrink or swapin a TSB. When swapping in a TSB (TSB_SWAPIN), the 9820 * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB 9821 * (8K). 9822 * 9823 * Caller must hold the HAT lock, but should assume any tsb_info 9824 * pointers it has are no longer valid after calling this function. 9825 * 9826 * Return values: 9827 * TSB_ALLOCFAIL Failed to allocate a TSB, due to memory constraints 9828 * TSB_LOSTRACE HAT is busy, i.e. another thread is already doing 9829 * something to this tsbinfo/TSB 9830 * TSB_SUCCESS Operation succeeded 9831 */ 9832 static tsb_replace_rc_t 9833 sfmmu_replace_tsb(sfmmu_t *sfmmup, struct tsb_info *old_tsbinfo, uint_t szc, 9834 hatlock_t *hatlockp, uint_t flags) 9835 { 9836 struct tsb_info *new_tsbinfo = NULL; 9837 struct tsb_info *curtsb, *prevtsb; 9838 uint_t tte_sz_mask; 9839 int i; 9840 9841 ASSERT(sfmmup != ksfmmup); 9842 ASSERT(sfmmup->sfmmu_ismhat == 0); 9843 ASSERT(sfmmu_hat_lock_held(sfmmup)); 9844 ASSERT(szc <= tsb_max_growsize); 9845 9846 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_BUSY)) 9847 return (TSB_LOSTRACE); 9848 9849 /* 9850 * Find the tsb_info ahead of this one in the list, and 9851 * also make sure that the tsb_info passed in really 9852 * exists! 9853 */ 9854 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9855 curtsb != old_tsbinfo && curtsb != NULL; 9856 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9857 ; 9858 ASSERT(curtsb != NULL); 9859 9860 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9861 /* 9862 * The process is swapped out, so just set the new size 9863 * code. When it swaps back in, we'll allocate a new one 9864 * of the new chosen size. 9865 */ 9866 curtsb->tsb_szc = szc; 9867 return (TSB_SUCCESS); 9868 } 9869 SFMMU_FLAGS_SET(sfmmup, HAT_BUSY); 9870 9871 tte_sz_mask = old_tsbinfo->tsb_ttesz_mask; 9872 9873 /* 9874 * All initialization is done inside of sfmmu_tsbinfo_alloc(). 9875 * If we fail to allocate a TSB, exit. 9876 * 9877 * If tsb grows with new tsb size > 4M and old tsb size < 4M, 9878 * then try 4M slab after the initial alloc fails.
9879 * 9880 * If tsb swapin with tsb size > 4M, then try 4M after the 9881 * initial alloc fails. 9882 */ 9883 sfmmu_hat_exit(hatlockp); 9884 if (sfmmu_tsbinfo_alloc(&new_tsbinfo, szc, 9885 tte_sz_mask, flags, sfmmup) && 9886 (!(flags & (TSB_GROW | TSB_SWAPIN)) || (szc <= TSB_4M_SZCODE) || 9887 (!(flags & TSB_SWAPIN) && 9888 (old_tsbinfo->tsb_szc >= TSB_4M_SZCODE)) || 9889 sfmmu_tsbinfo_alloc(&new_tsbinfo, TSB_4M_SZCODE, 9890 tte_sz_mask, flags, sfmmup))) { 9891 (void) sfmmu_hat_enter(sfmmup); 9892 if (!(flags & TSB_SWAPIN)) 9893 SFMMU_STAT(sf_tsb_resize_failures); 9894 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9895 return (TSB_ALLOCFAIL); 9896 } 9897 (void) sfmmu_hat_enter(sfmmup); 9898 9899 /* 9900 * Re-check to make sure somebody else didn't muck with us while we 9901 * didn't hold the HAT lock. If the process swapped out, fine, just 9902 * exit; this can happen if we try to shrink the TSB from the context 9903 * of another process (such as on an ISM unmap), though it is rare. 9904 */ 9905 if (!(flags & TSB_SWAPIN) && SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 9906 SFMMU_STAT(sf_tsb_resize_failures); 9907 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9908 sfmmu_hat_exit(hatlockp); 9909 sfmmu_tsbinfo_free(new_tsbinfo); 9910 (void) sfmmu_hat_enter(sfmmup); 9911 return (TSB_LOSTRACE); 9912 } 9913 9914 #ifdef DEBUG 9915 /* Reverify that the tsb_info still exists.. for debugging only */ 9916 for (prevtsb = NULL, curtsb = sfmmup->sfmmu_tsb; 9917 curtsb != old_tsbinfo && curtsb != NULL; 9918 prevtsb = curtsb, curtsb = curtsb->tsb_next) 9919 ; 9920 ASSERT(curtsb != NULL); 9921 #endif /* DEBUG */ 9922 9923 /* 9924 * Quiesce any CPUs running this process on their next TLB miss 9925 * so they atomically see the new tsb_info. We temporarily set the 9926 * context to invalid context so new threads that come on processor 9927 * after we do the xcall to cpusran will also serialize behind the 9928 * HAT lock on TLB miss and will see the new TSB. Since this short 9929 * race with a new thread coming on processor is relatively rare, 9930 * this synchronization mechanism should be cheaper than always 9931 * pausing all CPUs for the duration of the setup, which is what 9932 * the old implementation did. This is particuarly true if we are 9933 * copying a huge chunk of memory around during that window. 9934 * 9935 * The memory barriers are to make sure things stay consistent 9936 * with resume() since it does not hold the HAT lock while 9937 * walking the list of tsb_info structures. 9938 */ 9939 if ((flags & TSB_SWAPIN) != TSB_SWAPIN) { 9940 /* The TSB is either growing or shrinking. */ 9941 sfmmu_invalidate_ctx(sfmmup); 9942 } else { 9943 /* 9944 * It is illegal to swap in TSBs from a process other 9945 * than a process being swapped in. This in turn 9946 * implies we do not have a valid MMU context here 9947 * since a process needs one to resolve translation 9948 * misses. 
9949 */ 9950 ASSERT(curthread->t_procp->p_as->a_hat == sfmmup); 9951 } 9952 9953 #ifdef DEBUG 9954 ASSERT(max_mmu_ctxdoms > 0); 9955 9956 /* 9957 * Process should have INVALID_CONTEXT on all MMUs 9958 */ 9959 for (i = 0; i < max_mmu_ctxdoms; i++) { 9960 9961 ASSERT(sfmmup->sfmmu_ctxs[i].cnum == INVALID_CONTEXT); 9962 } 9963 #endif 9964 9965 new_tsbinfo->tsb_next = old_tsbinfo->tsb_next; 9966 membar_stst(); /* strict ordering required */ 9967 if (prevtsb) 9968 prevtsb->tsb_next = new_tsbinfo; 9969 else 9970 sfmmup->sfmmu_tsb = new_tsbinfo; 9971 membar_enter(); /* make sure new TSB globally visible */ 9972 9973 /* 9974 * We need to migrate TSB entries from the old TSB to the new TSB 9975 * if tsb_remap_ttes is set and the TSB is growing. 9976 */ 9977 if (tsb_remap_ttes && ((flags & TSB_GROW) == TSB_GROW)) 9978 sfmmu_copy_tsb(old_tsbinfo, new_tsbinfo); 9979 9980 SFMMU_FLAGS_CLEAR(sfmmup, HAT_BUSY); 9981 9982 /* 9983 * Drop the HAT lock to free our old tsb_info. 9984 */ 9985 sfmmu_hat_exit(hatlockp); 9986 9987 if ((flags & TSB_GROW) == TSB_GROW) { 9988 SFMMU_STAT(sf_tsb_grow); 9989 } else if ((flags & TSB_SHRINK) == TSB_SHRINK) { 9990 SFMMU_STAT(sf_tsb_shrink); 9991 } 9992 9993 sfmmu_tsbinfo_free(old_tsbinfo); 9994 9995 (void) sfmmu_hat_enter(sfmmup); 9996 return (TSB_SUCCESS); 9997 } 9998 9999 /* 10000 * This function will re-program hat pgsz array, and invalidate the 10001 * process' context, forcing the process to switch to another 10002 * context on the next TLB miss, and therefore start using the 10003 * TLB that is reprogrammed for the new page sizes. 10004 */ 10005 void 10006 sfmmu_reprog_pgsz_arr(sfmmu_t *sfmmup, uint8_t *tmp_pgsz) 10007 { 10008 int i; 10009 hatlock_t *hatlockp = NULL; 10010 10011 hatlockp = sfmmu_hat_enter(sfmmup); 10012 /* USIII+-IV+ optimization, requires hat lock */ 10013 if (tmp_pgsz) { 10014 for (i = 0; i < mmu_page_sizes; i++) 10015 sfmmup->sfmmu_pgsz[i] = tmp_pgsz[i]; 10016 } 10017 SFMMU_STAT(sf_tlb_reprog_pgsz); 10018 10019 sfmmu_invalidate_ctx(sfmmup); 10020 10021 sfmmu_hat_exit(hatlockp); 10022 } 10023 10024 /* 10025 * The scd_rttecnt field in the SCD must be updated to take account of the 10026 * regions which it contains. 10027 */ 10028 static void 10029 sfmmu_set_scd_rttecnt(sf_srd_t *srdp, sf_scd_t *scdp) 10030 { 10031 uint_t rid; 10032 uint_t i, j; 10033 ulong_t w; 10034 sf_region_t *rgnp; 10035 10036 ASSERT(srdp != NULL); 10037 10038 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 10039 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 10040 continue; 10041 } 10042 10043 j = 0; 10044 while (w) { 10045 if (!(w & 0x1)) { 10046 j++; 10047 w >>= 1; 10048 continue; 10049 } 10050 rid = (i << BT_ULSHIFT) | j; 10051 j++; 10052 w >>= 1; 10053 10054 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 10055 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 10056 rgnp = srdp->srd_hmergnp[rid]; 10057 ASSERT(rgnp->rgn_refcnt > 0); 10058 ASSERT(rgnp->rgn_id == rid); 10059 10060 scdp->scd_rttecnt[rgnp->rgn_pgszc] += 10061 rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 10062 10063 /* 10064 * Maintain the tsb0 inflation cnt for the regions 10065 * in the SCD. 
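 * For each region mapped with 4M or larger pages the count below grows
 * by one quarter of the region's span in 8K pages, i.e.
 * rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); for example (illustrative),
 * a 32MB region mapped with 4M pages adds 32MB / 8K / 4 = 1024.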
10066 */ 10067 if (rgnp->rgn_pgszc >= TTE4M) { 10068 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt += 10069 rgnp->rgn_size >> 10070 (TTE_PAGE_SHIFT(TTE8K) + 2); 10071 } 10072 } 10073 } 10074 } 10075 10076 /* 10077 * This function assumes that there are either four or six supported page 10078 * sizes and at most two programmable TLBs, so we need to decide which 10079 * page sizes are most important and then tell the MMU layer so it 10080 * can adjust the TLB page sizes accordingly (if supported). 10081 * 10082 * If these assumptions change, this function will need to be 10083 * updated to support whatever the new limits are. 10084 * 10085 * The growing flag is nonzero if we are growing the address space, 10086 * and zero if it is shrinking. This allows us to decide whether 10087 * to grow or shrink our TSB, depending upon available memory 10088 * conditions. 10089 */ 10090 static void 10091 sfmmu_check_page_sizes(sfmmu_t *sfmmup, int growing) 10092 { 10093 uint64_t ttecnt[MMU_PAGE_SIZES]; 10094 uint64_t tte8k_cnt, tte4m_cnt; 10095 uint8_t i; 10096 int sectsb_thresh; 10097 10098 /* 10099 * Kernel threads, processes with small address spaces not using 10100 * large pages, and dummy ISM HATs need not apply. 10101 */ 10102 if (sfmmup == ksfmmup || sfmmup->sfmmu_ismhat != NULL) 10103 return; 10104 10105 if (!SFMMU_LGPGS_INUSE(sfmmup) && 10106 sfmmup->sfmmu_ttecnt[TTE8K] <= tsb_rss_factor) 10107 return; 10108 10109 for (i = 0; i < mmu_page_sizes; i++) { 10110 ttecnt[i] = sfmmup->sfmmu_ttecnt[i] + 10111 sfmmup->sfmmu_ismttecnt[i]; 10112 } 10113 10114 /* Check pagesizes in use, and possibly reprogram DTLB. */ 10115 if (&mmu_check_page_sizes) 10116 mmu_check_page_sizes(sfmmup, ttecnt); 10117 10118 /* 10119 * Calculate the number of 8k ttes to represent the span of these 10120 * pages. 10121 */ 10122 tte8k_cnt = ttecnt[TTE8K] + 10123 (ttecnt[TTE64K] << (MMU_PAGESHIFT64K - MMU_PAGESHIFT)) + 10124 (ttecnt[TTE512K] << (MMU_PAGESHIFT512K - MMU_PAGESHIFT)); 10125 if (mmu_page_sizes == max_mmu_page_sizes) { 10126 tte4m_cnt = ttecnt[TTE4M] + 10127 (ttecnt[TTE32M] << (MMU_PAGESHIFT32M - MMU_PAGESHIFT4M)) + 10128 (ttecnt[TTE256M] << (MMU_PAGESHIFT256M - MMU_PAGESHIFT4M)); 10129 } else { 10130 tte4m_cnt = ttecnt[TTE4M]; 10131 } 10132 10133 /* 10134 * Inflate tte8k_cnt to allow for region large page allocation failure. 10135 */ 10136 tte8k_cnt += sfmmup->sfmmu_tsb0_4minflcnt; 10137 10138 /* 10139 * Inflate TSB sizes by a factor of 2 if this process 10140 * uses 4M text pages to minimize extra conflict misses 10141 * in the first TSB since without counting text pages 10142 * 8K TSB may become too small. 10143 * 10144 * Also double the size of the second TSB to minimize 10145 * extra conflict misses due to competition between 4M text pages 10146 * and data pages. 10147 * 10148 * We need to adjust the second TSB allocation threshold by the 10149 * inflation factor, since there is no point in creating a second 10150 * TSB when we know all the mappings can fit in the I/D TLBs. 10151 */ 10152 sectsb_thresh = tsb_sectsb_threshold; 10153 if (sfmmup->sfmmu_flags & HAT_4MTEXT_FLAG) { 10154 tte8k_cnt <<= 1; 10155 tte4m_cnt <<= 1; 10156 sectsb_thresh <<= 1; 10157 } 10158 10159 /* 10160 * Check to see if our TSB is the right size; we may need to 10161 * grow or shrink it. If the process is small, our work is 10162 * finished at this point. 
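 * As a purely illustrative example, ignoring the inflation and 4M-text
 * adjustments above: 1000 8K ttes, 16 64K ttes and 2 4M ttes give
 * tte8k_cnt = 1000 + 16 * 8 = 1128 and tte4m_cnt = 2, so only the first
 * TSB would be resized unless tte4m_cnt exceeded sectsb_thresh.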
10163 */ 10164 if (tte8k_cnt <= tsb_rss_factor && tte4m_cnt <= sectsb_thresh) { 10165 return; 10166 } 10167 sfmmu_size_tsb(sfmmup, growing, tte8k_cnt, tte4m_cnt, sectsb_thresh); 10168 } 10169 10170 static void 10171 sfmmu_size_tsb(sfmmu_t *sfmmup, int growing, uint64_t tte8k_cnt, 10172 uint64_t tte4m_cnt, int sectsb_thresh) 10173 { 10174 int tsb_bits; 10175 uint_t tsb_szc; 10176 struct tsb_info *tsbinfop; 10177 hatlock_t *hatlockp = NULL; 10178 10179 hatlockp = sfmmu_hat_enter(sfmmup); 10180 ASSERT(hatlockp != NULL); 10181 tsbinfop = sfmmup->sfmmu_tsb; 10182 ASSERT(tsbinfop != NULL); 10183 10184 /* 10185 * If we're growing, select the size based on RSS. If we're 10186 * shrinking, leave some room so we don't have to turn around and 10187 * grow again immediately. 10188 */ 10189 if (growing) 10190 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 10191 else 10192 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt << 1); 10193 10194 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10195 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10196 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10197 hatlockp, TSB_SHRINK); 10198 } else if (growing && tsb_szc > tsbinfop->tsb_szc && TSB_OK_GROW()) { 10199 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, tsb_szc, 10200 hatlockp, TSB_GROW); 10201 } 10202 tsbinfop = sfmmup->sfmmu_tsb; 10203 10204 /* 10205 * With the TLB and first TSB out of the way, we need to see if 10206 * we need a second TSB for 4M pages. If we managed to reprogram 10207 * the TLB page sizes above, the process will start using this new 10208 * TSB right away; otherwise, it will start using it on the next 10209 * context switch. Either way, it's no big deal so there's no 10210 * synchronization with the trap handlers here unless we grow the 10211 * TSB (in which case it's required to prevent using the old one 10212 * after it's freed). Note: second tsb is required for 32M/256M 10213 * page sizes. 10214 */ 10215 if (tte4m_cnt > sectsb_thresh) { 10216 /* 10217 * If we're growing, select the size based on RSS. If we're 10218 * shrinking, leave some room so we don't have to turn 10219 * around and grow again immediately. 10220 */ 10221 if (growing) 10222 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 10223 else 10224 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt << 1); 10225 if (tsbinfop->tsb_next == NULL) { 10226 struct tsb_info *newtsb; 10227 int allocflags = SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)? 10228 0 : TSB_ALLOC; 10229 10230 sfmmu_hat_exit(hatlockp); 10231 10232 /* 10233 * Try to allocate a TSB for 4[32|256]M pages. If we 10234 * can't get the size we want, retry w/a minimum sized 10235 * TSB. If that still didn't work, give up; we can 10236 * still run without one. 10237 */ 10238 tsb_bits = (mmu_page_sizes == max_mmu_page_sizes)? 10239 TSB4M|TSB32M|TSB256M:TSB4M; 10240 if ((sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, tsb_bits, 10241 allocflags, sfmmup)) && 10242 (tsb_szc <= TSB_4M_SZCODE || 10243 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 10244 tsb_bits, allocflags, sfmmup)) && 10245 sfmmu_tsbinfo_alloc(&newtsb, TSB_MIN_SZCODE, 10246 tsb_bits, allocflags, sfmmup)) { 10247 return; 10248 } 10249 10250 hatlockp = sfmmu_hat_enter(sfmmup); 10251 10252 sfmmu_invalidate_ctx(sfmmup); 10253 10254 if (sfmmup->sfmmu_tsb->tsb_next == NULL) { 10255 sfmmup->sfmmu_tsb->tsb_next = newtsb; 10256 SFMMU_STAT(sf_tsb_sectsb_create); 10257 sfmmu_hat_exit(hatlockp); 10258 return; 10259 } else { 10260 /* 10261 * It's annoying, but possible for us 10262 * to get here.. 
we dropped the HAT lock 10263 * because of locking order in the kmem 10264 * allocator, and while we were off getting 10265 * our memory, some other thread decided to 10266 * do us a favor and won the race to get a 10267 * second TSB for this process. Sigh. 10268 */ 10269 sfmmu_hat_exit(hatlockp); 10270 sfmmu_tsbinfo_free(newtsb); 10271 return; 10272 } 10273 } 10274 10275 /* 10276 * We have a second TSB, see if it's big enough. 10277 */ 10278 tsbinfop = tsbinfop->tsb_next; 10279 10280 /* 10281 * Check to see if our second TSB is the right size; 10282 * we may need to grow or shrink it. 10283 * To prevent thrashing (e.g. growing the TSB on a 10284 * subsequent map operation), only try to shrink if 10285 * the TSB reach exceeds twice the virtual address 10286 * space size. 10287 */ 10288 if (!growing && (tsb_szc < tsbinfop->tsb_szc) && 10289 (tsb_szc >= default_tsb_size) && TSB_OK_SHRINK()) { 10290 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10291 tsb_szc, hatlockp, TSB_SHRINK); 10292 } else if (growing && tsb_szc > tsbinfop->tsb_szc && 10293 TSB_OK_GROW()) { 10294 (void) sfmmu_replace_tsb(sfmmup, tsbinfop, 10295 tsb_szc, hatlockp, TSB_GROW); 10296 } 10297 } 10298 10299 sfmmu_hat_exit(hatlockp); 10300 } 10301 10302 /* 10303 * Free up a sfmmu 10304 * Since the sfmmu is currently embedded in the hat struct we simply zero 10305 * out our fields and free up the ism map blk list if any. 10306 */ 10307 static void 10308 sfmmu_free_sfmmu(sfmmu_t *sfmmup) 10309 { 10310 ism_blk_t *blkp, *nx_blkp; 10311 #ifdef DEBUG 10312 ism_map_t *map; 10313 int i; 10314 #endif 10315 10316 ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0); 10317 ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0); 10318 ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0); 10319 ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0); 10320 ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0); 10321 ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0); 10322 ASSERT(SF_RGNMAP_ISNULL(sfmmup)); 10323 10324 sfmmup->sfmmu_free = 0; 10325 sfmmup->sfmmu_ismhat = 0; 10326 10327 blkp = sfmmup->sfmmu_iblk; 10328 sfmmup->sfmmu_iblk = NULL; 10329 10330 while (blkp) { 10331 #ifdef DEBUG 10332 map = blkp->iblk_maps; 10333 for (i = 0; i < ISM_MAP_SLOTS; i++) { 10334 ASSERT(map[i].imap_seg == 0); 10335 ASSERT(map[i].imap_ismhat == NULL); 10336 ASSERT(map[i].imap_ment == NULL); 10337 } 10338 #endif 10339 nx_blkp = blkp->iblk_next; 10340 blkp->iblk_next = NULL; 10341 blkp->iblk_nextpa = (uint64_t)-1; 10342 kmem_cache_free(ism_blk_cache, blkp); 10343 blkp = nx_blkp; 10344 } 10345 } 10346 10347 /* 10348 * Locking primitves accessed by HATLOCK macros 10349 */ 10350 10351 #define SFMMU_SPL_MTX (0x0) 10352 #define SFMMU_ML_MTX (0x1) 10353 10354 #define SFMMU_MLSPL_MTX(type, pg) (((type) == SFMMU_SPL_MTX) ? \ 10355 SPL_HASH(pg) : MLIST_HASH(pg)) 10356 10357 kmutex_t * 10358 sfmmu_page_enter(struct page *pp) 10359 { 10360 return (sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)); 10361 } 10362 10363 void 10364 sfmmu_page_exit(kmutex_t *spl) 10365 { 10366 mutex_exit(spl); 10367 } 10368 10369 int 10370 sfmmu_page_spl_held(struct page *pp) 10371 { 10372 return (sfmmu_mlspl_held(pp, SFMMU_SPL_MTX)); 10373 } 10374 10375 kmutex_t * 10376 sfmmu_mlist_enter(struct page *pp) 10377 { 10378 return (sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)); 10379 } 10380 10381 void 10382 sfmmu_mlist_exit(kmutex_t *mml) 10383 { 10384 mutex_exit(mml); 10385 } 10386 10387 int 10388 sfmmu_mlist_held(struct page *pp) 10389 { 10390 10391 return (sfmmu_mlspl_held(pp, SFMMU_ML_MTX)); 10392 } 10393 10394 /* 10395 * Common code for sfmmu_mlist_enter() and sfmmu_page_enter(). 
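 * Both are thin wrappers that differ only in the type selector they
 * pass down (see the wrappers above):
 *	sfmmu_mlist_enter(pp)	-> sfmmu_mlspl_enter(pp, SFMMU_ML_MTX)
 *	sfmmu_page_enter(pp)	-> sfmmu_mlspl_enter(pp, SFMMU_SPL_MTX)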
For 10396 * sfmmu_mlist_enter() case mml_table lock array is used and for 10397 * sfmmu_page_enter() sfmmu_page_lock lock array is used. 10398 * 10399 * The lock is taken on a root page so that it protects an operation on all 10400 * constituent pages of a large page pp belongs to. 10401 * 10402 * The routine takes a lock from the appropriate array. The lock is determined 10403 * by hashing the root page. After taking the lock this routine checks if the 10404 * root page has the same size code that was used to determine the root (i.e 10405 * that root hasn't changed). If root page has the expected p_szc field we 10406 * have the right lock and it's returned to the caller. If root's p_szc 10407 * decreased we release the lock and retry from the beginning. This case can 10408 * happen due to hat_page_demote() decreasing p_szc between our load of p_szc 10409 * value and taking the lock. The number of retries due to p_szc decrease is 10410 * limited by the maximum p_szc value. If p_szc is 0 we return the lock 10411 * determined by hashing pp itself. 10412 * 10413 * If our caller doesn't hold a SE_SHARED or SE_EXCL lock on pp it's also 10414 * possible that p_szc can increase. To increase p_szc a thread has to lock 10415 * all constituent pages EXCL and do hat_pageunload() on all of them. All the 10416 * callers that don't hold a page locked recheck if hmeblk through which pp 10417 * was found still maps this pp. If it doesn't map it anymore returned lock 10418 * is immediately dropped. Therefore if sfmmu_mlspl_enter() hits the case of 10419 * p_szc increase after taking the lock it returns this lock without further 10420 * retries because in this case the caller doesn't care about which lock was 10421 * taken. The caller will drop it right away. 10422 * 10423 * After the routine returns it's guaranteed that hat_page_demote() can't 10424 * change p_szc field of any of constituent pages of a large page pp belongs 10425 * to as long as pp was either locked at least SHARED prior to this call or 10426 * the caller finds that hment that pointed to this pp still references this 10427 * pp (this also assumes that the caller holds hme hash bucket lock so that 10428 * the same pp can't be remapped into the same hmeblk after it was unmapped by 10429 * hat_pageunload()). 10430 */ 10431 static kmutex_t * 10432 sfmmu_mlspl_enter(struct page *pp, int type) 10433 { 10434 kmutex_t *mtx; 10435 uint_t prev_rszc = UINT_MAX; 10436 page_t *rootpp; 10437 uint_t szc; 10438 uint_t rszc; 10439 uint_t pszc = pp->p_szc; 10440 10441 ASSERT(pp != NULL); 10442 10443 again: 10444 if (pszc == 0) { 10445 mtx = SFMMU_MLSPL_MTX(type, pp); 10446 mutex_enter(mtx); 10447 return (mtx); 10448 } 10449 10450 /* The lock lives in the root page */ 10451 rootpp = PP_GROUPLEADER(pp, pszc); 10452 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10453 mutex_enter(mtx); 10454 10455 /* 10456 * Return mml in the following 3 cases: 10457 * 10458 * 1) If pp itself is root since if its p_szc decreased before we took 10459 * the lock pp is still the root of smaller szc page. And if its p_szc 10460 * increased it doesn't matter what lock we return (see comment in 10461 * front of this routine). 10462 * 10463 * 2) If pp's not root but rootpp is the root of a rootpp->p_szc size 10464 * large page we have the right lock since any previous potential 10465 * hat_page_demote() is done demoting from greater than current root's 10466 * p_szc because hat_page_demote() changes root's p_szc last. 
No 10467 * further hat_page_demote() can start or be in progress since it 10468 * would need the same lock we currently hold. 10469 * 10470 * 3) If rootpp's p_szc increased since previous iteration it doesn't 10471 * matter what lock we return (see comment in front of this routine). 10472 */ 10473 if (pp == rootpp || (rszc = rootpp->p_szc) == pszc || 10474 rszc >= prev_rszc) { 10475 return (mtx); 10476 } 10477 10478 /* 10479 * hat_page_demote() could have decreased root's p_szc. 10480 * In this case pp's p_szc must also be smaller than pszc. 10481 * Retry. 10482 */ 10483 if (rszc < pszc) { 10484 szc = pp->p_szc; 10485 if (szc < pszc) { 10486 mutex_exit(mtx); 10487 pszc = szc; 10488 goto again; 10489 } 10490 /* 10491 * pp's p_szc increased after it was decreased. 10492 * page cannot be mapped. Return current lock. The caller 10493 * will drop it right away. 10494 */ 10495 return (mtx); 10496 } 10497 10498 /* 10499 * root's p_szc is greater than pp's p_szc. 10500 * hat_page_demote() is not done with all pages 10501 * yet. Wait for it to complete. 10502 */ 10503 mutex_exit(mtx); 10504 rootpp = PP_GROUPLEADER(rootpp, rszc); 10505 mtx = SFMMU_MLSPL_MTX(type, rootpp); 10506 mutex_enter(mtx); 10507 mutex_exit(mtx); 10508 prev_rszc = rszc; 10509 goto again; 10510 } 10511 10512 static int 10513 sfmmu_mlspl_held(struct page *pp, int type) 10514 { 10515 kmutex_t *mtx; 10516 10517 ASSERT(pp != NULL); 10518 /* The lock lives in the root page */ 10519 pp = PP_PAGEROOT(pp); 10520 ASSERT(pp != NULL); 10521 10522 mtx = SFMMU_MLSPL_MTX(type, pp); 10523 return (MUTEX_HELD(mtx)); 10524 } 10525 10526 static uint_t 10527 sfmmu_get_free_hblk(struct hme_blk **hmeblkpp, uint_t critical) 10528 { 10529 struct hme_blk *hblkp; 10530 10531 10532 if (freehblkp != NULL) { 10533 mutex_enter(&freehblkp_lock); 10534 if (freehblkp != NULL) { 10535 /* 10536 * If the current thread is owning hblk_reserve OR 10537 * critical request from sfmmu_hblk_steal() 10538 * let it succeed even if freehblkcnt is really low. 10539 */ 10540 if (freehblkcnt <= HBLK_RESERVE_MIN && !critical) { 10541 SFMMU_STAT(sf_get_free_throttle); 10542 mutex_exit(&freehblkp_lock); 10543 return (0); 10544 } 10545 freehblkcnt--; 10546 *hmeblkpp = freehblkp; 10547 hblkp = *hmeblkpp; 10548 freehblkp = hblkp->hblk_next; 10549 mutex_exit(&freehblkp_lock); 10550 hblkp->hblk_next = NULL; 10551 SFMMU_STAT(sf_get_free_success); 10552 10553 ASSERT(hblkp->hblk_hmecnt == 0); 10554 ASSERT(hblkp->hblk_vcnt == 0); 10555 ASSERT(hblkp->hblk_nextpa == va_to_pa((caddr_t)hblkp)); 10556 10557 return (1); 10558 } 10559 mutex_exit(&freehblkp_lock); 10560 } 10561 10562 /* Check cpu hblk pending queues */ 10563 if ((*hmeblkpp = sfmmu_check_pending_hblks(TTE8K)) != NULL) { 10564 hblkp = *hmeblkpp; 10565 hblkp->hblk_next = NULL; 10566 hblkp->hblk_nextpa = va_to_pa((caddr_t)hblkp); 10567 10568 ASSERT(hblkp->hblk_hmecnt == 0); 10569 ASSERT(hblkp->hblk_vcnt == 0); 10570 10571 return (1); 10572 } 10573 10574 SFMMU_STAT(sf_get_free_fail); 10575 return (0); 10576 } 10577 10578 static uint_t 10579 sfmmu_put_free_hblk(struct hme_blk *hmeblkp, uint_t critical) 10580 { 10581 struct hme_blk *hblkp; 10582 10583 ASSERT(hmeblkp->hblk_hmecnt == 0); 10584 ASSERT(hmeblkp->hblk_vcnt == 0); 10585 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 10586 10587 /* 10588 * If the current thread is mapping into kernel space, 10589 * let it succede even if freehblkcnt is max 10590 * so that it will avoid freeing it to kmem. 
10591 * This will prevent stack overflow due to 10592 * possible recursion since kmem_cache_free() 10593 * might require creation of a slab which 10594 * in turn needs an hmeblk to map that slab; 10595 * let's break this vicious chain at the first 10596 * opportunity. 10597 */ 10598 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10599 mutex_enter(&freehblkp_lock); 10600 if (freehblkcnt < HBLK_RESERVE_CNT || critical) { 10601 SFMMU_STAT(sf_put_free_success); 10602 freehblkcnt++; 10603 hmeblkp->hblk_next = freehblkp; 10604 freehblkp = hmeblkp; 10605 mutex_exit(&freehblkp_lock); 10606 return (1); 10607 } 10608 mutex_exit(&freehblkp_lock); 10609 } 10610 10611 /* 10612 * Bring down freehblkcnt to HBLK_RESERVE_CNT. We are here 10613 * only if freehblkcnt is at least HBLK_RESERVE_CNT *and* 10614 * we are not in the process of mapping into kernel space. 10615 */ 10616 ASSERT(!critical); 10617 while (freehblkcnt > HBLK_RESERVE_CNT) { 10618 mutex_enter(&freehblkp_lock); 10619 if (freehblkcnt > HBLK_RESERVE_CNT) { 10620 freehblkcnt--; 10621 hblkp = freehblkp; 10622 freehblkp = hblkp->hblk_next; 10623 mutex_exit(&freehblkp_lock); 10624 ASSERT(get_hblk_cache(hblkp) == sfmmu8_cache); 10625 kmem_cache_free(sfmmu8_cache, hblkp); 10626 continue; 10627 } 10628 mutex_exit(&freehblkp_lock); 10629 } 10630 SFMMU_STAT(sf_put_free_fail); 10631 return (0); 10632 } 10633 10634 static void 10635 sfmmu_hblk_swap(struct hme_blk *new) 10636 { 10637 struct hme_blk *old, *hblkp, *prev; 10638 uint64_t newpa; 10639 caddr_t base, vaddr, endaddr; 10640 struct hmehash_bucket *hmebp; 10641 struct sf_hment *osfhme, *nsfhme; 10642 page_t *pp; 10643 kmutex_t *pml; 10644 tte_t tte; 10645 struct hme_blk *list = NULL; 10646 10647 #ifdef DEBUG 10648 hmeblk_tag hblktag; 10649 struct hme_blk *found; 10650 #endif 10651 old = HBLK_RESERVE; 10652 ASSERT(!old->hblk_shared); 10653 10654 /* 10655 * save pa before bcopy clobbers it 10656 */ 10657 newpa = new->hblk_nextpa; 10658 10659 base = (caddr_t)get_hblk_base(old); 10660 endaddr = base + get_hblk_span(old); 10661 10662 /* 10663 * acquire hash bucket lock. 10664 */ 10665 hmebp = sfmmu_tteload_acquire_hashbucket(ksfmmup, base, TTE8K, 10666 SFMMU_INVALID_SHMERID); 10667 10668 /* 10669 * copy contents from old to new 10670 */ 10671 bcopy((void *)old, (void *)new, HME8BLK_SZ); 10672 10673 /* 10674 * add new to hash chain 10675 */ 10676 sfmmu_hblk_hash_add(hmebp, new, newpa); 10677 10678 /* 10679 * search hash chain for hblk_reserve; this needs to be performed 10680 * after adding new, otherwise prev won't correspond to the hblk which 10681 * is prior to old in hash chain when we call sfmmu_hblk_hash_rm to 10682 * remove old later. 10683 */ 10684 for (prev = NULL, 10685 hblkp = hmebp->hmeblkp; hblkp != NULL && hblkp != old; 10686 prev = hblkp, hblkp = hblkp->hblk_next) 10687 ; 10688 10689 if (hblkp != old) 10690 panic("sfmmu_hblk_swap: hblk_reserve not found"); 10691 10692 /* 10693 * p_mapping list is still pointing to hments in hblk_reserve; 10694 * fix up p_mapping list so that they point to hments in new. 10695 * 10696 * Since all these mappings are created by hblk_reserve_thread 10697 * on the way and it's using at least one of the buffers from each of 10698 * the newly minted slabs, there is no danger of any of these 10699 * mappings getting unloaded by another thread. 10700 * 10701 * tsbmiss could only modify ref/mod bits of hments in old/new. 
10702 * Since all of these hments hold mappings established by segkmem 10703 * and mappings in segkmem are setup with HAT_NOSYNC, ref/mod bits 10704 * have no meaning for the mappings in hblk_reserve. hments in 10705 * old and new are identical except for ref/mod bits. 10706 */ 10707 for (vaddr = base; vaddr < endaddr; vaddr += TTEBYTES(TTE8K)) { 10708 10709 HBLKTOHME(osfhme, old, vaddr); 10710 sfmmu_copytte(&osfhme->hme_tte, &tte); 10711 10712 if (TTE_IS_VALID(&tte)) { 10713 if ((pp = osfhme->hme_page) == NULL) 10714 panic("sfmmu_hblk_swap: page not mapped"); 10715 10716 pml = sfmmu_mlist_enter(pp); 10717 10718 if (pp != osfhme->hme_page) 10719 panic("sfmmu_hblk_swap: mapping changed"); 10720 10721 HBLKTOHME(nsfhme, new, vaddr); 10722 10723 HME_ADD(nsfhme, pp); 10724 HME_SUB(osfhme, pp); 10725 10726 sfmmu_mlist_exit(pml); 10727 } 10728 } 10729 10730 /* 10731 * remove old from hash chain 10732 */ 10733 sfmmu_hblk_hash_rm(hmebp, old, prev, &list, 1); 10734 10735 #ifdef DEBUG 10736 10737 hblktag.htag_id = ksfmmup; 10738 hblktag.htag_rid = SFMMU_INVALID_SHMERID; 10739 hblktag.htag_bspage = HME_HASH_BSPAGE(base, HME_HASH_SHIFT(TTE8K)); 10740 hblktag.htag_rehash = HME_HASH_REHASH(TTE8K); 10741 HME_HASH_FAST_SEARCH(hmebp, hblktag, found); 10742 10743 if (found != new) 10744 panic("sfmmu_hblk_swap: new hblk not found"); 10745 #endif 10746 10747 SFMMU_HASH_UNLOCK(hmebp); 10748 10749 /* 10750 * Reset hblk_reserve 10751 */ 10752 bzero((void *)old, HME8BLK_SZ); 10753 old->hblk_nextpa = va_to_pa((caddr_t)old); 10754 } 10755 10756 /* 10757 * Grab the mlist mutex for both pages passed in. 10758 * 10759 * low and high will be returned as pointers to the mutexes for these pages. 10760 * low refers to the mutex residing in the lower bin of the mlist hash, while 10761 * high refers to the mutex residing in the higher bin of the mlist hash. This 10762 * is due to the locking order restrictions on the same thread grabbing 10763 * multiple mlist mutexes. The low lock must be acquired before the high lock. 10764 * 10765 * If both pages hash to the same mutex, only grab that single mutex, and 10766 * high will be returned as NULL 10767 * If the pages hash to different bins in the hash, grab the lower addressed 10768 * lock first and then the higher addressed lock in order to follow the locking 10769 * rules involved with the same thread grabbing multiple mlist mutexes. 10770 * low and high will both have non-NULL values. 10771 */ 10772 static void 10773 sfmmu_mlist_reloc_enter(struct page *targ, struct page *repl, 10774 kmutex_t **low, kmutex_t **high) 10775 { 10776 kmutex_t *mml_targ, *mml_repl; 10777 10778 /* 10779 * no need to do the dance around szc as in sfmmu_mlist_enter() 10780 * because this routine is only called by hat_page_relocate() and all 10781 * targ and repl pages are already locked EXCL so szc can't change. 
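 * The expected caller pattern is simply (illustrative sketch):
 *	kmutex_t *low, *high;
 *	sfmmu_mlist_reloc_enter(targ, repl, &low, &high);
 *	... relocate the mappings from targ to repl ...
 *	sfmmu_mlist_reloc_exit(low, high);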
10782 */ 10783 10784 mml_targ = MLIST_HASH(PP_PAGEROOT(targ)); 10785 mml_repl = MLIST_HASH(PP_PAGEROOT(repl)); 10786 10787 if (mml_targ == mml_repl) { 10788 *low = mml_targ; 10789 *high = NULL; 10790 } else { 10791 if (mml_targ < mml_repl) { 10792 *low = mml_targ; 10793 *high = mml_repl; 10794 } else { 10795 *low = mml_repl; 10796 *high = mml_targ; 10797 } 10798 } 10799 10800 mutex_enter(*low); 10801 if (*high) 10802 mutex_enter(*high); 10803 } 10804 10805 static void 10806 sfmmu_mlist_reloc_exit(kmutex_t *low, kmutex_t *high) 10807 { 10808 if (high) 10809 mutex_exit(high); 10810 mutex_exit(low); 10811 } 10812 10813 static hatlock_t * 10814 sfmmu_hat_enter(sfmmu_t *sfmmup) 10815 { 10816 hatlock_t *hatlockp; 10817 10818 if (sfmmup != ksfmmup) { 10819 hatlockp = TSB_HASH(sfmmup); 10820 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 10821 return (hatlockp); 10822 } 10823 return (NULL); 10824 } 10825 10826 static hatlock_t * 10827 sfmmu_hat_tryenter(sfmmu_t *sfmmup) 10828 { 10829 hatlock_t *hatlockp; 10830 10831 if (sfmmup != ksfmmup) { 10832 hatlockp = TSB_HASH(sfmmup); 10833 if (mutex_tryenter(HATLOCK_MUTEXP(hatlockp)) == 0) 10834 return (NULL); 10835 return (hatlockp); 10836 } 10837 return (NULL); 10838 } 10839 10840 static void 10841 sfmmu_hat_exit(hatlock_t *hatlockp) 10842 { 10843 if (hatlockp != NULL) 10844 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 10845 } 10846 10847 static void 10848 sfmmu_hat_lock_all(void) 10849 { 10850 int i; 10851 for (i = 0; i < SFMMU_NUM_LOCK; i++) 10852 mutex_enter(HATLOCK_MUTEXP(&hat_lock[i])); 10853 } 10854 10855 static void 10856 sfmmu_hat_unlock_all(void) 10857 { 10858 int i; 10859 for (i = SFMMU_NUM_LOCK - 1; i >= 0; i--) 10860 mutex_exit(HATLOCK_MUTEXP(&hat_lock[i])); 10861 } 10862 10863 int 10864 sfmmu_hat_lock_held(sfmmu_t *sfmmup) 10865 { 10866 ASSERT(sfmmup != ksfmmup); 10867 return (MUTEX_HELD(HATLOCK_MUTEXP(TSB_HASH(sfmmup)))); 10868 } 10869 10870 /* 10871 * Locking primitives to provide consistency between ISM unmap 10872 * and other operations. Since ISM unmap can take a long time, we 10873 * use HAT_ISMBUSY flag (protected by the hatlock) to avoid creating 10874 * contention on the hatlock buckets while ISM segments are being 10875 * unmapped. The tradeoff is that the flags don't prevent priority 10876 * inversion from occurring, so we must request kernel priority in 10877 * case we have to sleep to keep from getting buried while holding 10878 * the HAT_ISMBUSY flag set, which in turn could block other kernel 10879 * threads from running (for example, in sfmmu_uvatopfn()). 
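 * A typical caller (illustrative sketch, not additional code) brackets
 * the slow ISM work as follows:
 *	sfmmu_ismhat_enter(sfmmup, 0);		(sets HAT_ISMBUSY)
 *	... unmap or remap the ISM segment ...
 *	sfmmu_ismhat_exit(sfmmup, 0);		(clears the flag, wakes waiters)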
10880 */ 10881 static void 10882 sfmmu_ismhat_enter(sfmmu_t *sfmmup, int hatlock_held) 10883 { 10884 hatlock_t *hatlockp; 10885 10886 THREAD_KPRI_REQUEST(); 10887 if (!hatlock_held) 10888 hatlockp = sfmmu_hat_enter(sfmmup); 10889 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) 10890 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 10891 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 10892 if (!hatlock_held) 10893 sfmmu_hat_exit(hatlockp); 10894 } 10895 10896 static void 10897 sfmmu_ismhat_exit(sfmmu_t *sfmmup, int hatlock_held) 10898 { 10899 hatlock_t *hatlockp; 10900 10901 if (!hatlock_held) 10902 hatlockp = sfmmu_hat_enter(sfmmup); 10903 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 10904 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 10905 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 10906 if (!hatlock_held) 10907 sfmmu_hat_exit(hatlockp); 10908 THREAD_KPRI_RELEASE(); 10909 } 10910 10911 /* 10912 * 10913 * Algorithm: 10914 * 10915 * (1) if segkmem is not ready, allocate hblk from an array of pre-alloc'ed 10916 * hblks. 10917 * 10918 * (2) if we are allocating an hblk for mapping a slab in sfmmu_cache, 10919 * 10920 * (a) try to return an hblk from reserve pool of free hblks; 10921 * (b) if the reserve pool is empty, acquire hblk_reserve_lock 10922 * and return hblk_reserve. 10923 * 10924 * (3) call kmem_cache_alloc() to allocate hblk; 10925 * 10926 * (a) if hblk_reserve_lock is held by the current thread, 10927 * atomically replace hblk_reserve by the hblk that is 10928 * returned by kmem_cache_alloc; release hblk_reserve_lock 10929 * and call kmem_cache_alloc() again. 10930 * (b) if reserve pool is not full, add the hblk that is 10931 * returned by kmem_cache_alloc to reserve pool and 10932 * call kmem_cache_alloc again. 10933 * 10934 */ 10935 static struct hme_blk * 10936 sfmmu_hblk_alloc(sfmmu_t *sfmmup, caddr_t vaddr, 10937 struct hmehash_bucket *hmebp, uint_t size, hmeblk_tag hblktag, 10938 uint_t flags, uint_t rid) 10939 { 10940 struct hme_blk *hmeblkp = NULL; 10941 struct hme_blk *newhblkp; 10942 struct hme_blk *shw_hblkp = NULL; 10943 struct kmem_cache *sfmmu_cache = NULL; 10944 uint64_t hblkpa; 10945 ulong_t index; 10946 uint_t owner; /* set to 1 if using hblk_reserve */ 10947 uint_t forcefree; 10948 int sleep; 10949 sf_srd_t *srdp; 10950 sf_region_t *rgnp; 10951 10952 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 10953 ASSERT(hblktag.htag_rid == rid); 10954 SFMMU_VALIDATE_HMERID(sfmmup, rid, vaddr, TTEBYTES(size)); 10955 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 10956 IS_P2ALIGNED(vaddr, TTEBYTES(size))); 10957 10958 /* 10959 * If segkmem is not created yet, allocate from static hmeblks 10960 * created at the end of startup_modules(). See the block comment 10961 * in startup_modules() describing how we estimate the number of 10962 * static hmeblks that will be needed during re-map. 10963 */ 10964 if (!hblk_alloc_dynamic) { 10965 10966 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 10967 10968 if (size == TTE8K) { 10969 index = nucleus_hblk8.index; 10970 if (index >= nucleus_hblk8.len) { 10971 /* 10972 * If we panic here, see startup_modules() to 10973 * make sure that we are calculating the 10974 * number of hblk8's that we need correctly. 10975 */ 10976 prom_panic("no nucleus hblk8 to allocate"); 10977 } 10978 hmeblkp = 10979 (struct hme_blk *)&nucleus_hblk8.list[index]; 10980 nucleus_hblk8.index++; 10981 SFMMU_STAT(sf_hblk8_nalloc); 10982 } else { 10983 index = nucleus_hblk1.index; 10984 if (nucleus_hblk1.index >= nucleus_hblk1.len) { 10985 /* 10986 * If we panic here, see startup_modules(). 
10987 * Most likely you need to update the 10988 * calculation of the number of hblk1 elements 10989 * that the kernel needs to boot. 10990 */ 10991 prom_panic("no nucleus hblk1 to allocate"); 10992 } 10993 hmeblkp = 10994 (struct hme_blk *)&nucleus_hblk1.list[index]; 10995 nucleus_hblk1.index++; 10996 SFMMU_STAT(sf_hblk1_nalloc); 10997 } 10998 10999 goto hblk_init; 11000 } 11001 11002 SFMMU_HASH_UNLOCK(hmebp); 11003 11004 if (sfmmup != KHATID && !SFMMU_IS_SHMERID_VALID(rid)) { 11005 if (mmu_page_sizes == max_mmu_page_sizes) { 11006 if (size < TTE256M) 11007 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11008 size, flags); 11009 } else { 11010 if (size < TTE4M) 11011 shw_hblkp = sfmmu_shadow_hcreate(sfmmup, vaddr, 11012 size, flags); 11013 } 11014 } else if (SFMMU_IS_SHMERID_VALID(rid)) { 11015 /* 11016 * Shared hmes use per region bitmaps in rgn_hmeflag 11017 * rather than shadow hmeblks to keep track of the 11018 * mapping sizes which have been allocated for the region. 11019 * Here we cleanup old invalid hmeblks with this rid, 11020 * which may be left around by pageunload(). 11021 */ 11022 int ttesz; 11023 caddr_t va; 11024 caddr_t eva = vaddr + TTEBYTES(size); 11025 11026 ASSERT(sfmmup != KHATID); 11027 11028 srdp = sfmmup->sfmmu_srdp; 11029 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11030 rgnp = srdp->srd_hmergnp[rid]; 11031 ASSERT(rgnp != NULL && rgnp->rgn_id == rid); 11032 ASSERT(rgnp->rgn_refcnt != 0); 11033 ASSERT(size <= rgnp->rgn_pgszc); 11034 11035 ttesz = HBLK_MIN_TTESZ; 11036 do { 11037 if (!(rgnp->rgn_hmeflags & (0x1 << ttesz))) { 11038 continue; 11039 } 11040 11041 if (ttesz > size && ttesz != HBLK_MIN_TTESZ) { 11042 sfmmu_cleanup_rhblk(srdp, vaddr, rid, ttesz); 11043 } else if (ttesz < size) { 11044 for (va = vaddr; va < eva; 11045 va += TTEBYTES(ttesz)) { 11046 sfmmu_cleanup_rhblk(srdp, va, rid, 11047 ttesz); 11048 } 11049 } 11050 } while (++ttesz <= rgnp->rgn_pgszc); 11051 } 11052 11053 fill_hblk: 11054 owner = (hblk_reserve_thread == curthread) ? 1 : 0; 11055 11056 if (owner && size == TTE8K) { 11057 11058 ASSERT(!SFMMU_IS_SHMERID_VALID(rid)); 11059 /* 11060 * We are really in a tight spot. We already own 11061 * hblk_reserve and we need another hblk. In anticipation 11062 * of this kind of scenario, we specifically set aside 11063 * HBLK_RESERVE_MIN number of hblks to be used exclusively 11064 * by owner of hblk_reserve. 11065 */ 11066 SFMMU_STAT(sf_hblk_recurse_cnt); 11067 11068 if (!sfmmu_get_free_hblk(&hmeblkp, 1)) 11069 panic("sfmmu_hblk_alloc: reserve list is empty"); 11070 11071 goto hblk_verify; 11072 } 11073 11074 ASSERT(!owner); 11075 11076 if ((flags & HAT_NO_KALLOC) == 0) { 11077 11078 sfmmu_cache = ((size == TTE8K) ? sfmmu8_cache : sfmmu1_cache); 11079 sleep = ((sfmmup == KHATID) ? KM_NOSLEEP : KM_SLEEP); 11080 11081 if ((hmeblkp = kmem_cache_alloc(sfmmu_cache, sleep)) == NULL) { 11082 hmeblkp = sfmmu_hblk_steal(size); 11083 } else { 11084 /* 11085 * if we are the owner of hblk_reserve, 11086 * swap hblk_reserve with hmeblkp and 11087 * start a fresh life. Hope things go 11088 * better this time. 
11089 */ 11090 if (hblk_reserve_thread == curthread) { 11091 ASSERT(sfmmu_cache == sfmmu8_cache); 11092 sfmmu_hblk_swap(hmeblkp); 11093 hblk_reserve_thread = NULL; 11094 mutex_exit(&hblk_reserve_lock); 11095 goto fill_hblk; 11096 } 11097 /* 11098 * let's donate this hblk to our reserve list if 11099 * we are not mapping kernel range 11100 */ 11101 if (size == TTE8K && sfmmup != KHATID) { 11102 if (sfmmu_put_free_hblk(hmeblkp, 0)) 11103 goto fill_hblk; 11104 } 11105 } 11106 } else { 11107 /* 11108 * We are here to map the slab in sfmmu8_cache; let's 11109 * check if we could tap our reserve list; if successful, 11110 * this will avoid the pain of going thru sfmmu_hblk_swap 11111 */ 11112 SFMMU_STAT(sf_hblk_slab_cnt); 11113 if (!sfmmu_get_free_hblk(&hmeblkp, 0)) { 11114 /* 11115 * let's start hblk_reserve dance 11116 */ 11117 SFMMU_STAT(sf_hblk_reserve_cnt); 11118 owner = 1; 11119 mutex_enter(&hblk_reserve_lock); 11120 hmeblkp = HBLK_RESERVE; 11121 hblk_reserve_thread = curthread; 11122 } 11123 } 11124 11125 hblk_verify: 11126 ASSERT(hmeblkp != NULL); 11127 set_hblk_sz(hmeblkp, size); 11128 ASSERT(hmeblkp->hblk_nextpa == va_to_pa((caddr_t)hmeblkp)); 11129 SFMMU_HASH_LOCK(hmebp); 11130 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11131 if (newhblkp != NULL) { 11132 SFMMU_HASH_UNLOCK(hmebp); 11133 if (hmeblkp != HBLK_RESERVE) { 11134 /* 11135 * This is really tricky! 11136 * 11137 * vmem_alloc(vmem_seg_arena) 11138 * vmem_alloc(vmem_internal_arena) 11139 * segkmem_alloc(heap_arena) 11140 * vmem_alloc(heap_arena) 11141 * page_create() 11142 * hat_memload() 11143 * kmem_cache_free() 11144 * kmem_cache_alloc() 11145 * kmem_slab_create() 11146 * vmem_alloc(kmem_internal_arena) 11147 * segkmem_alloc(heap_arena) 11148 * vmem_alloc(heap_arena) 11149 * page_create() 11150 * hat_memload() 11151 * kmem_cache_free() 11152 * ... 11153 * 11154 * Thus, hat_memload() could call kmem_cache_free 11155 * for enough number of times that we could easily 11156 * hit the bottom of the stack or run out of reserve 11157 * list of vmem_seg structs. So, we must donate 11158 * this hblk to reserve list if it's allocated 11159 * from sfmmu8_cache *and* mapping kernel range. 11160 * We don't need to worry about freeing hmeblk1's 11161 * to kmem since they don't map any kmem slabs. 11162 * 11163 * Note: When segkmem supports largepages, we must 11164 * free hmeblk1's to reserve list as well. 11165 */ 11166 forcefree = (sfmmup == KHATID) ? 1 : 0; 11167 if (size == TTE8K && 11168 sfmmu_put_free_hblk(hmeblkp, forcefree)) { 11169 goto re_verify; 11170 } 11171 ASSERT(sfmmup != KHATID); 11172 kmem_cache_free(get_hblk_cache(hmeblkp), hmeblkp); 11173 } else { 11174 /* 11175 * Hey! we don't need hblk_reserve any more. 11176 */ 11177 ASSERT(owner); 11178 hblk_reserve_thread = NULL; 11179 mutex_exit(&hblk_reserve_lock); 11180 owner = 0; 11181 } 11182 re_verify: 11183 /* 11184 * let's check if the goodies are still present 11185 */ 11186 SFMMU_HASH_LOCK(hmebp); 11187 HME_HASH_FAST_SEARCH(hmebp, hblktag, newhblkp); 11188 if (newhblkp != NULL) { 11189 /* 11190 * return newhblkp if it's not hblk_reserve; 11191 * if newhblkp is hblk_reserve, return it 11192 * _only if_ we are the owner of hblk_reserve. 
11193 */ 11194 if (newhblkp != HBLK_RESERVE || owner) { 11195 ASSERT(!SFMMU_IS_SHMERID_VALID(rid) || 11196 newhblkp->hblk_shared); 11197 ASSERT(SFMMU_IS_SHMERID_VALID(rid) || 11198 !newhblkp->hblk_shared); 11199 return (newhblkp); 11200 } else { 11201 /* 11202 * we just hit hblk_reserve in the hash and 11203 * we are not the owner of that; 11204 * 11205 * block until hblk_reserve_thread completes 11206 * swapping hblk_reserve and try the dance 11207 * once again. 11208 */ 11209 SFMMU_HASH_UNLOCK(hmebp); 11210 mutex_enter(&hblk_reserve_lock); 11211 mutex_exit(&hblk_reserve_lock); 11212 SFMMU_STAT(sf_hblk_reserve_hit); 11213 goto fill_hblk; 11214 } 11215 } else { 11216 /* 11217 * it's no more! try the dance once again. 11218 */ 11219 SFMMU_HASH_UNLOCK(hmebp); 11220 goto fill_hblk; 11221 } 11222 } 11223 11224 hblk_init: 11225 if (SFMMU_IS_SHMERID_VALID(rid)) { 11226 uint16_t tteflag = 0x1 << 11227 ((size < HBLK_MIN_TTESZ) ? HBLK_MIN_TTESZ : size); 11228 11229 if (!(rgnp->rgn_hmeflags & tteflag)) { 11230 atomic_or_16(&rgnp->rgn_hmeflags, tteflag); 11231 } 11232 hmeblkp->hblk_shared = 1; 11233 } else { 11234 hmeblkp->hblk_shared = 0; 11235 } 11236 set_hblk_sz(hmeblkp, size); 11237 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11238 hmeblkp->hblk_next = (struct hme_blk *)NULL; 11239 hmeblkp->hblk_tag = hblktag; 11240 hmeblkp->hblk_shadow = shw_hblkp; 11241 hblkpa = hmeblkp->hblk_nextpa; 11242 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 11243 11244 ASSERT(get_hblk_ttesz(hmeblkp) == size); 11245 ASSERT(get_hblk_span(hmeblkp) == HMEBLK_SPAN(size)); 11246 ASSERT(hmeblkp->hblk_hmecnt == 0); 11247 ASSERT(hmeblkp->hblk_vcnt == 0); 11248 ASSERT(hmeblkp->hblk_lckcnt == 0); 11249 ASSERT(hblkpa == va_to_pa((caddr_t)hmeblkp)); 11250 sfmmu_hblk_hash_add(hmebp, hmeblkp, hblkpa); 11251 return (hmeblkp); 11252 } 11253 11254 /* 11255 * This function cleans up the hme_blk and returns it to the free list. 11256 */ 11257 /* ARGSUSED */ 11258 static void 11259 sfmmu_hblk_free(struct hme_blk **listp) 11260 { 11261 struct hme_blk *hmeblkp, *next_hmeblkp; 11262 int size; 11263 uint_t critical; 11264 uint64_t hblkpa; 11265 11266 ASSERT(*listp != NULL); 11267 11268 hmeblkp = *listp; 11269 while (hmeblkp != NULL) { 11270 next_hmeblkp = hmeblkp->hblk_next; 11271 ASSERT(!hmeblkp->hblk_hmecnt); 11272 ASSERT(!hmeblkp->hblk_vcnt); 11273 ASSERT(!hmeblkp->hblk_lckcnt); 11274 ASSERT(hmeblkp != (struct hme_blk *)hblk_reserve); 11275 ASSERT(hmeblkp->hblk_shared == 0); 11276 ASSERT(hmeblkp->hblk_shw_bit == 0); 11277 ASSERT(hmeblkp->hblk_shadow == NULL); 11278 11279 hblkpa = va_to_pa((caddr_t)hmeblkp); 11280 ASSERT(hblkpa != (uint64_t)-1); 11281 critical = (hblktosfmmu(hmeblkp) == KHATID) ? 1 : 0; 11282 11283 size = get_hblk_ttesz(hmeblkp); 11284 hmeblkp->hblk_next = NULL; 11285 hmeblkp->hblk_nextpa = hblkpa; 11286 11287 if (hmeblkp->hblk_nuc_bit == 0) { 11288 11289 if (size != TTE8K || 11290 !sfmmu_put_free_hblk(hmeblkp, critical)) 11291 kmem_cache_free(get_hblk_cache(hmeblkp), 11292 hmeblkp); 11293 } 11294 hmeblkp = next_hmeblkp; 11295 } 11296 } 11297 11298 #define BUCKETS_TO_SEARCH_BEFORE_UNLOAD 30 11299 #define SFMMU_HBLK_STEAL_THRESHOLD 5 11300 11301 static uint_t sfmmu_hblk_steal_twice; 11302 static uint_t sfmmu_hblk_steal_count, sfmmu_hblk_steal_unload_count; 11303 11304 /* 11305 * Steal a hmeblk from user or kernel hme hash lists. 11306 * For 8K tte grab one from reserve pool (freehblkp) before proceeding to 11307 * steal and if we fail to steal after SFMMU_HBLK_STEAL_THRESHOLD attempts 11308 * tap into critical reserve of freehblkp. 
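 * The search order is roughly: the per-cpu pending queues, then (for
 * 8K hblks) the freehblkp reserve pool, then the user hash buckets
 * starting at uhmehash_steal_hand, and finally the kernel hash buckets.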
11309 * Note: We remain looping in this routine until we find one. 11310 */ 11311 static struct hme_blk * 11312 sfmmu_hblk_steal(int size) 11313 { 11314 static struct hmehash_bucket *uhmehash_steal_hand = NULL; 11315 struct hmehash_bucket *hmebp; 11316 struct hme_blk *hmeblkp = NULL, *pr_hblk; 11317 uint64_t hblkpa; 11318 int i; 11319 uint_t loop_cnt = 0, critical; 11320 11321 for (;;) { 11322 /* Check cpu hblk pending queues */ 11323 if ((hmeblkp = sfmmu_check_pending_hblks(size)) != NULL) { 11324 hmeblkp->hblk_nextpa = va_to_pa((caddr_t)hmeblkp); 11325 ASSERT(hmeblkp->hblk_hmecnt == 0); 11326 ASSERT(hmeblkp->hblk_vcnt == 0); 11327 return (hmeblkp); 11328 } 11329 11330 if (size == TTE8K) { 11331 critical = 11332 (++loop_cnt > SFMMU_HBLK_STEAL_THRESHOLD) ? 1 : 0; 11333 if (sfmmu_get_free_hblk(&hmeblkp, critical)) 11334 return (hmeblkp); 11335 } 11336 11337 hmebp = (uhmehash_steal_hand == NULL) ? uhme_hash : 11338 uhmehash_steal_hand; 11339 ASSERT(hmebp >= uhme_hash && hmebp <= &uhme_hash[UHMEHASH_SZ]); 11340 11341 for (i = 0; hmeblkp == NULL && i <= UHMEHASH_SZ + 11342 BUCKETS_TO_SEARCH_BEFORE_UNLOAD; i++) { 11343 SFMMU_HASH_LOCK(hmebp); 11344 hmeblkp = hmebp->hmeblkp; 11345 hblkpa = hmebp->hmeh_nextpa; 11346 pr_hblk = NULL; 11347 while (hmeblkp) { 11348 /* 11349 * check if it is a hmeblk that is not locked 11350 * and not shared. skip shadow hmeblks with 11351 * shadow_mask set i.e valid count non zero. 11352 */ 11353 if ((get_hblk_ttesz(hmeblkp) == size) && 11354 (hmeblkp->hblk_shw_bit == 0 || 11355 hmeblkp->hblk_vcnt == 0) && 11356 (hmeblkp->hblk_lckcnt == 0)) { 11357 /* 11358 * there is a high probability that we 11359 * will find a free one. search some 11360 * buckets for a free hmeblk initially 11361 * before unloading a valid hmeblk. 11362 */ 11363 if ((hmeblkp->hblk_vcnt == 0 && 11364 hmeblkp->hblk_hmecnt == 0) || (i >= 11365 BUCKETS_TO_SEARCH_BEFORE_UNLOAD)) { 11366 if (sfmmu_steal_this_hblk(hmebp, 11367 hmeblkp, hblkpa, pr_hblk)) { 11368 /* 11369 * Hblk is unloaded 11370 * successfully 11371 */ 11372 break; 11373 } 11374 } 11375 } 11376 pr_hblk = hmeblkp; 11377 hblkpa = hmeblkp->hblk_nextpa; 11378 hmeblkp = hmeblkp->hblk_next; 11379 } 11380 11381 SFMMU_HASH_UNLOCK(hmebp); 11382 if (hmebp++ == &uhme_hash[UHMEHASH_SZ]) 11383 hmebp = uhme_hash; 11384 } 11385 uhmehash_steal_hand = hmebp; 11386 11387 if (hmeblkp != NULL) 11388 break; 11389 11390 /* 11391 * in the worst case, look for a free one in the kernel 11392 * hash table. 11393 */ 11394 for (i = 0, hmebp = khme_hash; i <= KHMEHASH_SZ; i++) { 11395 SFMMU_HASH_LOCK(hmebp); 11396 hmeblkp = hmebp->hmeblkp; 11397 hblkpa = hmebp->hmeh_nextpa; 11398 pr_hblk = NULL; 11399 while (hmeblkp) { 11400 /* 11401 * check if it is free hmeblk 11402 */ 11403 if ((get_hblk_ttesz(hmeblkp) == size) && 11404 (hmeblkp->hblk_lckcnt == 0) && 11405 (hmeblkp->hblk_vcnt == 0) && 11406 (hmeblkp->hblk_hmecnt == 0)) { 11407 if (sfmmu_steal_this_hblk(hmebp, 11408 hmeblkp, hblkpa, pr_hblk)) { 11409 break; 11410 } else { 11411 /* 11412 * Cannot fail since we have 11413 * hash lock. 
11414 */ 11415 panic("fail to steal?"); 11416 } 11417 } 11418 11419 pr_hblk = hmeblkp; 11420 hblkpa = hmeblkp->hblk_nextpa; 11421 hmeblkp = hmeblkp->hblk_next; 11422 } 11423 11424 SFMMU_HASH_UNLOCK(hmebp); 11425 if (hmebp++ == &khme_hash[KHMEHASH_SZ]) 11426 hmebp = khme_hash; 11427 } 11428 11429 if (hmeblkp != NULL) 11430 break; 11431 sfmmu_hblk_steal_twice++; 11432 } 11433 return (hmeblkp); 11434 } 11435 11436 /* 11437 * This routine does real work to prepare a hblk to be "stolen" by 11438 * unloading the mappings, updating shadow counts .... 11439 * It returns 1 if the block is ready to be reused (stolen), or 0 11440 * means the block cannot be stolen yet- pageunload is still working 11441 * on this hblk. 11442 */ 11443 static int 11444 sfmmu_steal_this_hblk(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 11445 uint64_t hblkpa, struct hme_blk *pr_hblk) 11446 { 11447 int shw_size, vshift; 11448 struct hme_blk *shw_hblkp; 11449 caddr_t vaddr; 11450 uint_t shw_mask, newshw_mask; 11451 struct hme_blk *list = NULL; 11452 11453 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 11454 11455 /* 11456 * check if the hmeblk is free, unload if necessary 11457 */ 11458 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11459 sfmmu_t *sfmmup; 11460 demap_range_t dmr; 11461 11462 sfmmup = hblktosfmmu(hmeblkp); 11463 if (hmeblkp->hblk_shared || sfmmup->sfmmu_ismhat) { 11464 return (0); 11465 } 11466 DEMAP_RANGE_INIT(sfmmup, &dmr); 11467 (void) sfmmu_hblk_unload(sfmmup, hmeblkp, 11468 (caddr_t)get_hblk_base(hmeblkp), 11469 get_hblk_endaddr(hmeblkp), &dmr, HAT_UNLOAD); 11470 DEMAP_RANGE_FLUSH(&dmr); 11471 if (hmeblkp->hblk_vcnt || hmeblkp->hblk_hmecnt) { 11472 /* 11473 * Pageunload is working on the same hblk. 11474 */ 11475 return (0); 11476 } 11477 11478 sfmmu_hblk_steal_unload_count++; 11479 } 11480 11481 ASSERT(hmeblkp->hblk_lckcnt == 0); 11482 ASSERT(hmeblkp->hblk_vcnt == 0 && hmeblkp->hblk_hmecnt == 0); 11483 11484 sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk, &list, 1); 11485 hmeblkp->hblk_nextpa = hblkpa; 11486 11487 shw_hblkp = hmeblkp->hblk_shadow; 11488 if (shw_hblkp) { 11489 ASSERT(!hmeblkp->hblk_shared); 11490 shw_size = get_hblk_ttesz(shw_hblkp); 11491 vaddr = (caddr_t)get_hblk_base(hmeblkp); 11492 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 11493 ASSERT(vshift < 8); 11494 /* 11495 * Atomically clear shadow mask bit 11496 */ 11497 do { 11498 shw_mask = shw_hblkp->hblk_shw_mask; 11499 ASSERT(shw_mask & (1 << vshift)); 11500 newshw_mask = shw_mask & ~(1 << vshift); 11501 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask, 11502 shw_mask, newshw_mask); 11503 } while (newshw_mask != shw_mask); 11504 hmeblkp->hblk_shadow = NULL; 11505 } 11506 11507 /* 11508 * remove shadow bit if we are stealing an unused shadow hmeblk. 11509 * sfmmu_hblk_alloc needs it that way, will set shadow bit later if 11510 * we are indeed allocating a shadow hmeblk. 
11511 */ 11512 hmeblkp->hblk_shw_bit = 0; 11513 11514 if (hmeblkp->hblk_shared) { 11515 sf_srd_t *srdp; 11516 sf_region_t *rgnp; 11517 uint_t rid; 11518 11519 srdp = hblktosrd(hmeblkp); 11520 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 11521 rid = hmeblkp->hblk_tag.htag_rid; 11522 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 11523 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 11524 rgnp = srdp->srd_hmergnp[rid]; 11525 ASSERT(rgnp != NULL); 11526 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 11527 hmeblkp->hblk_shared = 0; 11528 } 11529 11530 sfmmu_hblk_steal_count++; 11531 SFMMU_STAT(sf_steal_count); 11532 11533 return (1); 11534 } 11535 11536 struct hme_blk * 11537 sfmmu_hmetohblk(struct sf_hment *sfhme) 11538 { 11539 struct hme_blk *hmeblkp; 11540 struct sf_hment *sfhme0; 11541 struct hme_blk *hblk_dummy = 0; 11542 11543 /* 11544 * No dummy sf_hments, please. 11545 */ 11546 ASSERT(sfhme->hme_tte.ll != 0); 11547 11548 sfhme0 = sfhme - sfhme->hme_tte.tte_hmenum; 11549 hmeblkp = (struct hme_blk *)((uintptr_t)sfhme0 - 11550 (uintptr_t)&hblk_dummy->hblk_hme[0]); 11551 11552 return (hmeblkp); 11553 } 11554 11555 /* 11556 * On swapin, get appropriately sized TSB(s) and clear the HAT_SWAPPED flag. 11557 * If we can't get appropriately sized TSB(s), try for 8K TSB(s) using 11558 * KM_SLEEP allocation. 11559 * 11560 * This routine does not return a value. 11561 */ 11562 static void 11563 sfmmu_tsb_swapin(sfmmu_t *sfmmup, hatlock_t *hatlockp) 11564 { 11565 struct tsb_info *tsbinfop, *next; 11566 tsb_replace_rc_t rc; 11567 boolean_t gotfirst = B_FALSE; 11568 11569 ASSERT(sfmmup != ksfmmup); 11570 ASSERT(sfmmu_hat_lock_held(sfmmup)); 11571 11572 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPIN)) { 11573 cv_wait(&sfmmup->sfmmu_tsb_cv, HATLOCK_MUTEXP(hatlockp)); 11574 } 11575 11576 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) { 11577 SFMMU_FLAGS_SET(sfmmup, HAT_SWAPIN); 11578 } else { 11579 return; 11580 } 11581 11582 ASSERT(sfmmup->sfmmu_tsb != NULL); 11583 11584 /* 11585 * Loop over all tsbinfo's replacing them with ones that actually have 11586 * a TSB. If any of the replacements ever fail, bail out of the loop. 11587 */ 11588 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; tsbinfop = next) { 11589 ASSERT(tsbinfop->tsb_flags & TSB_SWAPPED); 11590 next = tsbinfop->tsb_next; 11591 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, tsbinfop->tsb_szc, 11592 hatlockp, TSB_SWAPIN); 11593 if (rc != TSB_SUCCESS) { 11594 break; 11595 } 11596 gotfirst = B_TRUE; 11597 } 11598 11599 switch (rc) { 11600 case TSB_SUCCESS: 11601 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11602 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11603 return; 11604 case TSB_LOSTRACE: 11605 break; 11606 case TSB_ALLOCFAIL: 11607 break; 11608 default: 11609 panic("sfmmu_replace_tsb returned unrecognized failure code " 11610 "%d", rc); 11611 } 11612 11613 /* 11614 * In this case, we failed to get one of our TSBs. If we failed to 11615 * get the first TSB, get one of minimum size (8KB). Walk the list 11616 * and throw away the tsbinfos, starting where the allocation failed; 11617 * we can get by with just one TSB as long as we don't leave the 11618 * SWAPPED tsbinfo structures lying around.
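 * Concretely, the steps below are: keep the first tsbinfo, clip the
 * list after it, drop the HAT lock while freeing the remaining swapped
 * tsbinfos, and if not even the first TSB was re-allocated, force a
 * minimum-sized 8K TSB with TSB_SWAPIN | TSB_FORCEALLOC.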
11619 */ 11620 tsbinfop = sfmmup->sfmmu_tsb; 11621 next = tsbinfop->tsb_next; 11622 tsbinfop->tsb_next = NULL; 11623 11624 sfmmu_hat_exit(hatlockp); 11625 for (tsbinfop = next; tsbinfop != NULL; tsbinfop = next) { 11626 next = tsbinfop->tsb_next; 11627 sfmmu_tsbinfo_free(tsbinfop); 11628 } 11629 hatlockp = sfmmu_hat_enter(sfmmup); 11630 11631 /* 11632 * If we don't have any TSBs, get a single 8K TSB for 8K, 64K and 512K 11633 * pages. 11634 */ 11635 if (!gotfirst) { 11636 tsbinfop = sfmmup->sfmmu_tsb; 11637 rc = sfmmu_replace_tsb(sfmmup, tsbinfop, TSB_MIN_SZCODE, 11638 hatlockp, TSB_SWAPIN | TSB_FORCEALLOC); 11639 ASSERT(rc == TSB_SUCCESS); 11640 } 11641 11642 SFMMU_FLAGS_CLEAR(sfmmup, HAT_SWAPPED|HAT_SWAPIN); 11643 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 11644 } 11645 11646 static int 11647 sfmmu_is_rgnva(sf_srd_t *srdp, caddr_t addr, ulong_t w, ulong_t bmw) 11648 { 11649 ulong_t bix = 0; 11650 uint_t rid; 11651 sf_region_t *rgnp; 11652 11653 ASSERT(srdp != NULL); 11654 ASSERT(srdp->srd_refcnt != 0); 11655 11656 w <<= BT_ULSHIFT; 11657 while (bmw) { 11658 if (!(bmw & 0x1)) { 11659 bix++; 11660 bmw >>= 1; 11661 continue; 11662 } 11663 rid = w | bix; 11664 rgnp = srdp->srd_hmergnp[rid]; 11665 ASSERT(rgnp->rgn_refcnt > 0); 11666 ASSERT(rgnp->rgn_id == rid); 11667 if (addr < rgnp->rgn_saddr || 11668 addr >= (rgnp->rgn_saddr + rgnp->rgn_size)) { 11669 bix++; 11670 bmw >>= 1; 11671 } else { 11672 return (1); 11673 } 11674 } 11675 return (0); 11676 } 11677 11678 /* 11679 * Handle exceptions for low level tsb_handler. 11680 * 11681 * There are many scenarios that could land us here: 11682 * 11683 * If the context is invalid we land here. The context can be invalid 11684 * for 3 reasons: 1) we couldn't allocate a new context and now need to 11685 * perform a wrap around operation in order to allocate a new context. 11686 * 2) Context was invalidated to change pagesize programming 3) ISMs or 11687 * TSBs configuration is changing for this process and we are forced into 11688 * here to do a synchronization operation. If the context is valid we can 11689 * be here from the window trap handler. In this case just call trap to handle 11690 * the fault. 11691 * 11692 * Note that the process will run in INVALID_CONTEXT before 11693 * faulting into here and subsequently loading the MMU registers 11694 * (including the TSB base register) associated with this process. 11695 * For this reason, the trap handlers must all test for 11696 * INVALID_CONTEXT before attempting to access any registers other 11697 * than the context registers. 11698 */ 11699 void 11700 sfmmu_tsbmiss_exception(struct regs *rp, uintptr_t tagaccess, uint_t traptype) 11701 { 11702 sfmmu_t *sfmmup, *shsfmmup; 11703 uint_t ctxtype; 11704 klwp_id_t lwp; 11705 char lwp_save_state; 11706 hatlock_t *hatlockp, *shatlockp; 11707 struct tsb_info *tsbinfop; 11708 struct tsbmiss *tsbmp; 11709 sf_scd_t *scdp; 11710 11711 SFMMU_STAT(sf_tsb_exceptions); 11712 SFMMU_MMU_STAT(mmu_tsb_exceptions); 11713 sfmmup = astosfmmu(curthread->t_procp->p_as); 11714 /* 11715 * note that in sun4u, the tagaccess register contains ctxnum 11716 * while sun4v passes ctxtype in the tagaccess register.
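 * In both cases the low bits of the register select the context while
 * the remaining bits hold the faulting virtual address, which the code
 * below extracts as (see TAGACC_CTX_MASK / TAGACC_VADDR_MASK):
 *	ctxtype = tagaccess & TAGACC_CTX_MASK;
 *	addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK);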
11717 */ 11718 ctxtype = tagaccess & TAGACC_CTX_MASK; 11719 11720 ASSERT(sfmmup != ksfmmup && ctxtype != KCONTEXT); 11721 ASSERT(sfmmup->sfmmu_ismhat == 0); 11722 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED) || 11723 ctxtype == INVALID_CONTEXT); 11724 11725 if (ctxtype != INVALID_CONTEXT && traptype != T_DATA_PROT) { 11726 /* 11727 * We may land here because shme bitmap and pagesize 11728 * flags are updated lazily in tsbmiss area on other cpus. 11729 * If we detect here that tsbmiss area is out of sync with 11730 * sfmmu update it and retry the trapped instruction. 11731 * Otherwise call trap(). 11732 */ 11733 int ret = 0; 11734 uchar_t tteflag_mask = (1 << TTE64K) | (1 << TTE8K); 11735 caddr_t addr = (caddr_t)(tagaccess & TAGACC_VADDR_MASK); 11736 11737 /* 11738 * Must set lwp state to LWP_SYS before 11739 * trying to acquire any adaptive lock 11740 */ 11741 lwp = ttolwp(curthread); 11742 ASSERT(lwp); 11743 lwp_save_state = lwp->lwp_state; 11744 lwp->lwp_state = LWP_SYS; 11745 11746 hatlockp = sfmmu_hat_enter(sfmmup); 11747 kpreempt_disable(); 11748 tsbmp = &tsbmiss_area[CPU->cpu_id]; 11749 ASSERT(sfmmup == tsbmp->usfmmup); 11750 if (((tsbmp->uhat_tteflags ^ sfmmup->sfmmu_tteflags) & 11751 ~tteflag_mask) || 11752 ((tsbmp->uhat_rtteflags ^ sfmmup->sfmmu_rtteflags) & 11753 ~tteflag_mask)) { 11754 tsbmp->uhat_tteflags = sfmmup->sfmmu_tteflags; 11755 tsbmp->uhat_rtteflags = sfmmup->sfmmu_rtteflags; 11756 ret = 1; 11757 } 11758 if (sfmmup->sfmmu_srdp != NULL) { 11759 ulong_t *sm = sfmmup->sfmmu_hmeregion_map.bitmap; 11760 ulong_t *tm = tsbmp->shmermap; 11761 ulong_t i; 11762 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 11763 ulong_t d = tm[i] ^ sm[i]; 11764 if (d) { 11765 if (d & sm[i]) { 11766 if (!ret && sfmmu_is_rgnva( 11767 sfmmup->sfmmu_srdp, 11768 addr, i, d & sm[i])) { 11769 ret = 1; 11770 } 11771 } 11772 tm[i] = sm[i]; 11773 } 11774 } 11775 } 11776 kpreempt_enable(); 11777 sfmmu_hat_exit(hatlockp); 11778 lwp->lwp_state = lwp_save_state; 11779 if (ret) { 11780 return; 11781 } 11782 } else if (ctxtype == INVALID_CONTEXT) { 11783 /* 11784 * First, make sure we come out of here with a valid ctx, 11785 * since if we don't get one we'll simply loop on the 11786 * faulting instruction. 11787 * 11788 * If the ISM mappings are changing, the TSB is relocated, 11789 * the process is swapped, the process is joining SCD or 11790 * leaving SCD or shared regions we serialize behind the 11791 * controlling thread with hat lock, sfmmu_flags and 11792 * sfmmu_tsb_cv condition variable. 11793 */ 11794 11795 /* 11796 * Must set lwp state to LWP_SYS before 11797 * trying to acquire any adaptive lock 11798 */ 11799 lwp = ttolwp(curthread); 11800 ASSERT(lwp); 11801 lwp_save_state = lwp->lwp_state; 11802 lwp->lwp_state = LWP_SYS; 11803 11804 hatlockp = sfmmu_hat_enter(sfmmup); 11805 retry: 11806 if ((scdp = sfmmup->sfmmu_scdp) != NULL) { 11807 shsfmmup = scdp->scd_sfmmup; 11808 ASSERT(shsfmmup != NULL); 11809 11810 for (tsbinfop = shsfmmup->sfmmu_tsb; tsbinfop != NULL; 11811 tsbinfop = tsbinfop->tsb_next) { 11812 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) { 11813 /* drop the private hat lock */ 11814 sfmmu_hat_exit(hatlockp); 11815 /* acquire the shared hat lock */ 11816 shatlockp = sfmmu_hat_enter(shsfmmup); 11817 /* 11818 * recheck to see if anything changed 11819 * after we drop the private hat lock. 
11820 */
11821 if (sfmmup->sfmmu_scdp == scdp &&
11822 shsfmmup == scdp->scd_sfmmup) {
11823 sfmmu_tsb_chk_reloc(shsfmmup,
11824 shatlockp);
11825 }
11826 sfmmu_hat_exit(shatlockp);
11827 hatlockp = sfmmu_hat_enter(sfmmup);
11828 goto retry;
11829 }
11830 }
11831 }
11832
11833 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
11834 tsbinfop = tsbinfop->tsb_next) {
11835 if (tsbinfop->tsb_flags & TSB_RELOC_FLAG) {
11836 cv_wait(&sfmmup->sfmmu_tsb_cv,
11837 HATLOCK_MUTEXP(hatlockp));
11838 goto retry;
11839 }
11840 }
11841
11842 /*
11843 * Wait for ISM maps to be updated.
11844 */
11845 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {
11846 cv_wait(&sfmmup->sfmmu_tsb_cv,
11847 HATLOCK_MUTEXP(hatlockp));
11848 goto retry;
11849 }
11850
11851 /* Is this process joining an SCD? */
11852 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11853 /*
11854 * Flush private TSB and setup shared TSB.
11855 * sfmmu_finish_join_scd() does not drop the
11856 * hat lock.
11857 */
11858 sfmmu_finish_join_scd(sfmmup);
11859 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD);
11860 }
11861
11862 /*
11863 * If we're swapping in, get TSB(s). Note that we must do
11864 * this before we get a ctx or load the MMU state. Once
11865 * we swap in we have to recheck to make sure the TSB(s) and
11866 * ISM mappings didn't change while we slept.
11867 */
11868 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
11869 sfmmu_tsb_swapin(sfmmup, hatlockp);
11870 goto retry;
11871 }
11872
11873 sfmmu_get_ctx(sfmmup);
11874
11875 sfmmu_hat_exit(hatlockp);
11876 /*
11877 * Must restore lwp_state if not calling
11878 * trap() for further processing. Restore
11879 * it anyway.
11880 */
11881 lwp->lwp_state = lwp_save_state;
11882 return;
11883 }
11884 trap(rp, (caddr_t)tagaccess, traptype, 0);
11885 }
11886
11887 static void
11888 sfmmu_tsb_chk_reloc(sfmmu_t *sfmmup, hatlock_t *hatlockp)
11889 {
11890 struct tsb_info *tp;
11891
11892 ASSERT(sfmmu_hat_lock_held(sfmmup));
11893
11894 for (tp = sfmmup->sfmmu_tsb; tp != NULL; tp = tp->tsb_next) {
11895 if (tp->tsb_flags & TSB_RELOC_FLAG) {
11896 cv_wait(&sfmmup->sfmmu_tsb_cv,
11897 HATLOCK_MUTEXP(hatlockp));
11898 break;
11899 }
11900 }
11901 }
11902
11903 /*
11904 * sfmmu_vatopfn_suspended is called from GET_TTE when TL=0 and the
11905 * TTE_SUSPENDED bit is set in the tte. We block on acquiring a page lock
11906 * rather than spinning to avoid send mondo timeouts with
11907 * interrupts enabled. When the lock is acquired it is immediately
11908 * released and we return to sfmmu_vatopfn just after
11909 * the GET_TTE call.
11910 */
11911 void
11912 sfmmu_vatopfn_suspended(caddr_t vaddr, sfmmu_t *sfmmu, tte_t *ttep)
11913 {
11914 struct page **pp;
11915
11916 (void) as_pagelock(sfmmu->sfmmu_as, &pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11917 as_pageunlock(sfmmu->sfmmu_as, pp, vaddr, TTE_CSZ(ttep), S_WRITE);
11918 }
11919
11920 /*
11921 * sfmmu_tsbmiss_suspended is called from GET_TTE when TL>0 and the
11922 * TTE_SUSPENDED bit is set in the tte. We do this so that we can handle
11923 * cross traps which cannot be handled while spinning in the
11924 * trap handlers. Simply enter and exit the kpr_suspendlock spin
11925 * mutex, which is held by the holder of the suspend bit, and then
11926 * retry the trapped instruction after unwinding.
11927 */
11928 /*ARGSUSED*/
11929 void
11930 sfmmu_tsbmiss_suspended(struct regs *rp, uintptr_t tagacc, uint_t traptype)
11931 {
11932 ASSERT(curthread != kreloc_thread);
11933 mutex_enter(&kpr_suspendlock);
11934 mutex_exit(&kpr_suspendlock);
11935 }
11936
11937 /*
11938 * This routine could be optimized to reduce the number of xcalls by flushing
11939 * the entire TLBs if the region reference count is above some threshold, but
11940 * the tradeoff will depend on the size of the TLB. So for now flush the
11941 * specific page one context at a time.
11942 *
11943 * If uselocks is 0 then it's called after all cpus were captured and all the
11944 * hat locks were taken. In this case don't take the region lock by relying on
11945 * the order of list region update operations in hat_join_region(),
11946 * hat_leave_region() and hat_dup_region(). The ordering in those routines
11947 * guarantees that the list is always forward walkable and reaches active
11948 * sfmmus regardless of where xc_attention() captures a cpu.
11949 */
11950 cpuset_t
11951 sfmmu_rgntlb_demap(caddr_t addr, sf_region_t *rgnp,
11952 struct hme_blk *hmeblkp, int uselocks)
11953 {
11954 sfmmu_t *sfmmup;
11955 cpuset_t cpuset;
11956 cpuset_t rcpuset;
11957 hatlock_t *hatlockp;
11958 uint_t rid = rgnp->rgn_id;
11959 sf_rgn_link_t *rlink;
11960 sf_scd_t *scdp;
11961
11962 ASSERT(hmeblkp->hblk_shared);
11963 ASSERT(SFMMU_IS_SHMERID_VALID(rid));
11964 ASSERT(rid < SFMMU_MAX_HME_REGIONS);
11965
11966 CPUSET_ZERO(rcpuset);
11967 if (uselocks) {
11968 mutex_enter(&rgnp->rgn_mutex);
11969 }
11970 sfmmup = rgnp->rgn_sfmmu_head;
11971 while (sfmmup != NULL) {
11972 if (uselocks) {
11973 hatlockp = sfmmu_hat_enter(sfmmup);
11974 }
11975
11976 /*
11977 * When an SCD is created the SCD hat is linked on the sfmmu
11978 * region lists for each hme region which is part of the
11979 * SCD. If we find an SCD hat, when walking these lists,
11980 * then we flush the shared TSBs. If we find a private hat,
11981 * which is part of an SCD, but where the region
11982 * is not part of the SCD, then we flush the private TSBs.
11983 */
11984 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL &&
11985 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) {
11986 scdp = sfmmup->sfmmu_scdp;
11987 if (SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) {
11988 if (uselocks) {
11989 sfmmu_hat_exit(hatlockp);
11990 }
11991 goto next;
11992 }
11993 }
11994
11995 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0);
11996
11997 kpreempt_disable();
11998 cpuset = sfmmup->sfmmu_cpusran;
11999 CPUSET_AND(cpuset, cpu_ready_set);
12000 CPUSET_DEL(cpuset, CPU->cpu_id);
12001 SFMMU_XCALL_STATS(sfmmup);
12002 xt_some(cpuset, vtag_flushpage_tl1,
12003 (uint64_t)addr, (uint64_t)sfmmup);
12004 vtag_flushpage(addr, (uint64_t)sfmmup);
12005 if (uselocks) {
12006 sfmmu_hat_exit(hatlockp);
12007 }
12008 kpreempt_enable();
12009 CPUSET_OR(rcpuset, cpuset);
12010
12011 next:
12012 /* LINTED: constant in conditional context */
12013 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0);
12014 ASSERT(rlink != NULL);
12015 sfmmup = rlink->next;
12016 }
12017 if (uselocks) {
12018 mutex_exit(&rgnp->rgn_mutex);
12019 }
12020 return (rcpuset);
12021 }
12022
12023 /*
12024 * This routine takes an sfmmu pointer and the va for an address in an
12025 * ISM region as input and returns the corresponding region id in ism_rid.
12026 * The return value of 1 indicates that a region has been found and ism_rid
12027 * is valid, otherwise 0 is returned.
12028 */
12029 static int
12030 find_ism_rid(sfmmu_t *sfmmup, sfmmu_t *ism_sfmmup, caddr_t va, uint_t *ism_rid)
12031 {
12032 ism_blk_t *ism_blkp;
12033 int i;
12034 ism_map_t *ism_map;
12035 #ifdef DEBUG
12036 struct hat *ism_hatid;
12037 #endif
12038 ASSERT(sfmmu_hat_lock_held(sfmmup));
12039
12040 ism_blkp = sfmmup->sfmmu_iblk;
12041 while (ism_blkp != NULL) {
12042 ism_map = ism_blkp->iblk_maps;
12043 for (i = 0; i < ISM_MAP_SLOTS && ism_map[i].imap_ismhat; i++) {
12044 if ((va >= ism_start(ism_map[i])) &&
12045 (va < ism_end(ism_map[i]))) {
12046
12047 *ism_rid = ism_map[i].imap_rid;
12048 #ifdef DEBUG
12049 ism_hatid = ism_map[i].imap_ismhat;
12050 ASSERT(ism_hatid == ism_sfmmup);
12051 ASSERT(ism_hatid->sfmmu_ismhat);
12052 #endif
12053 return (1);
12054 }
12055 }
12056 ism_blkp = ism_blkp->iblk_next;
12057 }
12058 return (0);
12059 }
12060
12061 /*
12062 * Special routine to flush out ism mappings - TSBs, TLBs and D-caches.
12063 * This routine may be called with all cpu's captured. Therefore, the
12064 * caller is responsible for holding all locks and disabling kernel
12065 * preemption.
12066 */
12067 /* ARGSUSED */
12068 static void
12069 sfmmu_ismtlbcache_demap(caddr_t addr, sfmmu_t *ism_sfmmup,
12070 struct hme_blk *hmeblkp, pfn_t pfnum, int cache_flush_flag)
12071 {
12072 cpuset_t cpuset;
12073 caddr_t va;
12074 ism_ment_t *ment;
12075 sfmmu_t *sfmmup;
12076 #ifdef VAC
12077 int vcolor;
12078 #endif
12079
12080 sf_scd_t *scdp;
12081 uint_t ism_rid;
12082
12083 ASSERT(!hmeblkp->hblk_shared);
12084 /*
12085 * Walk the ism_hat's mapping list and flush the page
12086 * from every hat sharing this ism_hat. This routine
12087 * may be called while all cpu's have been captured.
12088 * Therefore we can't attempt to grab any locks. For now
12089 * this means we will protect the ism mapping list under
12090 * a single lock which will be grabbed by the caller.
12091 * If hat_share/unshare scalability becomes a performance
12092 * problem then we may need to re-think ism mapping list locking.
12093 */
12094 ASSERT(ism_sfmmup->sfmmu_ismhat);
12095 ASSERT(MUTEX_HELD(&ism_mlist_lock));
12096 addr = addr - ISMID_STARTADDR;
12097
12098 for (ment = ism_sfmmup->sfmmu_iment; ment; ment = ment->iment_next) {
12099
12100 sfmmup = ment->iment_hat;
12101
12102 va = ment->iment_base_va;
12103 va = (caddr_t)((uintptr_t)va + (uintptr_t)addr);
12104
12105 /*
12106 * When an SCD is created the SCD hat is linked on the ism
12107 * mapping lists for each ISM segment which is part of the
12108 * SCD. If we find an SCD hat, when walking these lists,
12109 * then we flush the shared TSBs. If we find a private hat,
12110 * which is part of an SCD, but where the region
12111 * corresponding to this va is not part of the SCD, then we
12112 * flush the private TSBs.
12113 */ 12114 if (!sfmmup->sfmmu_scdhat && sfmmup->sfmmu_scdp != NULL && 12115 !SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD) && 12116 !SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) { 12117 if (!find_ism_rid(sfmmup, ism_sfmmup, va, 12118 &ism_rid)) { 12119 cmn_err(CE_PANIC, 12120 "can't find matching ISM rid!"); 12121 } 12122 12123 scdp = sfmmup->sfmmu_scdp; 12124 if (SFMMU_IS_ISMRID_VALID(ism_rid) && 12125 SF_RGNMAP_TEST(scdp->scd_ismregion_map, 12126 ism_rid)) { 12127 continue; 12128 } 12129 } 12130 SFMMU_UNLOAD_TSB(va, sfmmup, hmeblkp, 1); 12131 12132 cpuset = sfmmup->sfmmu_cpusran; 12133 CPUSET_AND(cpuset, cpu_ready_set); 12134 CPUSET_DEL(cpuset, CPU->cpu_id); 12135 SFMMU_XCALL_STATS(sfmmup); 12136 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)va, 12137 (uint64_t)sfmmup); 12138 vtag_flushpage(va, (uint64_t)sfmmup); 12139 12140 #ifdef VAC 12141 /* 12142 * Flush D$ 12143 * When flushing D$ we must flush all 12144 * cpu's. See sfmmu_cache_flush(). 12145 */ 12146 if (cache_flush_flag == CACHE_FLUSH) { 12147 cpuset = cpu_ready_set; 12148 CPUSET_DEL(cpuset, CPU->cpu_id); 12149 12150 SFMMU_XCALL_STATS(sfmmup); 12151 vcolor = addr_to_vcolor(va); 12152 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12153 vac_flushpage(pfnum, vcolor); 12154 } 12155 #endif /* VAC */ 12156 } 12157 } 12158 12159 /* 12160 * Demaps the TSB, CPU caches, and flushes all TLBs on all CPUs of 12161 * a particular virtual address and ctx. If noflush is set we do not 12162 * flush the TLB/TSB. This function may or may not be called with the 12163 * HAT lock held. 12164 */ 12165 static void 12166 sfmmu_tlbcache_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12167 pfn_t pfnum, int tlb_noflush, int cpu_flag, int cache_flush_flag, 12168 int hat_lock_held) 12169 { 12170 #ifdef VAC 12171 int vcolor; 12172 #endif 12173 cpuset_t cpuset; 12174 hatlock_t *hatlockp; 12175 12176 ASSERT(!hmeblkp->hblk_shared); 12177 12178 #if defined(lint) && !defined(VAC) 12179 pfnum = pfnum; 12180 cpu_flag = cpu_flag; 12181 cache_flush_flag = cache_flush_flag; 12182 #endif 12183 12184 /* 12185 * There is no longer a need to protect against ctx being 12186 * stolen here since we don't store the ctx in the TSB anymore. 12187 */ 12188 #ifdef VAC 12189 vcolor = addr_to_vcolor(addr); 12190 #endif 12191 12192 /* 12193 * We must hold the hat lock during the flush of TLB, 12194 * to avoid a race with sfmmu_invalidate_ctx(), where 12195 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12196 * causing TLB demap routine to skip flush on that MMU. 12197 * If the context on a MMU has already been set to 12198 * INVALID_CONTEXT, we just get an extra flush on 12199 * that MMU. 12200 */ 12201 if (!hat_lock_held && !tlb_noflush) 12202 hatlockp = sfmmu_hat_enter(sfmmup); 12203 12204 kpreempt_disable(); 12205 if (!tlb_noflush) { 12206 /* 12207 * Flush the TSB and TLB. 12208 */ 12209 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12210 12211 cpuset = sfmmup->sfmmu_cpusran; 12212 CPUSET_AND(cpuset, cpu_ready_set); 12213 CPUSET_DEL(cpuset, CPU->cpu_id); 12214 12215 SFMMU_XCALL_STATS(sfmmup); 12216 12217 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, 12218 (uint64_t)sfmmup); 12219 12220 vtag_flushpage(addr, (uint64_t)sfmmup); 12221 } 12222 12223 if (!hat_lock_held && !tlb_noflush) 12224 sfmmu_hat_exit(hatlockp); 12225 12226 #ifdef VAC 12227 /* 12228 * Flush the D$ 12229 * 12230 * Even if the ctx is stolen, we need to flush the 12231 * cache. Our ctx stealer only flushes the TLBs. 
12232 */ 12233 if (cache_flush_flag == CACHE_FLUSH) { 12234 if (cpu_flag & FLUSH_ALL_CPUS) { 12235 cpuset = cpu_ready_set; 12236 } else { 12237 cpuset = sfmmup->sfmmu_cpusran; 12238 CPUSET_AND(cpuset, cpu_ready_set); 12239 } 12240 CPUSET_DEL(cpuset, CPU->cpu_id); 12241 SFMMU_XCALL_STATS(sfmmup); 12242 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor); 12243 vac_flushpage(pfnum, vcolor); 12244 } 12245 #endif /* VAC */ 12246 kpreempt_enable(); 12247 } 12248 12249 /* 12250 * Demaps the TSB and flushes all TLBs on all cpus for a particular virtual 12251 * address and ctx. If noflush is set we do not currently do anything. 12252 * This function may or may not be called with the HAT lock held. 12253 */ 12254 static void 12255 sfmmu_tlb_demap(caddr_t addr, sfmmu_t *sfmmup, struct hme_blk *hmeblkp, 12256 int tlb_noflush, int hat_lock_held) 12257 { 12258 cpuset_t cpuset; 12259 hatlock_t *hatlockp; 12260 12261 ASSERT(!hmeblkp->hblk_shared); 12262 12263 /* 12264 * If the process is exiting we have nothing to do. 12265 */ 12266 if (tlb_noflush) 12267 return; 12268 12269 /* 12270 * Flush TSB. 12271 */ 12272 if (!hat_lock_held) 12273 hatlockp = sfmmu_hat_enter(sfmmup); 12274 SFMMU_UNLOAD_TSB(addr, sfmmup, hmeblkp, 0); 12275 12276 kpreempt_disable(); 12277 12278 cpuset = sfmmup->sfmmu_cpusran; 12279 CPUSET_AND(cpuset, cpu_ready_set); 12280 CPUSET_DEL(cpuset, CPU->cpu_id); 12281 12282 SFMMU_XCALL_STATS(sfmmup); 12283 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)addr, (uint64_t)sfmmup); 12284 12285 vtag_flushpage(addr, (uint64_t)sfmmup); 12286 12287 if (!hat_lock_held) 12288 sfmmu_hat_exit(hatlockp); 12289 12290 kpreempt_enable(); 12291 12292 } 12293 12294 /* 12295 * Special case of sfmmu_tlb_demap for MMU_PAGESIZE hblks. Use the xcall 12296 * call handler that can flush a range of pages to save on xcalls. 12297 */ 12298 static int sfmmu_xcall_save; 12299 12300 /* 12301 * this routine is never used for demaping addresses backed by SRD hmeblks. 12302 */ 12303 static void 12304 sfmmu_tlb_range_demap(demap_range_t *dmrp) 12305 { 12306 sfmmu_t *sfmmup = dmrp->dmr_sfmmup; 12307 hatlock_t *hatlockp; 12308 cpuset_t cpuset; 12309 uint64_t sfmmu_pgcnt; 12310 pgcnt_t pgcnt = 0; 12311 int pgunload = 0; 12312 int dirtypg = 0; 12313 caddr_t addr = dmrp->dmr_addr; 12314 caddr_t eaddr; 12315 uint64_t bitvec = dmrp->dmr_bitvec; 12316 12317 ASSERT(bitvec & 1); 12318 12319 /* 12320 * Flush TSB and calculate number of pages to flush. 12321 */ 12322 while (bitvec != 0) { 12323 dirtypg = 0; 12324 /* 12325 * Find the first page to flush and then count how many 12326 * pages there are after it that also need to be flushed. 12327 * This way the number of TSB flushes is minimized. 
12328 */ 12329 while ((bitvec & 1) == 0) { 12330 pgcnt++; 12331 addr += MMU_PAGESIZE; 12332 bitvec >>= 1; 12333 } 12334 while (bitvec & 1) { 12335 dirtypg++; 12336 bitvec >>= 1; 12337 } 12338 eaddr = addr + ptob(dirtypg); 12339 hatlockp = sfmmu_hat_enter(sfmmup); 12340 sfmmu_unload_tsb_range(sfmmup, addr, eaddr, TTE8K); 12341 sfmmu_hat_exit(hatlockp); 12342 pgunload += dirtypg; 12343 addr = eaddr; 12344 pgcnt += dirtypg; 12345 } 12346 12347 ASSERT((pgcnt<<MMU_PAGESHIFT) <= dmrp->dmr_endaddr - dmrp->dmr_addr); 12348 if (sfmmup->sfmmu_free == 0) { 12349 addr = dmrp->dmr_addr; 12350 bitvec = dmrp->dmr_bitvec; 12351 12352 /* 12353 * make sure it has SFMMU_PGCNT_SHIFT bits only, 12354 * as it will be used to pack argument for xt_some 12355 */ 12356 ASSERT((pgcnt > 0) && 12357 (pgcnt <= (1 << SFMMU_PGCNT_SHIFT))); 12358 12359 /* 12360 * Encode pgcnt as (pgcnt -1 ), and pass (pgcnt - 1) in 12361 * the low 6 bits of sfmmup. This is doable since pgcnt 12362 * always >= 1. 12363 */ 12364 ASSERT(!((uint64_t)sfmmup & SFMMU_PGCNT_MASK)); 12365 sfmmu_pgcnt = (uint64_t)sfmmup | 12366 ((pgcnt - 1) & SFMMU_PGCNT_MASK); 12367 12368 /* 12369 * We must hold the hat lock during the flush of TLB, 12370 * to avoid a race with sfmmu_invalidate_ctx(), where 12371 * sfmmu_cnum on a MMU could be set to INVALID_CONTEXT, 12372 * causing TLB demap routine to skip flush on that MMU. 12373 * If the context on a MMU has already been set to 12374 * INVALID_CONTEXT, we just get an extra flush on 12375 * that MMU. 12376 */ 12377 hatlockp = sfmmu_hat_enter(sfmmup); 12378 kpreempt_disable(); 12379 12380 cpuset = sfmmup->sfmmu_cpusran; 12381 CPUSET_AND(cpuset, cpu_ready_set); 12382 CPUSET_DEL(cpuset, CPU->cpu_id); 12383 12384 SFMMU_XCALL_STATS(sfmmup); 12385 xt_some(cpuset, vtag_flush_pgcnt_tl1, (uint64_t)addr, 12386 sfmmu_pgcnt); 12387 12388 for (; bitvec != 0; bitvec >>= 1) { 12389 if (bitvec & 1) 12390 vtag_flushpage(addr, (uint64_t)sfmmup); 12391 addr += MMU_PAGESIZE; 12392 } 12393 kpreempt_enable(); 12394 sfmmu_hat_exit(hatlockp); 12395 12396 sfmmu_xcall_save += (pgunload-1); 12397 } 12398 dmrp->dmr_bitvec = 0; 12399 } 12400 12401 /* 12402 * In cases where we need to synchronize with TLB/TSB miss trap 12403 * handlers, _and_ need to flush the TLB, it's a lot easier to 12404 * throw away the context from the process than to do a 12405 * special song and dance to keep things consistent for the 12406 * handlers. 12407 * 12408 * Since the process suddenly ends up without a context and our caller 12409 * holds the hat lock, threads that fault after this function is called 12410 * will pile up on the lock. We can then do whatever we need to 12411 * atomically from the context of the caller. The first blocked thread 12412 * to resume executing will get the process a new context, and the 12413 * process will resume executing. 12414 * 12415 * One added advantage of this approach is that on MMUs that 12416 * support a "flush all" operation, we will delay the flush until 12417 * cnum wrap-around, and then flush the TLB one time. This 12418 * is rather rare, so it's a lot less expensive than making 8000 12419 * x-calls to flush the TLB 8000 times. 12420 * 12421 * A per-process (PP) lock is used to synchronize ctx allocations in 12422 * resume() and ctx invalidations here. 
12423 */
12424 static void
12425 sfmmu_invalidate_ctx(sfmmu_t *sfmmup)
12426 {
12427 cpuset_t cpuset;
12428 int cnum, currcnum;
12429 mmu_ctx_t *mmu_ctxp;
12430 int i;
12431 uint_t pstate_save;
12432
12433 SFMMU_STAT(sf_ctx_inv);
12434
12435 ASSERT(sfmmu_hat_lock_held(sfmmup));
12436 ASSERT(sfmmup != ksfmmup);
12437
12438 kpreempt_disable();
12439
12440 mmu_ctxp = CPU_MMU_CTXP(CPU);
12441 ASSERT(mmu_ctxp);
12442 ASSERT(mmu_ctxp->mmu_idx < max_mmu_ctxdoms);
12443 ASSERT(mmu_ctxp == mmu_ctxs_tbl[mmu_ctxp->mmu_idx]);
12444
12445 currcnum = sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum;
12446
12447 pstate_save = sfmmu_disable_intrs();
12448
12449 lock_set(&sfmmup->sfmmu_ctx_lock); /* acquire PP lock */
12450 /* set HAT cnum invalid across all context domains. */
12451 for (i = 0; i < max_mmu_ctxdoms; i++) {
12452
12453 cnum = sfmmup->sfmmu_ctxs[i].cnum;
12454 if (cnum == INVALID_CONTEXT) {
12455 continue;
12456 }
12457
12458 sfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT;
12459 }
12460 membar_enter(); /* make sure globally visible to all CPUs */
12461 lock_clear(&sfmmup->sfmmu_ctx_lock); /* release PP lock */
12462
12463 sfmmu_enable_intrs(pstate_save);
12464
12465 cpuset = sfmmup->sfmmu_cpusran;
12466 CPUSET_DEL(cpuset, CPU->cpu_id);
12467 CPUSET_AND(cpuset, cpu_ready_set);
12468 if (!CPUSET_ISNULL(cpuset)) {
12469 SFMMU_XCALL_STATS(sfmmup);
12470 xt_some(cpuset, sfmmu_raise_tsb_exception,
12471 (uint64_t)sfmmup, INVALID_CONTEXT);
12472 xt_sync(cpuset);
12473 SFMMU_STAT(sf_tsb_raise_exception);
12474 SFMMU_MMU_STAT(mmu_tsb_raise_exception);
12475 }
12476
12477 /*
12478 * If the hat to-be-invalidated is the same as the current
12479 * process on the local CPU we need to invalidate
12480 * this CPU's context as well.
12481 */
12482 if ((sfmmu_getctx_sec() == currcnum) &&
12483 (currcnum != INVALID_CONTEXT)) {
12484 /* sets shared context to INVALID too */
12485 sfmmu_setctx_sec(INVALID_CONTEXT);
12486 sfmmu_clear_utsbinfo();
12487 }
12488
12489 SFMMU_FLAGS_SET(sfmmup, HAT_ALLCTX_INVALID);
12490
12491 kpreempt_enable();
12492
12493 /*
12494 * we hold the hat lock, so nobody should allocate a context
12495 * for us yet
12496 */
12497 ASSERT(sfmmup->sfmmu_ctxs[mmu_ctxp->mmu_idx].cnum == INVALID_CONTEXT);
12498 }
12499
12500 #ifdef VAC
12501 /*
12502 * We need to flush the cache on all cpus. It is possible that
12503 * a process referenced a page as cacheable but has since exited
12504 * and cleared the mapping list. We still need to flush it but have no
12505 * state, so flushing all cpus is the only alternative.
12506 */
12507 void
12508 sfmmu_cache_flush(pfn_t pfnum, int vcolor)
12509 {
12510 cpuset_t cpuset;
12511
12512 kpreempt_disable();
12513 cpuset = cpu_ready_set;
12514 CPUSET_DEL(cpuset, CPU->cpu_id);
12515 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12516 xt_some(cpuset, vac_flushpage_tl1, pfnum, vcolor);
12517 xt_sync(cpuset);
12518 vac_flushpage(pfnum, vcolor);
12519 kpreempt_enable();
12520 }
12521
12522 void
12523 sfmmu_cache_flushcolor(int vcolor, pfn_t pfnum)
12524 {
12525 cpuset_t cpuset;
12526
12527 ASSERT(vcolor >= 0);
12528
12529 kpreempt_disable();
12530 cpuset = cpu_ready_set;
12531 CPUSET_DEL(cpuset, CPU->cpu_id);
12532 SFMMU_XCALL_STATS(NULL); /* account to any ctx */
12533 xt_some(cpuset, vac_flushcolor_tl1, vcolor, pfnum);
12534 xt_sync(cpuset);
12535 vac_flushcolor(vcolor, pfnum);
12536 kpreempt_enable();
12537 }
12538 #endif /* VAC */
12539
12540 /*
12541 * We need to prevent processes from accessing the TSB using a cached physical
12542 * address.
It's alright if they try to access the TSB via virtual address 12543 * since they will just fault on that virtual address once the mapping has 12544 * been suspended. 12545 */ 12546 #pragma weak sendmondo_in_recover 12547 12548 /* ARGSUSED */ 12549 static int 12550 sfmmu_tsb_pre_relocator(caddr_t va, uint_t tsbsz, uint_t flags, void *tsbinfo) 12551 { 12552 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12553 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12554 hatlock_t *hatlockp; 12555 sf_scd_t *scdp; 12556 12557 if (flags != HAT_PRESUSPEND) 12558 return (0); 12559 12560 /* 12561 * If tsb is a shared TSB with TSB_SHAREDCTX set, sfmmup must 12562 * be a shared hat, then set SCD's tsbinfo's flag. 12563 * If tsb is not shared, sfmmup is a private hat, then set 12564 * its private tsbinfo's flag. 12565 */ 12566 hatlockp = sfmmu_hat_enter(sfmmup); 12567 tsbinfop->tsb_flags |= TSB_RELOC_FLAG; 12568 12569 if (!(tsbinfop->tsb_flags & TSB_SHAREDCTX)) { 12570 sfmmu_tsb_inv_ctx(sfmmup); 12571 sfmmu_hat_exit(hatlockp); 12572 } else { 12573 /* release lock on the shared hat */ 12574 sfmmu_hat_exit(hatlockp); 12575 /* sfmmup is a shared hat */ 12576 ASSERT(sfmmup->sfmmu_scdhat); 12577 scdp = sfmmup->sfmmu_scdp; 12578 ASSERT(scdp != NULL); 12579 /* get private hat from the scd list */ 12580 mutex_enter(&scdp->scd_mutex); 12581 sfmmup = scdp->scd_sf_list; 12582 while (sfmmup != NULL) { 12583 hatlockp = sfmmu_hat_enter(sfmmup); 12584 /* 12585 * We do not call sfmmu_tsb_inv_ctx here because 12586 * sendmondo_in_recover check is only needed for 12587 * sun4u. 12588 */ 12589 sfmmu_invalidate_ctx(sfmmup); 12590 sfmmu_hat_exit(hatlockp); 12591 sfmmup = sfmmup->sfmmu_scd_link.next; 12592 12593 } 12594 mutex_exit(&scdp->scd_mutex); 12595 } 12596 return (0); 12597 } 12598 12599 static void 12600 sfmmu_tsb_inv_ctx(sfmmu_t *sfmmup) 12601 { 12602 extern uint32_t sendmondo_in_recover; 12603 12604 ASSERT(sfmmu_hat_lock_held(sfmmup)); 12605 12606 /* 12607 * For Cheetah+ Erratum 25: 12608 * Wait for any active recovery to finish. We can't risk 12609 * relocating the TSB of the thread running mondo_recover_proc() 12610 * since, if we did that, we would deadlock. The scenario we are 12611 * trying to avoid is as follows: 12612 * 12613 * THIS CPU RECOVER CPU 12614 * -------- ----------- 12615 * Begins recovery, walking through TSB 12616 * hat_pagesuspend() TSB TTE 12617 * TLB miss on TSB TTE, spins at TL1 12618 * xt_sync() 12619 * send_mondo_timeout() 12620 * mondo_recover_proc() 12621 * ((deadlocked)) 12622 * 12623 * The second half of the workaround is that mondo_recover_proc() 12624 * checks to see if the tsb_info has the RELOC flag set, and if it 12625 * does, it skips over that TSB without ever touching tsbinfop->tsb_va 12626 * and hence avoiding the TLB miss that could result in a deadlock. 
12627 */ 12628 if (&sendmondo_in_recover) { 12629 membar_enter(); /* make sure RELOC flag visible */ 12630 while (sendmondo_in_recover) { 12631 drv_usecwait(1); 12632 membar_consumer(); 12633 } 12634 } 12635 12636 sfmmu_invalidate_ctx(sfmmup); 12637 } 12638 12639 /* ARGSUSED */ 12640 static int 12641 sfmmu_tsb_post_relocator(caddr_t va, uint_t tsbsz, uint_t flags, 12642 void *tsbinfo, pfn_t newpfn) 12643 { 12644 hatlock_t *hatlockp; 12645 struct tsb_info *tsbinfop = (struct tsb_info *)tsbinfo; 12646 sfmmu_t *sfmmup = tsbinfop->tsb_sfmmu; 12647 12648 if (flags != HAT_POSTUNSUSPEND) 12649 return (0); 12650 12651 hatlockp = sfmmu_hat_enter(sfmmup); 12652 12653 SFMMU_STAT(sf_tsb_reloc); 12654 12655 /* 12656 * The process may have swapped out while we were relocating one 12657 * of its TSBs. If so, don't bother doing the setup since the 12658 * process can't be using the memory anymore. 12659 */ 12660 if ((tsbinfop->tsb_flags & TSB_SWAPPED) == 0) { 12661 ASSERT(va == tsbinfop->tsb_va); 12662 sfmmu_tsbinfo_setup_phys(tsbinfop, newpfn); 12663 12664 if (tsbinfop->tsb_flags & TSB_FLUSH_NEEDED) { 12665 sfmmu_inv_tsb(tsbinfop->tsb_va, 12666 TSB_BYTES(tsbinfop->tsb_szc)); 12667 tsbinfop->tsb_flags &= ~TSB_FLUSH_NEEDED; 12668 } 12669 } 12670 12671 membar_exit(); 12672 tsbinfop->tsb_flags &= ~TSB_RELOC_FLAG; 12673 cv_broadcast(&sfmmup->sfmmu_tsb_cv); 12674 12675 sfmmu_hat_exit(hatlockp); 12676 12677 return (0); 12678 } 12679 12680 /* 12681 * Allocate and initialize a tsb_info structure. Note that we may or may not 12682 * allocate a TSB here, depending on the flags passed in. 12683 */ 12684 static int 12685 sfmmu_tsbinfo_alloc(struct tsb_info **tsbinfopp, int tsb_szc, int tte_sz_mask, 12686 uint_t flags, sfmmu_t *sfmmup) 12687 { 12688 int err; 12689 12690 *tsbinfopp = (struct tsb_info *)kmem_cache_alloc( 12691 sfmmu_tsbinfo_cache, KM_SLEEP); 12692 12693 if ((err = sfmmu_init_tsbinfo(*tsbinfopp, tte_sz_mask, 12694 tsb_szc, flags, sfmmup)) != 0) { 12695 kmem_cache_free(sfmmu_tsbinfo_cache, *tsbinfopp); 12696 SFMMU_STAT(sf_tsb_allocfail); 12697 *tsbinfopp = NULL; 12698 return (err); 12699 } 12700 SFMMU_STAT(sf_tsb_alloc); 12701 12702 /* 12703 * Bump the TSB size counters for this TSB size. 12704 */ 12705 (*(((int *)&sfmmu_tsbsize_stat) + tsb_szc))++; 12706 return (0); 12707 } 12708 12709 static void 12710 sfmmu_tsb_free(struct tsb_info *tsbinfo) 12711 { 12712 caddr_t tsbva = tsbinfo->tsb_va; 12713 uint_t tsb_size = TSB_BYTES(tsbinfo->tsb_szc); 12714 struct kmem_cache *kmem_cachep = tsbinfo->tsb_cache; 12715 vmem_t *vmp = tsbinfo->tsb_vmp; 12716 12717 /* 12718 * If we allocated this TSB from relocatable kernel memory, then we 12719 * need to uninstall the callback handler. 
12720 */ 12721 if (tsbinfo->tsb_cache != sfmmu_tsb8k_cache) { 12722 uintptr_t slab_mask; 12723 caddr_t slab_vaddr; 12724 page_t **ppl; 12725 int ret; 12726 12727 ASSERT(tsb_size <= MMU_PAGESIZE4M || use_bigtsb_arena); 12728 if (tsb_size > MMU_PAGESIZE4M) 12729 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12730 else 12731 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12732 slab_vaddr = (caddr_t)((uintptr_t)tsbva & slab_mask); 12733 12734 ret = as_pagelock(&kas, &ppl, slab_vaddr, PAGESIZE, S_WRITE); 12735 ASSERT(ret == 0); 12736 hat_delete_callback(tsbva, (uint_t)tsb_size, (void *)tsbinfo, 12737 0, NULL); 12738 as_pageunlock(&kas, ppl, slab_vaddr, PAGESIZE, S_WRITE); 12739 } 12740 12741 if (kmem_cachep != NULL) { 12742 kmem_cache_free(kmem_cachep, tsbva); 12743 } else { 12744 vmem_xfree(vmp, (void *)tsbva, tsb_size); 12745 } 12746 tsbinfo->tsb_va = (caddr_t)0xbad00bad; 12747 atomic_add_64(&tsb_alloc_bytes, -(int64_t)tsb_size); 12748 } 12749 12750 static void 12751 sfmmu_tsbinfo_free(struct tsb_info *tsbinfo) 12752 { 12753 if ((tsbinfo->tsb_flags & TSB_SWAPPED) == 0) { 12754 sfmmu_tsb_free(tsbinfo); 12755 } 12756 kmem_cache_free(sfmmu_tsbinfo_cache, tsbinfo); 12757 12758 } 12759 12760 /* 12761 * Setup all the references to physical memory for this tsbinfo. 12762 * The underlying page(s) must be locked. 12763 */ 12764 static void 12765 sfmmu_tsbinfo_setup_phys(struct tsb_info *tsbinfo, pfn_t pfn) 12766 { 12767 ASSERT(pfn != PFN_INVALID); 12768 ASSERT(pfn == va_to_pfn(tsbinfo->tsb_va)); 12769 12770 #ifndef sun4v 12771 if (tsbinfo->tsb_szc == 0) { 12772 sfmmu_memtte(&tsbinfo->tsb_tte, pfn, 12773 PROT_WRITE|PROT_READ, TTE8K); 12774 } else { 12775 /* 12776 * Round down PA and use a large mapping; the handlers will 12777 * compute the TSB pointer at the correct offset into the 12778 * big virtual page. NOTE: this assumes all TSBs larger 12779 * than 8K must come from physically contiguous slabs of 12780 * size tsb_slab_size. 12781 */ 12782 sfmmu_memtte(&tsbinfo->tsb_tte, pfn & ~tsb_slab_mask, 12783 PROT_WRITE|PROT_READ, tsb_slab_ttesz); 12784 } 12785 tsbinfo->tsb_pa = ptob(pfn); 12786 12787 TTE_SET_LOCKED(&tsbinfo->tsb_tte); /* lock the tte into dtlb */ 12788 TTE_SET_MOD(&tsbinfo->tsb_tte); /* enable writes */ 12789 12790 ASSERT(TTE_IS_PRIVILEGED(&tsbinfo->tsb_tte)); 12791 ASSERT(TTE_IS_LOCKED(&tsbinfo->tsb_tte)); 12792 #else /* sun4v */ 12793 tsbinfo->tsb_pa = ptob(pfn); 12794 #endif /* sun4v */ 12795 } 12796 12797 12798 /* 12799 * Returns zero on success, ENOMEM if over the high water mark, 12800 * or EAGAIN if the caller needs to retry with a smaller TSB 12801 * size (or specify TSB_FORCEALLOC if the allocation can't fail). 12802 * 12803 * This call cannot fail to allocate a TSB if TSB_FORCEALLOC 12804 * is specified and the TSB requested is PAGESIZE, though it 12805 * may sleep waiting for memory if sufficient memory is not 12806 * available. 
12807 */ 12808 static int 12809 sfmmu_init_tsbinfo(struct tsb_info *tsbinfo, int tteszmask, 12810 int tsbcode, uint_t flags, sfmmu_t *sfmmup) 12811 { 12812 caddr_t vaddr = NULL; 12813 caddr_t slab_vaddr; 12814 uintptr_t slab_mask; 12815 int tsbbytes = TSB_BYTES(tsbcode); 12816 int lowmem = 0; 12817 struct kmem_cache *kmem_cachep = NULL; 12818 vmem_t *vmp = NULL; 12819 lgrp_id_t lgrpid = LGRP_NONE; 12820 pfn_t pfn; 12821 uint_t cbflags = HAC_SLEEP; 12822 page_t **pplist; 12823 int ret; 12824 12825 ASSERT(tsbbytes <= MMU_PAGESIZE4M || use_bigtsb_arena); 12826 if (tsbbytes > MMU_PAGESIZE4M) 12827 slab_mask = ~((uintptr_t)bigtsb_slab_mask) << PAGESHIFT; 12828 else 12829 slab_mask = ~((uintptr_t)tsb_slab_mask) << PAGESHIFT; 12830 12831 if (flags & (TSB_FORCEALLOC | TSB_SWAPIN | TSB_GROW | TSB_SHRINK)) 12832 flags |= TSB_ALLOC; 12833 12834 ASSERT((flags & TSB_FORCEALLOC) == 0 || tsbcode == TSB_MIN_SZCODE); 12835 12836 tsbinfo->tsb_sfmmu = sfmmup; 12837 12838 /* 12839 * If not allocating a TSB, set up the tsbinfo, set TSB_SWAPPED, and 12840 * return. 12841 */ 12842 if ((flags & TSB_ALLOC) == 0) { 12843 tsbinfo->tsb_szc = tsbcode; 12844 tsbinfo->tsb_ttesz_mask = tteszmask; 12845 tsbinfo->tsb_va = (caddr_t)0xbadbadbeef; 12846 tsbinfo->tsb_pa = -1; 12847 tsbinfo->tsb_tte.ll = 0; 12848 tsbinfo->tsb_next = NULL; 12849 tsbinfo->tsb_flags = TSB_SWAPPED; 12850 tsbinfo->tsb_cache = NULL; 12851 tsbinfo->tsb_vmp = NULL; 12852 return (0); 12853 } 12854 12855 #ifdef DEBUG 12856 /* 12857 * For debugging: 12858 * Randomly force allocation failures every tsb_alloc_mtbf 12859 * tries if TSB_FORCEALLOC is not specified. This will 12860 * return ENOMEM if tsb_alloc_mtbf is odd, or EAGAIN if 12861 * it is even, to allow testing of both failure paths... 12862 */ 12863 if (tsb_alloc_mtbf && ((flags & TSB_FORCEALLOC) == 0) && 12864 (tsb_alloc_count++ == tsb_alloc_mtbf)) { 12865 tsb_alloc_count = 0; 12866 tsb_alloc_fail_mtbf++; 12867 return ((tsb_alloc_mtbf & 1)? ENOMEM : EAGAIN); 12868 } 12869 #endif /* DEBUG */ 12870 12871 /* 12872 * Enforce high water mark if we are not doing a forced allocation 12873 * and are not shrinking a process' TSB. 12874 */ 12875 if ((flags & TSB_SHRINK) == 0 && 12876 (tsbbytes + tsb_alloc_bytes) > tsb_alloc_hiwater) { 12877 if ((flags & TSB_FORCEALLOC) == 0) 12878 return (ENOMEM); 12879 lowmem = 1; 12880 } 12881 12882 /* 12883 * Allocate from the correct location based upon the size of the TSB 12884 * compared to the base page size, and what memory conditions dictate. 12885 * Note we always do nonblocking allocations from the TSB arena since 12886 * we don't want memory fragmentation to cause processes to block 12887 * indefinitely waiting for memory; until the kernel algorithms that 12888 * coalesce large pages are improved this is our best option. 
12889 * 12890 * Algorithm: 12891 * If allocating a "large" TSB (>8K), allocate from the 12892 * appropriate kmem_tsb_default_arena vmem arena 12893 * else if low on memory or the TSB_FORCEALLOC flag is set or 12894 * tsb_forceheap is set 12895 * Allocate from kernel heap via sfmmu_tsb8k_cache with 12896 * KM_SLEEP (never fails) 12897 * else 12898 * Allocate from appropriate sfmmu_tsb_cache with 12899 * KM_NOSLEEP 12900 * endif 12901 */ 12902 if (tsb_lgrp_affinity) 12903 lgrpid = lgrp_home_id(curthread); 12904 if (lgrpid == LGRP_NONE) 12905 lgrpid = 0; /* use lgrp of boot CPU */ 12906 12907 if (tsbbytes > MMU_PAGESIZE) { 12908 if (tsbbytes > MMU_PAGESIZE4M) { 12909 vmp = kmem_bigtsb_default_arena[lgrpid]; 12910 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12911 0, 0, NULL, NULL, VM_NOSLEEP); 12912 } else { 12913 vmp = kmem_tsb_default_arena[lgrpid]; 12914 vaddr = (caddr_t)vmem_xalloc(vmp, tsbbytes, tsbbytes, 12915 0, 0, NULL, NULL, VM_NOSLEEP); 12916 } 12917 #ifdef DEBUG 12918 } else if (lowmem || (flags & TSB_FORCEALLOC) || tsb_forceheap) { 12919 #else /* !DEBUG */ 12920 } else if (lowmem || (flags & TSB_FORCEALLOC)) { 12921 #endif /* DEBUG */ 12922 kmem_cachep = sfmmu_tsb8k_cache; 12923 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_SLEEP); 12924 ASSERT(vaddr != NULL); 12925 } else { 12926 kmem_cachep = sfmmu_tsb_cache[lgrpid]; 12927 vaddr = (caddr_t)kmem_cache_alloc(kmem_cachep, KM_NOSLEEP); 12928 } 12929 12930 tsbinfo->tsb_cache = kmem_cachep; 12931 tsbinfo->tsb_vmp = vmp; 12932 12933 if (vaddr == NULL) { 12934 return (EAGAIN); 12935 } 12936 12937 atomic_add_64(&tsb_alloc_bytes, (int64_t)tsbbytes); 12938 kmem_cachep = tsbinfo->tsb_cache; 12939 12940 /* 12941 * If we are allocating from outside the cage, then we need to 12942 * register a relocation callback handler. Note that for now 12943 * since pseudo mappings always hang off of the slab's root page, 12944 * we need only lock the first 8K of the TSB slab. This is a bit 12945 * hacky but it is good for performance. 12946 */ 12947 if (kmem_cachep != sfmmu_tsb8k_cache) { 12948 slab_vaddr = (caddr_t)((uintptr_t)vaddr & slab_mask); 12949 ret = as_pagelock(&kas, &pplist, slab_vaddr, PAGESIZE, S_WRITE); 12950 ASSERT(ret == 0); 12951 ret = hat_add_callback(sfmmu_tsb_cb_id, vaddr, (uint_t)tsbbytes, 12952 cbflags, (void *)tsbinfo, &pfn, NULL); 12953 12954 /* 12955 * Need to free up resources if we could not successfully 12956 * add the callback function and return an error condition. 12957 */ 12958 if (ret != 0) { 12959 if (kmem_cachep) { 12960 kmem_cache_free(kmem_cachep, vaddr); 12961 } else { 12962 vmem_xfree(vmp, (void *)vaddr, tsbbytes); 12963 } 12964 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, 12965 S_WRITE); 12966 return (EAGAIN); 12967 } 12968 } else { 12969 /* 12970 * Since allocation of 8K TSBs from heap is rare and occurs 12971 * during memory pressure we allocate them from permanent 12972 * memory rather than using callbacks to get the PFN. 
12973 */ 12974 pfn = hat_getpfnum(kas.a_hat, vaddr); 12975 } 12976 12977 tsbinfo->tsb_va = vaddr; 12978 tsbinfo->tsb_szc = tsbcode; 12979 tsbinfo->tsb_ttesz_mask = tteszmask; 12980 tsbinfo->tsb_next = NULL; 12981 tsbinfo->tsb_flags = 0; 12982 12983 sfmmu_tsbinfo_setup_phys(tsbinfo, pfn); 12984 12985 sfmmu_inv_tsb(vaddr, tsbbytes); 12986 12987 if (kmem_cachep != sfmmu_tsb8k_cache) { 12988 as_pageunlock(&kas, pplist, slab_vaddr, PAGESIZE, S_WRITE); 12989 } 12990 12991 return (0); 12992 } 12993 12994 /* 12995 * Initialize per cpu tsb and per cpu tsbmiss_area 12996 */ 12997 void 12998 sfmmu_init_tsbs(void) 12999 { 13000 int i; 13001 struct tsbmiss *tsbmissp; 13002 struct kpmtsbm *kpmtsbmp; 13003 #ifndef sun4v 13004 extern int dcache_line_mask; 13005 #endif /* sun4v */ 13006 extern uint_t vac_colors; 13007 13008 /* 13009 * Init. tsb miss area. 13010 */ 13011 tsbmissp = tsbmiss_area; 13012 13013 for (i = 0; i < NCPU; tsbmissp++, i++) { 13014 /* 13015 * initialize the tsbmiss area. 13016 * Do this for all possible CPUs as some may be added 13017 * while the system is running. There is no cost to this. 13018 */ 13019 tsbmissp->ksfmmup = ksfmmup; 13020 #ifndef sun4v 13021 tsbmissp->dcache_line_mask = (uint16_t)dcache_line_mask; 13022 #endif /* sun4v */ 13023 tsbmissp->khashstart = 13024 (struct hmehash_bucket *)va_to_pa((caddr_t)khme_hash); 13025 tsbmissp->uhashstart = 13026 (struct hmehash_bucket *)va_to_pa((caddr_t)uhme_hash); 13027 tsbmissp->khashsz = khmehash_num; 13028 tsbmissp->uhashsz = uhmehash_num; 13029 } 13030 13031 sfmmu_tsb_cb_id = hat_register_callback('T'<<16 | 'S' << 8 | 'B', 13032 sfmmu_tsb_pre_relocator, sfmmu_tsb_post_relocator, NULL, 0); 13033 13034 if (kpm_enable == 0) 13035 return; 13036 13037 /* -- Begin KPM specific init -- */ 13038 13039 if (kpm_smallpages) { 13040 /* 13041 * If we're using base pagesize pages for seg_kpm 13042 * mappings, we use the kernel TSB since we can't afford 13043 * to allocate a second huge TSB for these mappings. 13044 */ 13045 kpm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13046 kpm_tsbsz = ktsb_szcode; 13047 kpmsm_tsbbase = kpm_tsbbase; 13048 kpmsm_tsbsz = kpm_tsbsz; 13049 } else { 13050 /* 13051 * In VAC conflict case, just put the entries in the 13052 * kernel 8K indexed TSB for now so we can find them. 13053 * This could really be changed in the future if we feel 13054 * the need... 13055 */ 13056 kpmsm_tsbbase = ktsb_phys? ktsb_pbase : (uint64_t)ktsb_base; 13057 kpmsm_tsbsz = ktsb_szcode; 13058 kpm_tsbbase = ktsb_phys? ktsb4m_pbase : (uint64_t)ktsb4m_base; 13059 kpm_tsbsz = ktsb4m_szcode; 13060 } 13061 13062 kpmtsbmp = kpmtsbm_area; 13063 for (i = 0; i < NCPU; kpmtsbmp++, i++) { 13064 /* 13065 * Initialize the kpmtsbm area. 13066 * Do this for all possible CPUs as some may be added 13067 * while the system is running. There is no cost to this. 13068 */ 13069 kpmtsbmp->vbase = kpm_vbase; 13070 kpmtsbmp->vend = kpm_vbase + kpm_size * vac_colors; 13071 kpmtsbmp->sz_shift = kpm_size_shift; 13072 kpmtsbmp->kpmp_shift = kpmp_shift; 13073 kpmtsbmp->kpmp2pshft = (uchar_t)kpmp2pshft; 13074 if (kpm_smallpages == 0) { 13075 kpmtsbmp->kpmp_table_sz = kpmp_table_sz; 13076 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_table); 13077 } else { 13078 kpmtsbmp->kpmp_table_sz = kpmp_stable_sz; 13079 kpmtsbmp->kpmp_tablepa = va_to_pa(kpmp_stable); 13080 } 13081 kpmtsbmp->msegphashpa = va_to_pa(memseg_phash); 13082 kpmtsbmp->flags = KPMTSBM_ENABLE_FLAG; 13083 #ifdef DEBUG 13084 kpmtsbmp->flags |= (kpm_tsbmtl) ? 
KPMTSBM_TLTSBM_FLAG : 0; 13085 #endif /* DEBUG */ 13086 if (ktsb_phys) 13087 kpmtsbmp->flags |= KPMTSBM_TSBPHYS_FLAG; 13088 } 13089 13090 /* -- End KPM specific init -- */ 13091 } 13092 13093 /* Avoid using sfmmu_tsbinfo_alloc() to avoid kmem_alloc - no real reason */ 13094 struct tsb_info ktsb_info[2]; 13095 13096 /* 13097 * Called from hat_kern_setup() to setup the tsb_info for ksfmmup. 13098 */ 13099 void 13100 sfmmu_init_ktsbinfo() 13101 { 13102 ASSERT(ksfmmup != NULL); 13103 ASSERT(ksfmmup->sfmmu_tsb == NULL); 13104 /* 13105 * Allocate tsbinfos for kernel and copy in data 13106 * to make debug easier and sun4v setup easier. 13107 */ 13108 ktsb_info[0].tsb_sfmmu = ksfmmup; 13109 ktsb_info[0].tsb_szc = ktsb_szcode; 13110 ktsb_info[0].tsb_ttesz_mask = TSB8K|TSB64K|TSB512K; 13111 ktsb_info[0].tsb_va = ktsb_base; 13112 ktsb_info[0].tsb_pa = ktsb_pbase; 13113 ktsb_info[0].tsb_flags = 0; 13114 ktsb_info[0].tsb_tte.ll = 0; 13115 ktsb_info[0].tsb_cache = NULL; 13116 13117 ktsb_info[1].tsb_sfmmu = ksfmmup; 13118 ktsb_info[1].tsb_szc = ktsb4m_szcode; 13119 ktsb_info[1].tsb_ttesz_mask = TSB4M; 13120 ktsb_info[1].tsb_va = ktsb4m_base; 13121 ktsb_info[1].tsb_pa = ktsb4m_pbase; 13122 ktsb_info[1].tsb_flags = 0; 13123 ktsb_info[1].tsb_tte.ll = 0; 13124 ktsb_info[1].tsb_cache = NULL; 13125 13126 /* Link them into ksfmmup. */ 13127 ktsb_info[0].tsb_next = &ktsb_info[1]; 13128 ktsb_info[1].tsb_next = NULL; 13129 ksfmmup->sfmmu_tsb = &ktsb_info[0]; 13130 13131 sfmmu_setup_tsbinfo(ksfmmup); 13132 } 13133 13134 /* 13135 * Cache the last value returned from va_to_pa(). If the VA specified 13136 * in the current call to cached_va_to_pa() maps to the same Page (as the 13137 * previous call to cached_va_to_pa()), then compute the PA using 13138 * cached info, else call va_to_pa(). 13139 * 13140 * Note: this function is neither MT-safe nor consistent in the presence 13141 * of multiple, interleaved threads. This function was created to enable 13142 * an optimization used during boot (at a point when there's only one thread 13143 * executing on the "boot CPU", and before startup_vm() has been called). 13144 */ 13145 static uint64_t 13146 cached_va_to_pa(void *vaddr) 13147 { 13148 static uint64_t prev_vaddr_base = 0; 13149 static uint64_t prev_pfn = 0; 13150 13151 if ((((uint64_t)vaddr) & MMU_PAGEMASK) == prev_vaddr_base) { 13152 return (prev_pfn | ((uint64_t)vaddr & MMU_PAGEOFFSET)); 13153 } else { 13154 uint64_t pa = va_to_pa(vaddr); 13155 13156 if (pa != ((uint64_t)-1)) { 13157 /* 13158 * Computed physical address is valid. Cache its 13159 * related info for the next cached_va_to_pa() call. 13160 */ 13161 prev_pfn = pa & MMU_PAGEMASK; 13162 prev_vaddr_base = ((uint64_t)vaddr) & MMU_PAGEMASK; 13163 } 13164 13165 return (pa); 13166 } 13167 } 13168 13169 /* 13170 * Carve up our nucleus hblk region. We may allocate more hblks than 13171 * asked due to rounding errors but we are guaranteed to have at least 13172 * enough space to allocate the requested number of hblk8's and hblk1's. 
13173 */ 13174 void 13175 sfmmu_init_nucleus_hblks(caddr_t addr, size_t size, int nhblk8, int nhblk1) 13176 { 13177 struct hme_blk *hmeblkp; 13178 size_t hme8blk_sz, hme1blk_sz; 13179 size_t i; 13180 size_t hblk8_bound; 13181 ulong_t j = 0, k = 0; 13182 13183 ASSERT(addr != NULL && size != 0); 13184 13185 /* Need to use proper structure alignment */ 13186 hme8blk_sz = roundup(HME8BLK_SZ, sizeof (int64_t)); 13187 hme1blk_sz = roundup(HME1BLK_SZ, sizeof (int64_t)); 13188 13189 nucleus_hblk8.list = (void *)addr; 13190 nucleus_hblk8.index = 0; 13191 13192 /* 13193 * Use as much memory as possible for hblk8's since we 13194 * expect all bop_alloc'ed memory to be allocated in 8k chunks. 13195 * We need to hold back enough space for the hblk1's which 13196 * we'll allocate next. 13197 */ 13198 hblk8_bound = size - (nhblk1 * hme1blk_sz) - hme8blk_sz; 13199 for (i = 0; i <= hblk8_bound; i += hme8blk_sz, j++) { 13200 hmeblkp = (struct hme_blk *)addr; 13201 addr += hme8blk_sz; 13202 hmeblkp->hblk_nuc_bit = 1; 13203 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13204 } 13205 nucleus_hblk8.len = j; 13206 ASSERT(j >= nhblk8); 13207 SFMMU_STAT_ADD(sf_hblk8_ncreate, j); 13208 13209 nucleus_hblk1.list = (void *)addr; 13210 nucleus_hblk1.index = 0; 13211 for (; i <= (size - hme1blk_sz); i += hme1blk_sz, k++) { 13212 hmeblkp = (struct hme_blk *)addr; 13213 addr += hme1blk_sz; 13214 hmeblkp->hblk_nuc_bit = 1; 13215 hmeblkp->hblk_nextpa = cached_va_to_pa((caddr_t)hmeblkp); 13216 } 13217 ASSERT(k >= nhblk1); 13218 nucleus_hblk1.len = k; 13219 SFMMU_STAT_ADD(sf_hblk1_ncreate, k); 13220 } 13221 13222 /* 13223 * This function is currently not supported on this platform. For what 13224 * it's supposed to do, see hat.c and hat_srmmu.c 13225 */ 13226 /* ARGSUSED */ 13227 faultcode_t 13228 hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp, 13229 uint_t flags) 13230 { 13231 return (FC_NOSUPPORT); 13232 } 13233 13234 /* 13235 * Searchs the mapping list of the page for a mapping of the same size. If not 13236 * found the corresponding bit is cleared in the p_index field. When large 13237 * pages are more prevalent in the system, we can maintain the mapping list 13238 * in order and we don't have to traverse the list each time. Just check the 13239 * next and prev entries, and if both are of different size, we clear the bit. 13240 */ 13241 static void 13242 sfmmu_rm_large_mappings(page_t *pp, int ttesz) 13243 { 13244 struct sf_hment *sfhmep; 13245 struct hme_blk *hmeblkp; 13246 int index; 13247 pgcnt_t npgs; 13248 13249 ASSERT(ttesz > TTE8K); 13250 13251 ASSERT(sfmmu_mlist_held(pp)); 13252 13253 ASSERT(PP_ISMAPPED_LARGE(pp)); 13254 13255 /* 13256 * Traverse mapping list looking for another mapping of same size. 13257 * since we only want to clear index field if all mappings of 13258 * that size are gone. 13259 */ 13260 13261 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) { 13262 if (IS_PAHME(sfhmep)) 13263 continue; 13264 hmeblkp = sfmmu_hmetohblk(sfhmep); 13265 if (hme_size(sfhmep) == ttesz) { 13266 /* 13267 * another mapping of the same size. don't clear index. 13268 */ 13269 return; 13270 } 13271 } 13272 13273 /* 13274 * Clear the p_index bit for large page. 
13275 */ 13276 index = PAGESZ_TO_INDEX(ttesz); 13277 npgs = TTEPAGES(ttesz); 13278 while (npgs-- > 0) { 13279 ASSERT(pp->p_index & index); 13280 pp->p_index &= ~index; 13281 pp = PP_PAGENEXT(pp); 13282 } 13283 } 13284 13285 /* 13286 * return supported features 13287 */ 13288 /* ARGSUSED */ 13289 int 13290 hat_supported(enum hat_features feature, void *arg) 13291 { 13292 switch (feature) { 13293 case HAT_SHARED_PT: 13294 case HAT_DYNAMIC_ISM_UNMAP: 13295 case HAT_VMODSORT: 13296 return (1); 13297 case HAT_SHARED_REGIONS: 13298 if (shctx_on) 13299 return (1); 13300 else 13301 return (0); 13302 default: 13303 return (0); 13304 } 13305 } 13306 13307 void 13308 hat_enter(struct hat *hat) 13309 { 13310 hatlock_t *hatlockp; 13311 13312 if (hat != ksfmmup) { 13313 hatlockp = TSB_HASH(hat); 13314 mutex_enter(HATLOCK_MUTEXP(hatlockp)); 13315 } 13316 } 13317 13318 void 13319 hat_exit(struct hat *hat) 13320 { 13321 hatlock_t *hatlockp; 13322 13323 if (hat != ksfmmup) { 13324 hatlockp = TSB_HASH(hat); 13325 mutex_exit(HATLOCK_MUTEXP(hatlockp)); 13326 } 13327 } 13328 13329 /*ARGSUSED*/ 13330 void 13331 hat_reserve(struct as *as, caddr_t addr, size_t len) 13332 { 13333 } 13334 13335 static void 13336 hat_kstat_init(void) 13337 { 13338 kstat_t *ksp; 13339 13340 ksp = kstat_create("unix", 0, "sfmmu_global_stat", "hat", 13341 KSTAT_TYPE_RAW, sizeof (struct sfmmu_global_stat), 13342 KSTAT_FLAG_VIRTUAL); 13343 if (ksp) { 13344 ksp->ks_data = (void *) &sfmmu_global_stat; 13345 kstat_install(ksp); 13346 } 13347 ksp = kstat_create("unix", 0, "sfmmu_tsbsize_stat", "hat", 13348 KSTAT_TYPE_RAW, sizeof (struct sfmmu_tsbsize_stat), 13349 KSTAT_FLAG_VIRTUAL); 13350 if (ksp) { 13351 ksp->ks_data = (void *) &sfmmu_tsbsize_stat; 13352 kstat_install(ksp); 13353 } 13354 ksp = kstat_create("unix", 0, "sfmmu_percpu_stat", "hat", 13355 KSTAT_TYPE_RAW, sizeof (struct sfmmu_percpu_stat) * NCPU, 13356 KSTAT_FLAG_WRITABLE); 13357 if (ksp) { 13358 ksp->ks_update = sfmmu_kstat_percpu_update; 13359 kstat_install(ksp); 13360 } 13361 } 13362 13363 /* ARGSUSED */ 13364 static int 13365 sfmmu_kstat_percpu_update(kstat_t *ksp, int rw) 13366 { 13367 struct sfmmu_percpu_stat *cpu_kstat = ksp->ks_data; 13368 struct tsbmiss *tsbm = tsbmiss_area; 13369 struct kpmtsbm *kpmtsbm = kpmtsbm_area; 13370 int i; 13371 13372 ASSERT(cpu_kstat); 13373 if (rw == KSTAT_READ) { 13374 for (i = 0; i < NCPU; cpu_kstat++, tsbm++, kpmtsbm++, i++) { 13375 cpu_kstat->sf_itlb_misses = 0; 13376 cpu_kstat->sf_dtlb_misses = 0; 13377 cpu_kstat->sf_utsb_misses = tsbm->utsb_misses - 13378 tsbm->uprot_traps; 13379 cpu_kstat->sf_ktsb_misses = tsbm->ktsb_misses + 13380 kpmtsbm->kpm_tsb_misses - tsbm->kprot_traps; 13381 cpu_kstat->sf_tsb_hits = 0; 13382 cpu_kstat->sf_umod_faults = tsbm->uprot_traps; 13383 cpu_kstat->sf_kmod_faults = tsbm->kprot_traps; 13384 } 13385 } else { 13386 /* KSTAT_WRITE is used to clear stats */ 13387 for (i = 0; i < NCPU; tsbm++, kpmtsbm++, i++) { 13388 tsbm->utsb_misses = 0; 13389 tsbm->ktsb_misses = 0; 13390 tsbm->uprot_traps = 0; 13391 tsbm->kprot_traps = 0; 13392 kpmtsbm->kpm_dtlb_misses = 0; 13393 kpmtsbm->kpm_tsb_misses = 0; 13394 } 13395 } 13396 return (0); 13397 } 13398 13399 #ifdef DEBUG 13400 13401 tte_t *gorig[NCPU], *gcur[NCPU], *gnew[NCPU]; 13402 13403 /* 13404 * A tte checker. *orig_old is the value we read before cas. 13405 * *cur is the value returned by cas. 13406 * *new is the desired value when we do the cas. 13407 * 13408 * *hmeblkp is currently unused. 
13409 */ 13410 13411 /* ARGSUSED */ 13412 void 13413 chk_tte(tte_t *orig_old, tte_t *cur, tte_t *new, struct hme_blk *hmeblkp) 13414 { 13415 pfn_t i, j, k; 13416 int cpuid = CPU->cpu_id; 13417 13418 gorig[cpuid] = orig_old; 13419 gcur[cpuid] = cur; 13420 gnew[cpuid] = new; 13421 13422 #ifdef lint 13423 hmeblkp = hmeblkp; 13424 #endif 13425 13426 if (TTE_IS_VALID(orig_old)) { 13427 if (TTE_IS_VALID(cur)) { 13428 i = TTE_TO_TTEPFN(orig_old); 13429 j = TTE_TO_TTEPFN(cur); 13430 k = TTE_TO_TTEPFN(new); 13431 if (i != j) { 13432 /* remap error? */ 13433 panic("chk_tte: bad pfn, 0x%lx, 0x%lx", i, j); 13434 } 13435 13436 if (i != k) { 13437 /* remap error? */ 13438 panic("chk_tte: bad pfn2, 0x%lx, 0x%lx", i, k); 13439 } 13440 } else { 13441 if (TTE_IS_VALID(new)) { 13442 panic("chk_tte: invalid cur? "); 13443 } 13444 13445 i = TTE_TO_TTEPFN(orig_old); 13446 k = TTE_TO_TTEPFN(new); 13447 if (i != k) { 13448 panic("chk_tte: bad pfn3, 0x%lx, 0x%lx", i, k); 13449 } 13450 } 13451 } else { 13452 if (TTE_IS_VALID(cur)) { 13453 j = TTE_TO_TTEPFN(cur); 13454 if (TTE_IS_VALID(new)) { 13455 k = TTE_TO_TTEPFN(new); 13456 if (j != k) { 13457 panic("chk_tte: bad pfn4, 0x%lx, 0x%lx", 13458 j, k); 13459 } 13460 } else { 13461 panic("chk_tte: why here?"); 13462 } 13463 } else { 13464 if (!TTE_IS_VALID(new)) { 13465 panic("chk_tte: why here2 ?"); 13466 } 13467 } 13468 } 13469 } 13470 13471 #endif /* DEBUG */ 13472 13473 extern void prefetch_tsbe_read(struct tsbe *); 13474 extern void prefetch_tsbe_write(struct tsbe *); 13475 13476 13477 /* 13478 * We want to prefetch 7 cache lines ahead for our read prefetch. This gives 13479 * us optimal performance on Cheetah+. You can only have 8 outstanding 13480 * prefetches at any one time, so we opted for 7 read prefetches and 1 write 13481 * prefetch to make the most utilization of the prefetch capability. 13482 */ 13483 #define TSBE_PREFETCH_STRIDE (7) 13484 13485 void 13486 sfmmu_copy_tsb(struct tsb_info *old_tsbinfo, struct tsb_info *new_tsbinfo) 13487 { 13488 int old_bytes = TSB_BYTES(old_tsbinfo->tsb_szc); 13489 int new_bytes = TSB_BYTES(new_tsbinfo->tsb_szc); 13490 int old_entries = TSB_ENTRIES(old_tsbinfo->tsb_szc); 13491 int new_entries = TSB_ENTRIES(new_tsbinfo->tsb_szc); 13492 struct tsbe *old; 13493 struct tsbe *new; 13494 struct tsbe *new_base = (struct tsbe *)new_tsbinfo->tsb_va; 13495 uint64_t va; 13496 int new_offset; 13497 int i; 13498 int vpshift; 13499 int last_prefetch; 13500 13501 if (old_bytes == new_bytes) { 13502 bcopy(old_tsbinfo->tsb_va, new_tsbinfo->tsb_va, new_bytes); 13503 } else { 13504 13505 /* 13506 * A TSBE is 16 bytes which means there are four TSBE's per 13507 * P$ line (64 bytes), thus every 4 TSBE's we prefetch. 13508 */ 13509 old = (struct tsbe *)old_tsbinfo->tsb_va; 13510 last_prefetch = old_entries - (4*(TSBE_PREFETCH_STRIDE+1)); 13511 for (i = 0; i < old_entries; i++, old++) { 13512 if (((i & (4-1)) == 0) && (i < last_prefetch)) 13513 prefetch_tsbe_read(old); 13514 if (!old->tte_tag.tag_invalid) { 13515 /* 13516 * We have a valid TTE to remap. Check the 13517 * size. We won't remap 64K or 512K TTEs 13518 * because they span more than one TSB entry 13519 * and are indexed using an 8K virt. page. 13520 * Ditto for 32M and 256M TTEs. 
13521 */ 13522 if (TTE_CSZ(&old->tte_data) == TTE64K || 13523 TTE_CSZ(&old->tte_data) == TTE512K) 13524 continue; 13525 if (mmu_page_sizes == max_mmu_page_sizes) { 13526 if (TTE_CSZ(&old->tte_data) == TTE32M || 13527 TTE_CSZ(&old->tte_data) == TTE256M) 13528 continue; 13529 } 13530 13531 /* clear the lower 22 bits of the va */ 13532 va = *(uint64_t *)old << 22; 13533 /* turn va into a virtual pfn */ 13534 va >>= 22 - TSB_START_SIZE; 13535 /* 13536 * or in bits from the offset in the tsb 13537 * to get the real virtual pfn. These 13538 * correspond to bits [21:13] in the va 13539 */ 13540 vpshift = 13541 TTE_BSZS_SHIFT(TTE_CSZ(&old->tte_data)) & 13542 0x1ff; 13543 va |= (i << vpshift); 13544 va >>= vpshift; 13545 new_offset = va & (new_entries - 1); 13546 new = new_base + new_offset; 13547 prefetch_tsbe_write(new); 13548 *new = *old; 13549 } 13550 } 13551 } 13552 } 13553 13554 /* 13555 * unused in sfmmu 13556 */ 13557 void 13558 hat_dump(void) 13559 { 13560 } 13561 13562 /* 13563 * Called when a thread is exiting and we have switched to the kernel address 13564 * space. Perform the same VM initialization resume() uses when switching 13565 * processes. 13566 * 13567 * Note that sfmmu_load_mmustate() is currently a no-op for kernel threads, but 13568 * we call it anyway in case the semantics change in the future. 13569 */ 13570 /*ARGSUSED*/ 13571 void 13572 hat_thread_exit(kthread_t *thd) 13573 { 13574 uint_t pgsz_cnum; 13575 uint_t pstate_save; 13576 13577 ASSERT(thd->t_procp->p_as == &kas); 13578 13579 pgsz_cnum = KCONTEXT; 13580 #ifdef sun4u 13581 pgsz_cnum |= (ksfmmup->sfmmu_cext << CTXREG_EXT_SHIFT); 13582 #endif 13583 13584 /* 13585 * Note that sfmmu_load_mmustate() is currently a no-op for 13586 * kernel threads. We need to disable interrupts here, 13587 * simply because otherwise sfmmu_load_mmustate() would panic 13588 * if the caller does not disable interrupts. 13589 */ 13590 pstate_save = sfmmu_disable_intrs(); 13591 13592 /* Compatibility Note: hw takes care of MMU_SCONTEXT1 */ 13593 sfmmu_setctx_sec(pgsz_cnum); 13594 sfmmu_load_mmustate(ksfmmup); 13595 sfmmu_enable_intrs(pstate_save); 13596 } 13597 13598 13599 /* 13600 * SRD support 13601 */ 13602 #define SRD_HASH_FUNCTION(vp) (((((uintptr_t)(vp)) >> 4) ^ \ 13603 (((uintptr_t)(vp)) >> 11)) & \ 13604 srd_hashmask) 13605 13606 /* 13607 * Attach the process to the srd struct associated with the exec vnode 13608 * from which the process is started. 
13609 */ 13610 void 13611 hat_join_srd(struct hat *sfmmup, vnode_t *evp) 13612 { 13613 uint_t hash = SRD_HASH_FUNCTION(evp); 13614 sf_srd_t *srdp; 13615 sf_srd_t *newsrdp; 13616 13617 ASSERT(sfmmup != ksfmmup); 13618 ASSERT(sfmmup->sfmmu_srdp == NULL); 13619 13620 if (!shctx_on) { 13621 return; 13622 } 13623 13624 VN_HOLD(evp); 13625 13626 if (srd_buckets[hash].srdb_srdp != NULL) { 13627 mutex_enter(&srd_buckets[hash].srdb_lock); 13628 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13629 srdp = srdp->srd_hash) { 13630 if (srdp->srd_evp == evp) { 13631 ASSERT(srdp->srd_refcnt >= 0); 13632 sfmmup->sfmmu_srdp = srdp; 13633 atomic_inc_32( 13634 (volatile uint_t *)&srdp->srd_refcnt); 13635 mutex_exit(&srd_buckets[hash].srdb_lock); 13636 return; 13637 } 13638 } 13639 mutex_exit(&srd_buckets[hash].srdb_lock); 13640 } 13641 newsrdp = kmem_cache_alloc(srd_cache, KM_SLEEP); 13642 ASSERT(newsrdp->srd_next_ismrid == 0 && newsrdp->srd_next_hmerid == 0); 13643 13644 newsrdp->srd_evp = evp; 13645 newsrdp->srd_refcnt = 1; 13646 newsrdp->srd_hmergnfree = NULL; 13647 newsrdp->srd_ismrgnfree = NULL; 13648 13649 mutex_enter(&srd_buckets[hash].srdb_lock); 13650 for (srdp = srd_buckets[hash].srdb_srdp; srdp != NULL; 13651 srdp = srdp->srd_hash) { 13652 if (srdp->srd_evp == evp) { 13653 ASSERT(srdp->srd_refcnt >= 0); 13654 sfmmup->sfmmu_srdp = srdp; 13655 atomic_inc_32((volatile uint_t *)&srdp->srd_refcnt); 13656 mutex_exit(&srd_buckets[hash].srdb_lock); 13657 kmem_cache_free(srd_cache, newsrdp); 13658 return; 13659 } 13660 } 13661 newsrdp->srd_hash = srd_buckets[hash].srdb_srdp; 13662 srd_buckets[hash].srdb_srdp = newsrdp; 13663 sfmmup->sfmmu_srdp = newsrdp; 13664 13665 mutex_exit(&srd_buckets[hash].srdb_lock); 13666 13667 } 13668 13669 static void 13670 sfmmu_leave_srd(sfmmu_t *sfmmup) 13671 { 13672 vnode_t *evp; 13673 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13674 uint_t hash; 13675 sf_srd_t **prev_srdpp; 13676 sf_region_t *rgnp; 13677 sf_region_t *nrgnp; 13678 #ifdef DEBUG 13679 int rgns = 0; 13680 #endif 13681 int i; 13682 13683 ASSERT(sfmmup != ksfmmup); 13684 ASSERT(srdp != NULL); 13685 ASSERT(srdp->srd_refcnt > 0); 13686 ASSERT(sfmmup->sfmmu_scdp == NULL); 13687 ASSERT(sfmmup->sfmmu_free == 1); 13688 13689 sfmmup->sfmmu_srdp = NULL; 13690 evp = srdp->srd_evp; 13691 ASSERT(evp != NULL); 13692 if (atomic_dec_32_nv((volatile uint_t *)&srdp->srd_refcnt)) { 13693 VN_RELE(evp); 13694 return; 13695 } 13696 13697 hash = SRD_HASH_FUNCTION(evp); 13698 mutex_enter(&srd_buckets[hash].srdb_lock); 13699 for (prev_srdpp = &srd_buckets[hash].srdb_srdp; 13700 (srdp = *prev_srdpp) != NULL; prev_srdpp = &srdp->srd_hash) { 13701 if (srdp->srd_evp == evp) { 13702 break; 13703 } 13704 } 13705 if (srdp == NULL || srdp->srd_refcnt) { 13706 mutex_exit(&srd_buckets[hash].srdb_lock); 13707 VN_RELE(evp); 13708 return; 13709 } 13710 *prev_srdpp = srdp->srd_hash; 13711 mutex_exit(&srd_buckets[hash].srdb_lock); 13712 13713 ASSERT(srdp->srd_refcnt == 0); 13714 VN_RELE(evp); 13715 13716 #ifdef DEBUG 13717 for (i = 0; i < SFMMU_MAX_REGION_BUCKETS; i++) { 13718 ASSERT(srdp->srd_rgnhash[i] == NULL); 13719 } 13720 #endif /* DEBUG */ 13721 13722 /* free each hme regions in the srd */ 13723 for (rgnp = srdp->srd_hmergnfree; rgnp != NULL; rgnp = nrgnp) { 13724 nrgnp = rgnp->rgn_next; 13725 ASSERT(rgnp->rgn_id < srdp->srd_next_hmerid); 13726 ASSERT(rgnp->rgn_refcnt == 0); 13727 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13728 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13729 ASSERT(rgnp->rgn_hmeflags == 0); 13730 
ASSERT(srdp->srd_hmergnp[rgnp->rgn_id] == rgnp); 13731 #ifdef DEBUG 13732 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13733 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13734 } 13735 rgns++; 13736 #endif /* DEBUG */ 13737 kmem_cache_free(region_cache, rgnp); 13738 } 13739 ASSERT(rgns == srdp->srd_next_hmerid); 13740 13741 #ifdef DEBUG 13742 rgns = 0; 13743 #endif 13744 /* free each ism rgns in the srd */ 13745 for (rgnp = srdp->srd_ismrgnfree; rgnp != NULL; rgnp = nrgnp) { 13746 nrgnp = rgnp->rgn_next; 13747 ASSERT(rgnp->rgn_id < srdp->srd_next_ismrid); 13748 ASSERT(rgnp->rgn_refcnt == 0); 13749 ASSERT(rgnp->rgn_sfmmu_head == NULL); 13750 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 13751 ASSERT(srdp->srd_ismrgnp[rgnp->rgn_id] == rgnp); 13752 #ifdef DEBUG 13753 for (i = 0; i < MMU_PAGE_SIZES; i++) { 13754 ASSERT(rgnp->rgn_ttecnt[i] == 0); 13755 } 13756 rgns++; 13757 #endif /* DEBUG */ 13758 kmem_cache_free(region_cache, rgnp); 13759 } 13760 ASSERT(rgns == srdp->srd_next_ismrid); 13761 ASSERT(srdp->srd_ismbusyrgns == 0); 13762 ASSERT(srdp->srd_hmebusyrgns == 0); 13763 13764 srdp->srd_next_ismrid = 0; 13765 srdp->srd_next_hmerid = 0; 13766 13767 bzero((void *)srdp->srd_ismrgnp, 13768 sizeof (sf_region_t *) * SFMMU_MAX_ISM_REGIONS); 13769 bzero((void *)srdp->srd_hmergnp, 13770 sizeof (sf_region_t *) * SFMMU_MAX_HME_REGIONS); 13771 13772 ASSERT(srdp->srd_scdp == NULL); 13773 kmem_cache_free(srd_cache, srdp); 13774 } 13775 13776 /* ARGSUSED */ 13777 static int 13778 sfmmu_srdcache_constructor(void *buf, void *cdrarg, int kmflags) 13779 { 13780 sf_srd_t *srdp = (sf_srd_t *)buf; 13781 bzero(buf, sizeof (*srdp)); 13782 13783 mutex_init(&srdp->srd_mutex, NULL, MUTEX_DEFAULT, NULL); 13784 mutex_init(&srdp->srd_scd_mutex, NULL, MUTEX_DEFAULT, NULL); 13785 return (0); 13786 } 13787 13788 /* ARGSUSED */ 13789 static void 13790 sfmmu_srdcache_destructor(void *buf, void *cdrarg) 13791 { 13792 sf_srd_t *srdp = (sf_srd_t *)buf; 13793 13794 mutex_destroy(&srdp->srd_mutex); 13795 mutex_destroy(&srdp->srd_scd_mutex); 13796 } 13797 13798 /* 13799 * The caller makes sure hat_join_region()/hat_leave_region() can't be called 13800 * at the same time for the same process and address range. This is ensured by 13801 * the fact that address space is locked as writer when a process joins the 13802 * regions. Therefore there's no need to hold an srd lock during the entire 13803 * execution of hat_join_region()/hat_leave_region(). 13804 */ 13805 13806 #define RGN_HASH_FUNCTION(obj) (((((uintptr_t)(obj)) >> 4) ^ \ 13807 (((uintptr_t)(obj)) >> 11)) & \ 13808 srd_rgn_hashmask) 13809 /* 13810 * This routine implements the shared context functionality required when 13811 * attaching a segment to an address space. It must be called from 13812 * hat_share() for D(ISM) segments and from segvn_create() for segments 13813 * with the MAP_PRIVATE and MAP_TEXT flags set. It returns a region_cookie 13814 * which is saved in the private segment data for hme segments and 13815 * the ism_map structure for ism segments. 
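 *
 * A rough usage sketch (the caller, svd and rcookie names are hypothetical
 * and for illustration only; argument order follows the signature below):
 *
 *	cookie = hat_join_region(as->a_hat, seg->s_base, seg->s_size,
 *	    (void *)vp, off, prot, pgszc, callback, HAT_REGION_TEXT);
 *	if (cookie != HAT_INVALID_REGION_COOKIE)
 *		svd->rcookie = cookie;	-- mappings for this range may be shared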
13816 */ 13817 hat_region_cookie_t 13818 hat_join_region(struct hat *sfmmup, 13819 caddr_t r_saddr, 13820 size_t r_size, 13821 void *r_obj, 13822 u_offset_t r_objoff, 13823 uchar_t r_perm, 13824 uchar_t r_pgszc, 13825 hat_rgn_cb_func_t r_cb_function, 13826 uint_t flags) 13827 { 13828 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 13829 uint_t rhash; 13830 uint_t rid; 13831 hatlock_t *hatlockp; 13832 sf_region_t *rgnp; 13833 sf_region_t *new_rgnp = NULL; 13834 int i; 13835 uint16_t *nextidp; 13836 sf_region_t **freelistp; 13837 int maxids; 13838 sf_region_t **rarrp; 13839 uint16_t *busyrgnsp; 13840 ulong_t rttecnt; 13841 uchar_t tteflag; 13842 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 13843 int text = (r_type == HAT_REGION_TEXT); 13844 13845 if (srdp == NULL || r_size == 0) { 13846 return (HAT_INVALID_REGION_COOKIE); 13847 } 13848 13849 ASSERT(sfmmup != ksfmmup); 13850 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 13851 ASSERT(srdp->srd_refcnt > 0); 13852 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 13853 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 13854 ASSERT(r_pgszc < mmu_page_sizes); 13855 if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) || 13856 !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) { 13857 panic("hat_join_region: region addr or size is not aligned\n"); 13858 } 13859 13860 13861 r_type = (r_type == HAT_REGION_ISM) ? SFMMU_REGION_ISM : 13862 SFMMU_REGION_HME; 13863 /* 13864 * Currently only support shared hmes for the read only main text 13865 * region. 13866 */ 13867 if (r_type == SFMMU_REGION_HME && ((r_obj != srdp->srd_evp) || 13868 (r_perm & PROT_WRITE))) { 13869 return (HAT_INVALID_REGION_COOKIE); 13870 } 13871 13872 rhash = RGN_HASH_FUNCTION(r_obj); 13873 13874 if (r_type == SFMMU_REGION_ISM) { 13875 nextidp = &srdp->srd_next_ismrid; 13876 freelistp = &srdp->srd_ismrgnfree; 13877 maxids = SFMMU_MAX_ISM_REGIONS; 13878 rarrp = srdp->srd_ismrgnp; 13879 busyrgnsp = &srdp->srd_ismbusyrgns; 13880 } else { 13881 nextidp = &srdp->srd_next_hmerid; 13882 freelistp = &srdp->srd_hmergnfree; 13883 maxids = SFMMU_MAX_HME_REGIONS; 13884 rarrp = srdp->srd_hmergnp; 13885 busyrgnsp = &srdp->srd_hmebusyrgns; 13886 } 13887 13888 mutex_enter(&srdp->srd_mutex); 13889 13890 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 13891 rgnp = rgnp->rgn_hash) { 13892 if (rgnp->rgn_saddr == r_saddr && rgnp->rgn_size == r_size && 13893 rgnp->rgn_obj == r_obj && rgnp->rgn_objoff == r_objoff && 13894 rgnp->rgn_perm == r_perm && rgnp->rgn_pgszc == r_pgszc) { 13895 break; 13896 } 13897 } 13898 13899 rfound: 13900 if (rgnp != NULL) { 13901 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 13902 ASSERT(rgnp->rgn_cb_function == r_cb_function); 13903 ASSERT(rgnp->rgn_refcnt >= 0); 13904 rid = rgnp->rgn_id; 13905 ASSERT(rid < maxids); 13906 ASSERT(rarrp[rid] == rgnp); 13907 ASSERT(rid < *nextidp); 13908 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt); 13909 mutex_exit(&srdp->srd_mutex); 13910 if (new_rgnp != NULL) { 13911 kmem_cache_free(region_cache, new_rgnp); 13912 } 13913 if (r_type == SFMMU_REGION_HME) { 13914 int myjoin = 13915 (sfmmup == astosfmmu(curthread->t_procp->p_as)); 13916 13917 sfmmu_link_to_hmeregion(sfmmup, rgnp); 13918 /* 13919 * bitmap should be updated after linking sfmmu on 13920 * region list so that pageunload() doesn't skip 13921 * TSB/TLB flush. As soon as bitmap is updated another 13922 * thread in this process can already start accessing 13923 * this region. 
13924 */ 13925 /* 13926 * Normally ttecnt accounting is done as part of 13927 * pagefault handling. But a process may not take any 13928 * pagefaults on shared hmeblks created by some other 13929 * process. To compensate for this assume that the 13930 * entire region will end up faulted in using 13931 * the region's pagesize. 13932 * 13933 */ 13934 if (r_pgszc > TTE8K) { 13935 tteflag = 1 << r_pgszc; 13936 if (disable_large_pages & tteflag) { 13937 tteflag = 0; 13938 } 13939 } else { 13940 tteflag = 0; 13941 } 13942 if (tteflag && !(sfmmup->sfmmu_rtteflags & tteflag)) { 13943 hatlockp = sfmmu_hat_enter(sfmmup); 13944 sfmmup->sfmmu_rtteflags |= tteflag; 13945 sfmmu_hat_exit(hatlockp); 13946 } 13947 hatlockp = sfmmu_hat_enter(sfmmup); 13948 13949 /* 13950 * Preallocate 1/4 of ttecnt's in 8K TSB for >= 4M 13951 * region to allow for large page allocation failure. 13952 */ 13953 if (r_pgszc >= TTE4M) { 13954 sfmmup->sfmmu_tsb0_4minflcnt += 13955 r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 13956 } 13957 13958 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 13959 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 13960 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 13961 rttecnt); 13962 13963 if (text && r_pgszc >= TTE4M && 13964 (tteflag || ((disable_large_pages >> TTE4M) & 13965 ((1 << (r_pgszc - TTE4M + 1)) - 1))) && 13966 !SFMMU_FLAGS_ISSET(sfmmup, HAT_4MTEXT_FLAG)) { 13967 SFMMU_FLAGS_SET(sfmmup, HAT_4MTEXT_FLAG); 13968 } 13969 13970 sfmmu_hat_exit(hatlockp); 13971 /* 13972 * On Panther we need to make sure TLB is programmed 13973 * to accept 32M/256M pages. Call 13974 * sfmmu_check_page_sizes() now to make sure TLB is 13975 * setup before making hmeregions visible to other 13976 * threads. 13977 */ 13978 sfmmu_check_page_sizes(sfmmup, 1); 13979 hatlockp = sfmmu_hat_enter(sfmmup); 13980 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 13981 13982 /* 13983 * if context is invalid tsb miss exception code will 13984 * call sfmmu_check_page_sizes() and update tsbmiss 13985 * area later. 13986 */ 13987 kpreempt_disable(); 13988 if (myjoin && 13989 (sfmmup->sfmmu_ctxs[CPU_MMU_IDX(CPU)].cnum 13990 != INVALID_CONTEXT)) { 13991 struct tsbmiss *tsbmp; 13992 13993 tsbmp = &tsbmiss_area[CPU->cpu_id]; 13994 ASSERT(sfmmup == tsbmp->usfmmup); 13995 BT_SET(tsbmp->shmermap, rid); 13996 if (r_pgszc > TTE64K) { 13997 tsbmp->uhat_rtteflags |= tteflag; 13998 } 13999 14000 } 14001 kpreempt_enable(); 14002 14003 sfmmu_hat_exit(hatlockp); 14004 ASSERT((hat_region_cookie_t)((uint64_t)rid) != 14005 HAT_INVALID_REGION_COOKIE); 14006 } else { 14007 hatlockp = sfmmu_hat_enter(sfmmup); 14008 SF_RGNMAP_ADD(sfmmup->sfmmu_ismregion_map, rid); 14009 sfmmu_hat_exit(hatlockp); 14010 } 14011 ASSERT(rid < maxids); 14012 14013 if (r_type == SFMMU_REGION_ISM) { 14014 sfmmu_find_scd(sfmmup); 14015 } 14016 return ((hat_region_cookie_t)((uint64_t)rid)); 14017 } 14018 14019 ASSERT(new_rgnp == NULL); 14020 14021 if (*busyrgnsp >= maxids) { 14022 mutex_exit(&srdp->srd_mutex); 14023 return (HAT_INVALID_REGION_COOKIE); 14024 } 14025 14026 ASSERT(MUTEX_HELD(&srdp->srd_mutex)); 14027 if (*freelistp != NULL) { 14028 rgnp = *freelistp; 14029 *freelistp = rgnp->rgn_next; 14030 ASSERT(rgnp->rgn_id < *nextidp); 14031 ASSERT(rgnp->rgn_id < maxids); 14032 ASSERT(rgnp->rgn_flags & SFMMU_REGION_FREE); 14033 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) 14034 == r_type); 14035 ASSERT(rarrp[rgnp->rgn_id] == rgnp); 14036 ASSERT(rgnp->rgn_hmeflags == 0); 14037 } else { 14038 /* 14039 * release local locks before memory allocation. 
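 * (The pattern below: drop srd_mutex, perform the sleeping kmem allocation,
 * retake srd_mutex and re-walk the hash chain in case another thread
 * inserted an identical region while the lock was dropped; if one is found
 * the fresh allocation is freed again at the rfound: label.)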
14040 */ 14041 mutex_exit(&srdp->srd_mutex); 14042 14043 new_rgnp = kmem_cache_alloc(region_cache, KM_SLEEP); 14044 14045 mutex_enter(&srdp->srd_mutex); 14046 for (rgnp = srdp->srd_rgnhash[rhash]; rgnp != NULL; 14047 rgnp = rgnp->rgn_hash) { 14048 if (rgnp->rgn_saddr == r_saddr && 14049 rgnp->rgn_size == r_size && 14050 rgnp->rgn_obj == r_obj && 14051 rgnp->rgn_objoff == r_objoff && 14052 rgnp->rgn_perm == r_perm && 14053 rgnp->rgn_pgszc == r_pgszc) { 14054 break; 14055 } 14056 } 14057 if (rgnp != NULL) { 14058 goto rfound; 14059 } 14060 14061 if (*nextidp >= maxids) { 14062 mutex_exit(&srdp->srd_mutex); 14063 goto fail; 14064 } 14065 rgnp = new_rgnp; 14066 new_rgnp = NULL; 14067 rgnp->rgn_id = (*nextidp)++; 14068 ASSERT(rgnp->rgn_id < maxids); 14069 ASSERT(rarrp[rgnp->rgn_id] == NULL); 14070 rarrp[rgnp->rgn_id] = rgnp; 14071 } 14072 14073 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14074 ASSERT(rgnp->rgn_hmeflags == 0); 14075 #ifdef DEBUG 14076 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14077 ASSERT(rgnp->rgn_ttecnt[i] == 0); 14078 } 14079 #endif 14080 rgnp->rgn_saddr = r_saddr; 14081 rgnp->rgn_size = r_size; 14082 rgnp->rgn_obj = r_obj; 14083 rgnp->rgn_objoff = r_objoff; 14084 rgnp->rgn_perm = r_perm; 14085 rgnp->rgn_pgszc = r_pgszc; 14086 rgnp->rgn_flags = r_type; 14087 rgnp->rgn_refcnt = 0; 14088 rgnp->rgn_cb_function = r_cb_function; 14089 rgnp->rgn_hash = srdp->srd_rgnhash[rhash]; 14090 srdp->srd_rgnhash[rhash] = rgnp; 14091 (*busyrgnsp)++; 14092 ASSERT(*busyrgnsp <= maxids); 14093 goto rfound; 14094 14095 fail: 14096 ASSERT(new_rgnp != NULL); 14097 kmem_cache_free(region_cache, new_rgnp); 14098 return (HAT_INVALID_REGION_COOKIE); 14099 } 14100 14101 /* 14102 * This function implements the shared context functionality required 14103 * when detaching a segment from an address space. It must be called 14104 * from hat_unshare() for all D(ISM) segments and from segvn_unmap(), 14105 * for segments with a valid region_cookie. 14106 * It will also be called from all seg_vn routines which change a 14107 * segment's attributes such as segvn_setprot(), segvn_setpagesize(), 14108 * segvn_clrszc() & segvn_advise(), as well as in the case of COW fault 14109 * from segvn_fault(). 14110 */ 14111 void 14112 hat_leave_region(struct hat *sfmmup, hat_region_cookie_t rcookie, uint_t flags) 14113 { 14114 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14115 sf_scd_t *scdp; 14116 uint_t rhash; 14117 uint_t rid = (uint_t)((uint64_t)rcookie); 14118 hatlock_t *hatlockp = NULL; 14119 sf_region_t *rgnp; 14120 sf_region_t **prev_rgnpp; 14121 sf_region_t *cur_rgnp; 14122 void *r_obj; 14123 int i; 14124 caddr_t r_saddr; 14125 caddr_t r_eaddr; 14126 size_t r_size; 14127 uchar_t r_pgszc; 14128 uchar_t r_type = flags & HAT_REGION_TYPE_MASK; 14129 14130 ASSERT(sfmmup != ksfmmup); 14131 ASSERT(srdp != NULL); 14132 ASSERT(srdp->srd_refcnt > 0); 14133 ASSERT(!(flags & ~HAT_REGION_TYPE_MASK)); 14134 ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM); 14135 ASSERT(!sfmmup->sfmmu_free || sfmmup->sfmmu_scdp == NULL); 14136 14137 r_type = (r_type == HAT_REGION_ISM) ? 
SFMMU_REGION_ISM : 14138 SFMMU_REGION_HME; 14139 14140 if (r_type == SFMMU_REGION_ISM) { 14141 ASSERT(SFMMU_IS_ISMRID_VALID(rid)); 14142 ASSERT(rid < SFMMU_MAX_ISM_REGIONS); 14143 rgnp = srdp->srd_ismrgnp[rid]; 14144 } else { 14145 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14146 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14147 rgnp = srdp->srd_hmergnp[rid]; 14148 } 14149 ASSERT(rgnp != NULL); 14150 ASSERT(rgnp->rgn_id == rid); 14151 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14152 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14153 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14154 14155 if (sfmmup->sfmmu_free) { 14156 ulong_t rttecnt; 14157 r_pgszc = rgnp->rgn_pgszc; 14158 r_size = rgnp->rgn_size; 14159 14160 ASSERT(sfmmup->sfmmu_scdp == NULL); 14161 if (r_type == SFMMU_REGION_ISM) { 14162 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14163 } else { 14164 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14165 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14166 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14167 14168 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], 14169 -rttecnt); 14170 14171 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14172 } 14173 } else if (r_type == SFMMU_REGION_ISM) { 14174 hatlockp = sfmmu_hat_enter(sfmmup); 14175 ASSERT(rid < srdp->srd_next_ismrid); 14176 SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid); 14177 scdp = sfmmup->sfmmu_scdp; 14178 if (scdp != NULL && 14179 SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid)) { 14180 sfmmu_leave_scd(sfmmup, r_type); 14181 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14182 } 14183 sfmmu_hat_exit(hatlockp); 14184 } else { 14185 ulong_t rttecnt; 14186 r_pgszc = rgnp->rgn_pgszc; 14187 r_saddr = rgnp->rgn_saddr; 14188 r_size = rgnp->rgn_size; 14189 r_eaddr = r_saddr + r_size; 14190 14191 ASSERT(r_type == SFMMU_REGION_HME); 14192 hatlockp = sfmmu_hat_enter(sfmmup); 14193 ASSERT(rid < srdp->srd_next_hmerid); 14194 SF_RGNMAP_DEL(sfmmup->sfmmu_hmeregion_map, rid); 14195 14196 /* 14197 * If region is part of an SCD call sfmmu_leave_scd(). 14198 * Otherwise if process is not exiting and has valid context 14199 * just drop the context on the floor to lose stale TLB 14200 * entries and force the update of tsb miss area to reflect 14201 * the new region map. After that clean our TSB entries. 14202 */ 14203 scdp = sfmmup->sfmmu_scdp; 14204 if (scdp != NULL && 14205 SF_RGNMAP_TEST(scdp->scd_hmeregion_map, rid)) { 14206 sfmmu_leave_scd(sfmmup, r_type); 14207 ASSERT(sfmmu_hat_lock_held(sfmmup)); 14208 } 14209 sfmmu_invalidate_ctx(sfmmup); 14210 14211 i = TTE8K; 14212 while (i < mmu_page_sizes) { 14213 if (rgnp->rgn_ttecnt[i] != 0) { 14214 sfmmu_unload_tsb_range(sfmmup, r_saddr, 14215 r_eaddr, i); 14216 if (i < TTE4M) { 14217 i = TTE4M; 14218 continue; 14219 } else { 14220 break; 14221 } 14222 } 14223 i++; 14224 } 14225 /* Remove the preallocated 1/4 8k ttecnt for 4M regions. 
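 * (This backs out the same r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2) amount
 * that hat_join_region() added to sfmmu_tsb0_4minflcnt when the region was
 * joined.)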
*/ 14226 if (r_pgszc >= TTE4M) { 14227 rttecnt = r_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14228 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14229 rttecnt); 14230 sfmmup->sfmmu_tsb0_4minflcnt -= rttecnt; 14231 } 14232 14233 /* update shme rgns ttecnt in sfmmu_ttecnt */ 14234 rttecnt = r_size >> TTE_PAGE_SHIFT(r_pgszc); 14235 ASSERT(sfmmup->sfmmu_ttecnt[r_pgszc] >= rttecnt); 14236 atomic_add_long(&sfmmup->sfmmu_ttecnt[r_pgszc], -rttecnt); 14237 14238 sfmmu_hat_exit(hatlockp); 14239 if (scdp != NULL && sfmmup->sfmmu_scdp == NULL) { 14240 /* sfmmup left the scd, grow private tsb */ 14241 sfmmu_check_page_sizes(sfmmup, 1); 14242 } else { 14243 sfmmu_check_page_sizes(sfmmup, 0); 14244 } 14245 } 14246 14247 if (r_type == SFMMU_REGION_HME) { 14248 sfmmu_unlink_from_hmeregion(sfmmup, rgnp); 14249 } 14250 14251 r_obj = rgnp->rgn_obj; 14252 if (atomic_dec_32_nv((volatile uint_t *)&rgnp->rgn_refcnt)) { 14253 return; 14254 } 14255 14256 /* 14257 * looks like nobody uses this region anymore. Free it. 14258 */ 14259 rhash = RGN_HASH_FUNCTION(r_obj); 14260 mutex_enter(&srdp->srd_mutex); 14261 for (prev_rgnpp = &srdp->srd_rgnhash[rhash]; 14262 (cur_rgnp = *prev_rgnpp) != NULL; 14263 prev_rgnpp = &cur_rgnp->rgn_hash) { 14264 if (cur_rgnp == rgnp && cur_rgnp->rgn_refcnt == 0) { 14265 break; 14266 } 14267 } 14268 14269 if (cur_rgnp == NULL) { 14270 mutex_exit(&srdp->srd_mutex); 14271 return; 14272 } 14273 14274 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type); 14275 *prev_rgnpp = rgnp->rgn_hash; 14276 if (r_type == SFMMU_REGION_ISM) { 14277 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14278 ASSERT(rid < srdp->srd_next_ismrid); 14279 rgnp->rgn_next = srdp->srd_ismrgnfree; 14280 srdp->srd_ismrgnfree = rgnp; 14281 ASSERT(srdp->srd_ismbusyrgns > 0); 14282 srdp->srd_ismbusyrgns--; 14283 mutex_exit(&srdp->srd_mutex); 14284 return; 14285 } 14286 mutex_exit(&srdp->srd_mutex); 14287 14288 /* 14289 * Destroy region's hmeblks. 14290 */ 14291 sfmmu_unload_hmeregion(srdp, rgnp); 14292 14293 rgnp->rgn_hmeflags = 0; 14294 14295 ASSERT(rgnp->rgn_sfmmu_head == NULL); 14296 ASSERT(rgnp->rgn_id == rid); 14297 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14298 rgnp->rgn_ttecnt[i] = 0; 14299 } 14300 rgnp->rgn_flags |= SFMMU_REGION_FREE; 14301 mutex_enter(&srdp->srd_mutex); 14302 ASSERT(rid < srdp->srd_next_hmerid); 14303 rgnp->rgn_next = srdp->srd_hmergnfree; 14304 srdp->srd_hmergnfree = rgnp; 14305 ASSERT(srdp->srd_hmebusyrgns > 0); 14306 srdp->srd_hmebusyrgns--; 14307 mutex_exit(&srdp->srd_mutex); 14308 } 14309 14310 /* 14311 * For now only called for hmeblk regions and not for ISM regions. 
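 *
 * A sketch of the expected call shape (the caller and the rcookie field are
 * hypothetical; presumably this is used when an address space holding a
 * region cookie is duplicated, e.g. across fork):
 *
 *	hat_dup_region(newas->a_hat, svd->rcookie);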
14312 */ 14313 void 14314 hat_dup_region(struct hat *sfmmup, hat_region_cookie_t rcookie) 14315 { 14316 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14317 uint_t rid = (uint_t)((uint64_t)rcookie); 14318 sf_region_t *rgnp; 14319 sf_rgn_link_t *rlink; 14320 sf_rgn_link_t *hrlink; 14321 ulong_t rttecnt; 14322 14323 ASSERT(sfmmup != ksfmmup); 14324 ASSERT(srdp != NULL); 14325 ASSERT(srdp->srd_refcnt > 0); 14326 14327 ASSERT(rid < srdp->srd_next_hmerid); 14328 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14329 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 14330 14331 rgnp = srdp->srd_hmergnp[rid]; 14332 ASSERT(rgnp->rgn_refcnt > 0); 14333 ASSERT(rgnp->rgn_id == rid); 14334 ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == SFMMU_REGION_HME); 14335 ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE)); 14336 14337 atomic_inc_32((volatile uint_t *)&rgnp->rgn_refcnt); 14338 14339 /* LINTED: constant in conditional context */ 14340 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 0); 14341 ASSERT(rlink != NULL); 14342 mutex_enter(&rgnp->rgn_mutex); 14343 ASSERT(rgnp->rgn_sfmmu_head != NULL); 14344 /* LINTED: constant in conditional context */ 14345 SFMMU_HMERID2RLINKP(rgnp->rgn_sfmmu_head, rid, hrlink, 0, 0); 14346 ASSERT(hrlink != NULL); 14347 ASSERT(hrlink->prev == NULL); 14348 rlink->next = rgnp->rgn_sfmmu_head; 14349 rlink->prev = NULL; 14350 hrlink->prev = sfmmup; 14351 /* 14352 * make sure rlink's next field is correct 14353 * before making this link visible. 14354 */ 14355 membar_stst(); 14356 rgnp->rgn_sfmmu_head = sfmmup; 14357 mutex_exit(&rgnp->rgn_mutex); 14358 14359 /* update sfmmu_ttecnt with the shme rgn ttecnt */ 14360 rttecnt = rgnp->rgn_size >> TTE_PAGE_SHIFT(rgnp->rgn_pgszc); 14361 atomic_add_long(&sfmmup->sfmmu_ttecnt[rgnp->rgn_pgszc], rttecnt); 14362 /* update tsb0 inflation count */ 14363 if (rgnp->rgn_pgszc >= TTE4M) { 14364 sfmmup->sfmmu_tsb0_4minflcnt += 14365 rgnp->rgn_size >> (TTE_PAGE_SHIFT(TTE8K) + 2); 14366 } 14367 /* 14368 * Update regionid bitmask without hat lock since no other thread 14369 * can update this region bitmask right now. 
14370 */ 14371 SF_RGNMAP_ADD(sfmmup->sfmmu_hmeregion_map, rid); 14372 } 14373 14374 /* ARGSUSED */ 14375 static int 14376 sfmmu_rgncache_constructor(void *buf, void *cdrarg, int kmflags) 14377 { 14378 sf_region_t *rgnp = (sf_region_t *)buf; 14379 bzero(buf, sizeof (*rgnp)); 14380 14381 mutex_init(&rgnp->rgn_mutex, NULL, MUTEX_DEFAULT, NULL); 14382 14383 return (0); 14384 } 14385 14386 /* ARGSUSED */ 14387 static void 14388 sfmmu_rgncache_destructor(void *buf, void *cdrarg) 14389 { 14390 sf_region_t *rgnp = (sf_region_t *)buf; 14391 mutex_destroy(&rgnp->rgn_mutex); 14392 } 14393 14394 static int 14395 sfrgnmap_isnull(sf_region_map_t *map) 14396 { 14397 int i; 14398 14399 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14400 if (map->bitmap[i] != 0) { 14401 return (0); 14402 } 14403 } 14404 return (1); 14405 } 14406 14407 static int 14408 sfhmergnmap_isnull(sf_hmeregion_map_t *map) 14409 { 14410 int i; 14411 14412 for (i = 0; i < SFMMU_HMERGNMAP_WORDS; i++) { 14413 if (map->bitmap[i] != 0) { 14414 return (0); 14415 } 14416 } 14417 return (1); 14418 } 14419 14420 #ifdef DEBUG 14421 static void 14422 check_scd_sfmmu_list(sfmmu_t **headp, sfmmu_t *sfmmup, int onlist) 14423 { 14424 sfmmu_t *sp; 14425 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14426 14427 for (sp = *headp; sp != NULL; sp = sp->sfmmu_scd_link.next) { 14428 ASSERT(srdp == sp->sfmmu_srdp); 14429 if (sp == sfmmup) { 14430 if (onlist) { 14431 return; 14432 } else { 14433 panic("shctx: sfmmu 0x%p found on scd" 14434 "list 0x%p", (void *)sfmmup, 14435 (void *)*headp); 14436 } 14437 } 14438 } 14439 if (onlist) { 14440 panic("shctx: sfmmu 0x%p not found on scd list 0x%p", 14441 (void *)sfmmup, (void *)*headp); 14442 } else { 14443 return; 14444 } 14445 } 14446 #else /* DEBUG */ 14447 #define check_scd_sfmmu_list(headp, sfmmup, onlist) 14448 #endif /* DEBUG */ 14449 14450 /* 14451 * Removes an sfmmu from the SCD sfmmu list. 14452 */ 14453 static void 14454 sfmmu_from_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14455 { 14456 ASSERT(sfmmup->sfmmu_srdp != NULL); 14457 check_scd_sfmmu_list(headp, sfmmup, 1); 14458 if (sfmmup->sfmmu_scd_link.prev != NULL) { 14459 ASSERT(*headp != sfmmup); 14460 sfmmup->sfmmu_scd_link.prev->sfmmu_scd_link.next = 14461 sfmmup->sfmmu_scd_link.next; 14462 } else { 14463 ASSERT(*headp == sfmmup); 14464 *headp = sfmmup->sfmmu_scd_link.next; 14465 } 14466 if (sfmmup->sfmmu_scd_link.next != NULL) { 14467 sfmmup->sfmmu_scd_link.next->sfmmu_scd_link.prev = 14468 sfmmup->sfmmu_scd_link.prev; 14469 } 14470 } 14471 14472 14473 /* 14474 * Adds an sfmmu to the start of the queue. 14475 */ 14476 static void 14477 sfmmu_to_scd_list(sfmmu_t **headp, sfmmu_t *sfmmup) 14478 { 14479 check_scd_sfmmu_list(headp, sfmmup, 0); 14480 sfmmup->sfmmu_scd_link.prev = NULL; 14481 sfmmup->sfmmu_scd_link.next = *headp; 14482 if (*headp != NULL) 14483 (*headp)->sfmmu_scd_link.prev = sfmmup; 14484 *headp = sfmmup; 14485 } 14486 14487 /* 14488 * Remove an scd from the start of the queue. 14489 */ 14490 static void 14491 sfmmu_remove_scd(sf_scd_t **headp, sf_scd_t *scdp) 14492 { 14493 if (scdp->scd_prev != NULL) { 14494 ASSERT(*headp != scdp); 14495 scdp->scd_prev->scd_next = scdp->scd_next; 14496 } else { 14497 ASSERT(*headp == scdp); 14498 *headp = scdp->scd_next; 14499 } 14500 14501 if (scdp->scd_next != NULL) { 14502 scdp->scd_next->scd_prev = scdp->scd_prev; 14503 } 14504 } 14505 14506 /* 14507 * Add an scd to the start of the queue. 
14508 */ 14509 static void 14510 sfmmu_add_scd(sf_scd_t **headp, sf_scd_t *scdp) 14511 { 14512 scdp->scd_prev = NULL; 14513 scdp->scd_next = *headp; 14514 if (*headp != NULL) { 14515 (*headp)->scd_prev = scdp; 14516 } 14517 *headp = scdp; 14518 } 14519 14520 static int 14521 sfmmu_alloc_scd_tsbs(sf_srd_t *srdp, sf_scd_t *scdp) 14522 { 14523 uint_t rid; 14524 uint_t i; 14525 uint_t j; 14526 ulong_t w; 14527 sf_region_t *rgnp; 14528 ulong_t tte8k_cnt = 0; 14529 ulong_t tte4m_cnt = 0; 14530 uint_t tsb_szc; 14531 sfmmu_t *scsfmmup = scdp->scd_sfmmup; 14532 sfmmu_t *ism_hatid; 14533 struct tsb_info *newtsb; 14534 int szc; 14535 14536 ASSERT(srdp != NULL); 14537 14538 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14539 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14540 continue; 14541 } 14542 j = 0; 14543 while (w) { 14544 if (!(w & 0x1)) { 14545 j++; 14546 w >>= 1; 14547 continue; 14548 } 14549 rid = (i << BT_ULSHIFT) | j; 14550 j++; 14551 w >>= 1; 14552 14553 if (rid < SFMMU_MAX_HME_REGIONS) { 14554 rgnp = srdp->srd_hmergnp[rid]; 14555 ASSERT(rgnp->rgn_id == rid); 14556 ASSERT(rgnp->rgn_refcnt > 0); 14557 14558 if (rgnp->rgn_pgszc < TTE4M) { 14559 tte8k_cnt += rgnp->rgn_size >> 14560 TTE_PAGE_SHIFT(TTE8K); 14561 } else { 14562 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14563 tte4m_cnt += rgnp->rgn_size >> 14564 TTE_PAGE_SHIFT(TTE4M); 14565 /* 14566 * Inflate SCD tsb0 by preallocating 14567 * 1/4 8k ttecnt for 4M regions to 14568 * allow for lgpg alloc failure. 14569 */ 14570 tte8k_cnt += rgnp->rgn_size >> 14571 (TTE_PAGE_SHIFT(TTE8K) + 2); 14572 } 14573 } else { 14574 rid -= SFMMU_MAX_HME_REGIONS; 14575 rgnp = srdp->srd_ismrgnp[rid]; 14576 ASSERT(rgnp->rgn_id == rid); 14577 ASSERT(rgnp->rgn_refcnt > 0); 14578 14579 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14580 ASSERT(ism_hatid->sfmmu_ismhat); 14581 14582 for (szc = 0; szc < TTE4M; szc++) { 14583 tte8k_cnt += 14584 ism_hatid->sfmmu_ttecnt[szc] << 14585 TTE_BSZS_SHIFT(szc); 14586 } 14587 14588 ASSERT(rgnp->rgn_pgszc >= TTE4M); 14589 if (rgnp->rgn_pgszc >= TTE4M) { 14590 tte4m_cnt += rgnp->rgn_size >> 14591 TTE_PAGE_SHIFT(TTE4M); 14592 } 14593 } 14594 } 14595 } 14596 14597 tsb_szc = SELECT_TSB_SIZECODE(tte8k_cnt); 14598 14599 /* Allocate both the SCD TSBs here. */ 14600 if (sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14601 tsb_szc, TSB8K|TSB64K|TSB512K, TSB_ALLOC, scsfmmup) && 14602 (tsb_szc <= TSB_4M_SZCODE || 14603 sfmmu_tsbinfo_alloc(&scsfmmup->sfmmu_tsb, 14604 TSB_4M_SZCODE, TSB8K|TSB64K|TSB512K, 14605 TSB_ALLOC, scsfmmup))) { 14606 14607 SFMMU_STAT(sf_scd_1sttsb_allocfail); 14608 return (TSB_ALLOCFAIL); 14609 } else { 14610 scsfmmup->sfmmu_tsb->tsb_flags |= TSB_SHAREDCTX; 14611 14612 if (tte4m_cnt) { 14613 tsb_szc = SELECT_TSB_SIZECODE(tte4m_cnt); 14614 if (sfmmu_tsbinfo_alloc(&newtsb, tsb_szc, 14615 TSB4M|TSB32M|TSB256M, TSB_ALLOC, scsfmmup) && 14616 (tsb_szc <= TSB_4M_SZCODE || 14617 sfmmu_tsbinfo_alloc(&newtsb, TSB_4M_SZCODE, 14618 TSB4M|TSB32M|TSB256M, 14619 TSB_ALLOC, scsfmmup))) { 14620 /* 14621 * If we fail to allocate the 2nd shared tsb, 14622 * just free the 1st tsb, return failure. 
14623 */ 14624 sfmmu_tsbinfo_free(scsfmmup->sfmmu_tsb); 14625 SFMMU_STAT(sf_scd_2ndtsb_allocfail); 14626 return (TSB_ALLOCFAIL); 14627 } else { 14628 ASSERT(scsfmmup->sfmmu_tsb->tsb_next == NULL); 14629 newtsb->tsb_flags |= TSB_SHAREDCTX; 14630 scsfmmup->sfmmu_tsb->tsb_next = newtsb; 14631 SFMMU_STAT(sf_scd_2ndtsb_alloc); 14632 } 14633 } 14634 SFMMU_STAT(sf_scd_1sttsb_alloc); 14635 } 14636 return (TSB_SUCCESS); 14637 } 14638 14639 static void 14640 sfmmu_free_scd_tsbs(sfmmu_t *scd_sfmmu) 14641 { 14642 while (scd_sfmmu->sfmmu_tsb != NULL) { 14643 struct tsb_info *next = scd_sfmmu->sfmmu_tsb->tsb_next; 14644 sfmmu_tsbinfo_free(scd_sfmmu->sfmmu_tsb); 14645 scd_sfmmu->sfmmu_tsb = next; 14646 } 14647 } 14648 14649 /* 14650 * Link the sfmmu onto the hme region list. 14651 */ 14652 void 14653 sfmmu_link_to_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14654 { 14655 uint_t rid; 14656 sf_rgn_link_t *rlink; 14657 sfmmu_t *head; 14658 sf_rgn_link_t *hrlink; 14659 14660 rid = rgnp->rgn_id; 14661 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14662 14663 /* LINTED: constant in conditional context */ 14664 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 1, 1); 14665 ASSERT(rlink != NULL); 14666 mutex_enter(&rgnp->rgn_mutex); 14667 if ((head = rgnp->rgn_sfmmu_head) == NULL) { 14668 rlink->next = NULL; 14669 rlink->prev = NULL; 14670 /* 14671 * make sure rlink's next field is NULL 14672 * before making this link visible. 14673 */ 14674 membar_stst(); 14675 rgnp->rgn_sfmmu_head = sfmmup; 14676 } else { 14677 /* LINTED: constant in conditional context */ 14678 SFMMU_HMERID2RLINKP(head, rid, hrlink, 0, 0); 14679 ASSERT(hrlink != NULL); 14680 ASSERT(hrlink->prev == NULL); 14681 rlink->next = head; 14682 rlink->prev = NULL; 14683 hrlink->prev = sfmmup; 14684 /* 14685 * make sure rlink's next field is correct 14686 * before making this link visible. 14687 */ 14688 membar_stst(); 14689 rgnp->rgn_sfmmu_head = sfmmup; 14690 } 14691 mutex_exit(&rgnp->rgn_mutex); 14692 } 14693 14694 /* 14695 * Unlink the sfmmu from the hme region list. 14696 */ 14697 void 14698 sfmmu_unlink_from_hmeregion(sfmmu_t *sfmmup, sf_region_t *rgnp) 14699 { 14700 uint_t rid; 14701 sf_rgn_link_t *rlink; 14702 14703 rid = rgnp->rgn_id; 14704 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 14705 14706 /* LINTED: constant in conditional context */ 14707 SFMMU_HMERID2RLINKP(sfmmup, rid, rlink, 0, 0); 14708 ASSERT(rlink != NULL); 14709 mutex_enter(&rgnp->rgn_mutex); 14710 if (rgnp->rgn_sfmmu_head == sfmmup) { 14711 sfmmu_t *next = rlink->next; 14712 rgnp->rgn_sfmmu_head = next; 14713 /* 14714 * if we are stopped by xc_attention() after this 14715 * point the forward link walking in 14716 * sfmmu_rgntlb_demap() will work correctly since the 14717 * head correctly points to the next element. 
14718 */ 14719 membar_stst(); 14720 rlink->next = NULL; 14721 ASSERT(rlink->prev == NULL); 14722 if (next != NULL) { 14723 sf_rgn_link_t *nrlink; 14724 /* LINTED: constant in conditional context */ 14725 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14726 ASSERT(nrlink != NULL); 14727 ASSERT(nrlink->prev == sfmmup); 14728 nrlink->prev = NULL; 14729 } 14730 } else { 14731 sfmmu_t *next = rlink->next; 14732 sfmmu_t *prev = rlink->prev; 14733 sf_rgn_link_t *prlink; 14734 14735 ASSERT(prev != NULL); 14736 /* LINTED: constant in conditional context */ 14737 SFMMU_HMERID2RLINKP(prev, rid, prlink, 0, 0); 14738 ASSERT(prlink != NULL); 14739 ASSERT(prlink->next == sfmmup); 14740 prlink->next = next; 14741 /* 14742 * if we are stopped by xc_attention() 14743 * after this point the forward link walking 14744 * will work correctly since the prev element 14745 * correctly points to the next element. 14746 */ 14747 membar_stst(); 14748 rlink->next = NULL; 14749 rlink->prev = NULL; 14750 if (next != NULL) { 14751 sf_rgn_link_t *nrlink; 14752 /* LINTED: constant in conditional context */ 14753 SFMMU_HMERID2RLINKP(next, rid, nrlink, 0, 0); 14754 ASSERT(nrlink != NULL); 14755 ASSERT(nrlink->prev == sfmmup); 14756 nrlink->prev = prev; 14757 } 14758 } 14759 mutex_exit(&rgnp->rgn_mutex); 14760 } 14761 14762 /* 14763 * Link scd sfmmu onto ism or hme region list for each region in the 14764 * scd region map. 14765 */ 14766 void 14767 sfmmu_link_scd_to_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14768 { 14769 uint_t rid; 14770 uint_t i; 14771 uint_t j; 14772 ulong_t w; 14773 sf_region_t *rgnp; 14774 sfmmu_t *scsfmmup; 14775 14776 scsfmmup = scdp->scd_sfmmup; 14777 ASSERT(scsfmmup->sfmmu_scdhat); 14778 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14779 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14780 continue; 14781 } 14782 j = 0; 14783 while (w) { 14784 if (!(w & 0x1)) { 14785 j++; 14786 w >>= 1; 14787 continue; 14788 } 14789 rid = (i << BT_ULSHIFT) | j; 14790 j++; 14791 w >>= 1; 14792 14793 if (rid < SFMMU_MAX_HME_REGIONS) { 14794 rgnp = srdp->srd_hmergnp[rid]; 14795 ASSERT(rgnp->rgn_id == rid); 14796 ASSERT(rgnp->rgn_refcnt > 0); 14797 sfmmu_link_to_hmeregion(scsfmmup, rgnp); 14798 } else { 14799 sfmmu_t *ism_hatid = NULL; 14800 ism_ment_t *ism_ment; 14801 rid -= SFMMU_MAX_HME_REGIONS; 14802 rgnp = srdp->srd_ismrgnp[rid]; 14803 ASSERT(rgnp->rgn_id == rid); 14804 ASSERT(rgnp->rgn_refcnt > 0); 14805 14806 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14807 ASSERT(ism_hatid->sfmmu_ismhat); 14808 ism_ment = &scdp->scd_ism_links[rid]; 14809 ism_ment->iment_hat = scsfmmup; 14810 ism_ment->iment_base_va = rgnp->rgn_saddr; 14811 mutex_enter(&ism_mlist_lock); 14812 iment_add(ism_ment, ism_hatid); 14813 mutex_exit(&ism_mlist_lock); 14814 14815 } 14816 } 14817 } 14818 } 14819 /* 14820 * Unlink scd sfmmu from ism or hme region list for each region in the 14821 * scd region map. 
14822 */ 14823 void 14824 sfmmu_unlink_scd_from_regions(sf_srd_t *srdp, sf_scd_t *scdp) 14825 { 14826 uint_t rid; 14827 uint_t i; 14828 uint_t j; 14829 ulong_t w; 14830 sf_region_t *rgnp; 14831 sfmmu_t *scsfmmup; 14832 14833 scsfmmup = scdp->scd_sfmmup; 14834 for (i = 0; i < SFMMU_RGNMAP_WORDS; i++) { 14835 if ((w = scdp->scd_region_map.bitmap[i]) == 0) { 14836 continue; 14837 } 14838 j = 0; 14839 while (w) { 14840 if (!(w & 0x1)) { 14841 j++; 14842 w >>= 1; 14843 continue; 14844 } 14845 rid = (i << BT_ULSHIFT) | j; 14846 j++; 14847 w >>= 1; 14848 14849 if (rid < SFMMU_MAX_HME_REGIONS) { 14850 rgnp = srdp->srd_hmergnp[rid]; 14851 ASSERT(rgnp->rgn_id == rid); 14852 ASSERT(rgnp->rgn_refcnt > 0); 14853 sfmmu_unlink_from_hmeregion(scsfmmup, 14854 rgnp); 14855 14856 } else { 14857 sfmmu_t *ism_hatid = NULL; 14858 ism_ment_t *ism_ment; 14859 rid -= SFMMU_MAX_HME_REGIONS; 14860 rgnp = srdp->srd_ismrgnp[rid]; 14861 ASSERT(rgnp->rgn_id == rid); 14862 ASSERT(rgnp->rgn_refcnt > 0); 14863 14864 ism_hatid = (sfmmu_t *)rgnp->rgn_obj; 14865 ASSERT(ism_hatid->sfmmu_ismhat); 14866 ism_ment = &scdp->scd_ism_links[rid]; 14867 ASSERT(ism_ment->iment_hat == scdp->scd_sfmmup); 14868 ASSERT(ism_ment->iment_base_va == 14869 rgnp->rgn_saddr); 14870 mutex_enter(&ism_mlist_lock); 14871 iment_sub(ism_ment, ism_hatid); 14872 mutex_exit(&ism_mlist_lock); 14873 14874 } 14875 } 14876 } 14877 } 14878 /* 14879 * Allocates and initialises a new SCD structure, this is called with 14880 * the srd_scd_mutex held and returns with the reference count 14881 * initialised to 1. 14882 */ 14883 static sf_scd_t * 14884 sfmmu_alloc_scd(sf_srd_t *srdp, sf_region_map_t *new_map) 14885 { 14886 sf_scd_t *new_scdp; 14887 sfmmu_t *scsfmmup; 14888 int i; 14889 14890 ASSERT(MUTEX_HELD(&srdp->srd_scd_mutex)); 14891 new_scdp = kmem_cache_alloc(scd_cache, KM_SLEEP); 14892 14893 scsfmmup = kmem_cache_alloc(sfmmuid_cache, KM_SLEEP); 14894 new_scdp->scd_sfmmup = scsfmmup; 14895 scsfmmup->sfmmu_srdp = srdp; 14896 scsfmmup->sfmmu_scdp = new_scdp; 14897 scsfmmup->sfmmu_tsb0_4minflcnt = 0; 14898 scsfmmup->sfmmu_scdhat = 1; 14899 CPUSET_ALL(scsfmmup->sfmmu_cpusran); 14900 bzero(scsfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE); 14901 14902 ASSERT(max_mmu_ctxdoms > 0); 14903 for (i = 0; i < max_mmu_ctxdoms; i++) { 14904 scsfmmup->sfmmu_ctxs[i].cnum = INVALID_CONTEXT; 14905 scsfmmup->sfmmu_ctxs[i].gnum = 0; 14906 } 14907 14908 for (i = 0; i < MMU_PAGE_SIZES; i++) { 14909 new_scdp->scd_rttecnt[i] = 0; 14910 } 14911 14912 new_scdp->scd_region_map = *new_map; 14913 new_scdp->scd_refcnt = 1; 14914 if (sfmmu_alloc_scd_tsbs(srdp, new_scdp) != TSB_SUCCESS) { 14915 kmem_cache_free(scd_cache, new_scdp); 14916 kmem_cache_free(sfmmuid_cache, scsfmmup); 14917 return (NULL); 14918 } 14919 if (&mmu_init_scd) { 14920 mmu_init_scd(new_scdp); 14921 } 14922 return (new_scdp); 14923 } 14924 14925 /* 14926 * The first phase of a process joining an SCD. The hat structure is 14927 * linked to the SCD queue and then the HAT_JOIN_SCD sfmmu flag is set 14928 * and a cross-call with context invalidation is used to cause the 14929 * remaining work to be carried out in the sfmmu_tsbmiss_exception() 14930 * routine. 
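 *
 * Roughly, as implemented below together with sfmmu_finish_join_scd():
 *   1. the hat is linked onto scdp->scd_sf_list and the SCD refcount is
 *	bumped;
 *   2. HAT_JOIN_SCD is set and sfmmu_invalidate_ctx() forces every thread
 *	of the process to fault in with an invalid context;
 *   3. the TSB miss path then calls sfmmu_finish_join_scd(), which
 *	invalidates the private TSBs and sets HAT_CTX1_FLAG on the SCD's
 *	ISM maps.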
14931 */ 14932 static void 14933 sfmmu_join_scd(sf_scd_t *scdp, sfmmu_t *sfmmup) 14934 { 14935 hatlock_t *hatlockp; 14936 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 14937 int i; 14938 sf_scd_t *old_scdp; 14939 14940 ASSERT(srdp != NULL); 14941 ASSERT(scdp != NULL); 14942 ASSERT(scdp->scd_refcnt > 0); 14943 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 14944 14945 if ((old_scdp = sfmmup->sfmmu_scdp) != NULL) { 14946 ASSERT(old_scdp != scdp); 14947 14948 mutex_enter(&old_scdp->scd_mutex); 14949 sfmmu_from_scd_list(&old_scdp->scd_sf_list, sfmmup); 14950 mutex_exit(&old_scdp->scd_mutex); 14951 /* 14952 * sfmmup leaves the old scd. Update sfmmu_ttecnt to 14953 * include the shme rgn ttecnt for rgns that 14954 * were in the old SCD 14955 */ 14956 for (i = 0; i < mmu_page_sizes; i++) { 14957 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 14958 old_scdp->scd_rttecnt[i]); 14959 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 14960 sfmmup->sfmmu_scdrttecnt[i]); 14961 } 14962 } 14963 14964 /* 14965 * Move sfmmu to the scd lists. 14966 */ 14967 mutex_enter(&scdp->scd_mutex); 14968 sfmmu_to_scd_list(&scdp->scd_sf_list, sfmmup); 14969 mutex_exit(&scdp->scd_mutex); 14970 SF_SCD_INCR_REF(scdp); 14971 14972 hatlockp = sfmmu_hat_enter(sfmmup); 14973 /* 14974 * For a multi-thread process, we must stop 14975 * all the other threads before joining the scd. 14976 */ 14977 14978 SFMMU_FLAGS_SET(sfmmup, HAT_JOIN_SCD); 14979 14980 sfmmu_invalidate_ctx(sfmmup); 14981 sfmmup->sfmmu_scdp = scdp; 14982 14983 /* 14984 * Copy scd_rttecnt into sfmmup's sfmmu_scdrttecnt, and update 14985 * sfmmu_ttecnt to not include the rgn ttecnt just joined in SCD. 14986 */ 14987 for (i = 0; i < mmu_page_sizes; i++) { 14988 sfmmup->sfmmu_scdrttecnt[i] = scdp->scd_rttecnt[i]; 14989 ASSERT(sfmmup->sfmmu_ttecnt[i] >= scdp->scd_rttecnt[i]); 14990 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 14991 -sfmmup->sfmmu_scdrttecnt[i]); 14992 } 14993 /* update tsb0 inflation count */ 14994 if (old_scdp != NULL) { 14995 sfmmup->sfmmu_tsb0_4minflcnt += 14996 old_scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 14997 } 14998 ASSERT(sfmmup->sfmmu_tsb0_4minflcnt >= 14999 scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt); 15000 sfmmup->sfmmu_tsb0_4minflcnt -= scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15001 15002 sfmmu_hat_exit(hatlockp); 15003 15004 if (old_scdp != NULL) { 15005 SF_SCD_DECR_REF(srdp, old_scdp); 15006 } 15007 15008 } 15009 15010 /* 15011 * This routine is called by a process to become part of an SCD. It is called 15012 * from sfmmu_tsbmiss_exception() once most of the initial work has been 15013 * done by sfmmu_join_scd(). This routine must not drop the hat lock. 
15014 */ 15015 static void 15016 sfmmu_finish_join_scd(sfmmu_t *sfmmup) 15017 { 15018 struct tsb_info *tsbinfop; 15019 15020 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15021 ASSERT(sfmmup->sfmmu_scdp != NULL); 15022 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)); 15023 ASSERT(!SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15024 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ALLCTX_INVALID)); 15025 15026 for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL; 15027 tsbinfop = tsbinfop->tsb_next) { 15028 if (tsbinfop->tsb_flags & TSB_SWAPPED) { 15029 continue; 15030 } 15031 ASSERT(!(tsbinfop->tsb_flags & TSB_RELOC_FLAG)); 15032 15033 sfmmu_inv_tsb(tsbinfop->tsb_va, 15034 TSB_BYTES(tsbinfop->tsb_szc)); 15035 } 15036 15037 /* Set HAT_CTX1_FLAG for all SCD ISMs */ 15038 sfmmu_ism_hatflags(sfmmup, 1); 15039 15040 SFMMU_STAT(sf_join_scd); 15041 } 15042 15043 /* 15044 * This routine is called in order to check if there is an SCD which matches 15045 * the process's region map if not then a new SCD may be created. 15046 */ 15047 static void 15048 sfmmu_find_scd(sfmmu_t *sfmmup) 15049 { 15050 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15051 sf_scd_t *scdp, *new_scdp; 15052 int ret; 15053 15054 ASSERT(srdp != NULL); 15055 ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15056 15057 mutex_enter(&srdp->srd_scd_mutex); 15058 for (scdp = srdp->srd_scdp; scdp != NULL; 15059 scdp = scdp->scd_next) { 15060 SF_RGNMAP_EQUAL(&scdp->scd_region_map, 15061 &sfmmup->sfmmu_region_map, ret); 15062 if (ret == 1) { 15063 SF_SCD_INCR_REF(scdp); 15064 mutex_exit(&srdp->srd_scd_mutex); 15065 sfmmu_join_scd(scdp, sfmmup); 15066 ASSERT(scdp->scd_refcnt >= 2); 15067 atomic_dec_32((volatile uint32_t *)&scdp->scd_refcnt); 15068 return; 15069 } else { 15070 /* 15071 * If the sfmmu region map is a subset of the scd 15072 * region map, then the assumption is that this process 15073 * will continue attaching to ISM segments until the 15074 * region maps are equal. 15075 */ 15076 SF_RGNMAP_IS_SUBSET(&scdp->scd_region_map, 15077 &sfmmup->sfmmu_region_map, ret); 15078 if (ret == 1) { 15079 mutex_exit(&srdp->srd_scd_mutex); 15080 return; 15081 } 15082 } 15083 } 15084 15085 ASSERT(scdp == NULL); 15086 /* 15087 * No matching SCD has been found, create a new one. 15088 */ 15089 if ((new_scdp = sfmmu_alloc_scd(srdp, &sfmmup->sfmmu_region_map)) == 15090 NULL) { 15091 mutex_exit(&srdp->srd_scd_mutex); 15092 return; 15093 } 15094 15095 /* 15096 * sfmmu_alloc_scd() returns with a ref count of 1 on the scd. 15097 */ 15098 15099 /* Set scd_rttecnt for shme rgns in SCD */ 15100 sfmmu_set_scd_rttecnt(srdp, new_scdp); 15101 15102 /* 15103 * Link scd onto srd_scdp list and scd sfmmu onto region/iment lists. 15104 */ 15105 sfmmu_link_scd_to_regions(srdp, new_scdp); 15106 sfmmu_add_scd(&srdp->srd_scdp, new_scdp); 15107 SFMMU_STAT_ADD(sf_create_scd, 1); 15108 15109 mutex_exit(&srdp->srd_scd_mutex); 15110 sfmmu_join_scd(new_scdp, sfmmup); 15111 ASSERT(new_scdp->scd_refcnt >= 2); 15112 atomic_dec_32((volatile uint32_t *)&new_scdp->scd_refcnt); 15113 } 15114 15115 /* 15116 * This routine is called by a process to remove itself from an SCD. It is 15117 * either called when the processes has detached from a segment or from 15118 * hat_free_start() as a result of calling exit. 
15119 */ 15120 static void 15121 sfmmu_leave_scd(sfmmu_t *sfmmup, uchar_t r_type) 15122 { 15123 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15124 sf_srd_t *srdp = sfmmup->sfmmu_srdp; 15125 hatlock_t *hatlockp = TSB_HASH(sfmmup); 15126 int i; 15127 15128 ASSERT(scdp != NULL); 15129 ASSERT(srdp != NULL); 15130 15131 if (sfmmup->sfmmu_free) { 15132 /* 15133 * If the process is part of an SCD the sfmmu is unlinked 15134 * from scd_sf_list. 15135 */ 15136 mutex_enter(&scdp->scd_mutex); 15137 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15138 mutex_exit(&scdp->scd_mutex); 15139 /* 15140 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15141 * are about to leave the SCD 15142 */ 15143 for (i = 0; i < mmu_page_sizes; i++) { 15144 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15145 scdp->scd_rttecnt[i]); 15146 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15147 sfmmup->sfmmu_scdrttecnt[i]); 15148 sfmmup->sfmmu_scdrttecnt[i] = 0; 15149 } 15150 sfmmup->sfmmu_scdp = NULL; 15151 15152 SF_SCD_DECR_REF(srdp, scdp); 15153 return; 15154 } 15155 15156 ASSERT(r_type != SFMMU_REGION_ISM || 15157 SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15158 ASSERT(scdp->scd_refcnt); 15159 ASSERT(!sfmmup->sfmmu_free); 15160 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15161 ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock)); 15162 15163 /* 15164 * Wait for ISM maps to be updated. 15165 */ 15166 if (r_type != SFMMU_REGION_ISM) { 15167 while (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY) && 15168 sfmmup->sfmmu_scdp != NULL) { 15169 cv_wait(&sfmmup->sfmmu_tsb_cv, 15170 HATLOCK_MUTEXP(hatlockp)); 15171 } 15172 15173 if (sfmmup->sfmmu_scdp == NULL) { 15174 sfmmu_hat_exit(hatlockp); 15175 return; 15176 } 15177 SFMMU_FLAGS_SET(sfmmup, HAT_ISMBUSY); 15178 } 15179 15180 if (SFMMU_FLAGS_ISSET(sfmmup, HAT_JOIN_SCD)) { 15181 SFMMU_FLAGS_CLEAR(sfmmup, HAT_JOIN_SCD); 15182 /* 15183 * Since HAT_JOIN_SCD was set our context 15184 * is still invalid. 15185 */ 15186 } else { 15187 /* 15188 * For a multi-thread process, we must stop 15189 * all the other threads before leaving the scd. 15190 */ 15191 15192 sfmmu_invalidate_ctx(sfmmup); 15193 } 15194 15195 /* Clear all the rid's for ISM, delete flags, etc */ 15196 ASSERT(SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)); 15197 sfmmu_ism_hatflags(sfmmup, 0); 15198 15199 /* 15200 * Update sfmmu_ttecnt to include the rgn ttecnt for rgns that 15201 * are in SCD before this sfmmup leaves the SCD. 15202 */ 15203 for (i = 0; i < mmu_page_sizes; i++) { 15204 ASSERT(sfmmup->sfmmu_scdrttecnt[i] == 15205 scdp->scd_rttecnt[i]); 15206 atomic_add_long(&sfmmup->sfmmu_ttecnt[i], 15207 sfmmup->sfmmu_scdrttecnt[i]); 15208 sfmmup->sfmmu_scdrttecnt[i] = 0; 15209 /* update ismttecnt to include SCD ism before hat leaves SCD */ 15210 sfmmup->sfmmu_ismttecnt[i] += sfmmup->sfmmu_scdismttecnt[i]; 15211 sfmmup->sfmmu_scdismttecnt[i] = 0; 15212 } 15213 /* update tsb0 inflation count */ 15214 sfmmup->sfmmu_tsb0_4minflcnt += scdp->scd_sfmmup->sfmmu_tsb0_4minflcnt; 15215 15216 if (r_type != SFMMU_REGION_ISM) { 15217 SFMMU_FLAGS_CLEAR(sfmmup, HAT_ISMBUSY); 15218 } 15219 sfmmup->sfmmu_scdp = NULL; 15220 15221 sfmmu_hat_exit(hatlockp); 15222 15223 /* 15224 * Unlink sfmmu from scd_sf_list this can be done without holding 15225 * the hat lock as we hold the sfmmu_as lock which prevents 15226 * hat_join_region from adding this thread to the scd again. Other 15227 * threads check if sfmmu_scdp is NULL under hat lock and if it's NULL 15228 * they won't get here, since sfmmu_leave_scd() clears sfmmu_scdp 15229 * while holding the hat lock. 
15230 */ 15231 mutex_enter(&scdp->scd_mutex); 15232 sfmmu_from_scd_list(&scdp->scd_sf_list, sfmmup); 15233 mutex_exit(&scdp->scd_mutex); 15234 SFMMU_STAT(sf_leave_scd); 15235 15236 SF_SCD_DECR_REF(srdp, scdp); 15237 hatlockp = sfmmu_hat_enter(sfmmup); 15238 15239 } 15240 15241 /* 15242 * Unlink and free up an SCD structure with a reference count of 0. 15243 */ 15244 static void 15245 sfmmu_destroy_scd(sf_srd_t *srdp, sf_scd_t *scdp, sf_region_map_t *scd_rmap) 15246 { 15247 sfmmu_t *scsfmmup; 15248 sf_scd_t *sp; 15249 hatlock_t *shatlockp; 15250 int i, ret; 15251 15252 mutex_enter(&srdp->srd_scd_mutex); 15253 for (sp = srdp->srd_scdp; sp != NULL; sp = sp->scd_next) { 15254 if (sp == scdp) 15255 break; 15256 } 15257 if (sp == NULL || sp->scd_refcnt) { 15258 mutex_exit(&srdp->srd_scd_mutex); 15259 return; 15260 } 15261 15262 /* 15263 * It is possible that the scd has been freed and reallocated with a 15264 * different region map while we've been waiting for the srd_scd_mutex. 15265 */ 15266 SF_RGNMAP_EQUAL(scd_rmap, &sp->scd_region_map, ret); 15267 if (ret != 1) { 15268 mutex_exit(&srdp->srd_scd_mutex); 15269 return; 15270 } 15271 15272 ASSERT(scdp->scd_sf_list == NULL); 15273 /* 15274 * Unlink scd from srd_scdp list. 15275 */ 15276 sfmmu_remove_scd(&srdp->srd_scdp, scdp); 15277 mutex_exit(&srdp->srd_scd_mutex); 15278 15279 sfmmu_unlink_scd_from_regions(srdp, scdp); 15280 15281 /* Clear shared context tsb and release ctx */ 15282 scsfmmup = scdp->scd_sfmmup; 15283 15284 /* 15285 * create a barrier so that scd will not be destroyed 15286 * if other thread still holds the same shared hat lock. 15287 * E.g., sfmmu_tsbmiss_exception() needs to acquire the 15288 * shared hat lock before checking the shared tsb reloc flag. 15289 */ 15290 shatlockp = sfmmu_hat_enter(scsfmmup); 15291 sfmmu_hat_exit(shatlockp); 15292 15293 sfmmu_free_scd_tsbs(scsfmmup); 15294 15295 for (i = 0; i < SFMMU_L1_HMERLINKS; i++) { 15296 if (scsfmmup->sfmmu_hmeregion_links[i] != NULL) { 15297 kmem_free(scsfmmup->sfmmu_hmeregion_links[i], 15298 SFMMU_L2_HMERLINKS_SIZE); 15299 scsfmmup->sfmmu_hmeregion_links[i] = NULL; 15300 } 15301 } 15302 kmem_cache_free(sfmmuid_cache, scsfmmup); 15303 kmem_cache_free(scd_cache, scdp); 15304 SFMMU_STAT(sf_destroy_scd); 15305 } 15306 15307 /* 15308 * Modifies the HAT_CTX1_FLAG for each of the ISM segments which correspond to 15309 * bits which are set in the ism_region_map parameter. This flag indicates to 15310 * the tsbmiss handler that mapping for these segments should be loaded using 15311 * the shared context. 15312 */ 15313 static void 15314 sfmmu_ism_hatflags(sfmmu_t *sfmmup, int addflag) 15315 { 15316 sf_scd_t *scdp = sfmmup->sfmmu_scdp; 15317 ism_blk_t *ism_blkp; 15318 ism_map_t *ism_map; 15319 int i, rid; 15320 15321 ASSERT(sfmmup->sfmmu_iblk != NULL); 15322 ASSERT(scdp != NULL); 15323 /* 15324 * Note that the caller either set HAT_ISMBUSY flag or checked 15325 * under hat lock that HAT_ISMBUSY was not set by another thread. 
*/ 15327 ASSERT(sfmmu_hat_lock_held(sfmmup)); 15328 15329 ism_blkp = sfmmup->sfmmu_iblk; 15330 while (ism_blkp != NULL) { 15331 ism_map = ism_blkp->iblk_maps; 15332 for (i = 0; ism_map[i].imap_ismhat && i < ISM_MAP_SLOTS; i++) { 15333 rid = ism_map[i].imap_rid; 15334 if (rid == SFMMU_INVALID_ISMRID) { 15335 continue; 15336 } 15337 ASSERT(rid >= 0 && rid < SFMMU_MAX_ISM_REGIONS); 15338 if (SF_RGNMAP_TEST(scdp->scd_ismregion_map, rid) && 15339 addflag) { 15340 ism_map[i].imap_hatflags |= 15341 HAT_CTX1_FLAG; 15342 } else { 15343 ism_map[i].imap_hatflags &= 15344 ~HAT_CTX1_FLAG; 15345 } 15346 } 15347 ism_blkp = ism_blkp->iblk_next; 15348 } 15349 } 15350 15351 static int 15352 sfmmu_srd_lock_held(sf_srd_t *srdp) 15353 { 15354 return (MUTEX_HELD(&srdp->srd_mutex)); 15355 } 15356 15357 /* ARGSUSED */ 15358 static int 15359 sfmmu_scdcache_constructor(void *buf, void *cdrarg, int kmflags) 15360 { 15361 sf_scd_t *scdp = (sf_scd_t *)buf; 15362 15363 bzero(buf, sizeof (sf_scd_t)); 15364 mutex_init(&scdp->scd_mutex, NULL, MUTEX_DEFAULT, NULL); 15365 return (0); 15366 } 15367 15368 /* ARGSUSED */ 15369 static void 15370 sfmmu_scdcache_destructor(void *buf, void *cdrarg) 15371 { 15372 sf_scd_t *scdp = (sf_scd_t *)buf; 15373 15374 mutex_destroy(&scdp->scd_mutex); 15375 } 15376 15377 /* 15378 * The listp parameter is a pointer to a list of hmeblks which are partially 15379 * freed as a result of calling sfmmu_hblk_hash_rm(). The last phase of the 15380 * freeing process is to cross-call all cpus to ensure that there are no 15381 * remaining cached references. 15382 * 15383 * If the local generation number is less than the global then we can free 15384 * hmeblks which are already on the pending queue as another cpu has completed 15385 * the cross-call. 15386 * 15387 * We cross-call to make sure that there are no threads on other cpus accessing 15388 * these hmeblks and then complete the process of freeing them under the 15389 * following conditions: 15390 * The total number of pending hmeblks is greater than the threshold 15391 * The reserve list has fewer than HBLK_RESERVE_CNT hmeblks 15392 * It is at least 1 second since the last time we cross-called 15393 * 15394 * Otherwise, we add the hmeblks to the per-cpu pending queue.
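 *
 * In pseudocode, the flush decision implemented below is roughly:
 *
 *	if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT ||
 *	    pending + count > cpu_hme_pend_thresh || one_second_expired))
 *		cross-call all other cpus (xt_sync) and sfmmu_hblk_free()
 *		the combined list;
 *	else
 *		append the list to this cpu's pending queue;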
15395 */ 15396 static void 15397 sfmmu_hblks_list_purge(struct hme_blk **listp, int dontfree) 15398 { 15399 struct hme_blk *hblkp, *pr_hblkp = NULL; 15400 int count = 0; 15401 cpuset_t cpuset = cpu_ready_set; 15402 cpu_hme_pend_t *cpuhp; 15403 timestruc_t now; 15404 int one_second_expired = 0; 15405 15406 gethrestime_lasttick(&now); 15407 15408 for (hblkp = *listp; hblkp != NULL; hblkp = hblkp->hblk_next) { 15409 ASSERT(hblkp->hblk_shw_bit == 0); 15410 ASSERT(hblkp->hblk_shared == 0); 15411 count++; 15412 pr_hblkp = hblkp; 15413 } 15414 15415 cpuhp = &cpu_hme_pend[CPU->cpu_seqid]; 15416 mutex_enter(&cpuhp->chp_mutex); 15417 15418 if ((cpuhp->chp_count + count) == 0) { 15419 mutex_exit(&cpuhp->chp_mutex); 15420 return; 15421 } 15422 15423 if ((now.tv_sec - cpuhp->chp_timestamp) > 1) { 15424 one_second_expired = 1; 15425 } 15426 15427 if (!dontfree && (freehblkcnt < HBLK_RESERVE_CNT || 15428 (cpuhp->chp_count + count) > cpu_hme_pend_thresh || 15429 one_second_expired)) { 15430 /* Append global list to local */ 15431 if (pr_hblkp == NULL) { 15432 *listp = cpuhp->chp_listp; 15433 } else { 15434 pr_hblkp->hblk_next = cpuhp->chp_listp; 15435 } 15436 cpuhp->chp_listp = NULL; 15437 cpuhp->chp_count = 0; 15438 cpuhp->chp_timestamp = now.tv_sec; 15439 mutex_exit(&cpuhp->chp_mutex); 15440 15441 kpreempt_disable(); 15442 CPUSET_DEL(cpuset, CPU->cpu_id); 15443 xt_sync(cpuset); 15444 xt_sync(cpuset); 15445 kpreempt_enable(); 15446 15447 /* 15448 * At this stage we know that no trap handlers on other 15449 * cpus can have references to hmeblks on the list. 15450 */ 15451 sfmmu_hblk_free(listp); 15452 } else if (*listp != NULL) { 15453 pr_hblkp->hblk_next = cpuhp->chp_listp; 15454 cpuhp->chp_listp = *listp; 15455 cpuhp->chp_count += count; 15456 *listp = NULL; 15457 mutex_exit(&cpuhp->chp_mutex); 15458 } else { 15459 mutex_exit(&cpuhp->chp_mutex); 15460 } 15461 } 15462 15463 /* 15464 * Add an hmeblk to the the hash list. 15465 */ 15466 void 15467 sfmmu_hblk_hash_add(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15468 uint64_t hblkpa) 15469 { 15470 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15471 #ifdef DEBUG 15472 if (hmebp->hmeblkp == NULL) { 15473 ASSERT(hmebp->hmeh_nextpa == HMEBLK_ENDPA); 15474 } 15475 #endif /* DEBUG */ 15476 15477 hmeblkp->hblk_nextpa = hmebp->hmeh_nextpa; 15478 /* 15479 * Since the TSB miss handler now does not lock the hash chain before 15480 * walking it, make sure that the hmeblks nextpa is globally visible 15481 * before we make the hmeblk globally visible by updating the chain root 15482 * pointer in the hash bucket. 15483 */ 15484 membar_producer(); 15485 hmebp->hmeh_nextpa = hblkpa; 15486 hmeblkp->hblk_next = hmebp->hmeblkp; 15487 hmebp->hmeblkp = hmeblkp; 15488 15489 } 15490 15491 /* 15492 * This function is the first part of a 2 part process to remove an hmeblk 15493 * from the hash chain. In this phase we unlink the hmeblk from the hash chain 15494 * but leave the next physical pointer unchanged. The hmeblk is then linked onto 15495 * a per-cpu pending list using the virtual address pointer. 15496 * 15497 * TSB miss trap handlers that start after this phase will no longer see 15498 * this hmeblk. TSB miss handlers that still cache this hmeblk in a register 15499 * can still use it for further chain traversal because we haven't yet modifed 15500 * the next physical pointer or freed it. 15501 * 15502 * In the second phase of hmeblk removal we'll issue a barrier xcall before 15503 * we reuse or free this hmeblk. 
This will make sure all lingering references to 15504 * the hmeblk after first phase disappear before we finally reclaim it. 15505 * This scheme eliminates the need for TSB miss handlers to lock hmeblk chains 15506 * during their traversal. 15507 * 15508 * The hmehash_mutex must be held when calling this function. 15509 * 15510 * Input: 15511 * hmebp - hme hash bucket pointer 15512 * hmeblkp - address of hmeblk to be removed 15513 * pr_hblk - virtual address of previous hmeblkp 15514 * listp - pointer to list of hmeblks linked by virtual address 15515 * free_now flag - indicates that a complete removal from the hash chains 15516 * is necessary. 15517 * 15518 * It is inefficient to use the free_now flag as a cross-call is required to 15519 * remove a single hmeblk from the hash chain but is necessary when hmeblks are 15520 * in short supply. 15521 */ 15522 void 15523 sfmmu_hblk_hash_rm(struct hmehash_bucket *hmebp, struct hme_blk *hmeblkp, 15524 struct hme_blk *pr_hblk, struct hme_blk **listp, 15525 int free_now) 15526 { 15527 int shw_size, vshift; 15528 struct hme_blk *shw_hblkp; 15529 uint_t shw_mask, newshw_mask; 15530 caddr_t vaddr; 15531 int size; 15532 cpuset_t cpuset = cpu_ready_set; 15533 15534 ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp)); 15535 15536 if (hmebp->hmeblkp == hmeblkp) { 15537 hmebp->hmeh_nextpa = hmeblkp->hblk_nextpa; 15538 hmebp->hmeblkp = hmeblkp->hblk_next; 15539 } else { 15540 pr_hblk->hblk_nextpa = hmeblkp->hblk_nextpa; 15541 pr_hblk->hblk_next = hmeblkp->hblk_next; 15542 } 15543 15544 size = get_hblk_ttesz(hmeblkp); 15545 shw_hblkp = hmeblkp->hblk_shadow; 15546 if (shw_hblkp) { 15547 ASSERT(hblktosfmmu(hmeblkp) != KHATID); 15548 ASSERT(!hmeblkp->hblk_shared); 15549 #ifdef DEBUG 15550 if (mmu_page_sizes == max_mmu_page_sizes) { 15551 ASSERT(size < TTE256M); 15552 } else { 15553 ASSERT(size < TTE4M); 15554 } 15555 #endif /* DEBUG */ 15556 15557 shw_size = get_hblk_ttesz(shw_hblkp); 15558 vaddr = (caddr_t)get_hblk_base(hmeblkp); 15559 vshift = vaddr_to_vshift(shw_hblkp->hblk_tag, vaddr, shw_size); 15560 ASSERT(vshift < 8); 15561 /* 15562 * Atomically clear shadow mask bit 15563 */ 15564 do { 15565 shw_mask = shw_hblkp->hblk_shw_mask; 15566 ASSERT(shw_mask & (1 << vshift)); 15567 newshw_mask = shw_mask & ~(1 << vshift); 15568 newshw_mask = atomic_cas_32(&shw_hblkp->hblk_shw_mask, 15569 shw_mask, newshw_mask); 15570 } while (newshw_mask != shw_mask); 15571 hmeblkp->hblk_shadow = NULL; 15572 } 15573 hmeblkp->hblk_shw_bit = 0; 15574 15575 if (hmeblkp->hblk_shared) { 15576 #ifdef DEBUG 15577 sf_srd_t *srdp; 15578 sf_region_t *rgnp; 15579 uint_t rid; 15580 15581 srdp = hblktosrd(hmeblkp); 15582 ASSERT(srdp != NULL && srdp->srd_refcnt != 0); 15583 rid = hmeblkp->hblk_tag.htag_rid; 15584 ASSERT(SFMMU_IS_SHMERID_VALID(rid)); 15585 ASSERT(rid < SFMMU_MAX_HME_REGIONS); 15586 rgnp = srdp->srd_hmergnp[rid]; 15587 ASSERT(rgnp != NULL); 15588 SFMMU_VALIDATE_SHAREDHBLK(hmeblkp, srdp, rgnp, rid); 15589 #endif /* DEBUG */ 15590 hmeblkp->hblk_shared = 0; 15591 } 15592 if (free_now) { 15593 kpreempt_disable(); 15594 CPUSET_DEL(cpuset, CPU->cpu_id); 15595 xt_sync(cpuset); 15596 xt_sync(cpuset); 15597 kpreempt_enable(); 15598 15599 hmeblkp->hblk_nextpa = HMEBLK_ENDPA; 15600 hmeblkp->hblk_next = NULL; 15601 } else { 15602 /* Append hmeblkp to listp for processing later. 
*/ 15603 hmeblkp->hblk_next = *listp; 15604 *listp = hmeblkp; 15605 } 15606 } 15607 15608 /* 15609 * This routine is called when memory is in short supply and returns a free 15610 * hmeblk of the requested size from the cpu pending lists. 15611 */ 15612 static struct hme_blk * 15613 sfmmu_check_pending_hblks(int size) 15614 { 15615 int i; 15616 struct hme_blk *hmeblkp = NULL, *last_hmeblkp; 15617 int found_hmeblk; 15618 cpuset_t cpuset = cpu_ready_set; 15619 cpu_hme_pend_t *cpuhp; 15620 15621 /* Flush cpu hblk pending queues */ 15622 for (i = 0; i < NCPU; i++) { 15623 cpuhp = &cpu_hme_pend[i]; 15624 if (cpuhp->chp_listp != NULL) { 15625 mutex_enter(&cpuhp->chp_mutex); 15626 if (cpuhp->chp_listp == NULL) { 15627 mutex_exit(&cpuhp->chp_mutex); 15628 continue; 15629 } 15630 found_hmeblk = 0; 15631 last_hmeblkp = NULL; 15632 for (hmeblkp = cpuhp->chp_listp; hmeblkp != NULL; 15633 hmeblkp = hmeblkp->hblk_next) { 15634 if (get_hblk_ttesz(hmeblkp) == size) { 15635 if (last_hmeblkp == NULL) { 15636 cpuhp->chp_listp = 15637 hmeblkp->hblk_next; 15638 } else { 15639 last_hmeblkp->hblk_next = 15640 hmeblkp->hblk_next; 15641 } 15642 ASSERT(cpuhp->chp_count > 0); 15643 cpuhp->chp_count--; 15644 found_hmeblk = 1; 15645 break; 15646 } else { 15647 last_hmeblkp = hmeblkp; 15648 } 15649 } 15650 mutex_exit(&cpuhp->chp_mutex); 15651 15652 if (found_hmeblk) { 15653 kpreempt_disable(); 15654 CPUSET_DEL(cpuset, CPU->cpu_id); 15655 xt_sync(cpuset); 15656 xt_sync(cpuset); 15657 kpreempt_enable(); 15658 return (hmeblkp); 15659 } 15660 } 15661 } 15662 return (NULL); 15663 }