remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still there, and in theory it runs when the system gets
*extremely* low on memory.  In practice, it never runs since the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
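Both counters should read zero, confirming that the code never runs.  The
output below is illustrative (kstat -p prints one
module:instance:name:statistic line per CPU):
cpu:0:vm:swapin	0
cpu:0:vm:swapout	0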
remove xhat
The xhat infrastructure was added to support hardware such as the Zulu
graphics card - hardware with its own on-board MMU.  The VM used the xhat
code to keep the CPU's and Zulu's page tables in sync.  Since the only xhat
user was Zulu (and Zulu support is gone), we can safely remove xhat,
simplifying the whole VM subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
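
The removal itself is almost entirely mechanical.  Nearly every HAT entry
point began with a dispatch of the shape below - this sketch is distilled
from the hat_setattr() hunk in the diff that follows, and the same pattern
repeats in hat_memload(), hat_unload_callback(), hat_unshare(), and friends:

	void
	hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
	{
		/* an xhat-backed HAT: hand the call to its provider */
		if (hat->sfmmu_xhat_provider) {
			XHAT_SETATTR(hat, addr, len, attr);
			return;
		}
		/* a CPU HAT: mirror the change to any attached xhats */
		ASSERT(hat->sfmmu_as != NULL);
		if (hat->sfmmu_as->a_xhat != NULL)
			xhat_setattr_all(hat->sfmmu_as, addr, len, attr);

		sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
	}

Deleting the dispatch leaves only the ASSERT and the real work.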

*** 79,89 ****
  #include <sys/prom_debug.h>
  #include <sys/ksynch.h>
  #include <sys/mem_config.h>
  #include <sys/mem_cage.h>
  #include <vm/vm_dep.h>
- #include <vm/xhat_sfmmu.h>
  #include <sys/fpu/fpusystm.h>
  #include <vm/mach_kpm.h>
  #include <sys/callb.h>
  
  #ifdef DEBUG
--- 79,88 ----
*** 1348,1359 ****
  	if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
  		cmn_err(CE_PANIC, "VAC too big!");
  	}
  #endif
  
- 	(void) xhat_init();
- 
  	uhme_hash_pa = va_to_pa(uhme_hash);
  	khme_hash_pa = va_to_pa(khme_hash);
  
  	/*
  	 * Initialize relocation locks. kpr_suspendlock is held
--- 1347,1356 ----
*** 1537,1547 ****
  		CPUSET_ZERO(sfmmup->sfmmu_cpusran);
  	}
  	sfmmup->sfmmu_free = 0;
  	sfmmup->sfmmu_rmstat = 0;
  	sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
- 	sfmmup->sfmmu_xhat_provider = NULL;
  	cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
  	sfmmup->sfmmu_srdp = NULL;
  	SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
  	bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
  	sfmmup->sfmmu_scdp = NULL;
--- 1534,1543 ----
*** 1917,1927 ****
  void
  hat_free_start(struct hat *sfmmup)
  {
  	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
  	ASSERT(sfmmup != ksfmmup);
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  
  	sfmmup->sfmmu_free = 1;
  	if (sfmmup->sfmmu_scdp != NULL) {
  		sfmmu_leave_scd(sfmmup, 0);
  	}
--- 1913,1922 ----
*** 1932,1942 ****
  void
  hat_free_end(struct hat *sfmmup)
  {
  	int i;
  
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  	ASSERT(sfmmup->sfmmu_free == 1);
  	ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
  	ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
  	ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
  	ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
--- 1927,1936 ----
*** 1974,2130 ****
  	kmem_cache_free(sfmmuid_cache, sfmmup);
  }
  
  /*
-  * Set up any translation structures, for the specified address space,
-  * that are needed or preferred when the process is being swapped in.
-  */
- /* ARGSUSED */
- void
- hat_swapin(struct hat *hat)
- {
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
- }
- 
- /*
-  * Free all of the translation resources, for the specified address space,
-  * that can be freed while the process is swapped out. Called from as_swapout.
-  * Also, free up the ctx that this process was using.
-  */
- void
- hat_swapout(struct hat *sfmmup)
- {
- 	struct hmehash_bucket *hmebp;
- 	struct hme_blk *hmeblkp;
- 	struct hme_blk *pr_hblk = NULL;
- 	struct hme_blk *nx_hblk;
- 	int i;
- 	struct hme_blk *list = NULL;
- 	hatlock_t *hatlockp;
- 	struct tsb_info *tsbinfop;
- 	struct free_tsb {
- 		struct free_tsb *next;
- 		struct tsb_info *tsbinfop;
- 	};			/* free list of TSBs */
- 	struct free_tsb *freelist, *last, *next;
- 
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- 	SFMMU_STAT(sf_swapout);
- 
- 	/*
- 	 * There is no way to go from an as to all its translations in sfmmu.
- 	 * Here is one of the times when we take the big hit and traverse
- 	 * the hash looking for hme_blks to free up.  Not only do we free up
- 	 * this as hme_blks but all those that are free.  We are obviously
- 	 * swapping because we need memory so let's free up as much
- 	 * as we can.
- 	 *
- 	 * Note that we don't flush TLB/TSB here -- it's not necessary
- 	 * because:
- 	 *  1) we free the ctx we're using and throw away the TSB(s);
- 	 *  2) processes aren't runnable while being swapped out.
- 	 */
- 	ASSERT(sfmmup != KHATID);
- 	for (i = 0; i <= UHMEHASH_SZ; i++) {
- 		hmebp = &uhme_hash[i];
- 		SFMMU_HASH_LOCK(hmebp);
- 		hmeblkp = hmebp->hmeblkp;
- 		pr_hblk = NULL;
- 		while (hmeblkp) {
- 
- 			ASSERT(!hmeblkp->hblk_xhat_bit);
- 
- 			if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
- 			    !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
- 				ASSERT(!hmeblkp->hblk_shared);
- 				(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
- 				    (caddr_t)get_hblk_base(hmeblkp),
- 				    get_hblk_endaddr(hmeblkp),
- 				    NULL, HAT_UNLOAD);
- 			}
- 			nx_hblk = hmeblkp->hblk_next;
- 			if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
- 				ASSERT(!hmeblkp->hblk_lckcnt);
- 				sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
- 				    &list, 0);
- 			} else {
- 				pr_hblk = hmeblkp;
- 			}
- 			hmeblkp = nx_hblk;
- 		}
- 		SFMMU_HASH_UNLOCK(hmebp);
- 	}
- 
- 	sfmmu_hblks_list_purge(&list, 0);
- 
- 	/*
- 	 * Now free up the ctx so that others can reuse it.
- 	 */
- 	hatlockp = sfmmu_hat_enter(sfmmup);
- 
- 	sfmmu_invalidate_ctx(sfmmup);
- 
- 	/*
- 	 * Free TSBs, but not tsbinfos, and set SWAPPED flag.
- 	 * If TSBs were never swapped in, just return.
- 	 * This implies that we don't support partial swapping
- 	 * of TSBs -- either all are swapped out, or none are.
- 	 *
- 	 * We must hold the HAT lock here to prevent racing with another
- 	 * thread trying to unmap TTEs from the TSB or running the post-
- 	 * relocator after relocating the TSB's memory.  Unfortunately, we
- 	 * can't free memory while holding the HAT lock or we could
- 	 * deadlock, so we build a list of TSBs to be freed after marking
- 	 * the tsbinfos as swapped out and free them after dropping the
- 	 * lock.
- 	 */
- 	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
- 		sfmmu_hat_exit(hatlockp);
- 		return;
- 	}
- 
- 	SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
- 	last = freelist = NULL;
- 	for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
- 	    tsbinfop = tsbinfop->tsb_next) {
- 		ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
- 
- 		/*
- 		 * Cast the TSB into a struct free_tsb and put it on the free
- 		 * list.
- 		 */
- 		if (freelist == NULL) {
- 			last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
- 		} else {
- 			last->next = (struct free_tsb *)tsbinfop->tsb_va;
- 			last = last->next;
- 		}
- 		last->next = NULL;
- 		last->tsbinfop = tsbinfop;
- 		tsbinfop->tsb_flags |= TSB_SWAPPED;
- 		/*
- 		 * Zero out the TTE to clear the valid bit.
- 		 * Note we can't use a value like 0xbad because we want to
- 		 * ensure diagnostic bits are NEVER set on TTEs that might
- 		 * be loaded.  The intent is to catch any invalid access
- 		 * to the swapped TSB, such as a thread running with a valid
- 		 * context without first calling sfmmu_tsb_swapin() to
- 		 * allocate TSB memory.
- 		 */
- 		tsbinfop->tsb_tte.ll = 0;
- 	}
- 
- 	/* Now we can drop the lock and free the TSB memory. */
- 	sfmmu_hat_exit(hatlockp);
- 	for (; freelist != NULL; freelist = next) {
- 		next = freelist->next;
- 		sfmmu_tsb_free(freelist->tsbinfop);
- 	}
- }
- 
  /*
   * Duplicate the translations of an as into another newas
   */
  /* ARGSUSED */
  int
  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
--- 1968,1977 ----
*** 2133,2143 ****
  	sf_srd_t *srdp;
  	sf_scd_t *scdp;
  	int i;
  	extern uint_t get_color_start(struct as *);
  
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
  	ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
  	    (flag == HAT_DUP_SRD));
  	ASSERT(hat != ksfmmup);
  	ASSERT(newhat != ksfmmup);
  	ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
--- 1980,1989 ----
*** 2203,2214 ****
  void
  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
  	uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
  {
  	uint_t rid;
! 	if (rcookie == HAT_INVALID_REGION_COOKIE ||
! 	    hat->sfmmu_xhat_provider != NULL) {
  		hat_do_memload(hat, addr, pp, attr, flags,
  		    SFMMU_INVALID_SHMERID);
  		return;
  	}
  	rid = (uint_t)((uint64_t)rcookie);
--- 2049,2059 ----
  void
  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
  	uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
  {
  	uint_t rid;
! 	if (rcookie == HAT_INVALID_REGION_COOKIE) {
  		hat_do_memload(hat, addr, pp, attr, flags,
  		    SFMMU_INVALID_SHMERID);
  		return;
  	}
  	rid = (uint_t)((uint64_t)rcookie);
*** 2238,2254 ****
  	if (PP_ISFREE(pp)) {
  		panic("hat_memload: loading a mapping to free page %p",
  		    (void *)pp);
  	}
  
- 	if (hat->sfmmu_xhat_provider) {
- 		/* no regions for xhats */
- 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
- 		XHAT_MEMLOAD(hat, addr, pp, attr, flags);
- 		return;
- 	}
- 
  	ASSERT((hat == ksfmmup) ||
  	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
  
  	if (flags & ~SFMMU_LOAD_ALLFLAG)
  		cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
--- 2083,2092 ----
*** 2294,2308 ****
  	struct page *pp = NULL;
  	int use_lgpg = 0;
  
  	ASSERT(hat != NULL);
  
- 	if (hat->sfmmu_xhat_provider) {
- 		XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
- 		return;
- 	}
- 
  	ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
  	ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
  	ASSERT((hat == ksfmmup) ||
  	    AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
  	if (len == 0)
--- 2132,2141 ----
*** 2445,2456 ****
  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
  	struct page **pps, uint_t attr, uint_t flags,
  	hat_region_cookie_t rcookie)
  {
  	uint_t rid;
! 	if (rcookie == HAT_INVALID_REGION_COOKIE ||
! 	    hat->sfmmu_xhat_provider != NULL) {
  		hat_do_memload_array(hat, addr, len, pps, attr, flags,
  		    SFMMU_INVALID_SHMERID);
  		return;
  	}
  	rid = (uint_t)((uint64_t)rcookie);
--- 2278,2288 ----
  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
  	struct page **pps, uint_t attr, uint_t flags,
  	hat_region_cookie_t rcookie)
  {
  	uint_t rid;
! 	if (rcookie == HAT_INVALID_REGION_COOKIE) {
  		hat_do_memload_array(hat, addr, len, pps, attr, flags,
  		    SFMMU_INVALID_SHMERID);
  		return;
  	}
  	rid = (uint_t)((uint64_t)rcookie);
*** 2481,2496 ****
  	uint_t large_pages_disable;
  
  	ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
  	SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
  
- 	if (hat->sfmmu_xhat_provider) {
- 		ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
- 		XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
- 		return;
- 	}
- 
  	if (hat->sfmmu_rmstat)
  		hat_resvstat(len, hat->sfmmu_as, addr);
  
  #if defined(SF_ERRATA_57)
  	if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
--- 2313,2322 ----
*** 3969,3979 ****
  	int hmeshift, hashno = 1;
  	struct hme_blk *hmeblkp, *list = NULL;
  	caddr_t endaddr;
  
  	ASSERT(sfmmup != NULL);
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  	ASSERT((sfmmup == ksfmmup) ||
  	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
  	ASSERT((len & MMU_PAGEOFFSET) == 0);
  
  	endaddr = addr + len;
--- 3795,3804 ----
*** 4054,4064 ****
  		hat_unlock(sfmmup, addr, len);
  		return;
  	}
  
  	ASSERT(sfmmup != NULL);
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  	ASSERT(sfmmup != ksfmmup);
  
  	srdp = sfmmup->sfmmu_srdp;
  	rid = (uint_t)((uint64_t)rcookie);
  	VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
--- 3879,3888 ----
*** 4766,4776 ****
  {
  	pfn_t pfn;
  	tte_t tte;
  
  	ASSERT(sfmmup != NULL);
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  	ASSERT((sfmmup == ksfmmup) ||
  	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
  
  	if (sfmmup == ksfmmup) {
--- 4590,4599 ----
*** 4791,4802 ****
  ssize_t
  hat_getpagesize(struct hat *sfmmup, caddr_t addr)
  {
  	tte_t tte;
  
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- 
  	if (sfmmup == ksfmmup) {
  		if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
  			return (-1);
  		}
  	} else {
--- 4614,4623 ----
*** 4812,4823 ****
  uint_t
  hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
  {
  	tte_t tte;
  
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- 
  	if (sfmmup == ksfmmup) {
  		if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
  			tte.ll = 0;
  		}
  	} else {
--- 4633,4642 ----
*** 4837,4859 ****
   * Enables more attributes on specified address range (ie. logical OR)
   */
  void
  hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
  {
- 	if (hat->sfmmu_xhat_provider) {
- 		XHAT_SETATTR(hat, addr, len, attr);
- 		return;
- 	} else {
- 		/*
- 		 * This must be a CPU HAT. If the address space has
- 		 * XHATs attached, change attributes for all of them,
- 		 * just in case
- 		 */
  		ASSERT(hat->sfmmu_as != NULL);
- 		if (hat->sfmmu_as->a_xhat != NULL)
- 			xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
- 	}
  
  	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
  }
  
  /*
--- 4656,4666 ----
*** 4861,4906 ****
   * are specified.
   */
  void
  hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
  {
- 	if (hat->sfmmu_xhat_provider) {
- 		XHAT_CHGATTR(hat, addr, len, attr);
- 		return;
- 	} else {
- 		/*
- 		 * This must be a CPU HAT. If the address space has
- 		 * XHATs attached, change attributes for all of them,
- 		 * just in case
- 		 */
  		ASSERT(hat->sfmmu_as != NULL);
- 		if (hat->sfmmu_as->a_xhat != NULL)
- 			xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
- 	}
  
  	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
  }
  
  /*
   * Remove attributes on the specified address range (ie. loginal NAND)
   */
  void
  hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
  {
- 	if (hat->sfmmu_xhat_provider) {
- 		XHAT_CLRATTR(hat, addr, len, attr);
- 		return;
- 	} else {
- 		/*
- 		 * This must be a CPU HAT. If the address space has
- 		 * XHATs attached, change attributes for all of them,
- 		 * just in case
- 		 */
  		ASSERT(hat->sfmmu_as != NULL);
- 		if (hat->sfmmu_as->a_xhat != NULL)
- 			xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
- 	}
  
  	sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
  }
  
  /*
--- 4668,4689 ----
*** 5249,5271 ****
  	demap_range_t dmr;
  
  	ASSERT((len & MMU_PAGEOFFSET) == 0);
  	ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
  
- 	if (sfmmup->sfmmu_xhat_provider) {
- 		XHAT_CHGPROT(sfmmup, addr, len, vprot);
- 		return;
- 	} else {
- 		/*
- 		 * This must be a CPU HAT. If the address space has
- 		 * XHATs attached, change attributes for all of them,
- 		 * just in case
- 		 */
  		ASSERT(sfmmup->sfmmu_as != NULL);
- 		if (sfmmup->sfmmu_as->a_xhat != NULL)
- 			xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
- 	}
  
  	CPUSET_ZERO(cpuset);
  
  	if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
  	    ((addr + len) > (caddr_t)USERLIMIT)) {
--- 5032,5042 ----
*** 5696,5719 ****
  	caddr_t cb_start_addr[MAX_CB_ADDR];
  	caddr_t cb_end_addr[MAX_CB_ADDR];
  	int issegkmap = ISSEGKMAP(sfmmup, addr);
  	demap_range_t dmr, *dmrp;
  
- 	if (sfmmup->sfmmu_xhat_provider) {
- 		XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
- 		return;
- 	} else {
- 		/*
- 		 * This must be a CPU HAT. If the address space has
- 		 * XHATs attached, unload the mappings for all of them,
- 		 * just in case
- 		 */
  		ASSERT(sfmmup->sfmmu_as != NULL);
- 		if (sfmmup->sfmmu_as->a_xhat != NULL)
- 			xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
- 			    len, flags, callback);
- 	}
  
  	ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
  	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
  
  	ASSERT(sfmmup != NULL);
--- 5467,5477 ----
*** 5986,5999 ****
   * be MMU_PAGESIZE aligned.
   */
  void
  hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
  {
- 	if (sfmmup->sfmmu_xhat_provider) {
- 		XHAT_UNLOAD(sfmmup, addr, len, flags);
- 		return;
- 	}
  	hat_unload_callback(sfmmup, addr, len, flags, NULL);
  }
  
  /*
--- 5744,5753 ----
*** 6329,6339 ****
  	int hmeshift, hashno = 1;
  	struct hme_blk *hmeblkp, *list = NULL;
  	caddr_t endaddr;
  	cpuset_t cpuset;
  
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  	ASSERT((sfmmup == ksfmmup) ||
  	    AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
  	ASSERT((len & MMU_PAGEOFFSET) == 0);
  	ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
  	    (clearflag == HAT_SYNC_ZERORM));
--- 6083,6092 ----
*** 7136,7153 ****
  #ifdef VAC
  	kmutex_t *pmtx;
  #endif
  	cpuset_t cpuset, tset;
  	int index, cons;
- 	int xhme_blks;
  	int pa_hments;
  
  	ASSERT(PAGE_EXCL(pp));
  
- retry_xhat:
  	tmphme = NULL;
- 	xhme_blks = 0;
  	pa_hments = 0;
  	CPUSET_ZERO(cpuset);
  
  	pml = sfmmu_mlist_enter(pp);
--- 6889,6903 ----
*** 7175,7194 ****
  			pa_hments++;
  			continue;
  		}
  
  		hmeblkp = sfmmu_hmetohblk(sfhme);
- 		if (hmeblkp->hblk_xhat_bit) {
- 			struct xhat_hme_blk *xblk =
- 			    (struct xhat_hme_blk *)hmeblkp;
- 
- 			(void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
- 			    pp, forceflag, XBLK2PROVBLK(xblk));
- 
- 			xhme_blks = 1;
- 			continue;
- 		}
  
  		/*
  		 * If there are kernel mappings don't unload them, they will
  		 * be suspended.
  		 */
--- 6925,6934 ----
*** 7221,7231 ****
  	/*
  	 * The page should have no mappings at this point, unless
  	 * we were called from hat_page_relocate() in which case we
  	 * leave the locked mappings which will be suspended later.
  	 */
! 	ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
  	    (forceflag == SFMMU_KERNEL_RELOC));
  
  #ifdef VAC
  	if (PP_ISTNC(pp)) {
  		if (cons == TTE8K) {
--- 6961,6971 ----
  	/*
  	 * The page should have no mappings at this point, unless
  	 * we were called from hat_page_relocate() in which case we
  	 * leave the locked mappings which will be suspended later.
  	 */
! 	ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
  	    (forceflag == SFMMU_KERNEL_RELOC));
  
  #ifdef VAC
  	if (PP_ISTNC(pp)) {
  		if (cons == TTE8K) {
*** 7256,7281 ****
  			HME_SUB(sfhme, pp);
  			kmem_cache_free(pa_hment_cache, pahmep);
  		}
  
! 		ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
  	}
  
  	sfmmu_mlist_exit(pml);
  
- 	/*
- 	 * XHAT may not have finished unloading pages
- 	 * because some other thread was waiting for
- 	 * mlist lock and XHAT_PAGEUNLOAD let it do
- 	 * the job.
- 	 */
- 	if (xhme_blks) {
- 		pp = origpp;
- 		goto retry_xhat;
- 	}
- 
  	return (0);
  }
  
  cpuset_t
  sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
--- 6996,7010 ----
  			HME_SUB(sfhme, pp);
  			kmem_cache_free(pa_hment_cache, pahmep);
  		}
  
! 		ASSERT(!PP_ISMAPPED(origpp));
  	}
  
  	sfmmu_mlist_exit(pml);
  
  	return (0);
  }
  
  cpuset_t
  sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
*** 7552,7563 ****
  		/*
  		 * If we are looking for large mappings and this hme doesn't
  		 * reach the range we are seeking, just ignore it.
  		 */
  		hmeblkp = sfmmu_hmetohblk(sfhme);
- 		if (hmeblkp->hblk_xhat_bit)
- 			continue;
  
  		if (hme_size(sfhme) < cons)
  			continue;
  
  		if (stop_on_sh) {
--- 7281,7290 ----
*** 7713,7728 ****
  retry:
  	sfmmu_copytte(&sfhme->hme_tte, &tte);
  	if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
  		hmeblkp = sfmmu_hmetohblk(sfhme);
- 
- 		/*
- 		 * xhat mappings should never be to a VMODSORT page.
- 		 */
- 		ASSERT(hmeblkp->hblk_xhat_bit == 0);
- 
  		sfmmup = hblktosfmmu(hmeblkp);
  		addr = tte_to_vaddr(hmeblkp, tte);
  
  		ttemod = tte;
  		TTE_CLR_WRT(&ttemod);
--- 7440,7449 ----
*** 7983,7994 ****
  	 * and be waiting for io.  The io can't complete
  	 * because the interrupt thread is blocked trying to grab
  	 * the as lock.
  	 */
  
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
- 
  	if (hat == ksfmmup) {
  		if (IS_KMEM_VA_LARGEPAGE(addr)) {
  			ASSERT(segkmem_lpszc > 0);
  			pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
  			if (pfn != PFN_INVALID) {
--- 7704,7713 ----
*** 8168,8178 ****
  /* ARGSUSED */
  void
  hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
  {
  	ASSERT(hat != NULL);
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
  }
  
  /*
   * Return the number of mappings to a particular page.  This number is an
   * approximation of the number of people sharing the page.
--- 7887,7896 ----
*** 8261,8278 ****
  		if (IS_PAHME(sfhme)) {
  			continue;
  		}
  
  		hmeblkp = sfmmu_hmetohblk(sfhme);
- 		if (hmeblkp->hblk_xhat_bit) {
- 			cnt++;
- 			if (cnt > sh_thresh) {
- 				sfmmu_mlist_exit(pml);
- 				return (1);
- 			}
- 			continue;
- 		}
  		if (hme_size(sfhme) != sz) {
  			continue;
  		}
  
  		if (hmeblkp->hblk_shared) {
--- 7979,7988 ----
*** 8383,8396 ****
  			ASSERT(!IS_PAHME(sfhme));
  			hmeblkp = sfmmu_hmetohblk(sfhme);
  			if (hme_size(sfhme) != sz) {
  				continue;
  			}
- 			if (hmeblkp->hblk_xhat_bit) {
- 				cmn_err(CE_PANIC,
- 				    "hat_page_demote: xhat hmeblk");
- 			}
  			tset = sfmmu_pageunload(rootpp, sfhme, sz);
  			CPUSET_OR(cpuset, tset);
  		}
  		if (index >>= 1) {
  			sz++;
--- 8093,8102 ----
*** 8514,8525 ****
  	int i;
  
  	if (hat == NULL)
  		return (0);
  
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
- 
  	for (i = 0; i < mmu_page_sizes; i++)
  		assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
  		    (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
  
  	if (hat->sfmmu_iblk == NULL)
--- 8220,8229 ----
*** 8535,8546 ****
  int
  hat_stats_enable(struct hat *hat)
  {
  	hatlock_t *hatlockp;
  
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
- 
  	hatlockp = sfmmu_hat_enter(hat);
  	hat->sfmmu_rmstat++;
  	sfmmu_hat_exit(hatlockp);
  	return (1);
  }
--- 8239,8248 ----
*** 8548,8559 ****
  void
  hat_stats_disable(struct hat *hat)
  {
  	hatlock_t *hatlockp;
  
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
- 
  	hatlockp = sfmmu_hat_enter(hat);
  	hat->sfmmu_rmstat--;
  	sfmmu_hat_exit(hatlockp);
  }
  
--- 8250,8259 ----
*** 8651,8662 ****
  	 * Check size alignment.
  	 */
  	if (!ISM_ALIGNED(ismshift, len))
  		return (EINVAL);
  
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- 
  	/*
  	 * Allocate ism_ment for the ism_hat's mapping list, and an
  	 * ism map blk in case we need one.  We must do our
  	 * allocations before acquiring locks to prevent a deadlock
  	 * in the kmem allocator on the mapping list lock.
--- 8351,8360 ----
*** 8864,8886 ****
  	ASSERT(ISM_ALIGNED(ismshift, addr));
  	ASSERT(ISM_ALIGNED(ismshift, len));
  	ASSERT(sfmmup != NULL);
  	ASSERT(sfmmup != ksfmmup);
  
- 	if (sfmmup->sfmmu_xhat_provider) {
- 		XHAT_UNSHARE(sfmmup, addr, len);
- 		return;
- 	} else {
- 		/*
- 		 * This must be a CPU HAT. If the address space has
- 		 * XHATs attached, inform all XHATs that ISM segment
- 		 * is going away
- 		 */
  		ASSERT(sfmmup->sfmmu_as != NULL);
- 		if (sfmmup->sfmmu_as->a_xhat != NULL)
- 			xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
- 	}
  
  	/*
  	 * Make sure that during the entire time ISM mappings are removed,
  	 * the trap handlers serialize behind us, and that no one else
  	 * can be mucking with ISM mappings.  This also lets us get away
--- 8562,8572 ----
*** 9330,9341 ****
  	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
  		tmphme = sfhmep->hme_next;
  		if (IS_PAHME(sfhmep))
  			continue;
  		hmeblkp = sfmmu_hmetohblk(sfhmep);
- 		if (hmeblkp->hblk_xhat_bit)
- 			continue;
  		tmphat = hblktosfmmu(hmeblkp);
  		sfmmu_copytte(&sfhmep->hme_tte, &tte);
  		ASSERT(TTE_IS_VALID(&tte));
  		if (hmeblkp->hblk_shared || tmphat == hat ||
  		    hmeblkp->hblk_lckcnt) {
--- 9016,9025 ----
*** 9358,9369 ****
  	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
  		tmphme = sfhmep->hme_next;
  		if (IS_PAHME(sfhmep))
  			continue;
  		hmeblkp = sfmmu_hmetohblk(sfhmep);
- 		if (hmeblkp->hblk_xhat_bit)
- 			continue;
  		ASSERT(!hmeblkp->hblk_shared);
  		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
  	}
  
  	if (PP_ISMAPPED_KPM(pp))
--- 9042,9051 ----
*** 9507,9518 ****
  	for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
  		if (IS_PAHME(sfhme))
  			continue;
  
  		hmeblkp = sfmmu_hmetohblk(sfhme);
- 		if (hmeblkp->hblk_xhat_bit)
- 			continue;
  
  		sfmmu_copytte(&sfhme->hme_tte, &tte);
  		ASSERT(TTE_IS_VALID(&tte));
  
  		vaddr = tte_to_vaddr(hmeblkp, tte);
--- 9189,9198 ----
*** 9656,9668 ****
  		if (IS_PAHME(sfhme))
  			continue;
  
  		hmeblkp = sfmmu_hmetohblk(sfhme);
- 		if (hmeblkp->hblk_xhat_bit)
- 			continue;
- 
  		sfmmu_copytte(&sfhme->hme_tte, &tte);
  		ASSERT(TTE_IS_VALID(&tte));
  
  		vaddr = tte_to_vaddr(hmeblkp, tte);
  		color = addr_to_vcolor(vaddr);
--- 9336,9345 ----
*** 9994,10004 ****
  }
  
  /*
   * Replace the specified TSB with a new TSB.  This function gets called when
!  * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
   * (8K).
   *
   * Caller must hold the HAT lock, but should assume any tsb_info
   * pointers it has are no longer valid after calling this function.
--- 9671,9681 ----
  }
  
  /*
   * Replace the specified TSB with a new TSB.  This function gets called when
!  * we grow, or shrink a TSB.  When swapping in a TSB (TSB_SWAPIN), the
   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
   * (8K).
   *
   * Caller must hold the HAT lock, but should assume any tsb_info
   * pointers it has are no longer valid after calling this function.
*** 13406,13416 ****
  /* ARGSUSED */
  faultcode_t
  hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
      uint_t flags)
  {
- 	ASSERT(hat->sfmmu_xhat_provider == NULL);
  	return (FC_NOSUPPORT);
  }
  
  /*
   * Searchs the mapping list of the page for a mapping of the same size. If not
--- 13083,13092 ----
*** 13441,13452 ****
  	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
  		if (IS_PAHME(sfhmep))
  			continue;
  
  		hmeblkp = sfmmu_hmetohblk(sfhmep);
- 		if (hmeblkp->hblk_xhat_bit)
- 			continue;
  		if (hme_size(sfhmep) == ttesz) {
  			/*
  			 * another mapping of the same size. don't clear index.
  			 */
  			return;
--- 13117,13126 ----
*** 14027,14037 ****
  	if (srdp == NULL || r_size == 0) {
  		return (HAT_INVALID_REGION_COOKIE);
  	}
  
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
  	ASSERT(sfmmup != ksfmmup);
  	ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
  	ASSERT(srdp->srd_refcnt > 0);
  	ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
  	ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
--- 13701,13710 ----
*** 14334,14349 ****
  	ASSERT(rgnp->rgn_id == rid);
  	ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
  	ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
  	ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
- 	ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- 
- 	if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
- 		xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
- 		    rgnp->rgn_size, 0, NULL);
- 	}
  
  	if (sfmmup->sfmmu_free) {
  		ulong_t rttecnt;
  
  		r_pgszc = rgnp->rgn_pgszc;
  		r_size = rgnp->rgn_size;
--- 14007,14016 ----