Print this page
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
@@ -79,11 +79,10 @@
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
-#include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>
#include <vm/mach_kpm.h>
#include <sys/callb.h>
#ifdef DEBUG
@@ -1348,12 +1347,10 @@
if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
cmn_err(CE_PANIC, "VAC too big!");
}
#endif
- (void) xhat_init();
-
uhme_hash_pa = va_to_pa(uhme_hash);
khme_hash_pa = va_to_pa(khme_hash);
/*
* Initialize relocation locks. kpr_suspendlock is held
@@ -1537,11 +1534,10 @@
CPUSET_ZERO(sfmmup->sfmmu_cpusran);
}
sfmmup->sfmmu_free = 0;
sfmmup->sfmmu_rmstat = 0;
sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
- sfmmup->sfmmu_xhat_provider = NULL;
cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
sfmmup->sfmmu_srdp = NULL;
SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
sfmmup->sfmmu_scdp = NULL;
@@ -1917,11 +1913,10 @@
void
hat_free_start(struct hat *sfmmup)
{
ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT(sfmmup != ksfmmup);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
sfmmup->sfmmu_free = 1;
if (sfmmup->sfmmu_scdp != NULL) {
sfmmu_leave_scd(sfmmup, 0);
}
@@ -1932,11 +1927,10 @@
void
hat_free_end(struct hat *sfmmup)
{
int i;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT(sfmmup->sfmmu_free == 1);
ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
@@ -1981,11 +1975,10 @@
*/
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
- ASSERT(hat->sfmmu_xhat_provider == NULL);
}
/*
* Free all of the translation resources, for the specified address space,
* that can be freed while the process is swapped out. Called from as_swapout.
@@ -2006,11 +1999,10 @@
struct free_tsb *next;
struct tsb_info *tsbinfop;
}; /* free list of TSBs */
struct free_tsb *freelist, *last, *next;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
SFMMU_STAT(sf_swapout);
/*
* There is no way to go from an as to all its translations in sfmmu.
* Here is one of the times when we take the big hit and traverse
@@ -2030,12 +2022,10 @@
SFMMU_HASH_LOCK(hmebp);
hmeblkp = hmebp->hmeblkp;
pr_hblk = NULL;
while (hmeblkp) {
- ASSERT(!hmeblkp->hblk_xhat_bit);
-
if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
!hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
ASSERT(!hmeblkp->hblk_shared);
(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
(caddr_t)get_hblk_base(hmeblkp),
@@ -2133,11 +2123,10 @@
sf_srd_t *srdp;
sf_scd_t *scdp;
int i;
extern uint_t get_color_start(struct as *);
- ASSERT(hat->sfmmu_xhat_provider == NULL);
ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
(flag == HAT_DUP_SRD));
ASSERT(hat != ksfmmup);
ASSERT(newhat != ksfmmup);
ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
@@ -2203,12 +2192,11 @@
void
hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
{
uint_t rid;
- if (rcookie == HAT_INVALID_REGION_COOKIE ||
- hat->sfmmu_xhat_provider != NULL) {
+ if (rcookie == HAT_INVALID_REGION_COOKIE) {
hat_do_memload(hat, addr, pp, attr, flags,
SFMMU_INVALID_SHMERID);
return;
}
rid = (uint_t)((uint64_t)rcookie);
@@ -2238,17 +2226,10 @@
if (PP_ISFREE(pp)) {
panic("hat_memload: loading a mapping to free page %p",
(void *)pp);
}
- if (hat->sfmmu_xhat_provider) {
- /* no regions for xhats */
- ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
- XHAT_MEMLOAD(hat, addr, pp, attr, flags);
- return;
- }
-
ASSERT((hat == ksfmmup) ||
AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
if (flags & ~SFMMU_LOAD_ALLFLAG)
cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
@@ -2294,15 +2275,10 @@
struct page *pp = NULL;
int use_lgpg = 0;
ASSERT(hat != NULL);
- if (hat->sfmmu_xhat_provider) {
- XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
- return;
- }
-
ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
ASSERT((hat == ksfmmup) ||
AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
if (len == 0)
@@ -2445,12 +2421,11 @@
hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
struct page **pps, uint_t attr, uint_t flags,
hat_region_cookie_t rcookie)
{
uint_t rid;
- if (rcookie == HAT_INVALID_REGION_COOKIE ||
- hat->sfmmu_xhat_provider != NULL) {
+ if (rcookie == HAT_INVALID_REGION_COOKIE) {
hat_do_memload_array(hat, addr, len, pps, attr, flags,
SFMMU_INVALID_SHMERID);
return;
}
rid = (uint_t)((uint64_t)rcookie);
@@ -2481,16 +2456,10 @@
uint_t large_pages_disable;
ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
- if (hat->sfmmu_xhat_provider) {
- ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
- XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
- return;
- }
-
if (hat->sfmmu_rmstat)
hat_resvstat(len, hat->sfmmu_as, addr);
#if defined(SF_ERRATA_57)
if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
@@ -3969,11 +3938,10 @@
int hmeshift, hashno = 1;
struct hme_blk *hmeblkp, *list = NULL;
caddr_t endaddr;
ASSERT(sfmmup != NULL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT((sfmmup == ksfmmup) ||
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT((len & MMU_PAGEOFFSET) == 0);
endaddr = addr + len;
@@ -4054,11 +4022,10 @@
hat_unlock(sfmmup, addr, len);
return;
}
ASSERT(sfmmup != NULL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT(sfmmup != ksfmmup);
srdp = sfmmup->sfmmu_srdp;
rid = (uint_t)((uint64_t)rcookie);
VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
@@ -4766,11 +4733,10 @@
{
pfn_t pfn;
tte_t tte;
ASSERT(sfmmup != NULL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT((sfmmup == ksfmmup) ||
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
if (sfmmup == ksfmmup) {
@@ -4791,12 +4757,10 @@
ssize_t
hat_getpagesize(struct hat *sfmmup, caddr_t addr)
{
tte_t tte;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
-
if (sfmmup == ksfmmup) {
if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
return (-1);
}
} else {
@@ -4812,12 +4776,10 @@
uint_t
hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
{
tte_t tte;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
-
if (sfmmup == ksfmmup) {
if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
tte.ll = 0;
}
} else {
@@ -4837,23 +4799,11 @@
* Enables more attributes on specified address range (ie. logical OR)
*/
void
hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
- if (hat->sfmmu_xhat_provider) {
- XHAT_SETATTR(hat, addr, len, attr);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(hat->sfmmu_as != NULL);
- if (hat->sfmmu_as->a_xhat != NULL)
- xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
- }
sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
}
/*
@@ -4861,46 +4811,22 @@
* are specified.
*/
void
hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
- if (hat->sfmmu_xhat_provider) {
- XHAT_CHGATTR(hat, addr, len, attr);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(hat->sfmmu_as != NULL);
- if (hat->sfmmu_as->a_xhat != NULL)
- xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
- }
sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
}
/*
* Remove attributes on the specified address range (ie. loginal NAND)
*/
void
hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
- if (hat->sfmmu_xhat_provider) {
- XHAT_CLRATTR(hat, addr, len, attr);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(hat->sfmmu_as != NULL);
- if (hat->sfmmu_as->a_xhat != NULL)
- xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
- }
sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
}
/*
@@ -5249,23 +5175,11 @@
demap_range_t dmr;
ASSERT((len & MMU_PAGEOFFSET) == 0);
ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_CHGPROT(sfmmup, addr, len, vprot);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(sfmmup->sfmmu_as != NULL);
- if (sfmmup->sfmmu_as->a_xhat != NULL)
- xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
- }
CPUSET_ZERO(cpuset);
if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
((addr + len) > (caddr_t)USERLIMIT)) {
@@ -5696,24 +5610,11 @@
caddr_t cb_start_addr[MAX_CB_ADDR];
caddr_t cb_end_addr[MAX_CB_ADDR];
int issegkmap = ISSEGKMAP(sfmmup, addr);
demap_range_t dmr, *dmrp;
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, unload the mappings for all of them,
- * just in case
- */
ASSERT(sfmmup->sfmmu_as != NULL);
- if (sfmmup->sfmmu_as->a_xhat != NULL)
- xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
- len, flags, callback);
- }
ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT(sfmmup != NULL);
@@ -5986,14 +5887,10 @@
* be MMU_PAGESIZE aligned.
*/
void
hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
{
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_UNLOAD(sfmmup, addr, len, flags);
- return;
- }
hat_unload_callback(sfmmup, addr, len, flags, NULL);
}
/*
@@ -6329,11 +6226,10 @@
int hmeshift, hashno = 1;
struct hme_blk *hmeblkp, *list = NULL;
caddr_t endaddr;
cpuset_t cpuset;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT((sfmmup == ksfmmup) ||
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT((len & MMU_PAGEOFFSET) == 0);
ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
(clearflag == HAT_SYNC_ZERORM));
@@ -7136,18 +7032,15 @@
#ifdef VAC
kmutex_t *pmtx;
#endif
cpuset_t cpuset, tset;
int index, cons;
- int xhme_blks;
int pa_hments;
ASSERT(PAGE_EXCL(pp));
-retry_xhat:
tmphme = NULL;
- xhme_blks = 0;
pa_hments = 0;
CPUSET_ZERO(cpuset);
pml = sfmmu_mlist_enter(pp);
@@ -7175,20 +7068,10 @@
pa_hments++;
continue;
}
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit) {
- struct xhat_hme_blk *xblk =
- (struct xhat_hme_blk *)hmeblkp;
-
- (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
- pp, forceflag, XBLK2PROVBLK(xblk));
-
- xhme_blks = 1;
- continue;
- }
/*
* If there are kernel mappings don't unload them, they will
* be suspended.
*/
@@ -7221,11 +7104,11 @@
/*
* The page should have no mappings at this point, unless
* we were called from hat_page_relocate() in which case we
* leave the locked mappings which will be suspended later.
*/
- ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
+ ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
(forceflag == SFMMU_KERNEL_RELOC));
#ifdef VAC
if (PP_ISTNC(pp)) {
if (cons == TTE8K) {
@@ -7256,26 +7139,15 @@
HME_SUB(sfhme, pp);
kmem_cache_free(pa_hment_cache, pahmep);
}
}
- ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
+ ASSERT(!PP_ISMAPPED(origpp));
}
sfmmu_mlist_exit(pml);
- /*
- * XHAT may not have finished unloading pages
- * because some other thread was waiting for
- * mlist lock and XHAT_PAGEUNLOAD let it do
- * the job.
- */
- if (xhme_blks) {
- pp = origpp;
- goto retry_xhat;
- }
-
return (0);
}
cpuset_t
sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
@@ -7552,12 +7424,10 @@
/*
* If we are looking for large mappings and this hme doesn't
* reach the range we are seeking, just ignore it.
*/
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit)
- continue;
if (hme_size(sfhme) < cons)
continue;
if (stop_on_sh) {
@@ -7713,16 +7583,10 @@
retry:
sfmmu_copytte(&sfhme->hme_tte, &tte);
if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
hmeblkp = sfmmu_hmetohblk(sfhme);
-
- /*
- * xhat mappings should never be to a VMODSORT page.
- */
- ASSERT(hmeblkp->hblk_xhat_bit == 0);
-
sfmmup = hblktosfmmu(hmeblkp);
addr = tte_to_vaddr(hmeblkp, tte);
ttemod = tte;
TTE_CLR_WRT(&ttemod);
@@ -7983,12 +7847,10 @@
* and be waiting for io. The io can't complete
* because the interrupt thread is blocked trying to grab
* the as lock.
*/
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
if (hat == ksfmmup) {
if (IS_KMEM_VA_LARGEPAGE(addr)) {
ASSERT(segkmem_lpszc > 0);
pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
if (pfn != PFN_INVALID) {
@@ -8168,11 +8030,10 @@
/* ARGSUSED */
void
hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
{
ASSERT(hat != NULL);
- ASSERT(hat->sfmmu_xhat_provider == NULL);
}
/*
* Return the number of mappings to a particular page. This number is an
* approximation of the number of people sharing the page.
@@ -8261,18 +8122,10 @@
if (IS_PAHME(sfhme)) {
continue;
}
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit) {
- cnt++;
- if (cnt > sh_thresh) {
- sfmmu_mlist_exit(pml);
- return (1);
- }
- continue;
- }
if (hme_size(sfhme) != sz) {
continue;
}
if (hmeblkp->hblk_shared) {
@@ -8383,14 +8236,10 @@
ASSERT(!IS_PAHME(sfhme));
hmeblkp = sfmmu_hmetohblk(sfhme);
if (hme_size(sfhme) != sz) {
continue;
}
- if (hmeblkp->hblk_xhat_bit) {
- cmn_err(CE_PANIC,
- "hat_page_demote: xhat hmeblk");
- }
tset = sfmmu_pageunload(rootpp, sfhme, sz);
CPUSET_OR(cpuset, tset);
}
if (index >>= 1) {
sz++;
@@ -8514,12 +8363,10 @@
int i;
if (hat == NULL)
return (0);
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
for (i = 0; i < mmu_page_sizes; i++)
assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
(pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
if (hat->sfmmu_iblk == NULL)
@@ -8535,12 +8382,10 @@
int
hat_stats_enable(struct hat *hat)
{
hatlock_t *hatlockp;
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
hatlockp = sfmmu_hat_enter(hat);
hat->sfmmu_rmstat++;
sfmmu_hat_exit(hatlockp);
return (1);
}
@@ -8548,12 +8393,10 @@
void
hat_stats_disable(struct hat *hat)
{
hatlock_t *hatlockp;
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
hatlockp = sfmmu_hat_enter(hat);
hat->sfmmu_rmstat--;
sfmmu_hat_exit(hatlockp);
}
@@ -8651,12 +8494,10 @@
* Check size alignment.
*/
if (!ISM_ALIGNED(ismshift, len))
return (EINVAL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
-
/*
* Allocate ism_ment for the ism_hat's mapping list, and an
* ism map blk in case we need one. We must do our
* allocations before acquiring locks to prevent a deadlock
* in the kmem allocator on the mapping list lock.
@@ -8864,23 +8705,11 @@
ASSERT(ISM_ALIGNED(ismshift, addr));
ASSERT(ISM_ALIGNED(ismshift, len));
ASSERT(sfmmup != NULL);
ASSERT(sfmmup != ksfmmup);
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_UNSHARE(sfmmup, addr, len);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, inform all XHATs that ISM segment
- * is going away
- */
ASSERT(sfmmup->sfmmu_as != NULL);
- if (sfmmup->sfmmu_as->a_xhat != NULL)
- xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
- }
/*
* Make sure that during the entire time ISM mappings are removed,
* the trap handlers serialize behind us, and that no one else
* can be mucking with ISM mappings. This also lets us get away
@@ -9330,12 +9159,10 @@
for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
tmphme = sfhmep->hme_next;
if (IS_PAHME(sfhmep))
continue;
hmeblkp = sfmmu_hmetohblk(sfhmep);
- if (hmeblkp->hblk_xhat_bit)
- continue;
tmphat = hblktosfmmu(hmeblkp);
sfmmu_copytte(&sfhmep->hme_tte, &tte);
ASSERT(TTE_IS_VALID(&tte));
if (hmeblkp->hblk_shared || tmphat == hat ||
hmeblkp->hblk_lckcnt) {
@@ -9358,12 +9185,10 @@
for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
tmphme = sfhmep->hme_next;
if (IS_PAHME(sfhmep))
continue;
hmeblkp = sfmmu_hmetohblk(sfhmep);
- if (hmeblkp->hblk_xhat_bit)
- continue;
ASSERT(!hmeblkp->hblk_shared);
(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
}
if (PP_ISMAPPED_KPM(pp))
@@ -9507,12 +9332,10 @@
for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
if (IS_PAHME(sfhme))
continue;
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit)
- continue;
sfmmu_copytte(&sfhme->hme_tte, &tte);
ASSERT(TTE_IS_VALID(&tte));
vaddr = tte_to_vaddr(hmeblkp, tte);
@@ -9656,13 +9479,10 @@
if (IS_PAHME(sfhme))
continue;
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit)
- continue;
-
sfmmu_copytte(&sfhme->hme_tte, &tte);
ASSERT(TTE_IS_VALID(&tte));
vaddr = tte_to_vaddr(hmeblkp, tte);
color = addr_to_vcolor(vaddr);
@@ -13406,11 +13226,10 @@
/* ARGSUSED */
faultcode_t
hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
uint_t flags)
{
- ASSERT(hat->sfmmu_xhat_provider == NULL);
return (FC_NOSUPPORT);
}
/*
* Searchs the mapping list of the page for a mapping of the same size. If not
@@ -13441,12 +13260,10 @@
for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
if (IS_PAHME(sfhmep))
continue;
hmeblkp = sfmmu_hmetohblk(sfhmep);
- if (hmeblkp->hblk_xhat_bit)
- continue;
if (hme_size(sfhmep) == ttesz) {
/*
* another mapping of the same size. don't clear index.
*/
return;
@@ -14027,11 +13844,10 @@
if (srdp == NULL || r_size == 0) {
return (HAT_INVALID_REGION_COOKIE);
}
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT(sfmmup != ksfmmup);
ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT(srdp->srd_refcnt > 0);
ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
@@ -14334,16 +14150,10 @@
ASSERT(rgnp->rgn_id == rid);
ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
- xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
- rgnp->rgn_size, 0, NULL);
- }
-
if (sfmmup->sfmmu_free) {
ulong_t rttecnt;
r_pgszc = rgnp->rgn_pgszc;
r_size = rgnp->rgn_size;