Print this page
remove xhat
The xhat infrastructure was added to support hardware such as the Zulu
graphics card - hardware which had on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was Zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
*** 79,89 ****
#include <sys/prom_debug.h>
#include <sys/ksynch.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>
- #include <vm/xhat_sfmmu.h>
#include <sys/fpu/fpusystm.h>
#include <vm/mach_kpm.h>
#include <sys/callb.h>
#ifdef DEBUG
--- 79,88 ----
*** 1348,1359 ****
if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
cmn_err(CE_PANIC, "VAC too big!");
}
#endif
- (void) xhat_init();
-
uhme_hash_pa = va_to_pa(uhme_hash);
khme_hash_pa = va_to_pa(khme_hash);
/*
* Initialize relocation locks. kpr_suspendlock is held
--- 1347,1356 ----
*** 1537,1547 ****
CPUSET_ZERO(sfmmup->sfmmu_cpusran);
}
sfmmup->sfmmu_free = 0;
sfmmup->sfmmu_rmstat = 0;
sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
- sfmmup->sfmmu_xhat_provider = NULL;
cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
sfmmup->sfmmu_srdp = NULL;
SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
sfmmup->sfmmu_scdp = NULL;
--- 1534,1543 ----
*** 1917,1927 ****
void
hat_free_start(struct hat *sfmmup)
{
ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT(sfmmup != ksfmmup);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
sfmmup->sfmmu_free = 1;
if (sfmmup->sfmmu_scdp != NULL) {
sfmmu_leave_scd(sfmmup, 0);
}
--- 1913,1922 ----
*** 1932,1942 ****
void
hat_free_end(struct hat *sfmmup)
{
int i;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT(sfmmup->sfmmu_free == 1);
ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
--- 1927,1936 ----
*** 1981,1991 ****
*/
/* ARGSUSED */
void
hat_swapin(struct hat *hat)
{
- ASSERT(hat->sfmmu_xhat_provider == NULL);
}
/*
* Free all of the translation resources, for the specified address space,
* that can be freed while the process is swapped out. Called from as_swapout.
--- 1975,1984 ----
*** 2006,2016 ****
struct free_tsb *next;
struct tsb_info *tsbinfop;
}; /* free list of TSBs */
struct free_tsb *freelist, *last, *next;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
SFMMU_STAT(sf_swapout);
/*
* There is no way to go from an as to all its translations in sfmmu.
* Here is one of the times when we take the big hit and traverse
--- 1999,2008 ----
*** 2030,2041 ****
SFMMU_HASH_LOCK(hmebp);
hmeblkp = hmebp->hmeblkp;
pr_hblk = NULL;
while (hmeblkp) {
- ASSERT(!hmeblkp->hblk_xhat_bit);
-
if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
!hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
ASSERT(!hmeblkp->hblk_shared);
(void) sfmmu_hblk_unload(sfmmup, hmeblkp,
(caddr_t)get_hblk_base(hmeblkp),
--- 2022,2031 ----
*** 2133,2143 ****
sf_srd_t *srdp;
sf_scd_t *scdp;
int i;
extern uint_t get_color_start(struct as *);
- ASSERT(hat->sfmmu_xhat_provider == NULL);
ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
(flag == HAT_DUP_SRD));
ASSERT(hat != ksfmmup);
ASSERT(newhat != ksfmmup);
ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
--- 2123,2132 ----
*** 2203,2214 ****
void
hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
{
uint_t rid;
! if (rcookie == HAT_INVALID_REGION_COOKIE ||
! hat->sfmmu_xhat_provider != NULL) {
hat_do_memload(hat, addr, pp, attr, flags,
SFMMU_INVALID_SHMERID);
return;
}
rid = (uint_t)((uint64_t)rcookie);
--- 2192,2202 ----
void
hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
{
uint_t rid;
! if (rcookie == HAT_INVALID_REGION_COOKIE) {
hat_do_memload(hat, addr, pp, attr, flags,
SFMMU_INVALID_SHMERID);
return;
}
rid = (uint_t)((uint64_t)rcookie);
*** 2238,2254 ****
if (PP_ISFREE(pp)) {
panic("hat_memload: loading a mapping to free page %p",
(void *)pp);
}
- if (hat->sfmmu_xhat_provider) {
- /* no regions for xhats */
- ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
- XHAT_MEMLOAD(hat, addr, pp, attr, flags);
- return;
- }
-
ASSERT((hat == ksfmmup) ||
AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
if (flags & ~SFMMU_LOAD_ALLFLAG)
cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
--- 2226,2235 ----
*** 2294,2308 ****
struct page *pp = NULL;
int use_lgpg = 0;
ASSERT(hat != NULL);
- if (hat->sfmmu_xhat_provider) {
- XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
- return;
- }
-
ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
ASSERT((hat == ksfmmup) ||
AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
if (len == 0)
--- 2275,2284 ----
*** 2445,2456 ****
hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
struct page **pps, uint_t attr, uint_t flags,
hat_region_cookie_t rcookie)
{
uint_t rid;
! if (rcookie == HAT_INVALID_REGION_COOKIE ||
! hat->sfmmu_xhat_provider != NULL) {
hat_do_memload_array(hat, addr, len, pps, attr, flags,
SFMMU_INVALID_SHMERID);
return;
}
rid = (uint_t)((uint64_t)rcookie);
--- 2421,2431 ----
hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
struct page **pps, uint_t attr, uint_t flags,
hat_region_cookie_t rcookie)
{
uint_t rid;
! if (rcookie == HAT_INVALID_REGION_COOKIE) {
hat_do_memload_array(hat, addr, len, pps, attr, flags,
SFMMU_INVALID_SHMERID);
return;
}
rid = (uint_t)((uint64_t)rcookie);
*** 2481,2496 ****
uint_t large_pages_disable;
ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
- if (hat->sfmmu_xhat_provider) {
- ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
- XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
- return;
- }
-
if (hat->sfmmu_rmstat)
hat_resvstat(len, hat->sfmmu_as, addr);
#if defined(SF_ERRATA_57)
if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
--- 2456,2465 ----
*** 3969,3979 ****
int hmeshift, hashno = 1;
struct hme_blk *hmeblkp, *list = NULL;
caddr_t endaddr;
ASSERT(sfmmup != NULL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT((sfmmup == ksfmmup) ||
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT((len & MMU_PAGEOFFSET) == 0);
endaddr = addr + len;
--- 3938,3947 ----
*** 4054,4064 ****
hat_unlock(sfmmup, addr, len);
return;
}
ASSERT(sfmmup != NULL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT(sfmmup != ksfmmup);
srdp = sfmmup->sfmmu_srdp;
rid = (uint_t)((uint64_t)rcookie);
VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
--- 4022,4031 ----
*** 4766,4776 ****
{
pfn_t pfn;
tte_t tte;
ASSERT(sfmmup != NULL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT((sfmmup == ksfmmup) ||
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
if (sfmmup == ksfmmup) {
--- 4733,4742 ----
*** 4791,4802 ****
ssize_t
hat_getpagesize(struct hat *sfmmup, caddr_t addr)
{
tte_t tte;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
-
if (sfmmup == ksfmmup) {
if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
return (-1);
}
} else {
--- 4757,4766 ----
*** 4812,4823 ****
uint_t
hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
{
tte_t tte;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
-
if (sfmmup == ksfmmup) {
if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
tte.ll = 0;
}
} else {
--- 4776,4785 ----
*** 4837,4859 ****
* Enables more attributes on specified address range (ie. logical OR)
*/
void
hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
- if (hat->sfmmu_xhat_provider) {
- XHAT_SETATTR(hat, addr, len, attr);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(hat->sfmmu_as != NULL);
- if (hat->sfmmu_as->a_xhat != NULL)
- xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
- }
sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
}
/*
--- 4799,4809 ----
*** 4861,4906 ****
* are specified.
*/
void
hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
- if (hat->sfmmu_xhat_provider) {
- XHAT_CHGATTR(hat, addr, len, attr);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(hat->sfmmu_as != NULL);
- if (hat->sfmmu_as->a_xhat != NULL)
- xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
- }
sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
}
/*
* Remove attributes on the specified address range (ie. loginal NAND)
*/
void
hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
- if (hat->sfmmu_xhat_provider) {
- XHAT_CLRATTR(hat, addr, len, attr);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(hat->sfmmu_as != NULL);
- if (hat->sfmmu_as->a_xhat != NULL)
- xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
- }
sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
}
/*
--- 4811,4832 ----
*** 5249,5271 ****
demap_range_t dmr;
ASSERT((len & MMU_PAGEOFFSET) == 0);
ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_CHGPROT(sfmmup, addr, len, vprot);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, change attributes for all of them,
- * just in case
- */
ASSERT(sfmmup->sfmmu_as != NULL);
- if (sfmmup->sfmmu_as->a_xhat != NULL)
- xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
- }
CPUSET_ZERO(cpuset);
if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
((addr + len) > (caddr_t)USERLIMIT)) {
--- 5175,5185 ----
*** 5696,5719 ****
caddr_t cb_start_addr[MAX_CB_ADDR];
caddr_t cb_end_addr[MAX_CB_ADDR];
int issegkmap = ISSEGKMAP(sfmmup, addr);
demap_range_t dmr, *dmrp;
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, unload the mappings for all of them,
- * just in case
- */
ASSERT(sfmmup->sfmmu_as != NULL);
- if (sfmmup->sfmmu_as->a_xhat != NULL)
- xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
- len, flags, callback);
- }
ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT(sfmmup != NULL);
--- 5610,5620 ----
*** 5986,5999 ****
* be MMU_PAGESIZE aligned.
*/
void
hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
{
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_UNLOAD(sfmmup, addr, len, flags);
- return;
- }
hat_unload_callback(sfmmup, addr, len, flags, NULL);
}
/*
--- 5887,5896 ----
*** 6329,6339 ****
int hmeshift, hashno = 1;
struct hme_blk *hmeblkp, *list = NULL;
caddr_t endaddr;
cpuset_t cpuset;
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT((sfmmup == ksfmmup) ||
AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT((len & MMU_PAGEOFFSET) == 0);
ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
(clearflag == HAT_SYNC_ZERORM));
--- 6226,6235 ----
*** 7136,7153 ****
#ifdef VAC
kmutex_t *pmtx;
#endif
cpuset_t cpuset, tset;
int index, cons;
- int xhme_blks;
int pa_hments;
ASSERT(PAGE_EXCL(pp));
- retry_xhat:
tmphme = NULL;
- xhme_blks = 0;
pa_hments = 0;
CPUSET_ZERO(cpuset);
pml = sfmmu_mlist_enter(pp);
--- 7032,7046 ----
*** 7175,7194 ****
pa_hments++;
continue;
}
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit) {
- struct xhat_hme_blk *xblk =
- (struct xhat_hme_blk *)hmeblkp;
-
- (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
- pp, forceflag, XBLK2PROVBLK(xblk));
-
- xhme_blks = 1;
- continue;
- }
/*
* If there are kernel mappings don't unload them, they will
* be suspended.
*/
--- 7068,7077 ----
*** 7221,7231 ****
/*
* The page should have no mappings at this point, unless
* we were called from hat_page_relocate() in which case we
* leave the locked mappings which will be suspended later.
*/
! ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
(forceflag == SFMMU_KERNEL_RELOC));
#ifdef VAC
if (PP_ISTNC(pp)) {
if (cons == TTE8K) {
--- 7104,7114 ----
/*
* The page should have no mappings at this point, unless
* we were called from hat_page_relocate() in which case we
* leave the locked mappings which will be suspended later.
*/
! ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
(forceflag == SFMMU_KERNEL_RELOC));
#ifdef VAC
if (PP_ISTNC(pp)) {
if (cons == TTE8K) {
*** 7256,7281 ****
HME_SUB(sfhme, pp);
kmem_cache_free(pa_hment_cache, pahmep);
}
}
! ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
}
sfmmu_mlist_exit(pml);
- /*
- * XHAT may not have finished unloading pages
- * because some other thread was waiting for
- * mlist lock and XHAT_PAGEUNLOAD let it do
- * the job.
- */
- if (xhme_blks) {
- pp = origpp;
- goto retry_xhat;
- }
-
return (0);
}
cpuset_t
sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
--- 7139,7153 ----
HME_SUB(sfhme, pp);
kmem_cache_free(pa_hment_cache, pahmep);
}
}
! ASSERT(!PP_ISMAPPED(origpp));
}
sfmmu_mlist_exit(pml);
return (0);
}
cpuset_t
sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
*** 7552,7563 ****
/*
* If we are looking for large mappings and this hme doesn't
* reach the range we are seeking, just ignore it.
*/
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit)
- continue;
if (hme_size(sfhme) < cons)
continue;
if (stop_on_sh) {
--- 7424,7433 ----
*** 7713,7728 ****
retry:
sfmmu_copytte(&sfhme->hme_tte, &tte);
if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
hmeblkp = sfmmu_hmetohblk(sfhme);
-
- /*
- * xhat mappings should never be to a VMODSORT page.
- */
- ASSERT(hmeblkp->hblk_xhat_bit == 0);
-
sfmmup = hblktosfmmu(hmeblkp);
addr = tte_to_vaddr(hmeblkp, tte);
ttemod = tte;
TTE_CLR_WRT(&ttemod);
--- 7583,7592 ----
*** 7983,7994 ****
* and be waiting for io. The io can't complete
* because the interrupt thread is blocked trying to grab
* the as lock.
*/
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
if (hat == ksfmmup) {
if (IS_KMEM_VA_LARGEPAGE(addr)) {
ASSERT(segkmem_lpszc > 0);
pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
if (pfn != PFN_INVALID) {
--- 7847,7856 ----
*** 8168,8178 ****
/* ARGSUSED */
void
hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
{
ASSERT(hat != NULL);
- ASSERT(hat->sfmmu_xhat_provider == NULL);
}
/*
* Return the number of mappings to a particular page. This number is an
* approximation of the number of people sharing the page.
--- 8030,8039 ----
*** 8261,8278 ****
if (IS_PAHME(sfhme)) {
continue;
}
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit) {
- cnt++;
- if (cnt > sh_thresh) {
- sfmmu_mlist_exit(pml);
- return (1);
- }
- continue;
- }
if (hme_size(sfhme) != sz) {
continue;
}
if (hmeblkp->hblk_shared) {
--- 8122,8131 ----
*** 8383,8396 ****
ASSERT(!IS_PAHME(sfhme));
hmeblkp = sfmmu_hmetohblk(sfhme);
if (hme_size(sfhme) != sz) {
continue;
}
- if (hmeblkp->hblk_xhat_bit) {
- cmn_err(CE_PANIC,
- "hat_page_demote: xhat hmeblk");
- }
tset = sfmmu_pageunload(rootpp, sfhme, sz);
CPUSET_OR(cpuset, tset);
}
if (index >>= 1) {
sz++;
--- 8236,8245 ----
*** 8514,8525 ****
int i;
if (hat == NULL)
return (0);
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
for (i = 0; i < mmu_page_sizes; i++)
assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
(pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
if (hat->sfmmu_iblk == NULL)
--- 8363,8372 ----
*** 8535,8546 ****
int
hat_stats_enable(struct hat *hat)
{
hatlock_t *hatlockp;
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
hatlockp = sfmmu_hat_enter(hat);
hat->sfmmu_rmstat++;
sfmmu_hat_exit(hatlockp);
return (1);
}
--- 8382,8391 ----
*** 8548,8559 ****
void
hat_stats_disable(struct hat *hat)
{
hatlock_t *hatlockp;
- ASSERT(hat->sfmmu_xhat_provider == NULL);
-
hatlockp = sfmmu_hat_enter(hat);
hat->sfmmu_rmstat--;
sfmmu_hat_exit(hatlockp);
}
--- 8393,8402 ----
*** 8651,8662 ****
* Check size alignment.
*/
if (!ISM_ALIGNED(ismshift, len))
return (EINVAL);
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
-
/*
* Allocate ism_ment for the ism_hat's mapping list, and an
* ism map blk in case we need one. We must do our
* allocations before acquiring locks to prevent a deadlock
* in the kmem allocator on the mapping list lock.
--- 8494,8503 ----
*** 8864,8886 ****
ASSERT(ISM_ALIGNED(ismshift, addr));
ASSERT(ISM_ALIGNED(ismshift, len));
ASSERT(sfmmup != NULL);
ASSERT(sfmmup != ksfmmup);
- if (sfmmup->sfmmu_xhat_provider) {
- XHAT_UNSHARE(sfmmup, addr, len);
- return;
- } else {
- /*
- * This must be a CPU HAT. If the address space has
- * XHATs attached, inform all XHATs that ISM segment
- * is going away
- */
ASSERT(sfmmup->sfmmu_as != NULL);
- if (sfmmup->sfmmu_as->a_xhat != NULL)
- xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
- }
/*
* Make sure that during the entire time ISM mappings are removed,
* the trap handlers serialize behind us, and that no one else
* can be mucking with ISM mappings. This also lets us get away
--- 8705,8715 ----
*** 9330,9341 ****
for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
tmphme = sfhmep->hme_next;
if (IS_PAHME(sfhmep))
continue;
hmeblkp = sfmmu_hmetohblk(sfhmep);
- if (hmeblkp->hblk_xhat_bit)
- continue;
tmphat = hblktosfmmu(hmeblkp);
sfmmu_copytte(&sfhmep->hme_tte, &tte);
ASSERT(TTE_IS_VALID(&tte));
if (hmeblkp->hblk_shared || tmphat == hat ||
hmeblkp->hblk_lckcnt) {
--- 9159,9168 ----
*** 9358,9369 ****
for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
tmphme = sfhmep->hme_next;
if (IS_PAHME(sfhmep))
continue;
hmeblkp = sfmmu_hmetohblk(sfhmep);
- if (hmeblkp->hblk_xhat_bit)
- continue;
ASSERT(!hmeblkp->hblk_shared);
(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
}
if (PP_ISMAPPED_KPM(pp))
--- 9185,9194 ----
*** 9507,9518 ****
for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
if (IS_PAHME(sfhme))
continue;
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit)
- continue;
sfmmu_copytte(&sfhme->hme_tte, &tte);
ASSERT(TTE_IS_VALID(&tte));
vaddr = tte_to_vaddr(hmeblkp, tte);
--- 9332,9341 ----
*** 9656,9668 ****
if (IS_PAHME(sfhme))
continue;
hmeblkp = sfmmu_hmetohblk(sfhme);
- if (hmeblkp->hblk_xhat_bit)
- continue;
-
sfmmu_copytte(&sfhme->hme_tte, &tte);
ASSERT(TTE_IS_VALID(&tte));
vaddr = tte_to_vaddr(hmeblkp, tte);
color = addr_to_vcolor(vaddr);
--- 9479,9488 ----
*** 13406,13416 ****
/* ARGSUSED */
faultcode_t
hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
uint_t flags)
{
- ASSERT(hat->sfmmu_xhat_provider == NULL);
return (FC_NOSUPPORT);
}
/*
* Searchs the mapping list of the page for a mapping of the same size. If not
--- 13226,13235 ----
*** 13441,13452 ****
for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
if (IS_PAHME(sfhmep))
continue;
hmeblkp = sfmmu_hmetohblk(sfhmep);
- if (hmeblkp->hblk_xhat_bit)
- continue;
if (hme_size(sfhmep) == ttesz) {
/*
* another mapping of the same size. don't clear index.
*/
return;
--- 13260,13269 ----
*** 14027,14037 ****
if (srdp == NULL || r_size == 0) {
return (HAT_INVALID_REGION_COOKIE);
}
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
ASSERT(sfmmup != ksfmmup);
ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
ASSERT(srdp->srd_refcnt > 0);
ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
--- 13844,13853 ----
*** 14334,14349 ****
ASSERT(rgnp->rgn_id == rid);
ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
- ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
- if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
- xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
- rgnp->rgn_size, 0, NULL);
- }
-
if (sfmmup->sfmmu_free) {
ulong_t rttecnt;
r_pgszc = rgnp->rgn_pgszc;
r_size = rgnp->rgn_size;
--- 14150,14159 ----