remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still there and in theory runs when the system gets
*extremely* low on memory.  In practice it never runs, because the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapin/swapout events with kstat(1M):

$ kstat -p ::vm:swapin ::vm:swapout
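
kstat -p prints one module:instance:name:statistic pair per line.  The
swapin/swapout counters are kept per CPU in the cpu:N:vm kstats, so on a
system that has never swapped you would expect something like this
(illustrative output):

cpu:0:vm:swapin         0
cpu:0:vm:swapout        0
cpu:1:vm:swapin         0
cpu:1:vm:swapout        0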
remove xhat
The xhat infrastructure was added to support hardware with on-board MMUs,
such as the Zulu graphics card.  The VM used the xhat code to keep the CPU's
and Zulu's page tables in sync.  Since the only xhat consumer was Zulu (which
is gone), we can safely remove the whole layer, simplifying the VM subsystem.
The shape of the removal is sketched after the notes below.
Assorted notes:
- AS_BUSY flag was used solely by xhat
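
Most of the removal is mechanical: each HAT entry point loses its
sfmmu_xhat_provider check and the forwarding into the xhat layer.  A minimal
before/after sketch, condensed from the hat_setattr() hunk in the diff below
(not the complete xhat interface):

/* Before: dispatch to the foreign (device) MMU when an xhat provider is attached. */
void
hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
	if (hat->sfmmu_xhat_provider) {
		XHAT_SETATTR(hat, addr, len, attr);
		return;
	}
	/* CPU HAT: mirror the change to any attached xhats, just in case. */
	ASSERT(hat->sfmmu_as != NULL);
	if (hat->sfmmu_as->a_xhat != NULL)
		xhat_setattr_all(hat->sfmmu_as, addr, len, attr);

	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
}

/* After: the CPU is the only MMU, so the call goes straight through. */
void
hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
{
	ASSERT(hat->sfmmu_as != NULL);
	sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
}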

          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
(73 lines elided)
  74   74  #include <sys/bitmap.h>
  75   75  #include <sys/machlock.h>
  76   76  #include <sys/membar.h>
  77   77  #include <sys/atomic.h>
  78   78  #include <sys/cpu_module.h>
  79   79  #include <sys/prom_debug.h>
  80   80  #include <sys/ksynch.h>
  81   81  #include <sys/mem_config.h>
  82   82  #include <sys/mem_cage.h>
  83   83  #include <vm/vm_dep.h>
  84      -#include <vm/xhat_sfmmu.h>
  85   84  #include <sys/fpu/fpusystm.h>
  86   85  #include <vm/mach_kpm.h>
  87   86  #include <sys/callb.h>
  88   87  
  89   88  #ifdef  DEBUG
  90   89  #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len)                     \
  91   90          if (SFMMU_IS_SHMERID_VALID(rid)) {                              \
  92   91                  caddr_t _eaddr = (saddr) + (len);                       \
  93   92                  sf_srd_t *_srdp;                                        \
  94   93                  sf_region_t *_rgnp;                                     \
(1248 lines elided)
1343 1342          /*
1344 1343           * The big page VAC handling code assumes VAC
1345 1344           * will not be bigger than the smallest big
1346 1345           * page- which is 64K.
1347 1346           */
1348 1347          if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 1348                  cmn_err(CE_PANIC, "VAC too big!");
1350 1349          }
1351 1350  #endif
1352 1351  
1353      -        (void) xhat_init();
1354      -
1355 1352          uhme_hash_pa = va_to_pa(uhme_hash);
1356 1353          khme_hash_pa = va_to_pa(khme_hash);
1357 1354  
1358 1355          /*
1359 1356           * Initialize relocation locks. kpr_suspendlock is held
1360 1357           * at PIL_MAX to prevent interrupts from pinning the holder
1361 1358           * of a suspended TTE which may access it leading to a
1362 1359           * deadlock condition.
1363 1360           */
1364 1361          mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
(167 lines elided)
1532 1529          sfmmup->sfmmu_scdhat = 0;
1533 1530          sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1534 1531          if (sfmmup == ksfmmup) {
1535 1532                  CPUSET_ALL(sfmmup->sfmmu_cpusran);
1536 1533          } else {
1537 1534                  CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1538 1535          }
1539 1536          sfmmup->sfmmu_free = 0;
1540 1537          sfmmup->sfmmu_rmstat = 0;
1541 1538          sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1542      -        sfmmup->sfmmu_xhat_provider = NULL;
1543 1539          cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1544 1540          sfmmup->sfmmu_srdp = NULL;
1545 1541          SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1546 1542          bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1547 1543          sfmmup->sfmmu_scdp = NULL;
1548 1544          sfmmup->sfmmu_scd_link.next = NULL;
1549 1545          sfmmup->sfmmu_scd_link.prev = NULL;
1550 1546          return (sfmmup);
1551 1547  }
1552 1548  
(359 lines elided)
1912 1908  
1913 1909  /*
1914 1910   * Free all the translation resources for the specified address space.
1915 1911   * Called from as_free when an address space is being destroyed.
1916 1912   */
1917 1913  void
1918 1914  hat_free_start(struct hat *sfmmup)
1919 1915  {
1920 1916          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1921 1917          ASSERT(sfmmup != ksfmmup);
1922      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1923 1918  
1924 1919          sfmmup->sfmmu_free = 1;
1925 1920          if (sfmmup->sfmmu_scdp != NULL) {
1926 1921                  sfmmu_leave_scd(sfmmup, 0);
1927 1922          }
1928 1923  
1929 1924          ASSERT(sfmmup->sfmmu_scdp == NULL);
1930 1925  }
1931 1926  
1932 1927  void
1933 1928  hat_free_end(struct hat *sfmmup)
1934 1929  {
1935 1930          int i;
1936 1931  
1937      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1938 1932          ASSERT(sfmmup->sfmmu_free == 1);
1939 1933          ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1940 1934          ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1941 1935          ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1942 1936          ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1943 1937          ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1944 1938          ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1945 1939  
1946 1940          if (sfmmup->sfmmu_rmstat) {
1947 1941                  hat_freestat(sfmmup->sfmmu_as, NULL);
(21 lines elided)
1969 1963  #ifdef DEBUG
1970 1964          for (i = 0; i < SFMMU_L1_HMERLINKS; i++) {
1971 1965                  ASSERT(sfmmup->sfmmu_hmeregion_links[i] == NULL);
1972 1966          }
1973 1967  #endif
1974 1968  
1975 1969          kmem_cache_free(sfmmuid_cache, sfmmup);
1976 1970  }
1977 1971  
1978 1972  /*
1979      - * Set up any translation structures, for the specified address space,
1980      - * that are needed or preferred when the process is being swapped in.
1981      - */
1982      -/* ARGSUSED */
1983      -void
1984      -hat_swapin(struct hat *hat)
1985      -{
1986      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
1987      -}
1988      -
1989      -/*
1990      - * Free all of the translation resources, for the specified address space,
1991      - * that can be freed while the process is swapped out. Called from as_swapout.
1992      - * Also, free up the ctx that this process was using.
1993      - */
1994      -void
1995      -hat_swapout(struct hat *sfmmup)
1996      -{
1997      -        struct hmehash_bucket *hmebp;
1998      -        struct hme_blk *hmeblkp;
1999      -        struct hme_blk *pr_hblk = NULL;
2000      -        struct hme_blk *nx_hblk;
2001      -        int i;
2002      -        struct hme_blk *list = NULL;
2003      -        hatlock_t *hatlockp;
2004      -        struct tsb_info *tsbinfop;
2005      -        struct free_tsb {
2006      -                struct free_tsb *next;
2007      -                struct tsb_info *tsbinfop;
2008      -        };                      /* free list of TSBs */
2009      -        struct free_tsb *freelist, *last, *next;
2010      -
2011      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012      -        SFMMU_STAT(sf_swapout);
2013      -
2014      -        /*
2015      -         * There is no way to go from an as to all its translations in sfmmu.
2016      -         * Here is one of the times when we take the big hit and traverse
2017      -         * the hash looking for hme_blks to free up.  Not only do we free up
2018      -         * this as hme_blks but all those that are free.  We are obviously
2019      -         * swapping because we need memory so let's free up as much
2020      -         * as we can.
2021      -         *
2022      -         * Note that we don't flush TLB/TSB here -- it's not necessary
2023      -         * because:
2024      -         *  1) we free the ctx we're using and throw away the TSB(s);
2025      -         *  2) processes aren't runnable while being swapped out.
2026      -         */
2027      -        ASSERT(sfmmup != KHATID);
2028      -        for (i = 0; i <= UHMEHASH_SZ; i++) {
2029      -                hmebp = &uhme_hash[i];
2030      -                SFMMU_HASH_LOCK(hmebp);
2031      -                hmeblkp = hmebp->hmeblkp;
2032      -                pr_hblk = NULL;
2033      -                while (hmeblkp) {
2034      -
2035      -                        ASSERT(!hmeblkp->hblk_xhat_bit);
2036      -
2037      -                        if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038      -                            !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039      -                                ASSERT(!hmeblkp->hblk_shared);
2040      -                                (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041      -                                    (caddr_t)get_hblk_base(hmeblkp),
2042      -                                    get_hblk_endaddr(hmeblkp),
2043      -                                    NULL, HAT_UNLOAD);
2044      -                        }
2045      -                        nx_hblk = hmeblkp->hblk_next;
2046      -                        if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
2047      -                                ASSERT(!hmeblkp->hblk_lckcnt);
2048      -                                sfmmu_hblk_hash_rm(hmebp, hmeblkp, pr_hblk,
2049      -                                    &list, 0);
2050      -                        } else {
2051      -                                pr_hblk = hmeblkp;
2052      -                        }
2053      -                        hmeblkp = nx_hblk;
2054      -                }
2055      -                SFMMU_HASH_UNLOCK(hmebp);
2056      -        }
2057      -
2058      -        sfmmu_hblks_list_purge(&list, 0);
2059      -
2060      -        /*
2061      -         * Now free up the ctx so that others can reuse it.
2062      -         */
2063      -        hatlockp = sfmmu_hat_enter(sfmmup);
2064      -
2065      -        sfmmu_invalidate_ctx(sfmmup);
2066      -
2067      -        /*
2068      -         * Free TSBs, but not tsbinfos, and set SWAPPED flag.
2069      -         * If TSBs were never swapped in, just return.
2070      -         * This implies that we don't support partial swapping
2071      -         * of TSBs -- either all are swapped out, or none are.
2072      -         *
2073      -         * We must hold the HAT lock here to prevent racing with another
2074      -         * thread trying to unmap TTEs from the TSB or running the post-
2075      -         * relocator after relocating the TSB's memory.  Unfortunately, we
2076      -         * can't free memory while holding the HAT lock or we could
2077      -         * deadlock, so we build a list of TSBs to be freed after marking
2078      -         * the tsbinfos as swapped out and free them after dropping the
2079      -         * lock.
2080      -         */
2081      -        if (SFMMU_FLAGS_ISSET(sfmmup, HAT_SWAPPED)) {
2082      -                sfmmu_hat_exit(hatlockp);
2083      -                return;
2084      -        }
2085      -
2086      -        SFMMU_FLAGS_SET(sfmmup, HAT_SWAPPED);
2087      -        last = freelist = NULL;
2088      -        for (tsbinfop = sfmmup->sfmmu_tsb; tsbinfop != NULL;
2089      -            tsbinfop = tsbinfop->tsb_next) {
2090      -                ASSERT((tsbinfop->tsb_flags & TSB_SWAPPED) == 0);
2091      -
2092      -                /*
2093      -                 * Cast the TSB into a struct free_tsb and put it on the free
2094      -                 * list.
2095      -                 */
2096      -                if (freelist == NULL) {
2097      -                        last = freelist = (struct free_tsb *)tsbinfop->tsb_va;
2098      -                } else {
2099      -                        last->next = (struct free_tsb *)tsbinfop->tsb_va;
2100      -                        last = last->next;
2101      -                }
2102      -                last->next = NULL;
2103      -                last->tsbinfop = tsbinfop;
2104      -                tsbinfop->tsb_flags |= TSB_SWAPPED;
2105      -                /*
2106      -                 * Zero out the TTE to clear the valid bit.
2107      -                 * Note we can't use a value like 0xbad because we want to
2108      -                 * ensure diagnostic bits are NEVER set on TTEs that might
2109      -                 * be loaded.  The intent is to catch any invalid access
2110      -                 * to the swapped TSB, such as a thread running with a valid
2111      -                 * context without first calling sfmmu_tsb_swapin() to
2112      -                 * allocate TSB memory.
2113      -                 */
2114      -                tsbinfop->tsb_tte.ll = 0;
2115      -        }
2116      -
2117      -        /* Now we can drop the lock and free the TSB memory. */
2118      -        sfmmu_hat_exit(hatlockp);
2119      -        for (; freelist != NULL; freelist = next) {
2120      -                next = freelist->next;
2121      -                sfmmu_tsb_free(freelist->tsbinfop);
2122      -        }
2123      -}
2124      -
2125      -/*
2126 1973   * Duplicate the translations of an as into another newas
2127 1974   */
2128 1975  /* ARGSUSED */
2129 1976  int
2130 1977  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 1978          uint_t flag)
2132 1979  {
2133 1980          sf_srd_t *srdp;
2134 1981          sf_scd_t *scdp;
2135 1982          int i;
2136 1983          extern uint_t get_color_start(struct as *);
2137 1984  
2138      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
2139 1985          ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2140 1986              (flag == HAT_DUP_SRD));
2141 1987          ASSERT(hat != ksfmmup);
2142 1988          ASSERT(newhat != ksfmmup);
2143 1989          ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2144 1990  
2145 1991          if (flag == HAT_DUP_COW) {
2146 1992                  panic("hat_dup: HAT_DUP_COW not supported");
2147 1993          }
2148 1994  
(49 lines elided)
2198 2044  {
2199 2045          hat_do_memload(hat, addr, pp, attr, flags,
2200 2046              SFMMU_INVALID_SHMERID);
2201 2047  }
2202 2048  
2203 2049  void
2204 2050  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2205 2051          uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2206 2052  {
2207 2053          uint_t rid;
2208      -        if (rcookie == HAT_INVALID_REGION_COOKIE ||
2209      -            hat->sfmmu_xhat_provider != NULL) {
     2054 +        if (rcookie == HAT_INVALID_REGION_COOKIE) {
2210 2055                  hat_do_memload(hat, addr, pp, attr, flags,
2211 2056                      SFMMU_INVALID_SHMERID);
2212 2057                  return;
2213 2058          }
2214 2059          rid = (uint_t)((uint64_t)rcookie);
2215 2060          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2216 2061          hat_do_memload(hat, addr, pp, attr, flags, rid);
2217 2062  }
2218 2063  
2219 2064  /*
(13 lines elided)
2233 2078          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2234 2079          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2235 2080          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2236 2081          SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2237 2082  
2238 2083          if (PP_ISFREE(pp)) {
2239 2084                  panic("hat_memload: loading a mapping to free page %p",
2240 2085                      (void *)pp);
2241 2086          }
2242 2087  
2243      -        if (hat->sfmmu_xhat_provider) {
2244      -                /* no regions for xhats */
2245      -                ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2246      -                XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2247      -                return;
2248      -        }
2249      -
2250 2088          ASSERT((hat == ksfmmup) ||
2251 2089              AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2252 2090  
2253 2091          if (flags & ~SFMMU_LOAD_ALLFLAG)
2254 2092                  cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2255 2093                      flags & ~SFMMU_LOAD_ALLFLAG);
2256 2094  
2257 2095          if (hat->sfmmu_rmstat)
2258 2096                  hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2259 2097  
(29 lines elided)
2289 2127  void
2290 2128  hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2291 2129          uint_t attr, int flags)
2292 2130  {
2293 2131          tte_t tte;
2294 2132          struct page *pp = NULL;
2295 2133          int use_lgpg = 0;
2296 2134  
2297 2135          ASSERT(hat != NULL);
2298 2136  
2299      -        if (hat->sfmmu_xhat_provider) {
2300      -                XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2301      -                return;
2302      -        }
2303      -
2304 2137          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2305 2138          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2306 2139          ASSERT((hat == ksfmmup) ||
2307 2140              AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2308 2141          if (len == 0)
2309 2142                  panic("hat_devload: zero len");
2310 2143          if (flags & ~SFMMU_LOAD_ALLFLAG)
2311 2144                  cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2312 2145                      flags & ~SFMMU_LOAD_ALLFLAG);
2313 2146  
(126 lines elided)
2440 2273          hat_do_memload_array(hat, addr, len, pps, attr, flags,
2441 2274              SFMMU_INVALID_SHMERID);
2442 2275  }
2443 2276  
2444 2277  void
2445 2278  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2446 2279          struct page **pps, uint_t attr, uint_t flags,
2447 2280          hat_region_cookie_t rcookie)
2448 2281  {
2449 2282          uint_t rid;
2450      -        if (rcookie == HAT_INVALID_REGION_COOKIE ||
2451      -            hat->sfmmu_xhat_provider != NULL) {
     2283 +        if (rcookie == HAT_INVALID_REGION_COOKIE) {
2452 2284                  hat_do_memload_array(hat, addr, len, pps, attr, flags,
2453 2285                      SFMMU_INVALID_SHMERID);
2454 2286                  return;
2455 2287          }
2456 2288          rid = (uint_t)((uint64_t)rcookie);
2457 2289          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2458 2290          hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2459 2291  }
2460 2292  
2461 2293  /*
(14 lines elided)
2476 2308          int  ttesz;
2477 2309          size_t mapsz;
2478 2310          pgcnt_t numpg, npgs;
2479 2311          tte_t tte;
2480 2312          page_t *pp;
2481 2313          uint_t large_pages_disable;
2482 2314  
2483 2315          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2484 2316          SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2485 2317  
2486      -        if (hat->sfmmu_xhat_provider) {
2487      -                ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2488      -                XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2489      -                return;
2490      -        }
2491      -
2492 2318          if (hat->sfmmu_rmstat)
2493 2319                  hat_resvstat(len, hat->sfmmu_as, addr);
2494 2320  
2495 2321  #if defined(SF_ERRATA_57)
2496 2322          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2497 2323              (addr < errata57_limit) && (attr & PROT_EXEC) &&
2498 2324              !(flags & HAT_LOAD_SHARE)) {
2499 2325                  cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2500 2326                      "user page executable");
2501 2327                  attr &= ~PROT_EXEC;
(1462 lines elided)
3964 3790  void
3965 3791  hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3966 3792  {
3967 3793          struct hmehash_bucket *hmebp;
3968 3794          hmeblk_tag hblktag;
3969 3795          int hmeshift, hashno = 1;
3970 3796          struct hme_blk *hmeblkp, *list = NULL;
3971 3797          caddr_t endaddr;
3972 3798  
3973 3799          ASSERT(sfmmup != NULL);
3974      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3975 3800  
3976 3801          ASSERT((sfmmup == ksfmmup) ||
3977 3802              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3978 3803          ASSERT((len & MMU_PAGEOFFSET) == 0);
3979 3804          endaddr = addr + len;
3980 3805          hblktag.htag_id = sfmmup;
3981 3806          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3982 3807  
3983 3808          /*
3984 3809           * Spitfire supports 4 page sizes.
(64 lines elided)
4049 3874          struct hme_blk *hmeblkp;
4050 3875          struct hme_blk *pr_hblk;
4051 3876          struct hme_blk *list;
4052 3877  
4053 3878          if (rcookie == HAT_INVALID_REGION_COOKIE) {
4054 3879                  hat_unlock(sfmmup, addr, len);
4055 3880                  return;
4056 3881          }
4057 3882  
4058 3883          ASSERT(sfmmup != NULL);
4059      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4060 3884          ASSERT(sfmmup != ksfmmup);
4061 3885  
4062 3886          srdp = sfmmup->sfmmu_srdp;
4063 3887          rid = (uint_t)((uint64_t)rcookie);
4064 3888          VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4065 3889          eaddr = addr + len;
4066 3890          va = addr;
4067 3891          list = NULL;
4068 3892          rgnp = srdp->srd_hmergnp[rid];
4069 3893          SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
(691 lines elided)
4761 4585   * to implement optimizations in the segment drivers.
4762 4586   *
4763 4587   */
4764 4588  int
4765 4589  hat_probe(struct hat *sfmmup, caddr_t addr)
4766 4590  {
4767 4591          pfn_t pfn;
4768 4592          tte_t tte;
4769 4593  
4770 4594          ASSERT(sfmmup != NULL);
4771      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4772 4595  
4773 4596          ASSERT((sfmmup == ksfmmup) ||
4774 4597              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4775 4598  
4776 4599          if (sfmmup == ksfmmup) {
4777 4600                  while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4778 4601                      == PFN_SUSPENDED) {
4779 4602                          sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4780 4603                  }
4781 4604          } else {
(4 lines elided)
4786 4609                  return (1);
4787 4610          else
4788 4611                  return (0);
4789 4612  }
4790 4613  
4791 4614  ssize_t
4792 4615  hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4793 4616  {
4794 4617          tte_t tte;
4795 4618  
4796      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4797      -
4798 4619          if (sfmmup == ksfmmup) {
4799 4620                  if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4800 4621                          return (-1);
4801 4622                  }
4802 4623          } else {
4803 4624                  if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4804 4625                          return (-1);
4805 4626                  }
4806 4627          }
4807 4628  
4808 4629          ASSERT(TTE_IS_VALID(&tte));
4809 4630          return (TTEBYTES(TTE_CSZ(&tte)));
4810 4631  }
4811 4632  
4812 4633  uint_t
4813 4634  hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4814 4635  {
4815 4636          tte_t tte;
4816 4637  
4817      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4818      -
4819 4638          if (sfmmup == ksfmmup) {
4820 4639                  if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4821 4640                          tte.ll = 0;
4822 4641                  }
4823 4642          } else {
4824 4643                  if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4825 4644                          tte.ll = 0;
4826 4645                  }
4827 4646          }
4828 4647          if (TTE_IS_VALID(&tte)) {
(3 lines elided)
4832 4651          *attr = 0;
4833 4652          return ((uint_t)0xffffffff);
4834 4653  }
4835 4654  
4836 4655  /*
4837 4656   * Enables more attributes on specified address range (ie. logical OR)
4838 4657   */
4839 4658  void
4840 4659  hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4841 4660  {
4842      -        if (hat->sfmmu_xhat_provider) {
4843      -                XHAT_SETATTR(hat, addr, len, attr);
4844      -                return;
4845      -        } else {
4846      -                /*
4847      -                 * This must be a CPU HAT. If the address space has
4848      -                 * XHATs attached, change attributes for all of them,
4849      -                 * just in case
4850      -                 */
4851      -                ASSERT(hat->sfmmu_as != NULL);
4852      -                if (hat->sfmmu_as->a_xhat != NULL)
4853      -                        xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4854      -        }
     4661 +        ASSERT(hat->sfmmu_as != NULL);
4855 4662  
4856 4663          sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4857 4664  }
4858 4665  
4859 4666  /*
4860 4667   * Assigns attributes to the specified address range.  All the attributes
4861 4668   * are specified.
4862 4669   */
4863 4670  void
4864 4671  hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4865 4672  {
4866      -        if (hat->sfmmu_xhat_provider) {
4867      -                XHAT_CHGATTR(hat, addr, len, attr);
4868      -                return;
4869      -        } else {
4870      -                /*
4871      -                 * This must be a CPU HAT. If the address space has
4872      -                 * XHATs attached, change attributes for all of them,
4873      -                 * just in case
4874      -                 */
4875      -                ASSERT(hat->sfmmu_as != NULL);
4876      -                if (hat->sfmmu_as->a_xhat != NULL)
4877      -                        xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4878      -        }
     4673 +        ASSERT(hat->sfmmu_as != NULL);
4879 4674  
4880 4675          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4881 4676  }
4882 4677  
4883 4678  /*
4884 4679   * Remove attributes on the specified address range (ie. loginal NAND)
4885 4680   */
4886 4681  void
4887 4682  hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4888 4683  {
4889      -        if (hat->sfmmu_xhat_provider) {
4890      -                XHAT_CLRATTR(hat, addr, len, attr);
4891      -                return;
4892      -        } else {
4893      -                /*
4894      -                 * This must be a CPU HAT. If the address space has
4895      -                 * XHATs attached, change attributes for all of them,
4896      -                 * just in case
4897      -                 */
4898      -                ASSERT(hat->sfmmu_as != NULL);
4899      -                if (hat->sfmmu_as->a_xhat != NULL)
4900      -                        xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4901      -        }
     4684 +        ASSERT(hat->sfmmu_as != NULL);
4902 4685  
4903 4686          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4904 4687  }
4905 4688  
4906 4689  /*
4907 4690   * Change attributes on an address range to that specified by attr and mode.
4908 4691   */
4909 4692  static void
4910 4693  sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4911 4694          int mode)
(332 lines elided)
5244 5027          hmeblk_tag hblktag;
5245 5028          int hmeshift, hashno = 1;
5246 5029          struct hme_blk *hmeblkp, *list = NULL;
5247 5030          caddr_t endaddr;
5248 5031          cpuset_t cpuset;
5249 5032          demap_range_t dmr;
5250 5033  
5251 5034          ASSERT((len & MMU_PAGEOFFSET) == 0);
5252 5035          ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5253 5036  
5254      -        if (sfmmup->sfmmu_xhat_provider) {
5255      -                XHAT_CHGPROT(sfmmup, addr, len, vprot);
5256      -                return;
5257      -        } else {
5258      -                /*
5259      -                 * This must be a CPU HAT. If the address space has
5260      -                 * XHATs attached, change attributes for all of them,
5261      -                 * just in case
5262      -                 */
5263      -                ASSERT(sfmmup->sfmmu_as != NULL);
5264      -                if (sfmmup->sfmmu_as->a_xhat != NULL)
5265      -                        xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5266      -        }
     5037 +        ASSERT(sfmmup->sfmmu_as != NULL);
5267 5038  
5268 5039          CPUSET_ZERO(cpuset);
5269 5040  
5270 5041          if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5271 5042              ((addr + len) > (caddr_t)USERLIMIT)) {
5272 5043                  panic("user addr %p vprot %x in kernel space",
5273 5044                      (void *)addr, vprot);
5274 5045          }
5275 5046          endaddr = addr + len;
5276 5047          hblktag.htag_id = sfmmup;
(414 lines elided)
5691 5462          struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5692 5463          caddr_t endaddr;
5693 5464          cpuset_t cpuset;
5694 5465          int addr_count = 0;
5695 5466          int a;
5696 5467          caddr_t cb_start_addr[MAX_CB_ADDR];
5697 5468          caddr_t cb_end_addr[MAX_CB_ADDR];
5698 5469          int issegkmap = ISSEGKMAP(sfmmup, addr);
5699 5470          demap_range_t dmr, *dmrp;
5700 5471  
5701      -        if (sfmmup->sfmmu_xhat_provider) {
5702      -                XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
5703      -                return;
5704      -        } else {
5705      -                /*
5706      -                 * This must be a CPU HAT. If the address space has
5707      -                 * XHATs attached, unload the mappings for all of them,
5708      -                 * just in case
5709      -                 */
5710      -                ASSERT(sfmmup->sfmmu_as != NULL);
5711      -                if (sfmmup->sfmmu_as->a_xhat != NULL)
5712      -                        xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5713      -                            len, flags, callback);
5714      -        }
     5472 +        ASSERT(sfmmup->sfmmu_as != NULL);
5715 5473  
5716 5474          ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5717 5475              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5718 5476  
5719 5477          ASSERT(sfmmup != NULL);
5720 5478          ASSERT((len & MMU_PAGEOFFSET) == 0);
5721 5479          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5722 5480  
5723 5481          /*
5724 5482           * Probing through a large VA range (say 63 bits) will be slow, even
(256 lines elided)
5981 5739                  sfmmu_check_page_sizes(sfmmup, 0);
5982 5740  }
5983 5741  
5984 5742  /*
5985 5743   * Unload all the mappings in the range [addr..addr+len). addr and len must
5986 5744   * be MMU_PAGESIZE aligned.
5987 5745   */
5988 5746  void
5989 5747  hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5990 5748  {
5991      -        if (sfmmup->sfmmu_xhat_provider) {
5992      -                XHAT_UNLOAD(sfmmup, addr, len, flags);
5993      -                return;
5994      -        }
5995 5749          hat_unload_callback(sfmmup, addr, len, flags, NULL);
5996 5750  }
5997 5751  
5998 5752  
5999 5753  /*
6000 5754   * Find the largest mapping size for this page.
6001 5755   */
6002 5756  int
6003 5757  fnd_mapping_sz(page_t *pp)
6004 5758  {
(319 lines elided)
6324 6078  void
6325 6079  hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6326 6080  {
6327 6081          struct hmehash_bucket *hmebp;
6328 6082          hmeblk_tag hblktag;
6329 6083          int hmeshift, hashno = 1;
6330 6084          struct hme_blk *hmeblkp, *list = NULL;
6331 6085          caddr_t endaddr;
6332 6086          cpuset_t cpuset;
6333 6087  
6334      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6335 6088          ASSERT((sfmmup == ksfmmup) ||
6336 6089              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6337 6090          ASSERT((len & MMU_PAGEOFFSET) == 0);
6338 6091          ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6339 6092              (clearflag == HAT_SYNC_ZERORM));
6340 6093  
6341 6094          CPUSET_ZERO(cpuset);
6342 6095  
6343 6096          endaddr = addr + len;
6344 6097          hblktag.htag_id = sfmmup;
(786 lines elided)
7131 6884  {
7132 6885          struct page *origpp = pp;
7133 6886          struct sf_hment *sfhme, *tmphme;
7134 6887          struct hme_blk *hmeblkp;
7135 6888          kmutex_t *pml;
7136 6889  #ifdef VAC
7137 6890          kmutex_t *pmtx;
7138 6891  #endif
7139 6892          cpuset_t cpuset, tset;
7140 6893          int index, cons;
7141      -        int xhme_blks;
7142 6894          int pa_hments;
7143 6895  
7144 6896          ASSERT(PAGE_EXCL(pp));
7145 6897  
7146      -retry_xhat:
7147 6898          tmphme = NULL;
7148      -        xhme_blks = 0;
7149 6899          pa_hments = 0;
7150 6900          CPUSET_ZERO(cpuset);
7151 6901  
7152 6902          pml = sfmmu_mlist_enter(pp);
7153 6903  
7154 6904  #ifdef VAC
7155 6905          if (pp->p_kpmref)
7156 6906                  sfmmu_kpm_pageunload(pp);
7157 6907          ASSERT(!PP_ISMAPPED_KPM(pp));
7158 6908  #endif
(11 lines elided)
7170 6920          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7171 6921                  tmphme = sfhme->hme_next;
7172 6922  
7173 6923                  if (IS_PAHME(sfhme)) {
7174 6924                          ASSERT(sfhme->hme_data != NULL);
7175 6925                          pa_hments++;
7176 6926                          continue;
7177 6927                  }
7178 6928  
7179 6929                  hmeblkp = sfmmu_hmetohblk(sfhme);
7180      -                if (hmeblkp->hblk_xhat_bit) {
7181      -                        struct xhat_hme_blk *xblk =
7182      -                            (struct xhat_hme_blk *)hmeblkp;
7183      -
7184      -                        (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
7185      -                            pp, forceflag, XBLK2PROVBLK(xblk));
7186      -
7187      -                        xhme_blks = 1;
7188      -                        continue;
7189      -                }
7190 6930  
7191 6931                  /*
7192 6932                   * If there are kernel mappings don't unload them, they will
7193 6933                   * be suspended.
7194 6934                   */
7195 6935                  if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7196 6936                      hmeblkp->hblk_tag.htag_id == ksfmmup)
7197 6937                          continue;
7198 6938  
7199 6939                  tset = sfmmu_pageunload(pp, sfhme, cons);
(16 lines elided)
7216 6956           * cpuset may be empty if the page was only mapped by segkpm,
7217 6957           * in which case we won't actually cross-trap.
7218 6958           */
7219 6959          xt_sync(cpuset);
7220 6960  
7221 6961          /*
7222 6962           * The page should have no mappings at this point, unless
7223 6963           * we were called from hat_page_relocate() in which case we
7224 6964           * leave the locked mappings which will be suspended later.
7225 6965           */
7226      -        ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
     6966 +        ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7227 6967              (forceflag == SFMMU_KERNEL_RELOC));
7228 6968  
7229 6969  #ifdef VAC
7230 6970          if (PP_ISTNC(pp)) {
7231 6971                  if (cons == TTE8K) {
7232 6972                          pmtx = sfmmu_page_enter(pp);
7233 6973                          PP_CLRTNC(pp);
7234 6974                          sfmmu_page_exit(pmtx);
7235 6975                  } else {
7236 6976                          conv_tnc(pp, cons);
(14 lines elided)
7251 6991                  for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7252 6992                          tmphme = sfhme->hme_next;
7253 6993                          if (IS_PAHME(sfhme)) {
7254 6994                                  struct pa_hment *pahmep = sfhme->hme_data;
7255 6995                                  sfmmu_pahment_leaked(pahmep);
7256 6996                                  HME_SUB(sfhme, pp);
7257 6997                                  kmem_cache_free(pa_hment_cache, pahmep);
7258 6998                          }
7259 6999                  }
7260 7000  
7261      -                ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
     7001 +                ASSERT(!PP_ISMAPPED(origpp));
7262 7002          }
7263 7003  
7264 7004          sfmmu_mlist_exit(pml);
7265 7005  
7266      -        /*
7267      -         * XHAT may not have finished unloading pages
7268      -         * because some other thread was waiting for
7269      -         * mlist lock and XHAT_PAGEUNLOAD let it do
7270      -         * the job.
7271      -         */
7272      -        if (xhme_blks) {
7273      -                pp = origpp;
7274      -                goto retry_xhat;
7275      -        }
7276      -
7277 7006          return (0);
7278 7007  }
7279 7008  
7280 7009  cpuset_t
7281 7010  sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7282 7011  {
7283 7012          struct hme_blk *hmeblkp;
7284 7013          sfmmu_t *sfmmup;
7285 7014          tte_t tte, ttemod;
7286 7015  #ifdef DEBUG
(260 lines elided)
7547 7276                   * from the list.
7548 7277                   */
7549 7278                  tmphme = sfhme->hme_next;
7550 7279                  if (IS_PAHME(sfhme))
7551 7280                          continue;
7552 7281                  /*
7553 7282                   * If we are looking for large mappings and this hme doesn't
7554 7283                   * reach the range we are seeking, just ignore it.
7555 7284                   */
7556 7285                  hmeblkp = sfmmu_hmetohblk(sfhme);
7557      -                if (hmeblkp->hblk_xhat_bit)
7558      -                        continue;
7559 7286  
7560 7287                  if (hme_size(sfhme) < cons)
7561 7288                          continue;
7562 7289  
7563 7290                  if (stop_on_sh) {
7564 7291                          if (hmeblkp->hblk_shared) {
7565 7292                                  sf_srd_t *srdp = hblktosrd(hmeblkp);
7566 7293                                  uint_t rid = hmeblkp->hblk_tag.htag_rid;
7567 7294                                  sf_region_t *rgnp;
7568 7295                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
(139 lines elided)
7708 7435          ASSERT(sfmmu_mlist_held(pp));
7709 7436  
7710 7437          CPUSET_ZERO(cpuset);
7711 7438          SFMMU_STAT(sf_clrwrt);
7712 7439  
7713 7440  retry:
7714 7441  
7715 7442          sfmmu_copytte(&sfhme->hme_tte, &tte);
7716 7443          if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7717 7444                  hmeblkp = sfmmu_hmetohblk(sfhme);
7718      -
7719      -                /*
7720      -                 * xhat mappings should never be to a VMODSORT page.
7721      -                 */
7722      -                ASSERT(hmeblkp->hblk_xhat_bit == 0);
7723      -
7724 7445                  sfmmup = hblktosfmmu(hmeblkp);
7725 7446                  addr = tte_to_vaddr(hmeblkp, tte);
7726 7447  
7727 7448                  ttemod = tte;
7728 7449                  TTE_CLR_WRT(&ttemod);
7729 7450                  TTE_CLR_MOD(&ttemod);
7730 7451                  ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7731 7452  
7732 7453                  /*
7733 7454                   * if cas failed and the new value is not what
(244 lines elided)
7978 7699           * We would like to
7979 7700           * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7980 7701           * but we can't because the iommu driver will call this
7981 7702           * routine at interrupt time and it can't grab the as lock
7982 7703           * or it will deadlock: A thread could have the as lock
7983 7704           * and be waiting for io.  The io can't complete
7984 7705           * because the interrupt thread is blocked trying to grab
7985 7706           * the as lock.
7986 7707           */
7987 7708  
7988      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
7989      -
7990 7709          if (hat == ksfmmup) {
7991 7710                  if (IS_KMEM_VA_LARGEPAGE(addr)) {
7992 7711                          ASSERT(segkmem_lpszc > 0);
7993 7712                          pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7994 7713                          if (pfn != PFN_INVALID) {
7995 7714                                  sfmmu_check_kpfn(pfn);
7996 7715                                  return (pfn);
7997 7716                          }
7998 7717                  } else if (segkpm && IS_KPM_ADDR(addr)) {
7999 7718                          return (sfmmu_kpm_vatopfn(addr));
(163 lines elided)
8163 7882  
8164 7883  
8165 7884  /*
8166 7885   * For compatability with AT&T and later optimizations
8167 7886   */
8168 7887  /* ARGSUSED */
8169 7888  void
8170 7889  hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8171 7890  {
8172 7891          ASSERT(hat != NULL);
8173      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8174 7892  }
8175 7893  
8176 7894  /*
8177 7895   * Return the number of mappings to a particular page.  This number is an
8178 7896   * approximation of the number of people sharing the page.
8179 7897   *
8180 7898   * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8181 7899   * hat_page_checkshare() can be used to compare threshold to share
8182 7900   * count that reflects the number of region sharers albeit at higher cost.
8183 7901   */
(72 lines elided)
8256 7974          index = PP_MAPINDEX(pp);
8257 7975  
8258 7976  again:
8259 7977          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8260 7978                  tmphme = sfhme->hme_next;
8261 7979                  if (IS_PAHME(sfhme)) {
8262 7980                          continue;
8263 7981                  }
8264 7982  
8265 7983                  hmeblkp = sfmmu_hmetohblk(sfhme);
8266      -                if (hmeblkp->hblk_xhat_bit) {
8267      -                        cnt++;
8268      -                        if (cnt > sh_thresh) {
8269      -                                sfmmu_mlist_exit(pml);
8270      -                                return (1);
8271      -                        }
8272      -                        continue;
8273      -                }
8274 7984                  if (hme_size(sfhme) != sz) {
8275 7985                          continue;
8276 7986                  }
8277 7987  
8278 7988                  if (hmeblkp->hblk_shared) {
8279 7989                          sf_srd_t *srdp = hblktosrd(hmeblkp);
8280 7990                          uint_t rid = hmeblkp->hblk_tag.htag_rid;
8281 7991                          sf_region_t *rgnp;
8282 7992                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8283 7993                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
(94 lines elided)
8378 8088                  }
8379 8089                  ASSERT(sz <= pszc);
8380 8090                  rootpp = PP_GROUPLEADER(pp, sz);
8381 8091                  for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8382 8092                          tmphme = sfhme->hme_next;
8383 8093                          ASSERT(!IS_PAHME(sfhme));
8384 8094                          hmeblkp = sfmmu_hmetohblk(sfhme);
8385 8095                          if (hme_size(sfhme) != sz) {
8386 8096                                  continue;
8387 8097                          }
8388      -                        if (hmeblkp->hblk_xhat_bit) {
8389      -                                cmn_err(CE_PANIC,
8390      -                                    "hat_page_demote: xhat hmeblk");
8391      -                        }
8392 8098                          tset = sfmmu_pageunload(rootpp, sfhme, sz);
8393 8099                          CPUSET_OR(cpuset, tset);
8394 8100                  }
8395 8101                  if (index >>= 1) {
8396 8102                          sz++;
8397 8103                  }
8398 8104          }
8399 8105  
8400 8106          ASSERT(!PP_ISMAPPED_LARGE(pp));
8401 8107  
(107 lines elided)
8509 8215   */
8510 8216  size_t
8511 8217  hat_get_mapped_size(struct hat *hat)
8512 8218  {
8513 8219          size_t          assize = 0;
8514 8220          int             i;
8515 8221  
8516 8222          if (hat == NULL)
8517 8223                  return (0);
8518 8224  
8519      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8520      -
8521 8225          for (i = 0; i < mmu_page_sizes; i++)
8522 8226                  assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8523 8227                      (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8524 8228  
8525 8229          if (hat->sfmmu_iblk == NULL)
8526 8230                  return (assize);
8527 8231  
8528 8232          for (i = 0; i < mmu_page_sizes; i++)
8529 8233                  assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8530 8234                      (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8531 8235  
8532 8236          return (assize);
8533 8237  }
8534 8238  
8535 8239  int
8536 8240  hat_stats_enable(struct hat *hat)
8537 8241  {
8538 8242          hatlock_t       *hatlockp;
8539 8243  
8540      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8541      -
8542 8244          hatlockp = sfmmu_hat_enter(hat);
8543 8245          hat->sfmmu_rmstat++;
8544 8246          sfmmu_hat_exit(hatlockp);
8545 8247          return (1);
8546 8248  }
8547 8249  
8548 8250  void
8549 8251  hat_stats_disable(struct hat *hat)
8550 8252  {
8551 8253          hatlock_t       *hatlockp;
8552 8254  
8553      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8554      -
8555 8255          hatlockp = sfmmu_hat_enter(hat);
8556 8256          hat->sfmmu_rmstat--;
8557 8257          sfmmu_hat_exit(hatlockp);
8558 8258  }
8559 8259  
8560 8260  /*
8561 8261   * Routines for entering or removing  ourselves from the
8562 8262   * ism_hat's mapping list. This is used for both private and
8563 8263   * SCD hats.
8564 8264   */
(81 lines elided)
8646 8346           */
8647 8347          if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8648 8348                  return (EINVAL);
8649 8349  
8650 8350          /*
8651 8351           * Check size alignment.
8652 8352           */
8653 8353          if (!ISM_ALIGNED(ismshift, len))
8654 8354                  return (EINVAL);
8655 8355  
8656      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
8657      -
8658 8356          /*
8659 8357           * Allocate ism_ment for the ism_hat's mapping list, and an
8660 8358           * ism map blk in case we need one.  We must do our
8661 8359           * allocations before acquiring locks to prevent a deadlock
8662 8360           * in the kmem allocator on the mapping list lock.
8663 8361           */
8664 8362          new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8665 8363          ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8666 8364  
8667 8365          /*
(191 lines elided)
8859 8557          uint_t          ismshift = page_get_shift(ismszc);
8860 8558          size_t          sh_size = ISM_SHIFT(ismshift, len);
8861 8559          uchar_t         ism_rid;
8862 8560          sf_scd_t        *old_scdp;
8863 8561  
8864 8562          ASSERT(ISM_ALIGNED(ismshift, addr));
8865 8563          ASSERT(ISM_ALIGNED(ismshift, len));
8866 8564          ASSERT(sfmmup != NULL);
8867 8565          ASSERT(sfmmup != ksfmmup);
8868 8566  
8869      -        if (sfmmup->sfmmu_xhat_provider) {
8870      -                XHAT_UNSHARE(sfmmup, addr, len);
8871      -                return;
8872      -        } else {
8873      -                /*
8874      -                 * This must be a CPU HAT. If the address space has
8875      -                 * XHATs attached, inform all XHATs that ISM segment
8876      -                 * is going away
8877      -                 */
8878      -                ASSERT(sfmmup->sfmmu_as != NULL);
8879      -                if (sfmmup->sfmmu_as->a_xhat != NULL)
8880      -                        xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
8881      -        }
     8567 +        ASSERT(sfmmup->sfmmu_as != NULL);
8882 8568  
8883 8569          /*
8884 8570           * Make sure that during the entire time ISM mappings are removed,
8885 8571           * the trap handlers serialize behind us, and that no one else
8886 8572           * can be mucking with ISM mappings.  This also lets us get away
8887 8573           * with not doing expensive cross calls to flush the TLB -- we
8888 8574           * just discard the context, flush the entire TSB, and call it
8889 8575           * a day.
8890 8576           */
8891 8577          sfmmu_ismhat_enter(sfmmup, 0);
(433 lines elided)
9325 9011  
9326 9012          /*
9327 9013           * check if any mapping is in same as or if it is locked
9328 9014           * since in that case we need to uncache.
9329 9015           */
9330 9016          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9331 9017                  tmphme = sfhmep->hme_next;
9332 9018                  if (IS_PAHME(sfhmep))
9333 9019                          continue;
9334 9020                  hmeblkp = sfmmu_hmetohblk(sfhmep);
9335      -                if (hmeblkp->hblk_xhat_bit)
9336      -                        continue;
9337 9021                  tmphat = hblktosfmmu(hmeblkp);
9338 9022                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
9339 9023                  ASSERT(TTE_IS_VALID(&tte));
9340 9024                  if (hmeblkp->hblk_shared || tmphat == hat ||
9341 9025                      hmeblkp->hblk_lckcnt) {
9342 9026                          /*
9343 9027                           * We have an uncache conflict
9344 9028                           */
9345 9029                          SFMMU_STAT(sf_uncache_conflict);
9346 9030                          sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
(6 lines elided)
9353 9037           * We have already checked for LARGE mappings, therefore
9354 9038           * the remaining mapping(s) must be TTE8K.
9355 9039           */
9356 9040          SFMMU_STAT(sf_unload_conflict);
9357 9041  
9358 9042          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9359 9043                  tmphme = sfhmep->hme_next;
9360 9044                  if (IS_PAHME(sfhmep))
9361 9045                          continue;
9362 9046                  hmeblkp = sfmmu_hmetohblk(sfhmep);
9363      -                if (hmeblkp->hblk_xhat_bit)
9364      -                        continue;
9365 9047                  ASSERT(!hmeblkp->hblk_shared);
9366 9048                  (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9367 9049          }
9368 9050  
9369 9051          if (PP_ISMAPPED_KPM(pp))
9370 9052                  sfmmu_kpm_vac_unload(pp, addr);
9371 9053  
9372 9054          /*
9373 9055           * Unloads only do TLB flushes so we need to flush the
9374 9056           * cache here.
(127 lines elided)
9502 9184                          kpmvaddr = hat_kpm_page2va(pp, 1);
9503 9185                          ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9504 9186                          color1 = addr_to_vcolor(kpmvaddr);
9505 9187                          clr_valid = 1;
9506 9188                  }
9507 9189  
9508 9190                  for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9509 9191                          if (IS_PAHME(sfhme))
9510 9192                                  continue;
9511 9193                          hmeblkp = sfmmu_hmetohblk(sfhme);
9512      -                        if (hmeblkp->hblk_xhat_bit)
9513      -                                continue;
9514 9194  
9515 9195                          sfmmu_copytte(&sfhme->hme_tte, &tte);
9516 9196                          ASSERT(TTE_IS_VALID(&tte));
9517 9197  
9518 9198                          vaddr = tte_to_vaddr(hmeblkp, tte);
9519 9199                          color = addr_to_vcolor(vaddr);
9520 9200  
9521 9201                          if (npages > 1) {
9522 9202                                  /*
9523 9203                                   * If there is a big mapping, make sure
(127 lines elided)
9651 9331  
9652 9332          color = bcolor;
9653 9333          pfn = pp->p_pagenum;
9654 9334  
9655 9335          for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9656 9336  
9657 9337                  if (IS_PAHME(sfhme))
9658 9338                          continue;
9659 9339                  hmeblkp = sfmmu_hmetohblk(sfhme);
9660 9340  
9661      -                if (hmeblkp->hblk_xhat_bit)
9662      -                        continue;
9663      -
9664 9341                  sfmmu_copytte(&sfhme->hme_tte, &tte);
9665 9342                  ASSERT(TTE_IS_VALID(&tte));
9666 9343                  vaddr = tte_to_vaddr(hmeblkp, tte);
9667 9344                  color = addr_to_vcolor(vaddr);
9668 9345  
9669 9346  #ifdef DEBUG
9670 9347                  if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9671 9348                          ASSERT(color == bcolor);
9672 9349                  }
9673 9350  #endif
(315 lines elided)
9989 9666                   */
9990 9667                  sfmmu_invalidate_ctx(sfmmup);
9991 9668          }
9992 9669  
9993 9670          kpreempt_enable();
9994 9671  }
9995 9672  
9996 9673  
9997 9674  /*
9998 9675   * Replace the specified TSB with a new TSB.  This function gets called when
9999      - * we grow, shrink or swapin a TSB.  When swapping in a TSB (TSB_SWAPIN), the
     9676 + * we grow, or shrink a TSB.  When swapping in a TSB (TSB_SWAPIN), the
10000 9677   * TSB_FORCEALLOC flag may be used to force allocation of a minimum-sized TSB
10001 9678   * (8K).
10002 9679   *
10003 9680   * Caller must hold the HAT lock, but should assume any tsb_info
10004 9681   * pointers it has are no longer valid after calling this function.
10005 9682   *
10006 9683   * Return values:
10007 9684   *      TSB_ALLOCFAIL   Failed to allocate a TSB, due to memory constraints
10008 9685   *      TSB_LOSTRACE    HAT is busy, i.e. another thread is already doing
10009 9686   *                      something to this tsbinfo/TSB
(3391 lines elided)
13401 13078  
13402 13079  /*
13403 13080   * This function is currently not supported on this platform. For what
13404 13081   * it's supposed to do, see hat.c and hat_srmmu.c
13405 13082   */
13406 13083  /* ARGSUSED */
13407 13084  faultcode_t
13408 13085  hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13409 13086      uint_t flags)
13410 13087  {
13411      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
13412 13088          return (FC_NOSUPPORT);
13413 13089  }
13414 13090  
13415 13091  /*
13416 13092   * Searchs the mapping list of the page for a mapping of the same size. If not
13417 13093   * found the corresponding bit is cleared in the p_index field. When large
13418 13094   * pages are more prevalent in the system, we can maintain the mapping list
13419 13095   * in order and we don't have to traverse the list each time. Just check the
13420 13096   * next and prev entries, and if both are of different size, we clear the bit.
13421 13097   */
(14 lines elided)
13436 13112          /*
13437 13113           * Traverse mapping list looking for another mapping of same size.
13438 13114           * since we only want to clear index field if all mappings of
13439 13115           * that size are gone.
13440 13116           */
13441 13117  
13442 13118          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13443 13119                  if (IS_PAHME(sfhmep))
13444 13120                          continue;
13445 13121                  hmeblkp = sfmmu_hmetohblk(sfhmep);
13446      -                if (hmeblkp->hblk_xhat_bit)
13447      -                        continue;
13448 13122                  if (hme_size(sfhmep) == ttesz) {
13449 13123                          /*
13450 13124                           * another mapping of the same size. don't clear index.
13451 13125                           */
13452 13126                          return;
13453 13127                  }
13454 13128          }
13455 13129  
13456 13130          /*
13457 13131           * Clear the p_index bit for large page.
(564 lines elided)
14022 13696          uint16_t *busyrgnsp;
14023 13697          ulong_t rttecnt;
14024 13698          uchar_t tteflag;
14025 13699          uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14026 13700          int text = (r_type == HAT_REGION_TEXT);
14027 13701  
14028 13702          if (srdp == NULL || r_size == 0) {
14029 13703                  return (HAT_INVALID_REGION_COOKIE);
14030 13704          }
14031 13705  
14032      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14033 13706          ASSERT(sfmmup != ksfmmup);
14034 13707          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14035 13708          ASSERT(srdp->srd_refcnt > 0);
14036 13709          ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14037 13710          ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14038 13711          ASSERT(r_pgszc < mmu_page_sizes);
14039 13712          if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14040 13713              !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14041 13714                  panic("hat_join_region: region addr or size is not aligned\n");
14042 13715          }
(285 lines elided)
14328 14001          } else {
14329 14002                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14330 14003                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14331 14004                  rgnp = srdp->srd_hmergnp[rid];
14332 14005          }
14333 14006          ASSERT(rgnp != NULL);
14334 14007          ASSERT(rgnp->rgn_id == rid);
14335 14008          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14336 14009          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14337 14010          ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14338      -
14339      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14340      -        if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14341      -                xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14342      -                    rgnp->rgn_size, 0, NULL);
14343      -        }
14344 14011  
14345 14012          if (sfmmup->sfmmu_free) {
14346 14013                  ulong_t rttecnt;
14347 14014                  r_pgszc = rgnp->rgn_pgszc;
14348 14015                  r_size = rgnp->rgn_size;
14349 14016  
14350 14017                  ASSERT(sfmmup->sfmmu_scdp == NULL);
14351 14018                  if (r_type == SFMMU_REGION_ISM) {
14352 14019                          SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14353 14020                  } else {
(1500 lines elided)