6345 remove xhat support

          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.c
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.c
↓ open down ↓ 73 lines elided ↑ open up ↑
  74   74  #include <sys/bitmap.h>
  75   75  #include <sys/machlock.h>
  76   76  #include <sys/membar.h>
  77   77  #include <sys/atomic.h>
  78   78  #include <sys/cpu_module.h>
  79   79  #include <sys/prom_debug.h>
  80   80  #include <sys/ksynch.h>
  81   81  #include <sys/mem_config.h>
  82   82  #include <sys/mem_cage.h>
  83   83  #include <vm/vm_dep.h>
  84      -#include <vm/xhat_sfmmu.h>
  85   84  #include <sys/fpu/fpusystm.h>
  86   85  #include <vm/mach_kpm.h>
  87   86  #include <sys/callb.h>
  88   87  
  89   88  #ifdef  DEBUG
  90   89  #define SFMMU_VALIDATE_HMERID(hat, rid, saddr, len)                     \
  91   90          if (SFMMU_IS_SHMERID_VALID(rid)) {                              \
  92   91                  caddr_t _eaddr = (saddr) + (len);                       \
  93   92                  sf_srd_t *_srdp;                                        \
  94   93                  sf_region_t *_rgnp;                                     \
↓ open down ↓ 1248 lines elided ↑ open up ↑
1343 1342          /*
1344 1343           * The big page VAC handling code assumes VAC
1345 1344           * will not be bigger than the smallest big
1346 1345           * page, which is 64K.
1347 1346           */
1348 1347          if (TTEPAGES(TTE64K) < CACHE_NUM_COLOR) {
1349 1348                  cmn_err(CE_PANIC, "VAC too big!");
1350 1349          }
1351 1350  #endif
1352 1351  
1353      -        (void) xhat_init();
1354      -
1355 1352          uhme_hash_pa = va_to_pa(uhme_hash);
1356 1353          khme_hash_pa = va_to_pa(khme_hash);
1357 1354  
1358 1355          /*
1359 1356           * Initialize relocation locks. kpr_suspendlock is held
1360 1357           * at PIL_MAX to prevent interrupts from pinning the holder
1361 1358           * of a suspended TTE which may access it leading to a
1362 1359           * deadlock condition.
1363 1360           */
1364 1361          mutex_init(&kpr_mutex, NULL, MUTEX_DEFAULT, NULL);
↓ open down ↓ 167 lines elided ↑ open up ↑
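Note: the comment above explains why kpr_suspendlock must be held at PIL_MAX. A hedged sketch of how such a lock would be initialized (MUTEX_SPIN and ipltospl() are assumed from common illumos usage; the actual initialization sits in the elided lines):

        /* sketch: a spin mutex at PIL_MAX, so no interrupt can pin the holder */
        mutex_init(&kpr_suspendlock, NULL, MUTEX_SPIN,
            (void *)ipltospl(PIL_MAX));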
1532 1529          sfmmup->sfmmu_scdhat = 0;
1533 1530          sfmmup->sfmmu_ismblkpa = (uint64_t)-1;
1534 1531          if (sfmmup == ksfmmup) {
1535 1532                  CPUSET_ALL(sfmmup->sfmmu_cpusran);
1536 1533          } else {
1537 1534                  CPUSET_ZERO(sfmmup->sfmmu_cpusran);
1538 1535          }
1539 1536          sfmmup->sfmmu_free = 0;
1540 1537          sfmmup->sfmmu_rmstat = 0;
1541 1538          sfmmup->sfmmu_clrbin = sfmmup->sfmmu_clrstart;
1542      -        sfmmup->sfmmu_xhat_provider = NULL;
1543 1539          cv_init(&sfmmup->sfmmu_tsb_cv, NULL, CV_DEFAULT, NULL);
1544 1540          sfmmup->sfmmu_srdp = NULL;
1545 1541          SF_RGNMAP_ZERO(sfmmup->sfmmu_region_map);
1546 1542          bzero(sfmmup->sfmmu_hmeregion_links, SFMMU_L1_HMERLINKS_SIZE);
1547 1543          sfmmup->sfmmu_scdp = NULL;
1548 1544          sfmmup->sfmmu_scd_link.next = NULL;
1549 1545          sfmmup->sfmmu_scd_link.prev = NULL;
1550 1546          return (sfmmup);
1551 1547  }
1552 1548  
↓ open down ↓ 359 lines elided ↑ open up ↑
1912 1908  
1913 1909  /*
1914 1910   * Free all the translation resources for the specified address space.
1915 1911   * Called from as_free when an address space is being destroyed.
1916 1912   */
1917 1913  void
1918 1914  hat_free_start(struct hat *sfmmup)
1919 1915  {
1920 1916          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
1921 1917          ASSERT(sfmmup != ksfmmup);
1922      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1923 1918  
1924 1919          sfmmup->sfmmu_free = 1;
1925 1920          if (sfmmup->sfmmu_scdp != NULL) {
1926 1921                  sfmmu_leave_scd(sfmmup, 0);
1927 1922          }
1928 1923  
1929 1924          ASSERT(sfmmup->sfmmu_scdp == NULL);
1930 1925  }
1931 1926  
1932 1927  void
1933 1928  hat_free_end(struct hat *sfmmup)
1934 1929  {
1935 1930          int i;
1936 1931  
1937      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
1938 1932          ASSERT(sfmmup->sfmmu_free == 1);
1939 1933          ASSERT(sfmmup->sfmmu_ttecnt[TTE8K] == 0);
1940 1934          ASSERT(sfmmup->sfmmu_ttecnt[TTE64K] == 0);
1941 1935          ASSERT(sfmmup->sfmmu_ttecnt[TTE512K] == 0);
1942 1936          ASSERT(sfmmup->sfmmu_ttecnt[TTE4M] == 0);
1943 1937          ASSERT(sfmmup->sfmmu_ttecnt[TTE32M] == 0);
1944 1938          ASSERT(sfmmup->sfmmu_ttecnt[TTE256M] == 0);
1945 1939  
1946 1940          if (sfmmup->sfmmu_rmstat) {
1947 1941                  hat_freestat(sfmmup->sfmmu_as, NULL);
↓ open down ↓ 28 lines elided ↑ open up ↑
1976 1970  }
1977 1971  
1978 1972  /*
1979 1973   * Set up any translation structures, for the specified address space,
1980 1974   * that are needed or preferred when the process is being swapped in.
1981 1975   */
1982 1976  /* ARGSUSED */
1983 1977  void
1984 1978  hat_swapin(struct hat *hat)
1985 1979  {
1986      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
1987 1980  }
1988 1981  
1989 1982  /*
1990 1983   * Free all of the translation resources, for the specified address space,
1991 1984   * that can be freed while the process is swapped out. Called from as_swapout.
1992 1985   * Also, free up the ctx that this process was using.
1993 1986   */
1994 1987  void
1995 1988  hat_swapout(struct hat *sfmmup)
1996 1989  {
↓ open down ↓ 4 lines elided ↑ open up ↑
2001 1994          int i;
2002 1995          struct hme_blk *list = NULL;
2003 1996          hatlock_t *hatlockp;
2004 1997          struct tsb_info *tsbinfop;
2005 1998          struct free_tsb {
2006 1999                  struct free_tsb *next;
2007 2000                  struct tsb_info *tsbinfop;
2008 2001          };                      /* free list of TSBs */
2009 2002          struct free_tsb *freelist, *last, *next;
2010 2003  
2011      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
2012 2004          SFMMU_STAT(sf_swapout);
2013 2005  
2014 2006          /*
2015 2007           * There is no way to go from an as to all its translations in sfmmu.
2016 2008           * Here is one of the times when we take the big hit and traverse
2017 2009           * the hash looking for hme_blks to free up.  Not only do we free up
2018 2010           * this as's hme_blks but also all those that are free.  We are obviously
2019 2011           * swapping because we need memory so let's free up as much
2020 2012           * as we can.
2021 2013           *
↓ open down ↓ 3 lines elided ↑ open up ↑
2025 2017           *  2) processes aren't runnable while being swapped out.
2026 2018           */
2027 2019          ASSERT(sfmmup != KHATID);
2028 2020          for (i = 0; i <= UHMEHASH_SZ; i++) {
2029 2021                  hmebp = &uhme_hash[i];
2030 2022                  SFMMU_HASH_LOCK(hmebp);
2031 2023                  hmeblkp = hmebp->hmeblkp;
2032 2024                  pr_hblk = NULL;
2033 2025                  while (hmeblkp) {
2034 2026  
2035      -                        ASSERT(!hmeblkp->hblk_xhat_bit);
2036      -
2037 2027                          if ((hmeblkp->hblk_tag.htag_id == sfmmup) &&
2038 2028                              !hmeblkp->hblk_shw_bit && !hmeblkp->hblk_lckcnt) {
2039 2029                                  ASSERT(!hmeblkp->hblk_shared);
2040 2030                                  (void) sfmmu_hblk_unload(sfmmup, hmeblkp,
2041 2031                                      (caddr_t)get_hblk_base(hmeblkp),
2042 2032                                      get_hblk_endaddr(hmeblkp),
2043 2033                                      NULL, HAT_UNLOAD);
2044 2034                          }
2045 2035                          nx_hblk = hmeblkp->hblk_next;
2046 2036                          if (!hmeblkp->hblk_vcnt && !hmeblkp->hblk_hmecnt) {
↓ open down ↓ 81 lines elided ↑ open up ↑
2128 2118  /* ARGSUSED */
2129 2119  int
2130 2120  hat_dup(struct hat *hat, struct hat *newhat, caddr_t addr, size_t len,
2131 2121          uint_t flag)
2132 2122  {
2133 2123          sf_srd_t *srdp;
2134 2124          sf_scd_t *scdp;
2135 2125          int i;
2136 2126          extern uint_t get_color_start(struct as *);
2137 2127  
2138      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
2139 2128          ASSERT((flag == 0) || (flag == HAT_DUP_ALL) || (flag == HAT_DUP_COW) ||
2140 2129              (flag == HAT_DUP_SRD));
2141 2130          ASSERT(hat != ksfmmup);
2142 2131          ASSERT(newhat != ksfmmup);
2143 2132          ASSERT(flag != HAT_DUP_ALL || hat->sfmmu_srdp == newhat->sfmmu_srdp);
2144 2133  
2145 2134          if (flag == HAT_DUP_COW) {
2146 2135                  panic("hat_dup: HAT_DUP_COW not supported");
2147 2136          }
2148 2137  
↓ open down ↓ 49 lines elided ↑ open up ↑
2198 2187  {
2199 2188          hat_do_memload(hat, addr, pp, attr, flags,
2200 2189              SFMMU_INVALID_SHMERID);
2201 2190  }
2202 2191  
2203 2192  void
2204 2193  hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2205 2194          uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2206 2195  {
2207 2196          uint_t rid;
2208      -        if (rcookie == HAT_INVALID_REGION_COOKIE ||
2209      -            hat->sfmmu_xhat_provider != NULL) {
     2197 +        if (rcookie == HAT_INVALID_REGION_COOKIE) {
2210 2198                  hat_do_memload(hat, addr, pp, attr, flags,
2211 2199                      SFMMU_INVALID_SHMERID);
2212 2200                  return;
2213 2201          }
2214 2202          rid = (uint_t)((uint64_t)rcookie);
2215 2203          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2216 2204          hat_do_memload(hat, addr, pp, attr, flags, rid);
2217 2205  }
2218 2206  
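A hedged caller sketch for the fallback contract above (the names seg, pp, and cookie are illustrative, not part of this change): an invalid region cookie degenerates to an ordinary private load.

        /* hypothetical segment-driver call site */
        hat_region_cookie_t cookie = HAT_INVALID_REGION_COOKIE;
        hat_memload_region(seg->s_as->a_hat, addr, pp,
            PROT_READ | PROT_EXEC, HAT_LOAD, cookie);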
2219 2207  /*
↓ open down ↓ 13 lines elided ↑ open up ↑
2233 2221          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2234 2222          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2235 2223          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2236 2224          SFMMU_VALIDATE_HMERID(hat, rid, addr, MMU_PAGESIZE);
2237 2225  
2238 2226          if (PP_ISFREE(pp)) {
2239 2227                  panic("hat_memload: loading a mapping to free page %p",
2240 2228                      (void *)pp);
2241 2229          }
2242 2230  
2243      -        if (hat->sfmmu_xhat_provider) {
2244      -                /* no regions for xhats */
2245      -                ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2246      -                XHAT_MEMLOAD(hat, addr, pp, attr, flags);
2247      -                return;
2248      -        }
2249      -
2250 2231          ASSERT((hat == ksfmmup) ||
2251 2232              AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2252 2233  
2253 2234          if (flags & ~SFMMU_LOAD_ALLFLAG)
2254 2235                  cmn_err(CE_NOTE, "hat_memload: unsupported flags %d",
2255 2236                      flags & ~SFMMU_LOAD_ALLFLAG);
2256 2237  
2257 2238          if (hat->sfmmu_rmstat)
2258 2239                  hat_resvstat(MMU_PAGESIZE, hat->sfmmu_as, addr);
2259 2240  
↓ open down ↓ 29 lines elided ↑ open up ↑
2289 2270  void
2290 2271  hat_devload(struct hat *hat, caddr_t addr, size_t len, pfn_t pfn,
2291 2272          uint_t attr, int flags)
2292 2273  {
2293 2274          tte_t tte;
2294 2275          struct page *pp = NULL;
2295 2276          int use_lgpg = 0;
2296 2277  
2297 2278          ASSERT(hat != NULL);
2298 2279  
2299      -        if (hat->sfmmu_xhat_provider) {
2300      -                XHAT_DEVLOAD(hat, addr, len, pfn, attr, flags);
2301      -                return;
2302      -        }
2303      -
2304 2280          ASSERT(!(flags & ~SFMMU_LOAD_ALLFLAG));
2305 2281          ASSERT(!(attr & ~SFMMU_LOAD_ALLATTR));
2306 2282          ASSERT((hat == ksfmmup) ||
2307 2283              AS_LOCK_HELD(hat->sfmmu_as, &hat->sfmmu_as->a_lock));
2308 2284          if (len == 0)
2309 2285                  panic("hat_devload: zero len");
2310 2286          if (flags & ~SFMMU_LOAD_ALLFLAG)
2311 2287                  cmn_err(CE_NOTE, "hat_devload: unsupported flags %d",
2312 2288                      flags & ~SFMMU_LOAD_ALLFLAG);
2313 2289  
↓ open down ↓ 126 lines elided ↑ open up ↑
2440 2416          hat_do_memload_array(hat, addr, len, pps, attr, flags,
2441 2417              SFMMU_INVALID_SHMERID);
2442 2418  }
2443 2419  
2444 2420  void
2445 2421  hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2446 2422          struct page **pps, uint_t attr, uint_t flags,
2447 2423          hat_region_cookie_t rcookie)
2448 2424  {
2449 2425          uint_t rid;
2450      -        if (rcookie == HAT_INVALID_REGION_COOKIE ||
2451      -            hat->sfmmu_xhat_provider != NULL) {
     2426 +        if (rcookie == HAT_INVALID_REGION_COOKIE) {
2452 2427                  hat_do_memload_array(hat, addr, len, pps, attr, flags,
2453 2428                      SFMMU_INVALID_SHMERID);
2454 2429                  return;
2455 2430          }
2456 2431          rid = (uint_t)((uint64_t)rcookie);
2457 2432          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
2458 2433          hat_do_memload_array(hat, addr, len, pps, attr, flags, rid);
2459 2434  }
2460 2435  
2461 2436  /*
↓ open down ↓ 14 lines elided ↑ open up ↑
2476 2451          int  ttesz;
2477 2452          size_t mapsz;
2478 2453          pgcnt_t numpg, npgs;
2479 2454          tte_t tte;
2480 2455          page_t *pp;
2481 2456          uint_t large_pages_disable;
2482 2457  
2483 2458          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
2484 2459          SFMMU_VALIDATE_HMERID(hat, rid, addr, len);
2485 2460  
2486      -        if (hat->sfmmu_xhat_provider) {
2487      -                ASSERT(!SFMMU_IS_SHMERID_VALID(rid));
2488      -                XHAT_MEMLOAD_ARRAY(hat, addr, len, pps, attr, flags);
2489      -                return;
2490      -        }
2491      -
2492 2461          if (hat->sfmmu_rmstat)
2493 2462                  hat_resvstat(len, hat->sfmmu_as, addr);
2494 2463  
2495 2464  #if defined(SF_ERRATA_57)
2496 2465          if ((hat != ksfmmup) && AS_TYPE_64BIT(hat->sfmmu_as) &&
2497 2466              (addr < errata57_limit) && (attr & PROT_EXEC) &&
2498 2467              !(flags & HAT_LOAD_SHARE)) {
2499 2468                  cmn_err(CE_WARN, "hat_memload_array: illegal attempt to make "
2500 2469                      "user page executable");
2501 2470                  attr &= ~PROT_EXEC;
↓ open down ↓ 1462 lines elided ↑ open up ↑
3964 3933  void
3965 3934  hat_unlock(struct hat *sfmmup, caddr_t addr, size_t len)
3966 3935  {
3967 3936          struct hmehash_bucket *hmebp;
3968 3937          hmeblk_tag hblktag;
3969 3938          int hmeshift, hashno = 1;
3970 3939          struct hme_blk *hmeblkp, *list = NULL;
3971 3940          caddr_t endaddr;
3972 3941  
3973 3942          ASSERT(sfmmup != NULL);
3974      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
3975 3943  
3976 3944          ASSERT((sfmmup == ksfmmup) ||
3977 3945              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
3978 3946          ASSERT((len & MMU_PAGEOFFSET) == 0);
3979 3947          endaddr = addr + len;
3980 3948          hblktag.htag_id = sfmmup;
3981 3949          hblktag.htag_rid = SFMMU_INVALID_SHMERID;
3982 3950  
3983 3951          /*
3984 3952           * Spitfire supports 4 page sizes.
↓ open down ↓ 64 lines elided ↑ open up ↑
4049 4017          struct hme_blk *hmeblkp;
4050 4018          struct hme_blk *pr_hblk;
4051 4019          struct hme_blk *list;
4052 4020  
4053 4021          if (rcookie == HAT_INVALID_REGION_COOKIE) {
4054 4022                  hat_unlock(sfmmup, addr, len);
4055 4023                  return;
4056 4024          }
4057 4025  
4058 4026          ASSERT(sfmmup != NULL);
4059      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4060 4027          ASSERT(sfmmup != ksfmmup);
4061 4028  
4062 4029          srdp = sfmmup->sfmmu_srdp;
4063 4030          rid = (uint_t)((uint64_t)rcookie);
4064 4031          VERIFY3U(rid, <, SFMMU_MAX_HME_REGIONS);
4065 4032          eaddr = addr + len;
4066 4033          va = addr;
4067 4034          list = NULL;
4068 4035          rgnp = srdp->srd_hmergnp[rid];
4069 4036          SFMMU_VALIDATE_HMERID(sfmmup, rid, addr, len);
↓ open down ↓ 691 lines elided ↑ open up ↑
4761 4728   * to implement optimizations in the segment drivers.
4762 4729   *
4763 4730   */
4764 4731  int
4765 4732  hat_probe(struct hat *sfmmup, caddr_t addr)
4766 4733  {
4767 4734          pfn_t pfn;
4768 4735          tte_t tte;
4769 4736  
4770 4737          ASSERT(sfmmup != NULL);
4771      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4772 4738  
4773 4739          ASSERT((sfmmup == ksfmmup) ||
4774 4740              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
4775 4741  
4776 4742          if (sfmmup == ksfmmup) {
4777 4743                  while ((pfn = sfmmu_vatopfn(addr, sfmmup, &tte))
4778 4744                      == PFN_SUSPENDED) {
4779 4745                          sfmmu_vatopfn_suspended(addr, sfmmup, &tte);
4780 4746                  }
4781 4747          } else {
↓ open down ↓ 4 lines elided ↑ open up ↑
4786 4752                  return (1);
4787 4753          else
4788 4754                  return (0);
4789 4755  }
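Since the comment above notes that hat_probe() exists to let segment drivers optimize, a hedged usage sketch (the as and addr are assumed caller context):

        /* fast path: translation already loaded, skip the fault work */
        if (hat_probe(as->a_hat, addr))
                return (0);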
4790 4756  
4791 4757  ssize_t
4792 4758  hat_getpagesize(struct hat *sfmmup, caddr_t addr)
4793 4759  {
4794 4760          tte_t tte;
4795 4761  
4796      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4797      -
4798 4762          if (sfmmup == ksfmmup) {
4799 4763                  if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4800 4764                          return (-1);
4801 4765                  }
4802 4766          } else {
4803 4767                  if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4804 4768                          return (-1);
4805 4769                  }
4806 4770          }
4807 4771  
4808 4772          ASSERT(TTE_IS_VALID(&tte));
4809 4773          return (TTEBYTES(TTE_CSZ(&tte)));
4810 4774  }
4811 4775  
4812 4776  uint_t
4813 4777  hat_getattr(struct hat *sfmmup, caddr_t addr, uint_t *attr)
4814 4778  {
4815 4779          tte_t tte;
4816 4780  
4817      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
4818      -
4819 4781          if (sfmmup == ksfmmup) {
4820 4782                  if (sfmmu_vatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4821 4783                          tte.ll = 0;
4822 4784                  }
4823 4785          } else {
4824 4786                  if (sfmmu_uvatopfn(addr, sfmmup, &tte) == PFN_INVALID) {
4825 4787                          tte.ll = 0;
4826 4788                  }
4827 4789          }
4828 4790          if (TTE_IS_VALID(&tte)) {
↓ open down ↓ 3 lines elided ↑ open up ↑
4832 4794          *attr = 0;
4833 4795          return ((uint_t)0xffffffff);
4834 4796  }
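A hedged sketch of the two query interfaces above (hat and addr assumed from caller context); note the distinct failure values, -1 versus 0xffffffff:

        ssize_t sz = hat_getpagesize(hat, addr);  /* -1 if no valid mapping */
        uint_t attr;
        if (hat_getattr(hat, addr, &attr) != (uint_t)0xffffffff) {
                /* attr now holds the PROT_* bits of the mapping */
        }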
4835 4797  
4836 4798  /*
4837 4799   * Enables more attributes on specified address range (ie. logical OR)
4838 4800   * Enables more attributes on the specified address range (ie. logical OR)
4839 4801  void
4840 4802  hat_setattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4841 4803  {
4842      -        if (hat->sfmmu_xhat_provider) {
4843      -                XHAT_SETATTR(hat, addr, len, attr);
4844      -                return;
4845      -        } else {
4846      -                /*
4847      -                 * This must be a CPU HAT. If the address space has
4848      -                 * XHATs attached, change attributes for all of them,
4849      -                 * just in case
4850      -                 */
4851      -                ASSERT(hat->sfmmu_as != NULL);
4852      -                if (hat->sfmmu_as->a_xhat != NULL)
4853      -                        xhat_setattr_all(hat->sfmmu_as, addr, len, attr);
4854      -        }
     4804 +        ASSERT(hat->sfmmu_as != NULL);
4855 4805  
4856 4806          sfmmu_chgattr(hat, addr, len, attr, SFMMU_SETATTR);
4857 4807  }
4858 4808  
4859 4809  /*
4860 4810   * Assigns attributes to the specified address range.  All the attributes
4861 4811   * are specified.
4862 4812   */
4863 4813  void
4864 4814  hat_chgattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4865 4815  {
4866      -        if (hat->sfmmu_xhat_provider) {
4867      -                XHAT_CHGATTR(hat, addr, len, attr);
4868      -                return;
4869      -        } else {
4870      -                /*
4871      -                 * This must be a CPU HAT. If the address space has
4872      -                 * XHATs attached, change attributes for all of them,
4873      -                 * just in case
4874      -                 */
4875      -                ASSERT(hat->sfmmu_as != NULL);
4876      -                if (hat->sfmmu_as->a_xhat != NULL)
4877      -                        xhat_chgattr_all(hat->sfmmu_as, addr, len, attr);
4878      -        }
     4816 +        ASSERT(hat->sfmmu_as != NULL);
4879 4817  
4880 4818          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CHGATTR);
4881 4819  }
4882 4820  
4883 4821  /*
4884 4822   * Remove attributes on the specified address range (ie. loginal NAND)
4885 4823   */
4886 4824  void
4887 4825  hat_clrattr(struct hat *hat, caddr_t addr, size_t len, uint_t attr)
4888 4826  {
4889      -        if (hat->sfmmu_xhat_provider) {
4890      -                XHAT_CLRATTR(hat, addr, len, attr);
4891      -                return;
4892      -        } else {
4893      -                /*
4894      -                 * This must be a CPU HAT. If the address space has
4895      -                 * XHATs attached, change attributes for all of them,
4896      -                 * just in case
4897      -                 */
4898      -                ASSERT(hat->sfmmu_as != NULL);
4899      -                if (hat->sfmmu_as->a_xhat != NULL)
4900      -                        xhat_clrattr_all(hat->sfmmu_as, addr, len, attr);
4901      -        }
     4827 +        ASSERT(hat->sfmmu_as != NULL);
4902 4828  
4903 4829          sfmmu_chgattr(hat, addr, len, attr, SFMMU_CLRATTR);
4904 4830  }
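Taken together, the three entry points above funnel into one worker, sfmmu_chgattr(), with OR / assign / NAND semantics. A hedged sketch (hat, addr, and len assumed from caller context):

        hat_setattr(hat, addr, len, HAT_NOSYNC);   /* SFMMU_SETATTR: OR in */
        hat_chgattr(hat, addr, len, PROT_READ);    /* SFMMU_CHGATTR: assign exactly */
        hat_clrattr(hat, addr, len, PROT_WRITE);   /* SFMMU_CLRATTR: NAND out */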
4905 4831  
4906 4832  /*
4907 4833   * Change attributes on an address range to that specified by attr and mode.
4908 4834   */
4909 4835  static void
4910 4836  sfmmu_chgattr(struct hat *sfmmup, caddr_t addr, size_t len, uint_t attr,
4911 4837          int mode)
↓ open down ↓ 332 lines elided ↑ open up ↑
5244 5170          hmeblk_tag hblktag;
5245 5171          int hmeshift, hashno = 1;
5246 5172          struct hme_blk *hmeblkp, *list = NULL;
5247 5173          caddr_t endaddr;
5248 5174          cpuset_t cpuset;
5249 5175          demap_range_t dmr;
5250 5176  
5251 5177          ASSERT((len & MMU_PAGEOFFSET) == 0);
5252 5178          ASSERT(((uintptr_t)addr & MMU_PAGEOFFSET) == 0);
5253 5179  
5254      -        if (sfmmup->sfmmu_xhat_provider) {
5255      -                XHAT_CHGPROT(sfmmup, addr, len, vprot);
5256      -                return;
5257      -        } else {
5258      -                /*
5259      -                 * This must be a CPU HAT. If the address space has
5260      -                 * XHATs attached, change attributes for all of them,
5261      -                 * just in case
5262      -                 */
5263      -                ASSERT(sfmmup->sfmmu_as != NULL);
5264      -                if (sfmmup->sfmmu_as->a_xhat != NULL)
5265      -                        xhat_chgprot_all(sfmmup->sfmmu_as, addr, len, vprot);
5266      -        }
     5180 +        ASSERT(sfmmup->sfmmu_as != NULL);
5267 5181  
5268 5182          CPUSET_ZERO(cpuset);
5269 5183  
5270 5184          if ((vprot != (uint_t)~PROT_WRITE) && (vprot & PROT_USER) &&
5271 5185              ((addr + len) > (caddr_t)USERLIMIT)) {
5272 5186                  panic("user addr %p vprot %x in kernel space",
5273 5187                      (void *)addr, vprot);
5274 5188          }
5275 5189          endaddr = addr + len;
5276 5190          hblktag.htag_id = sfmmup;
↓ open down ↓ 414 lines elided ↑ open up ↑
5691 5605          struct hme_blk *hmeblkp, *pr_hblk, *list = NULL;
5692 5606          caddr_t endaddr;
5693 5607          cpuset_t cpuset;
5694 5608          int addr_count = 0;
5695 5609          int a;
5696 5610          caddr_t cb_start_addr[MAX_CB_ADDR];
5697 5611          caddr_t cb_end_addr[MAX_CB_ADDR];
5698 5612          int issegkmap = ISSEGKMAP(sfmmup, addr);
5699 5613          demap_range_t dmr, *dmrp;
5700 5614  
5701      -        if (sfmmup->sfmmu_xhat_provider) {
5702      -                XHAT_UNLOAD_CALLBACK(sfmmup, addr, len, flags, callback);
5703      -                return;
5704      -        } else {
5705      -                /*
5706      -                 * This must be a CPU HAT. If the address space has
5707      -                 * XHATs attached, unload the mappings for all of them,
5708      -                 * just in case
5709      -                 */
5710      -                ASSERT(sfmmup->sfmmu_as != NULL);
5711      -                if (sfmmup->sfmmu_as->a_xhat != NULL)
5712      -                        xhat_unload_callback_all(sfmmup->sfmmu_as, addr,
5713      -                            len, flags, callback);
5714      -        }
     5615 +        ASSERT(sfmmup->sfmmu_as != NULL);
5715 5616  
5716 5617          ASSERT((sfmmup == ksfmmup) || (flags & HAT_UNLOAD_OTHER) || \
5717 5618              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
5718 5619  
5719 5620          ASSERT(sfmmup != NULL);
5720 5621          ASSERT((len & MMU_PAGEOFFSET) == 0);
5721 5622          ASSERT(!((uintptr_t)addr & MMU_PAGEOFFSET));
5722 5623  
5723 5624          /*
5724 5625           * Probing through a large VA range (say 63 bits) will be slow, even
↓ open down ↓ 256 lines elided ↑ open up ↑
5981 5882                  sfmmu_check_page_sizes(sfmmup, 0);
5982 5883  }
5983 5884  
5984 5885  /*
5985 5886   * Unload all the mappings in the range [addr..addr+len). addr and len must
5986 5887   * be MMU_PAGESIZE aligned.
5987 5888   */
5988 5889  void
5989 5890  hat_unload(struct hat *sfmmup, caddr_t addr, size_t len, uint_t flags)
5990 5891  {
5991      -        if (sfmmup->sfmmu_xhat_provider) {
5992      -                XHAT_UNLOAD(sfmmup, addr, len, flags);
5993      -                return;
5994      -        }
5995 5892          hat_unload_callback(sfmmup, addr, len, flags, NULL);
5996 5893  }
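With the xhat dispatch gone, hat_unload() above is a thin wrapper over hat_unload_callback(). A hedged single-page example (caller context assumed):

        /* unload one MMU page; no callback needed */
        hat_unload(as->a_hat, addr, MMU_PAGESIZE, HAT_UNLOAD);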
5997 5894  
5998 5895  
5999 5896  /*
6000 5897   * Find the largest mapping size for this page.
6001 5898   */
6002 5899  int
6003 5900  fnd_mapping_sz(page_t *pp)
6004 5901  {
↓ open down ↓ 319 lines elided ↑ open up ↑
6324 6221  void
6325 6222  hat_sync(struct hat *sfmmup, caddr_t addr, size_t len, uint_t clearflag)
6326 6223  {
6327 6224          struct hmehash_bucket *hmebp;
6328 6225          hmeblk_tag hblktag;
6329 6226          int hmeshift, hashno = 1;
6330 6227          struct hme_blk *hmeblkp, *list = NULL;
6331 6228          caddr_t endaddr;
6332 6229          cpuset_t cpuset;
6333 6230  
6334      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
6335 6231          ASSERT((sfmmup == ksfmmup) ||
6336 6232              AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
6337 6233          ASSERT((len & MMU_PAGEOFFSET) == 0);
6338 6234          ASSERT((clearflag == HAT_SYNC_DONTZERO) ||
6339 6235              (clearflag == HAT_SYNC_ZERORM));
6340 6236  
6341 6237          CPUSET_ZERO(cpuset);
6342 6238  
6343 6239          endaddr = addr + len;
6344 6240          hblktag.htag_id = sfmmup;
↓ open down ↓ 786 lines elided ↑ open up ↑
7131 7027  {
7132 7028          struct page *origpp = pp;
7133 7029          struct sf_hment *sfhme, *tmphme;
7134 7030          struct hme_blk *hmeblkp;
7135 7031          kmutex_t *pml;
7136 7032  #ifdef VAC
7137 7033          kmutex_t *pmtx;
7138 7034  #endif
7139 7035          cpuset_t cpuset, tset;
7140 7036          int index, cons;
7141      -        int xhme_blks;
7142 7037          int pa_hments;
7143 7038  
7144 7039          ASSERT(PAGE_EXCL(pp));
7145 7040  
7146      -retry_xhat:
7147 7041          tmphme = NULL;
7148      -        xhme_blks = 0;
7149 7042          pa_hments = 0;
7150 7043          CPUSET_ZERO(cpuset);
7151 7044  
7152 7045          pml = sfmmu_mlist_enter(pp);
7153 7046  
7154 7047  #ifdef VAC
7155 7048          if (pp->p_kpmref)
7156 7049                  sfmmu_kpm_pageunload(pp);
7157 7050          ASSERT(!PP_ISMAPPED_KPM(pp));
7158 7051  #endif
↓ open down ↓ 11 lines elided ↑ open up ↑
7170 7063          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7171 7064                  tmphme = sfhme->hme_next;
7172 7065  
7173 7066                  if (IS_PAHME(sfhme)) {
7174 7067                          ASSERT(sfhme->hme_data != NULL);
7175 7068                          pa_hments++;
7176 7069                          continue;
7177 7070                  }
7178 7071  
7179 7072                  hmeblkp = sfmmu_hmetohblk(sfhme);
7180      -                if (hmeblkp->hblk_xhat_bit) {
7181      -                        struct xhat_hme_blk *xblk =
7182      -                            (struct xhat_hme_blk *)hmeblkp;
7183      -
7184      -                        (void) XHAT_PAGEUNLOAD(xblk->xhat_hme_blk_hat,
7185      -                            pp, forceflag, XBLK2PROVBLK(xblk));
7186      -
7187      -                        xhme_blks = 1;
7188      -                        continue;
7189      -                }
7190 7073  
7191 7074                  /*
7192 7075                   * If there are kernel mappings don't unload them, they will
7193 7076                   * be suspended.
7194 7077                   */
7195 7078                  if (forceflag == SFMMU_KERNEL_RELOC && hmeblkp->hblk_lckcnt &&
7196 7079                      hmeblkp->hblk_tag.htag_id == ksfmmup)
7197 7080                          continue;
7198 7081  
7199 7082                  tset = sfmmu_pageunload(pp, sfhme, cons);
↓ open down ↓ 16 lines elided ↑ open up ↑
7216 7099           * cpuset may be empty if the page was only mapped by segkpm,
7217 7100           * in which case we won't actually cross-trap.
7218 7101           */
7219 7102          xt_sync(cpuset);
7220 7103  
7221 7104          /*
7222 7105           * The page should have no mappings at this point, unless
7223 7106           * we were called from hat_page_relocate() in which case we
7224 7107           * leave the locked mappings which will be suspended later.
7225 7108           */
7226      -        ASSERT(!PP_ISMAPPED(origpp) || xhme_blks || pa_hments ||
     7109 +        ASSERT(!PP_ISMAPPED(origpp) || pa_hments ||
7227 7110              (forceflag == SFMMU_KERNEL_RELOC));
7228 7111  
7229 7112  #ifdef VAC
7230 7113          if (PP_ISTNC(pp)) {
7231 7114                  if (cons == TTE8K) {
7232 7115                          pmtx = sfmmu_page_enter(pp);
7233 7116                          PP_CLRTNC(pp);
7234 7117                          sfmmu_page_exit(pmtx);
7235 7118                  } else {
7236 7119                          conv_tnc(pp, cons);
↓ open down ↓ 14 lines elided ↑ open up ↑
7251 7134                  for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
7252 7135                          tmphme = sfhme->hme_next;
7253 7136                          if (IS_PAHME(sfhme)) {
7254 7137                                  struct pa_hment *pahmep = sfhme->hme_data;
7255 7138                                  sfmmu_pahment_leaked(pahmep);
7256 7139                                  HME_SUB(sfhme, pp);
7257 7140                                  kmem_cache_free(pa_hment_cache, pahmep);
7258 7141                          }
7259 7142                  }
7260 7143  
7261      -                ASSERT(!PP_ISMAPPED(origpp) || xhme_blks);
     7144 +                ASSERT(!PP_ISMAPPED(origpp));
7262 7145          }
7263 7146  
7264 7147          sfmmu_mlist_exit(pml);
7265 7148  
7266      -        /*
7267      -         * XHAT may not have finished unloading pages
7268      -         * because some other thread was waiting for
7269      -         * mlist lock and XHAT_PAGEUNLOAD let it do
7270      -         * the job.
7271      -         */
7272      -        if (xhme_blks) {
7273      -                pp = origpp;
7274      -                goto retry_xhat;
7275      -        }
7276      -
7277 7149          return (0);
7278 7150  }
7279 7151  
7280 7152  cpuset_t
7281 7153  sfmmu_pageunload(page_t *pp, struct sf_hment *sfhme, int cons)
7282 7154  {
7283 7155          struct hme_blk *hmeblkp;
7284 7156          sfmmu_t *sfmmup;
7285 7157          tte_t tte, ttemod;
7286 7158  #ifdef DEBUG
↓ open down ↓ 260 lines elided ↑ open up ↑
7547 7419                   * from the list.
7548 7420                   */
7549 7421                  tmphme = sfhme->hme_next;
7550 7422                  if (IS_PAHME(sfhme))
7551 7423                          continue;
7552 7424                  /*
7553 7425                   * If we are looking for large mappings and this hme doesn't
7554 7426                   * reach the range we are seeking, just ignore it.
7555 7427                   */
7556 7428                  hmeblkp = sfmmu_hmetohblk(sfhme);
7557      -                if (hmeblkp->hblk_xhat_bit)
7558      -                        continue;
7559 7429  
7560 7430                  if (hme_size(sfhme) < cons)
7561 7431                          continue;
7562 7432  
7563 7433                  if (stop_on_sh) {
7564 7434                          if (hmeblkp->hblk_shared) {
7565 7435                                  sf_srd_t *srdp = hblktosrd(hmeblkp);
7566 7436                                  uint_t rid = hmeblkp->hblk_tag.htag_rid;
7567 7437                                  sf_region_t *rgnp;
7568 7438                                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
↓ open down ↓ 139 lines elided ↑ open up ↑
7708 7578          ASSERT(sfmmu_mlist_held(pp));
7709 7579  
7710 7580          CPUSET_ZERO(cpuset);
7711 7581          SFMMU_STAT(sf_clrwrt);
7712 7582  
7713 7583  retry:
7714 7584  
7715 7585          sfmmu_copytte(&sfhme->hme_tte, &tte);
7716 7586          if (TTE_IS_VALID(&tte) && TTE_IS_WRITABLE(&tte)) {
7717 7587                  hmeblkp = sfmmu_hmetohblk(sfhme);
7718      -
7719      -                /*
7720      -                 * xhat mappings should never be to a VMODSORT page.
7721      -                 */
7722      -                ASSERT(hmeblkp->hblk_xhat_bit == 0);
7723      -
7724 7588                  sfmmup = hblktosfmmu(hmeblkp);
7725 7589                  addr = tte_to_vaddr(hmeblkp, tte);
7726 7590  
7727 7591                  ttemod = tte;
7728 7592                  TTE_CLR_WRT(&ttemod);
7729 7593                  TTE_CLR_MOD(&ttemod);
7730 7594                  ret = sfmmu_modifytte_try(&tte, &ttemod, &sfhme->hme_tte);
7731 7595  
7732 7596                  /*
7733 7597                   * if cas failed and the new value is not what
↓ open down ↓ 244 lines elided ↑ open up ↑
7978 7842           * We would like to
7979 7843           * ASSERT(AS_LOCK_HELD(as, &as->a_lock));
7980 7844           * but we can't because the iommu driver will call this
7981 7845           * routine at interrupt time and it can't grab the as lock
7982 7846           * or it will deadlock: A thread could have the as lock
7983 7847           * and be waiting for io.  The io can't complete
7984 7848           * because the interrupt thread is blocked trying to grab
7985 7849           * the as lock.
7986 7850           */
7987 7851  
7988      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
7989      -
7990 7852          if (hat == ksfmmup) {
7991 7853                  if (IS_KMEM_VA_LARGEPAGE(addr)) {
7992 7854                          ASSERT(segkmem_lpszc > 0);
7993 7855                          pfn = sfmmu_kvaszc2pfn(addr, segkmem_lpszc);
7994 7856                          if (pfn != PFN_INVALID) {
7995 7857                                  sfmmu_check_kpfn(pfn);
7996 7858                                  return (pfn);
7997 7859                          }
7998 7860                  } else if (segkpm && IS_KPM_ADDR(addr)) {
7999 7861                          return (sfmmu_kpm_vatopfn(addr));
↓ open down ↓ 163 lines elided ↑ open up ↑
8163 8025  
8164 8026  
8165 8027  /*
8166 8028   * For compatibility with AT&T and later optimizations
8167 8029   */
8168 8030  /* ARGSUSED */
8169 8031  void
8170 8032  hat_map(struct hat *hat, caddr_t addr, size_t len, uint_t flags)
8171 8033  {
8172 8034          ASSERT(hat != NULL);
8173      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8174 8035  }
8175 8036  
8176 8037  /*
8177 8038   * Return the number of mappings to a particular page.  This number is an
8178 8039   * approximation of the number of people sharing the page.
8179 8040   *
8180 8041   * shared hmeblks or ism hmeblks are counted as 1 mapping here.
8181 8042   * hat_page_checkshare() can be used to compare threshold to share
8182 8043   * count that reflects the number of region sharers albeit at higher cost.
8183 8044   */
↓ open down ↓ 72 lines elided ↑ open up ↑
8256 8117          index = PP_MAPINDEX(pp);
8257 8118  
8258 8119  again:
8259 8120          for (sfhme = pp->p_mapping; sfhme; sfhme = tmphme) {
8260 8121                  tmphme = sfhme->hme_next;
8261 8122                  if (IS_PAHME(sfhme)) {
8262 8123                          continue;
8263 8124                  }
8264 8125  
8265 8126                  hmeblkp = sfmmu_hmetohblk(sfhme);
8266      -                if (hmeblkp->hblk_xhat_bit) {
8267      -                        cnt++;
8268      -                        if (cnt > sh_thresh) {
8269      -                                sfmmu_mlist_exit(pml);
8270      -                                return (1);
8271      -                        }
8272      -                        continue;
8273      -                }
8274 8127                  if (hme_size(sfhme) != sz) {
8275 8128                          continue;
8276 8129                  }
8277 8130  
8278 8131                  if (hmeblkp->hblk_shared) {
8279 8132                          sf_srd_t *srdp = hblktosrd(hmeblkp);
8280 8133                          uint_t rid = hmeblkp->hblk_tag.htag_rid;
8281 8134                          sf_region_t *rgnp;
8282 8135                          ASSERT(SFMMU_IS_SHMERID_VALID(rid));
8283 8136                          ASSERT(rid < SFMMU_MAX_HME_REGIONS);
↓ open down ↓ 94 lines elided ↑ open up ↑
8378 8231                  }
8379 8232                  ASSERT(sz <= pszc);
8380 8233                  rootpp = PP_GROUPLEADER(pp, sz);
8381 8234                  for (sfhme = rootpp->p_mapping; sfhme; sfhme = tmphme) {
8382 8235                          tmphme = sfhme->hme_next;
8383 8236                          ASSERT(!IS_PAHME(sfhme));
8384 8237                          hmeblkp = sfmmu_hmetohblk(sfhme);
8385 8238                          if (hme_size(sfhme) != sz) {
8386 8239                                  continue;
8387 8240                          }
8388      -                        if (hmeblkp->hblk_xhat_bit) {
8389      -                                cmn_err(CE_PANIC,
8390      -                                    "hat_page_demote: xhat hmeblk");
8391      -                        }
8392 8241                          tset = sfmmu_pageunload(rootpp, sfhme, sz);
8393 8242                          CPUSET_OR(cpuset, tset);
8394 8243                  }
8395 8244                  if (index >>= 1) {
8396 8245                          sz++;
8397 8246                  }
8398 8247          }
8399 8248  
8400 8249          ASSERT(!PP_ISMAPPED_LARGE(pp));
8401 8250  
↓ open down ↓ 107 lines elided ↑ open up ↑
8509 8358   */
8510 8359  size_t
8511 8360  hat_get_mapped_size(struct hat *hat)
8512 8361  {
8513 8362          size_t          assize = 0;
8514 8363          int             i;
8515 8364  
8516 8365          if (hat == NULL)
8517 8366                  return (0);
8518 8367  
8519      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8520      -
8521 8368          for (i = 0; i < mmu_page_sizes; i++)
8522 8369                  assize += ((pgcnt_t)hat->sfmmu_ttecnt[i] +
8523 8370                      (pgcnt_t)hat->sfmmu_scdrttecnt[i]) * TTEBYTES(i);
8524 8371  
8525 8372          if (hat->sfmmu_iblk == NULL)
8526 8373                  return (assize);
8527 8374  
8528 8375          for (i = 0; i < mmu_page_sizes; i++)
8529 8376                  assize += ((pgcnt_t)hat->sfmmu_ismttecnt[i] +
8530 8377                      (pgcnt_t)hat->sfmmu_scdismttecnt[i]) * TTEBYTES(i);
8531 8378  
8532 8379          return (assize);
8533 8380  }
8534 8381  
8535 8382  int
8536 8383  hat_stats_enable(struct hat *hat)
8537 8384  {
8538 8385          hatlock_t       *hatlockp;
8539 8386  
8540      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8541      -
8542 8387          hatlockp = sfmmu_hat_enter(hat);
8543 8388          hat->sfmmu_rmstat++;
8544 8389          sfmmu_hat_exit(hatlockp);
8545 8390          return (1);
8546 8391  }
8547 8392  
8548 8393  void
8549 8394  hat_stats_disable(struct hat *hat)
8550 8395  {
8551 8396          hatlock_t       *hatlockp;
8552 8397  
8553      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
8554      -
8555 8398          hatlockp = sfmmu_hat_enter(hat);
8556 8399          hat->sfmmu_rmstat--;
8557 8400          sfmmu_hat_exit(hatlockp);
8558 8401  }
8559 8402  
8560 8403  /*
8561 8404   * Routines for entering or removing  ourselves from the
8562 8405   * ism_hat's mapping list. This is used for both private and
8563 8406   * SCD hats.
8564 8407   */
↓ open down ↓ 81 lines elided ↑ open up ↑
8646 8489           */
8647 8490          if (!ISM_ALIGNED(ismshift, addr) || !ISM_ALIGNED(ismshift, sptaddr))
8648 8491                  return (EINVAL);
8649 8492  
8650 8493          /*
8651 8494           * Check size alignment.
8652 8495           */
8653 8496          if (!ISM_ALIGNED(ismshift, len))
8654 8497                  return (EINVAL);
8655 8498  
8656      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
8657      -
8658 8499          /*
8659 8500           * Allocate ism_ment for the ism_hat's mapping list, and an
8660 8501           * ism map blk in case we need one.  We must do our
8661 8502           * allocations before acquiring locks to prevent a deadlock
8662 8503           * in the kmem allocator on the mapping list lock.
8663 8504           */
8664 8505          new_iblk = kmem_cache_alloc(ism_blk_cache, KM_SLEEP);
8665 8506          ism_ment = kmem_cache_alloc(ism_ment_cache, KM_SLEEP);
8666 8507  
8667 8508          /*
↓ open down ↓ 191 lines elided ↑ open up ↑
8859 8700          uint_t          ismshift = page_get_shift(ismszc);
8860 8701          size_t          sh_size = ISM_SHIFT(ismshift, len);
8861 8702          uchar_t         ism_rid;
8862 8703          sf_scd_t        *old_scdp;
8863 8704  
8864 8705          ASSERT(ISM_ALIGNED(ismshift, addr));
8865 8706          ASSERT(ISM_ALIGNED(ismshift, len));
8866 8707          ASSERT(sfmmup != NULL);
8867 8708          ASSERT(sfmmup != ksfmmup);
8868 8709  
8869      -        if (sfmmup->sfmmu_xhat_provider) {
8870      -                XHAT_UNSHARE(sfmmup, addr, len);
8871      -                return;
8872      -        } else {
8873      -                /*
8874      -                 * This must be a CPU HAT. If the address space has
8875      -                 * XHATs attached, inform all XHATs that ISM segment
8876      -                 * is going away
8877      -                 */
8878      -                ASSERT(sfmmup->sfmmu_as != NULL);
8879      -                if (sfmmup->sfmmu_as->a_xhat != NULL)
8880      -                        xhat_unshare_all(sfmmup->sfmmu_as, addr, len);
8881      -        }
     8710 +        ASSERT(sfmmup->sfmmu_as != NULL);
8882 8711  
8883 8712          /*
8884 8713           * Make sure that during the entire time ISM mappings are removed,
8885 8714           * the trap handlers serialize behind us, and that no one else
8886 8715           * can be mucking with ISM mappings.  This also lets us get away
8887 8716           * with not doing expensive cross calls to flush the TLB -- we
8888 8717           * just discard the context, flush the entire TSB, and call it
8889 8718           * a day.
8890 8719           */
8891 8720          sfmmu_ismhat_enter(sfmmup, 0);
↓ open down ↓ 433 lines elided ↑ open up ↑
9325 9154  
9326 9155          /*
9327 9156           * check if any mapping is in the same as, or is locked, since
9328 9157           * in either case we need to uncache.
9329 9158           */
9330 9159          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9331 9160                  tmphme = sfhmep->hme_next;
9332 9161                  if (IS_PAHME(sfhmep))
9333 9162                          continue;
9334 9163                  hmeblkp = sfmmu_hmetohblk(sfhmep);
9335      -                if (hmeblkp->hblk_xhat_bit)
9336      -                        continue;
9337 9164                  tmphat = hblktosfmmu(hmeblkp);
9338 9165                  sfmmu_copytte(&sfhmep->hme_tte, &tte);
9339 9166                  ASSERT(TTE_IS_VALID(&tte));
9340 9167                  if (hmeblkp->hblk_shared || tmphat == hat ||
9341 9168                      hmeblkp->hblk_lckcnt) {
9342 9169                          /*
9343 9170                           * We have an uncache conflict
9344 9171                           */
9345 9172                          SFMMU_STAT(sf_uncache_conflict);
9346 9173                          sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
↓ open down ↓ 6 lines elided ↑ open up ↑
9353 9180           * We have already checked for LARGE mappings, therefore
9354 9181           * the remaining mapping(s) must be TTE8K.
9355 9182           */
9356 9183          SFMMU_STAT(sf_unload_conflict);
9357 9184  
9358 9185          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
9359 9186                  tmphme = sfhmep->hme_next;
9360 9187                  if (IS_PAHME(sfhmep))
9361 9188                          continue;
9362 9189                  hmeblkp = sfmmu_hmetohblk(sfhmep);
9363      -                if (hmeblkp->hblk_xhat_bit)
9364      -                        continue;
9365 9190                  ASSERT(!hmeblkp->hblk_shared);
9366 9191                  (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
9367 9192          }
9368 9193  
9369 9194          if (PP_ISMAPPED_KPM(pp))
9370 9195                  sfmmu_kpm_vac_unload(pp, addr);
9371 9196  
9372 9197          /*
9373 9198           * Unloads only do TLB flushes so we need to flush the
9374 9199           * cache here.
↓ open down ↓ 127 lines elided ↑ open up ↑
9502 9327                          kpmvaddr = hat_kpm_page2va(pp, 1);
9503 9328                          ASSERT(!(npages > 1 && IS_KPM_ALIAS_RANGE(kpmvaddr)));
9504 9329                          color1 = addr_to_vcolor(kpmvaddr);
9505 9330                          clr_valid = 1;
9506 9331                  }
9507 9332  
9508 9333                  for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9509 9334                          if (IS_PAHME(sfhme))
9510 9335                                  continue;
9511 9336                          hmeblkp = sfmmu_hmetohblk(sfhme);
9512      -                        if (hmeblkp->hblk_xhat_bit)
9513      -                                continue;
9514 9337  
9515 9338                          sfmmu_copytte(&sfhme->hme_tte, &tte);
9516 9339                          ASSERT(TTE_IS_VALID(&tte));
9517 9340  
9518 9341                          vaddr = tte_to_vaddr(hmeblkp, tte);
9519 9342                          color = addr_to_vcolor(vaddr);
9520 9343  
9521 9344                          if (npages > 1) {
9522 9345                                  /*
9523 9346                                   * If there is a big mapping, make sure
↓ open down ↓ 127 lines elided ↑ open up ↑
9651 9474  
9652 9475          color = bcolor;
9653 9476          pfn = pp->p_pagenum;
9654 9477  
9655 9478          for (sfhme = pp->p_mapping; sfhme; sfhme = sfhme->hme_next) {
9656 9479  
9657 9480                  if (IS_PAHME(sfhme))
9658 9481                          continue;
9659 9482                  hmeblkp = sfmmu_hmetohblk(sfhme);
9660 9483  
9661      -                if (hmeblkp->hblk_xhat_bit)
9662      -                        continue;
9663      -
9664 9484                  sfmmu_copytte(&sfhme->hme_tte, &tte);
9665 9485                  ASSERT(TTE_IS_VALID(&tte));
9666 9486                  vaddr = tte_to_vaddr(hmeblkp, tte);
9667 9487                  color = addr_to_vcolor(vaddr);
9668 9488  
9669 9489  #ifdef DEBUG
9670 9490                  if ((flags & HAT_CACHE) && bcolor != NO_VCOLOR) {
9671 9491                          ASSERT(color == bcolor);
9672 9492                  }
9673 9493  #endif
↓ open down ↓ 3727 lines elided ↑ open up ↑
13401 13221  
13402 13222  /*
13403 13223   * This function is currently not supported on this platform. For what
13404 13224   * it's supposed to do, see hat.c and hat_srmmu.c
13405 13225   */
13406 13226  /* ARGSUSED */
13407 13227  faultcode_t
13408 13228  hat_softlock(struct hat *hat, caddr_t addr, size_t *lenp, page_t **ppp,
13409 13229      uint_t flags)
13410 13230  {
13411      -        ASSERT(hat->sfmmu_xhat_provider == NULL);
13412 13231          return (FC_NOSUPPORT);
13413 13232  }
13414 13233  
13415 13234  /*
13416 13235   * Searches the mapping list of the page for a mapping of the same size. If not
13417 13236   * found the corresponding bit is cleared in the p_index field. When large
13418 13237   * pages are more prevalent in the system, we can maintain the mapping list
13419 13238   * in order and we don't have to traverse the list each time. Just check the
13420 13239   * next and prev entries, and if both are of different size, we clear the bit.
13421 13240   */
↓ open down ↓ 14 lines elided ↑ open up ↑
13436 13255          /*
13437 13256           * Traverse the mapping list looking for another mapping of the same
13438 13257           * size, since we only want to clear the index field if all mappings of
13439 13258           * that size are gone.
13440 13259           */
13441 13260  
13442 13261          for (sfhmep = pp->p_mapping; sfhmep; sfhmep = sfhmep->hme_next) {
13443 13262                  if (IS_PAHME(sfhmep))
13444 13263                          continue;
13445 13264                  hmeblkp = sfmmu_hmetohblk(sfhmep);
13446      -                if (hmeblkp->hblk_xhat_bit)
13447      -                        continue;
13448 13265                  if (hme_size(sfhmep) == ttesz) {
13449 13266                          /*
13450 13267                           * another mapping of the same size. don't clear index.
13451 13268                           */
13452 13269                          return;
13453 13270                  }
13454 13271          }
13455 13272  
13456 13273          /*
13457 13274           * Clear the p_index bit for large page.
↓ open down ↓ 564 lines elided ↑ open up ↑
14022 13839          uint16_t *busyrgnsp;
14023 13840          ulong_t rttecnt;
14024 13841          uchar_t tteflag;
14025 13842          uchar_t r_type = flags & HAT_REGION_TYPE_MASK;
14026 13843          int text = (r_type == HAT_REGION_TEXT);
14027 13844  
14028 13845          if (srdp == NULL || r_size == 0) {
14029 13846                  return (HAT_INVALID_REGION_COOKIE);
14030 13847          }
14031 13848  
14032      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14033 13849          ASSERT(sfmmup != ksfmmup);
14034 13850          ASSERT(AS_WRITE_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14035 13851          ASSERT(srdp->srd_refcnt > 0);
14036 13852          ASSERT(!(flags & ~HAT_REGION_TYPE_MASK));
14037 13853          ASSERT(flags == HAT_REGION_TEXT || flags == HAT_REGION_ISM);
14038 13854          ASSERT(r_pgszc < mmu_page_sizes);
14039 13855          if (!IS_P2ALIGNED(r_saddr, TTEBYTES(r_pgszc)) ||
14040 13856              !IS_P2ALIGNED(r_size, TTEBYTES(r_pgszc))) {
14041 13857                  panic("hat_join_region: region addr or size is not aligned\n");
14042 13858          }
↓ open down ↓ 285 lines elided ↑ open up ↑
14328 14144          } else {
14329 14145                  ASSERT(SFMMU_IS_SHMERID_VALID(rid));
14330 14146                  ASSERT(rid < SFMMU_MAX_HME_REGIONS);
14331 14147                  rgnp = srdp->srd_hmergnp[rid];
14332 14148          }
14333 14149          ASSERT(rgnp != NULL);
14334 14150          ASSERT(rgnp->rgn_id == rid);
14335 14151          ASSERT((rgnp->rgn_flags & SFMMU_REGION_TYPE_MASK) == r_type);
14336 14152          ASSERT(!(rgnp->rgn_flags & SFMMU_REGION_FREE));
14337 14153          ASSERT(AS_LOCK_HELD(sfmmup->sfmmu_as, &sfmmup->sfmmu_as->a_lock));
14338      -
14339      -        ASSERT(sfmmup->sfmmu_xhat_provider == NULL);
14340      -        if (r_type == SFMMU_REGION_HME && sfmmup->sfmmu_as->a_xhat != NULL) {
14341      -                xhat_unload_callback_all(sfmmup->sfmmu_as, rgnp->rgn_saddr,
14342      -                    rgnp->rgn_size, 0, NULL);
14343      -        }
14344 14154  
14345 14155          if (sfmmup->sfmmu_free) {
14346 14156                  ulong_t rttecnt;
14347 14157                  r_pgszc = rgnp->rgn_pgszc;
14348 14158                  r_size = rgnp->rgn_size;
14349 14159  
14350 14160                  ASSERT(sfmmup->sfmmu_scdp == NULL);
14351 14161                  if (r_type == SFMMU_REGION_ISM) {
14352 14162                          SF_RGNMAP_DEL(sfmmup->sfmmu_ismregion_map, rid);
14353 14163                  } else {
↓ open down ↓ 1500 lines elided ↑ open up ↑