patch as-lock-macro-simplification
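
This webrev drops the redundant &as->a_lock argument from the address-space lock macros used in prsubr.c: at every call site changed below, AS_LOCK_ENTER, AS_LOCK_EXIT, AS_LOCK_HELD and AS_WRITE_HELD now take only the struct as pointer, since the argument being removed was always &as->a_lock and can be derived from the as itself. A minimal before/after sketch of the calling pattern follows; sample_as_reader() is a hypothetical wrapper used only to show the shape of a caller and is not part of the patch.

        /*
         * Illustrative sketch only: shows the simplified macro convention,
         * modeled on the reader-lock hunk in prgetpsinfo() below.
         */
        static void
        sample_as_reader(struct as *as)
        {
                AS_LOCK_ENTER(as, RW_READER);   /* was AS_LOCK_ENTER(as, &as->a_lock, RW_READER) */
                ASSERT(AS_LOCK_HELD(as));       /* was ASSERT(AS_LOCK_HELD(as, &as->a_lock)) */
                /* ... examine the address space under the reader lock ... */
                AS_LOCK_EXIT(as);               /* was AS_LOCK_EXIT(as, &as->a_lock) */
        }

The hunks below apply this same mechanical substitution to each AS_LOCK_* and AS_*_HELD call site in prsubr.c.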

--- old/usr/src/uts/common/fs/proc/prsubr.c
+++ new/usr/src/uts/common/fs/proc/prsubr.c
[ 1368 lines elided ]
1369 1369  
1370 1370  /*
1371 1371   * Count the number of segments in this process's address space.
1372 1372   */
1373 1373  int
1374 1374  prnsegs(struct as *as, int reserved)
1375 1375  {
1376 1376          int n = 0;
1377 1377          struct seg *seg;
1378 1378  
1379      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     1379 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
1380 1380  
1381 1381          for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
1382 1382                  caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
1383 1383                  caddr_t saddr, naddr;
1384 1384                  void *tmp = NULL;
1385 1385  
1386 1386                  for (saddr = seg->s_base; saddr < eaddr; saddr = naddr) {
1387 1387                          (void) pr_getprot(seg, reserved, &tmp,
1388 1388                              &saddr, &naddr, eaddr);
1389 1389                          if (saddr != naddr)
[ 222 lines elided ]
1612 1612  prgetmap(proc_t *p, int reserved, list_t *iolhead)
1613 1613  {
1614 1614          struct as *as = p->p_as;
1615 1615          prmap_t *mp;
1616 1616          struct seg *seg;
1617 1617          struct seg *brkseg, *stkseg;
1618 1618          struct vnode *vp;
1619 1619          struct vattr vattr;
1620 1620          uint_t prot;
1621 1621  
1622      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     1622 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
1623 1623  
1624 1624          /*
1625 1625           * Request an initial buffer size that doesn't waste memory
1626 1626           * if the address space has only a small number of segments.
1627 1627           */
1628 1628          pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
1629 1629  
1630 1630          if ((seg = AS_SEGFIRST(as)) == NULL)
1631 1631                  return (0);
1632 1632  
[ 90 lines elided ]
1723 1723  prgetmap32(proc_t *p, int reserved, list_t *iolhead)
1724 1724  {
1725 1725          struct as *as = p->p_as;
1726 1726          prmap32_t *mp;
1727 1727          struct seg *seg;
1728 1728          struct seg *brkseg, *stkseg;
1729 1729          struct vnode *vp;
1730 1730          struct vattr vattr;
1731 1731          uint_t prot;
1732 1732  
1733      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     1733 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
1734 1734  
1735 1735          /*
1736 1736           * Request an initial buffer size that doesn't waste memory
1737 1737           * if the address space has only a small number of segments.
1738 1738           */
1739 1739          pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
1740 1740  
1741 1741          if ((seg = AS_SEGFIRST(as)) == NULL)
1742 1742                  return (0);
1743 1743  
[ 89 lines elided ]
1833 1833  
1834 1834  /*
1835 1835   * Return the size of the /proc page data file.
1836 1836   */
1837 1837  size_t
1838 1838  prpdsize(struct as *as)
1839 1839  {
1840 1840          struct seg *seg;
1841 1841          size_t size;
1842 1842  
1843      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     1843 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
1844 1844  
1845 1845          if ((seg = AS_SEGFIRST(as)) == NULL)
1846 1846                  return (0);
1847 1847  
1848 1848          size = sizeof (prpageheader_t);
1849 1849          do {
1850 1850                  caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
1851 1851                  caddr_t saddr, naddr;
1852 1852                  void *tmp = NULL;
1853 1853                  size_t npage;
[ 9 lines elided ]
1863 1863          return (size);
1864 1864  }
1865 1865  
1866 1866  #ifdef _SYSCALL32_IMPL
1867 1867  size_t
1868 1868  prpdsize32(struct as *as)
1869 1869  {
1870 1870          struct seg *seg;
1871 1871          size_t size;
1872 1872  
1873      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     1873 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
1874 1874  
1875 1875          if ((seg = AS_SEGFIRST(as)) == NULL)
1876 1876                  return (0);
1877 1877  
1878 1878          size = sizeof (prpageheader32_t);
1879 1879          do {
1880 1880                  caddr_t eaddr = seg->s_base + pr_getsegsize(seg, 0);
1881 1881                  caddr_t saddr, naddr;
1882 1882                  void *tmp = NULL;
1883 1883                  size_t npage;
[ 18 lines elided ]
1902 1902  {
1903 1903          struct as *as = p->p_as;
1904 1904          caddr_t buf;
1905 1905          size_t size;
1906 1906          prpageheader_t *php;
1907 1907          prasmap_t *pmp;
1908 1908          struct seg *seg;
1909 1909          int error;
1910 1910  
1911 1911  again:
1912      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     1912 +        AS_LOCK_ENTER(as, RW_WRITER);
1913 1913  
1914 1914          if ((seg = AS_SEGFIRST(as)) == NULL) {
1915      -                AS_LOCK_EXIT(as, &as->a_lock);
     1915 +                AS_LOCK_EXIT(as);
1916 1916                  return (0);
1917 1917          }
1918 1918          size = prpdsize(as);
1919 1919          if (uiop->uio_resid < size) {
1920      -                AS_LOCK_EXIT(as, &as->a_lock);
     1920 +                AS_LOCK_EXIT(as);
1921 1921                  return (E2BIG);
1922 1922          }
1923 1923  
1924 1924          buf = kmem_zalloc(size, KM_SLEEP);
1925 1925          php = (prpageheader_t *)buf;
1926 1926          pmp = (prasmap_t *)(buf + sizeof (prpageheader_t));
1927 1927  
1928 1928          hrt2ts(gethrtime(), &php->pr_tstamp);
1929 1929          php->pr_nmap = 0;
1930 1930          php->pr_npage = 0;
[ 27 lines elided ]
1958 1958                           * overrun the buffer whose size we computed based
1959 1959                           * on the initial iteration through the segments.
1960 1960                           * Once we've detected an overflow, we need to clean
1961 1961                           * up the temporary memory allocated in pr_getprot()
1962 1962                           * and retry. If there's a pending signal, we return
1963 1963                           * EINTR so that this thread can be dislodged if
1964 1964                           * a latent bug causes us to spin indefinitely.
1965 1965                           */
1966 1966                          if (next > (uintptr_t)buf + size) {
1967 1967                                  pr_getprot_done(&tmp);
1968      -                                AS_LOCK_EXIT(as, &as->a_lock);
     1968 +                                AS_LOCK_EXIT(as);
1969 1969  
1970 1970                                  kmem_free(buf, size);
1971 1971  
1972 1972                                  if (ISSIG(curthread, JUSTLOOKING))
1973 1973                                          return (EINTR);
1974 1974  
1975 1975                                  goto again;
1976 1976                          }
1977 1977  
1978 1978                          php->pr_nmap++;
[ 48 lines elided ]
2027 2027                                  pmp->pr_shmid = -1;
2028 2028                          }
2029 2029  
2030 2030                          hat_getstat(as, saddr, len, hatid,
2031 2031                              (char *)(pmp + 1), HAT_SYNC_ZERORM);
2032 2032                          pmp = (prasmap_t *)next;
2033 2033                  }
2034 2034                  ASSERT(tmp == NULL);
2035 2035          } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2036 2036  
2037      -        AS_LOCK_EXIT(as, &as->a_lock);
     2037 +        AS_LOCK_EXIT(as);
2038 2038  
2039 2039          ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
2040 2040          error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
2041 2041          kmem_free(buf, size);
2042 2042  
2043 2043          return (error);
2044 2044  }
2045 2045  
2046 2046  #ifdef _SYSCALL32_IMPL
2047 2047  int
[ 1 line elided ]
2049 2049  {
2050 2050          struct as *as = p->p_as;
2051 2051          caddr_t buf;
2052 2052          size_t size;
2053 2053          prpageheader32_t *php;
2054 2054          prasmap32_t *pmp;
2055 2055          struct seg *seg;
2056 2056          int error;
2057 2057  
2058 2058  again:
2059      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     2059 +        AS_LOCK_ENTER(as, RW_WRITER);
2060 2060  
2061 2061          if ((seg = AS_SEGFIRST(as)) == NULL) {
2062      -                AS_LOCK_EXIT(as, &as->a_lock);
     2062 +                AS_LOCK_EXIT(as);
2063 2063                  return (0);
2064 2064          }
2065 2065          size = prpdsize32(as);
2066 2066          if (uiop->uio_resid < size) {
2067      -                AS_LOCK_EXIT(as, &as->a_lock);
     2067 +                AS_LOCK_EXIT(as);
2068 2068                  return (E2BIG);
2069 2069          }
2070 2070  
2071 2071          buf = kmem_zalloc(size, KM_SLEEP);
2072 2072          php = (prpageheader32_t *)buf;
2073 2073          pmp = (prasmap32_t *)(buf + sizeof (prpageheader32_t));
2074 2074  
2075 2075          hrt2ts32(gethrtime(), &php->pr_tstamp);
2076 2076          php->pr_nmap = 0;
2077 2077          php->pr_npage = 0;
[ 27 lines elided ]
2105 2105                           * overrun the buffer whose size we computed based
2106 2106                           * on the initial iteration through the segments.
2107 2107                           * Once we've detected an overflow, we need to clean
2108 2108                           * up the temporary memory allocated in pr_getprot()
2109 2109                           * and retry. If there's a pending signal, we return
2110 2110                           * EINTR so that this thread can be dislodged if
2111 2111                           * a latent bug causes us to spin indefinitely.
2112 2112                           */
2113 2113                          if (next > (uintptr_t)buf + size) {
2114 2114                                  pr_getprot_done(&tmp);
2115      -                                AS_LOCK_EXIT(as, &as->a_lock);
     2115 +                                AS_LOCK_EXIT(as);
2116 2116  
2117 2117                                  kmem_free(buf, size);
2118 2118  
2119 2119                                  if (ISSIG(curthread, JUSTLOOKING))
2120 2120                                          return (EINTR);
2121 2121  
2122 2122                                  goto again;
2123 2123                          }
2124 2124  
2125 2125                          php->pr_nmap++;
[ 48 lines elided ]
2174 2174                                  pmp->pr_shmid = -1;
2175 2175                          }
2176 2176  
2177 2177                          hat_getstat(as, saddr, len, hatid,
2178 2178                              (char *)(pmp + 1), HAT_SYNC_ZERORM);
2179 2179                          pmp = (prasmap32_t *)next;
2180 2180                  }
2181 2181                  ASSERT(tmp == NULL);
2182 2182          } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
2183 2183  
2184      -        AS_LOCK_EXIT(as, &as->a_lock);
     2184 +        AS_LOCK_EXIT(as);
2185 2185  
2186 2186          ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
2187 2187          error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
2188 2188          kmem_free(buf, size);
2189 2189  
2190 2190          return (error);
2191 2191  }
2192 2192  #endif  /* _SYSCALL32_IMPL */
2193 2193  
2194 2194  ushort_t
[ 134 lines elided ]
2329 2329                                  pct += cpu_update_pct(t, cur_time);
2330 2330                          } while ((t = t->t_forw) != p->p_tlist);
2331 2331  
2332 2332                          psp->pr_pctcpu = prgetpctcpu(pct);
2333 2333                  }
2334 2334                  if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
2335 2335                          psp->pr_size = 0;
2336 2336                          psp->pr_rssize = 0;
2337 2337                  } else {
2338 2338                          mutex_exit(&p->p_lock);
2339      -                        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2339 +                        AS_LOCK_ENTER(as, RW_READER);
2340 2340                          psp->pr_size = btopr(as->a_resvsize) *
2341 2341                              (PAGESIZE / 1024);
2342 2342                          psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024);
2343 2343                          psp->pr_pctmem = rm_pctmemory(as);
2344      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2344 +                        AS_LOCK_EXIT(as);
2345 2345                          mutex_enter(&p->p_lock);
2346 2346                  }
2347 2347          }
2348 2348  }
2349 2349  
2350 2350  #ifdef _SYSCALL32_IMPL
2351 2351  void
2352 2352  prgetpsinfo32(proc_t *p, psinfo32_t *psp)
2353 2353  {
2354 2354          kthread_t *t;
[ 107 lines elided ]
2462 2462                                  pct += cpu_update_pct(t, cur_time);
2463 2463                          } while ((t = t->t_forw) != p->p_tlist);
2464 2464  
2465 2465                          psp->pr_pctcpu = prgetpctcpu(pct);
2466 2466                  }
2467 2467                  if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
2468 2468                          psp->pr_size = 0;
2469 2469                          psp->pr_rssize = 0;
2470 2470                  } else {
2471 2471                          mutex_exit(&p->p_lock);
2472      -                        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
     2472 +                        AS_LOCK_ENTER(as, RW_READER);
2473 2473                          psp->pr_size = (size32_t)
2474 2474                              (btopr(as->a_resvsize) * (PAGESIZE / 1024));
2475 2475                          psp->pr_rssize = (size32_t)
2476 2476                              (rm_asrss(as) * (PAGESIZE / 1024));
2477 2477                          psp->pr_pctmem = rm_pctmemory(as);
2478      -                        AS_LOCK_EXIT(as, &as->a_lock);
     2478 +                        AS_LOCK_EXIT(as);
2479 2479                          mutex_enter(&p->p_lock);
2480 2480                  }
2481 2481          }
2482 2482  
2483 2483          /*
2484 2484           * If we are looking at an LP64 process, zero out
2485 2485           * the fields that cannot be represented in ILP32.
2486 2486           */
2487 2487          if (p->p_model != DATAMODEL_ILP32) {
2488 2488                  psp->pr_size = 0;
[ 817 lines elided ]
3306 3306          struct as *as = p->p_as;
3307 3307          struct watched_page *pwp;
3308 3308          uint_t prot;
3309 3309          int    retrycnt, err;
3310 3310          void *cookie;
3311 3311  
3312 3312          if (as == NULL || avl_numnodes(&as->a_wpage) == 0)
3313 3313                  return;
3314 3314  
3315 3315          ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
3316      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     3316 +        AS_LOCK_ENTER(as, RW_WRITER);
3317 3317  
3318 3318          pwp = avl_first(&as->a_wpage);
3319 3319  
3320 3320          cookie = NULL;
3321 3321          while ((pwp = avl_destroy_nodes(&as->a_wpage, &cookie)) != NULL) {
3322 3322                  retrycnt = 0;
3323 3323                  if ((prot = pwp->wp_oprot) != 0) {
3324 3324                          caddr_t addr = pwp->wp_vaddr;
3325 3325                          struct seg *seg;
3326 3326                  retry:
[ 8 lines elided ]
3335 3335                                          goto retry;
3336 3336                                  }
3337 3337                          }
3338 3338                  }
3339 3339                  kmem_free(pwp, sizeof (struct watched_page));
3340 3340          }
3341 3341  
3342 3342          avl_destroy(&as->a_wpage);
3343 3343          p->p_wprot = NULL;
3344 3344  
3345      -        AS_LOCK_EXIT(as, &as->a_lock);
     3345 +        AS_LOCK_EXIT(as);
3346 3346  }
3347 3347  
3348 3348  /*
3349 3349   * Insert a watched area into the list of watched pages.
3350 3350   * If oflags is zero then we are adding a new watched area.
3351 3351   * Otherwise we are changing the flags of an existing watched area.
3352 3352   */
3353 3353  static int
3354 3354  set_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr,
3355 3355          ulong_t flags, ulong_t oflags)
[ 13 lines elided ]
3369 3369           * held.
3370 3370           */
3371 3371          newpwp = NULL;
3372 3372          for (addr = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
3373 3373              addr < eaddr; addr += PAGESIZE) {
3374 3374                  pwp = kmem_zalloc(sizeof (struct watched_page), KM_SLEEP);
3375 3375                  pwp->wp_list = newpwp;
3376 3376                  newpwp = pwp;
3377 3377          }
3378 3378  
3379      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     3379 +        AS_LOCK_ENTER(as, RW_WRITER);
3380 3380  
3381 3381          /*
3382 3382           * Search for an existing watched page to contain the watched area.
3383 3383           * If none is found, grab a new one from the available list
3384 3384           * and insert it in the active list, keeping the list sorted
3385 3385           * by user-level virtual address.
3386 3386           */
3387 3387          if (p->p_flag & SVFWAIT)
3388 3388                  pwp_tree = &p->p_wpage;
3389 3389          else
3390 3390                  pwp_tree = &as->a_wpage;
3391 3391  
3392 3392  again:
3393 3393          if (avl_numnodes(pwp_tree) > prnwatch) {
3394      -                AS_LOCK_EXIT(as, &as->a_lock);
     3394 +                AS_LOCK_EXIT(as);
3395 3395                  while (newpwp != NULL) {
3396 3396                          pwp = newpwp->wp_list;
3397 3397                          kmem_free(newpwp, sizeof (struct watched_page));
3398 3398                          newpwp = pwp;
3399 3399                  }
3400 3400                  return (E2BIG);
3401 3401          }
3402 3402  
3403 3403          tpw.wp_vaddr = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
3404 3404          if ((pwp = avl_find(pwp_tree, &tpw, &where)) == NULL) {
[ 52 lines elided ]
3457 3457                  }
3458 3458          }
3459 3459  
3460 3460          /*
3461 3461           * If the watched area extends into the next page then do
3462 3462           * it over again with the virtual address of the next page.
3463 3463           */
3464 3464          if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr)
3465 3465                  goto again;
3466 3466  
3467      -        AS_LOCK_EXIT(as, &as->a_lock);
     3467 +        AS_LOCK_EXIT(as);
3468 3468  
3469 3469          /*
3470 3470           * Free any pages we may have over-allocated
3471 3471           */
3472 3472          while (newpwp != NULL) {
3473 3473                  pwp = newpwp->wp_list;
3474 3474                  kmem_free(newpwp, sizeof (struct watched_page));
3475 3475                  newpwp = pwp;
3476 3476          }
3477 3477  
[ 6 lines elided ]
3484 3484   */
3485 3485  static void
3486 3486  clear_watched_page(proc_t *p, caddr_t vaddr, caddr_t eaddr, ulong_t flags)
3487 3487  {
3488 3488          struct as *as = p->p_as;
3489 3489          struct watched_page *pwp;
3490 3490          struct watched_page tpw;
3491 3491          avl_tree_t *tree;
3492 3492          avl_index_t where;
3493 3493  
3494      -        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
     3494 +        AS_LOCK_ENTER(as, RW_WRITER);
3495 3495  
3496 3496          if (p->p_flag & SVFWAIT)
3497 3497                  tree = &p->p_wpage;
3498 3498          else
3499 3499                  tree = &as->a_wpage;
3500 3500  
3501 3501          tpw.wp_vaddr = vaddr =
3502 3502              (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
3503 3503          pwp = avl_find(tree, &tpw, &where);
3504 3504          if (pwp == NULL)
[ 44 lines elided ]
3549 3549                                          pwp->wp_flags |= WP_SETPROT;
3550 3550                                          pwp->wp_list = p->p_wprot;
3551 3551                                          p->p_wprot = pwp;
3552 3552                                  }
3553 3553                          }
3554 3554                  }
3555 3555  
3556 3556                  pwp = AVL_NEXT(tree, pwp);
3557 3557          }
3558 3558  
3559      -        AS_LOCK_EXIT(as, &as->a_lock);
     3559 +        AS_LOCK_EXIT(as);
3560 3560  }
3561 3561  
3562 3562  /*
3563 3563   * Return the original protections for the specified page.
3564 3564   */
3565 3565  static void
3566 3566  getwatchprot(struct as *as, caddr_t addr, uint_t *prot)
3567 3567  {
3568 3568          struct watched_page *pwp;
3569 3569          struct watched_page tpw;
3570 3570  
3571      -        ASSERT(AS_LOCK_HELD(as, &as->a_lock));
     3571 +        ASSERT(AS_LOCK_HELD(as));
3572 3572  
3573 3573          tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
3574 3574          if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL)
3575 3575                  *prot = pwp->wp_oprot;
3576 3576  }
3577 3577  
3578 3578  static prpagev_t *
3579 3579  pr_pagev_create(struct seg *seg, int check_noreserve)
3580 3580  {
3581 3581          prpagev_t *pagev = kmem_alloc(sizeof (prpagev_t), KM_SLEEP);
[ 266 lines elided ]
3848 3848          uint_t prot;
3849 3849  
3850 3850          union {
3851 3851                  struct segvn_data *svd;
3852 3852                  struct segdev_data *sdp;
3853 3853                  void *data;
3854 3854          } s;
3855 3855  
3856 3856          s.data = seg->s_data;
3857 3857  
3858      -        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
     3858 +        ASSERT(AS_WRITE_HELD(as));
3859 3859          ASSERT(saddr >= seg->s_base && saddr < eaddr);
3860 3860          ASSERT(eaddr <= seg->s_base + seg->s_size);
3861 3861  
3862 3862          /*
3863 3863           * Don't include MAP_NORESERVE pages in the address range
3864 3864           * unless their mappings have actually materialized.
3865 3865           * We cheat by knowing that segvn is the only segment
3866 3866           * driver that supports MAP_NORESERVE.
3867 3867           */
3868 3868          check_noreserve =
[ 93 lines elided ]
3962 3962          return (vn_matchops(vp, prvnodeops) &&
3963 3963              (VTOP(vp)->pr_flags & PR_ISSELF) &&
3964 3964              VTOP(vp)->pr_type != PR_PIDDIR);
3965 3965  }
3966 3966  
3967 3967  static ssize_t
3968 3968  pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr)
3969 3969  {
3970 3970          ssize_t pagesize, hatsize;
3971 3971  
3972      -        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     3972 +        ASSERT(AS_WRITE_HELD(seg->s_as));
3973 3973          ASSERT(IS_P2ALIGNED(saddr, PAGESIZE));
3974 3974          ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE));
3975 3975          ASSERT(saddr < eaddr);
3976 3976  
3977 3977          pagesize = hatsize = hat_getpagesize(seg->s_as->a_hat, saddr);
3978 3978          ASSERT(pagesize == -1 || IS_P2ALIGNED(pagesize, pagesize));
3979 3979          ASSERT(pagesize != 0);
3980 3980  
3981 3981          if (pagesize == -1)
3982 3982                  pagesize = PAGESIZE;
[ 19 lines elided ]
4002 4002  prgetxmap(proc_t *p, list_t *iolhead)
4003 4003  {
4004 4004          struct as *as = p->p_as;
4005 4005          prxmap_t *mp;
4006 4006          struct seg *seg;
4007 4007          struct seg *brkseg, *stkseg;
4008 4008          struct vnode *vp;
4009 4009          struct vattr vattr;
4010 4010          uint_t prot;
4011 4011  
4012      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     4012 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
4013 4013  
4014 4014          /*
4015 4015           * Request an initial buffer size that doesn't waste memory
4016 4016           * if the address space has only a small number of segments.
4017 4017           */
4018 4018          pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
4019 4019  
4020 4020          if ((seg = AS_SEGFIRST(as)) == NULL)
4021 4021                  return (0);
4022 4022  
[ 163 lines elided ]
4186 4186  prgetxmap32(proc_t *p, list_t *iolhead)
4187 4187  {
4188 4188          struct as *as = p->p_as;
4189 4189          prxmap32_t *mp;
4190 4190          struct seg *seg;
4191 4191          struct seg *brkseg, *stkseg;
4192 4192          struct vnode *vp;
4193 4193          struct vattr vattr;
4194 4194          uint_t prot;
4195 4195  
4196      -        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
     4196 +        ASSERT(as != &kas && AS_WRITE_HELD(as));
4197 4197  
4198 4198          /*
4199 4199           * Request an initial buffer size that doesn't waste memory
4200 4200           * if the address space has only a small number of segments.
4201 4201           */
4202 4202          pr_iol_initlist(iolhead, sizeof (*mp), avl_numnodes(&as->a_segtree));
4203 4203  
4204 4204          if ((seg = AS_SEGFIRST(as)) == NULL)
4205 4205                  return (0);
4206 4206  
[ 124 lines elided ]