Print this page
patch as-lock-macro-simplification

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/seg_vn.c
          +++ new/usr/src/uts/common/vm/seg_vn.c
↓ open down ↓ 543 lines elided ↑ open up ↑
 544  544          struct segvn_data *svd;
 545  545          size_t swresv = 0;
 546  546          struct cred *cred;
 547  547          struct anon_map *amp;
 548  548          int error = 0;
 549  549          size_t pgsz;
 550  550          lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
 551  551          int use_rgn = 0;
 552  552          int trok = 0;
 553  553  
 554      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
      554 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 555  555  
 556  556          if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
 557  557                  panic("segvn_create type");
 558  558                  /*NOTREACHED*/
 559  559          }
 560  560  
 561  561          /*
 562  562           * Check arguments.  If a shared anon structure is given then
 563  563           * it is illegal to also specify a vp.
 564  564           */
↓ open down ↓ 450 lines elided ↑ open up ↑
1015 1015          struct segvn_data *svd1 = seg1->s_data;
1016 1016          struct segvn_data *svd2 = seg2->s_data;
1017 1017          struct anon_map *amp1 = svd1->amp;
1018 1018          struct anon_map *amp2 = svd2->amp;
1019 1019          struct vpage *vpage1 = svd1->vpage;
1020 1020          struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1021 1021          size_t size, nvpsize;
1022 1022          pgcnt_t npages1, npages2;
1023 1023  
1024 1024          ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1025      -        ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
     1025 +        ASSERT(AS_WRITE_HELD(seg1->s_as));
1026 1026          ASSERT(seg1->s_ops == seg2->s_ops);
1027 1027  
1028 1028          if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1029 1029              HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1030 1030                  return (-1);
1031 1031          }
1032 1032  
1033 1033          /* both segments exist, try to merge them */
1034 1034  #define incompat(x)     (svd1->x != svd2->x)
1035 1035          if (incompat(vp) || incompat(maxprot) ||
↓ open down ↓ 224 lines elided ↑ open up ↑
1260 1260  {
1261 1261          struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1262 1262          size_t size;
1263 1263          struct anon_map *amp1;
1264 1264          struct vpage *new_vpage;
1265 1265  
1266 1266          /*
1267 1267           * We don't need any segment level locks for "segvn" data
1268 1268           * since the address space is "write" locked.
1269 1269           */
1270      -        ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
     1270 +        ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as));
1271 1271  
1272 1272          if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1273 1273                  return (-1);
1274 1274          }
1275 1275  
1276 1276          /* second segment is new, try to extend first */
1277 1277          /* XXX - should also check cred */
1278 1278          if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1279 1279              (!svd1->pageprot && (svd1->prot != a->prot)) ||
1280 1280              svd1->type != a->type || svd1->flags != a->flags ||
↓ open down ↓ 100 lines elided ↑ open up ↑
1381 1381  {
1382 1382          struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1383 1383          size_t size;
1384 1384          struct anon_map *amp2;
1385 1385          struct vpage *new_vpage;
1386 1386  
1387 1387          /*
1388 1388           * We don't need any segment level locks for "segvn" data
1389 1389           * since the address space is "write" locked.
1390 1390           */
1391      -        ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
     1391 +        ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as));
1392 1392  
1393 1393          if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1394 1394                  return (-1);
1395 1395          }
1396 1396  
1397 1397          /* first segment is new, try to extend second */
1398 1398          /* XXX - should also check cred */
1399 1399          if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1400 1400              (!svd2->pageprot && (svd2->prot != a->prot)) ||
1401 1401              svd2->type != a->type || svd2->flags != a->flags ||
↓ open down ↓ 166 lines elided ↑ open up ↑
1568 1568  static int
1569 1569  segvn_dup(struct seg *seg, struct seg *newseg)
1570 1570  {
1571 1571          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1572 1572          struct segvn_data *newsvd;
1573 1573          pgcnt_t npages = seg_pages(seg);
1574 1574          int error = 0;
1575 1575          size_t len;
1576 1576          struct anon_map *amp;
1577 1577  
1578      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     1578 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1579 1579          ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1580 1580  
1581 1581          /*
1582 1582           * If segment has anon reserved, reserve more for the new seg.
1583 1583           * For a MAP_NORESERVE segment swresv will be a count of all the
1584 1584           * allocated anon slots; thus we reserve for the child as many slots
1585 1585           * as the parent has allocated. This semantic prevents the child or
1586 1586           * parent from dying during a copy-on-write fault caused by trying
1587 1587           * to write a shared pre-existing anon page.
1588 1588           */
↓ open down ↓ 276 lines elided ↑ open up ↑
1865 1865          hat_callback_t *cbp = NULL;
1866 1866          caddr_t nbase;
1867 1867          size_t nsize;
1868 1868          size_t oswresv;
1869 1869          int reclaim = 1;
1870 1870  
1871 1871          /*
1872 1872           * We don't need any segment level locks for "segvn" data
1873 1873           * since the address space is "write" locked.
1874 1874           */
1875      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     1875 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1876 1876  
1877 1877          /*
1878 1878           * Fail the unmap if pages are SOFTLOCKed through this mapping.
1879 1879           * softlockcnt is protected from change by the as write lock.
1880 1880           */
1881 1881  retry:
1882 1882          if (svd->softlockcnt > 0) {
1883 1883                  ASSERT(svd->tr_state == SEGVN_TR_OFF);
1884 1884  
1885 1885                  /*
↓ open down ↓ 529 lines elided ↑ open up ↑
2415 2415  {
2416 2416          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2417 2417          pgcnt_t npages = seg_pages(seg);
2418 2418          struct anon_map *amp;
2419 2419          size_t len;
2420 2420  
2421 2421          /*
2422 2422           * We don't need any segment level locks for "segvn" data
2423 2423           * since the address space is "write" locked.
2424 2424           */
2425      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     2425 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2426 2426          ASSERT(svd->tr_state == SEGVN_TR_OFF);
2427 2427  
2428 2428          ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2429 2429  
2430 2430          /*
2431 2431           * Be sure to unlock pages. XXX Why do things get free'ed instead
2432 2432           * of unmapped? XXX
2433 2433           */
2434 2434          (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2435 2435              0, MC_UNLOCK, NULL, 0);
↓ open down ↓ 129 lines elided ↑ open up ↑
2565 2565  {
2566 2566          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2567 2567          page_t *pp;
2568 2568          caddr_t adr;
2569 2569          struct vnode *vp;
2570 2570          u_offset_t offset;
2571 2571          ulong_t anon_index;
2572 2572          struct anon_map *amp;
2573 2573          struct anon *ap = NULL;
2574 2574  
2575      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     2575 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2576 2576          ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2577 2577  
2578 2578          if ((amp = svd->amp) != NULL)
2579 2579                  anon_index = svd->anon_index + seg_page(seg, addr);
2580 2580  
2581 2581          if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2582 2582                  ASSERT(svd->tr_state == SEGVN_TR_OFF);
2583 2583                  hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2584 2584          } else {
2585 2585                  hat_unlock(seg->s_as->a_hat, addr, len);
↓ open down ↓ 2368 lines elided ↑ open up ↑
4954 4954          size_t plsz, pl_alloc_sz;
4955 4955          size_t page;
4956 4956          ulong_t anon_index;
4957 4957          struct anon_map *amp;
4958 4958          int dogetpage = 0;
4959 4959          caddr_t lpgaddr, lpgeaddr;
4960 4960          size_t pgsz;
4961 4961          anon_sync_obj_t cookie;
4962 4962          int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4963 4963  
4964      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     4964 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
4965 4965          ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4966 4966  
4967 4967          /*
4968 4968           * First handle the easy stuff
4969 4969           */
4970 4970          if (type == F_SOFTUNLOCK) {
4971 4971                  if (rw == S_READ_NOCOW) {
4972 4972                          rw = S_READ;
4973      -                        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     4973 +                        ASSERT(AS_WRITE_HELD(seg->s_as));
4974 4974                  }
4975 4975                  SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4976 4976                  pgsz = (seg->s_szc == 0) ? PAGESIZE :
4977 4977                      page_get_pagesize(seg->s_szc);
4978 4978                  VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4979 4979                  CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4980 4980                  segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4981 4981                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4982 4982                  return (0);
4983 4983          }
↓ open down ↓ 124 lines elided ↑ open up ↑
5108 5108                  }
5109 5109                  if (!demote && len > PAGESIZE) {
5110 5110                          pgsz = page_get_pagesize(seg->s_szc);
5111 5111                          CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5112 5112                              lpgeaddr);
5113 5113                          if (lpgeaddr - lpgaddr > pgsz) {
5114 5114                                  demote = 1;
5115 5115                          }
5116 5116                  }
5117 5117  
5118      -                ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     5118 +                ASSERT(demote || AS_WRITE_HELD(seg->s_as));
5119 5119  
5120 5120                  if (demote) {
5121 5121                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5122 5122                          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5123 5123                          if (seg->s_szc != 0) {
5124 5124                                  segvn_vmpss_clrszc_cnt++;
5125 5125                                  ASSERT(svd->softlockcnt == 0);
5126 5126                                  err = segvn_clrszc(seg);
5127 5127                                  if (err) {
5128 5128                                          segvn_vmpss_clrszc_err++;
↓ open down ↓ 33 lines elided ↑ open up ↑
5162 5162                  goto top;
5163 5163          }
5164 5164  
5165 5165          /*
5166 5166           * S_READ_NOCOW vs S_READ distinction was
5167 5167           * only needed for the code above. After
5168 5168           * that we treat it as S_READ.
5169 5169           */
5170 5170          if (rw == S_READ_NOCOW) {
5171 5171                  ASSERT(type == F_SOFTLOCK);
5172      -                ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     5172 +                ASSERT(AS_WRITE_HELD(seg->s_as));
5173 5173                  rw = S_READ;
5174 5174          }
5175 5175  
5176 5176          amp = svd->amp;
5177 5177  
5178 5178          /*
5179 5179           * MADV_SEQUENTIAL work is ignored for large page segments.
5180 5180           */
5181 5181          if (seg->s_szc != 0) {
5182 5182                  pgsz = page_get_pagesize(seg->s_szc);
↓ open down ↓ 450 lines elided ↑ open up ↑
5633 5633   * larger pages.
5634 5634   */
5635 5635  static faultcode_t
5636 5636  segvn_faulta(struct seg *seg, caddr_t addr)
5637 5637  {
5638 5638          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5639 5639          int err;
5640 5640          struct anon_map *amp;
5641 5641          vnode_t *vp;
5642 5642  
5643      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     5643 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5644 5644  
5645 5645          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5646 5646          if ((amp = svd->amp) != NULL) {
5647 5647                  struct anon *ap;
5648 5648  
5649 5649                  /*
5650 5650                   * Reader lock to prevent amp->ahp from being changed.
5651 5651                   * This is advisory, it's ok to miss a page, so
5652 5652                   * we don't do anon_array_enter lock.
5653 5653                   */
↓ open down ↓ 36 lines elided ↑ open up ↑
5690 5690  segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5691 5691  {
5692 5692          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5693 5693          struct vpage *cvp, *svp, *evp;
5694 5694          struct vnode *vp;
5695 5695          size_t pgsz;
5696 5696          pgcnt_t pgcnt;
5697 5697          anon_sync_obj_t cookie;
5698 5698          int unload_done = 0;
5699 5699  
5700      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     5700 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
5701 5701  
5702 5702          if ((svd->maxprot & prot) != prot)
5703 5703                  return (EACCES);                        /* violated maxprot */
5704 5704  
5705 5705          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5706 5706  
5707 5707          /* return if prot is the same */
5708 5708          if (!svd->pageprot && svd->prot == prot) {
5709 5709                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5710 5710                  return (0);
↓ open down ↓ 56 lines elided ↑ open up ↑
5767 5767                  pgcnt = pgsz >> PAGESHIFT;
5768 5768                  ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5769 5769                  if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5770 5770                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5771 5771                          ASSERT(seg->s_base != addr || seg->s_size != len);
5772 5772                          /*
5773 5773                           * If we are holding the as lock as a reader then
5774 5774                           * we need to return IE_RETRY and let the as
5775 5775                           * layer drop and re-acquire the lock as a writer.
5776 5776                           */
5777      -                        if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
     5777 +                        if (AS_READ_HELD(seg->s_as))
5778 5778                                  return (IE_RETRY);
5779 5779                          VM_STAT_ADD(segvnvmstats.demoterange[1]);
5780 5780                          if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5781 5781                                  err = segvn_demote_range(seg, addr, len,
5782 5782                                      SDR_END, 0);
5783 5783                          } else {
5784 5784                                  uint_t szcvec = map_pgszcvec(seg->s_base,
5785 5785                                      pgsz, (uintptr_t)seg->s_base,
5786 5786                                      (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5787 5787                                  err = segvn_demote_range(seg, addr, len,
↓ open down ↓ 305 lines elided ↑ open up ↑
6093 6093          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6094 6094          struct segvn_data *nsvd;
6095 6095          struct anon_map *amp = svd->amp;
6096 6096          struct seg *nseg;
6097 6097          caddr_t eaddr = addr + len, a;
6098 6098          size_t pgsz = page_get_pagesize(szc);
6099 6099          pgcnt_t pgcnt = page_get_pagecnt(szc);
6100 6100          int err;
6101 6101          u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6102 6102  
6103      -        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     6103 +        ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
6104 6104          ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6105 6105  
6106 6106          if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6107 6107                  return (0);
6108 6108          }
6109 6109  
6110 6110          /*
6111 6111           * addr should always be pgsz aligned but eaddr may be misaligned if
6112 6112           * it's at the end of the segment.
6113 6113           *
↓ open down ↓ 267 lines elided ↑ open up ↑
6381 6381          caddr_t a = seg->s_base;
6382 6382          caddr_t ea = a + seg->s_size;
6383 6383          ulong_t an_idx = svd->anon_index;
6384 6384          vnode_t *vp = svd->vp;
6385 6385          struct vpage *vpage = svd->vpage;
6386 6386          page_t *anon_pl[1 + 1], *pp;
6387 6387          struct anon *ap, *oldap;
6388 6388          uint_t prot = svd->prot, vpprot;
6389 6389          int pageflag = 0;
6390 6390  
6391      -        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
     6391 +        ASSERT(AS_WRITE_HELD(seg->s_as) ||
6392 6392              SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6393 6393          ASSERT(svd->softlockcnt == 0);
6394 6394  
6395 6395          if (vp == NULL && amp == NULL) {
6396 6396                  ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6397 6397                  seg->s_szc = 0;
6398 6398                  return (0);
6399 6399          }
6400 6400  
6401 6401          if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
↓ open down ↓ 191 lines elided ↑ open up ↑
6593 6593   * the current segment.
6594 6594   */
6595 6595  static struct seg *
6596 6596  segvn_split_seg(struct seg *seg, caddr_t addr)
6597 6597  {
6598 6598          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6599 6599          struct seg *nseg;
6600 6600          size_t nsize;
6601 6601          struct segvn_data *nsvd;
6602 6602  
6603      -        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     6603 +        ASSERT(AS_WRITE_HELD(seg->s_as));
6604 6604          ASSERT(svd->tr_state == SEGVN_TR_OFF);
6605 6605  
6606 6606          ASSERT(addr >= seg->s_base);
6607 6607          ASSERT(addr <= seg->s_base + seg->s_size);
6608 6608          ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6609 6609  
6610 6610          if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6611 6611                  return (seg);
6612 6612  
6613 6613          nsize = seg->s_base + seg->s_size - addr;
↓ open down ↓ 134 lines elided ↑ open up ↑
6748 6748          caddr_t lpgaddr, lpgeaddr;
6749 6749          struct seg *nseg;
6750 6750          struct seg *badseg1 = NULL;
6751 6751          struct seg *badseg2 = NULL;
6752 6752          size_t pgsz;
6753 6753          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6754 6754          int err;
6755 6755          uint_t szc = seg->s_szc;
6756 6756          uint_t tszcvec;
6757 6757  
6758      -        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
     6758 +        ASSERT(AS_WRITE_HELD(seg->s_as));
6759 6759          ASSERT(svd->tr_state == SEGVN_TR_OFF);
6760 6760          ASSERT(szc != 0);
6761 6761          pgsz = page_get_pagesize(szc);
6762 6762          ASSERT(seg->s_base != addr || seg->s_size != len);
6763 6763          ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6764 6764          ASSERT(svd->softlockcnt == 0);
6765 6765          ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6766 6766          ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6767 6767  
6768 6768          CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
↓ open down ↓ 108 lines elided ↑ open up ↑
6877 6877  
6878 6878          return (0);
6879 6879  }
6880 6880  
6881 6881  static int
6882 6882  segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6883 6883  {
6884 6884          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6885 6885          struct vpage *vp, *evp;
6886 6886  
6887      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     6887 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6888 6888  
6889 6889          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6890 6890          /*
6891 6891           * If segment protection can be used, simply check against them.
6892 6892           */
6893 6893          if (svd->pageprot == 0) {
6894 6894                  int err;
6895 6895  
6896 6896                  err = ((svd->prot & prot) != prot) ? EACCES : 0;
6897 6897                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
↓ open down ↓ 13 lines elided ↑ open up ↑
6911 6911          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6912 6912          return (0);
6913 6913  }
6914 6914  
6915 6915  static int
6916 6916  segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6917 6917  {
6918 6918          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6919 6919          size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6920 6920  
6921      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     6921 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6922 6922  
6923 6923          if (pgno != 0) {
6924 6924                  SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6925 6925                  if (svd->pageprot == 0) {
6926 6926                          do {
6927 6927                                  protv[--pgno] = svd->prot;
6928 6928                          } while (pgno != 0);
6929 6929                  } else {
6930 6930                          size_t pgoff = seg_page(seg, addr);
6931 6931  
↓ open down ↓ 5 lines elided ↑ open up ↑
6937 6937                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6938 6938          }
6939 6939          return (0);
6940 6940  }
6941 6941  
6942 6942  static u_offset_t
6943 6943  segvn_getoffset(struct seg *seg, caddr_t addr)
6944 6944  {
6945 6945          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6946 6946  
6947      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     6947 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6948 6948  
6949 6949          return (svd->offset + (uintptr_t)(addr - seg->s_base));
6950 6950  }
6951 6951  
6952 6952  /*ARGSUSED*/
6953 6953  static int
6954 6954  segvn_gettype(struct seg *seg, caddr_t addr)
6955 6955  {
6956 6956          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6957 6957  
6958      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     6958 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6959 6959  
6960 6960          return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6961 6961              MAP_INITDATA)));
6962 6962  }
6963 6963  
6964 6964  /*ARGSUSED*/
6965 6965  static int
6966 6966  segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6967 6967  {
6968 6968          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6969 6969  
6970      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     6970 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
6971 6971  
6972 6972          *vpp = svd->vp;
6973 6973          return (0);
6974 6974  }
6975 6975  
6976 6976  /*
6977 6977   * Check to see if it makes sense to do kluster/read ahead to
6978 6978   * addr + delta relative to the mapping at addr.  We assume here
6979 6979   * that delta is a signed PAGESIZE'd multiple (which can be negative).
6980 6980   *
↓ open down ↓ 6 lines elided ↑ open up ↑
6987 6987  segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6988 6988  {
6989 6989          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6990 6990          struct anon *oap, *ap;
6991 6991          ssize_t pd;
6992 6992          size_t page;
6993 6993          struct vnode *vp1, *vp2;
6994 6994          u_offset_t off1, off2;
6995 6995          struct anon_map *amp;
6996 6996  
6997      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6998      -        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
     6997 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
     6998 +        ASSERT(AS_WRITE_HELD(seg->s_as) ||
6999 6999              SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
7000 7000  
7001 7001          if (addr + delta < seg->s_base ||
7002 7002              addr + delta >= (seg->s_base + seg->s_size))
7003 7003                  return (-1);            /* exceeded segment bounds */
7004 7004  
7005 7005          pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
7006 7006          page = seg_page(seg, addr);
7007 7007  
7008 7008          /*
↓ open down ↓ 84 lines elided ↑ open up ↑
7093 7093  static size_t
7094 7094  segvn_swapout(struct seg *seg)
7095 7095  {
7096 7096          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7097 7097          struct anon_map *amp;
7098 7098          pgcnt_t pgcnt = 0;
7099 7099          pgcnt_t npages;
7100 7100          pgcnt_t page;
7101 7101          ulong_t anon_index;
7102 7102  
7103      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     7103 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7104 7104  
7105 7105          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7106 7106          /*
7107 7107           * Find pages unmapped by our caller and force them
7108 7108           * out to the virtual swap device.
7109 7109           */
7110 7110          if ((amp = svd->amp) != NULL)
7111 7111                  anon_index = svd->anon_index;
7112 7112          npages = seg->s_size >> PAGESHIFT;
7113 7113          for (page = 0; page < npages; page++) {
↓ open down ↓ 158 lines elided ↑ open up ↑
7272 7272          int bflags;
7273 7273          int err = 0;
7274 7274          int segtype;
7275 7275          int pageprot;
7276 7276          int prot;
7277 7277          ulong_t anon_index;
7278 7278          struct anon_map *amp;
7279 7279          struct anon *ap;
7280 7280          anon_sync_obj_t cookie;
7281 7281  
7282      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     7282 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7283 7283  
7284 7284          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7285 7285  
7286 7286          if (svd->softlockcnt > 0) {
7287 7287                  /*
7288 7288                   * If this is shared segment non 0 softlockcnt
7289 7289                   * means locked pages are still in use.
7290 7290                   */
7291 7291                  if (svd->type == MAP_SHARED) {
7292 7292                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
↓ open down ↓ 201 lines elided ↑ open up ↑
7494 7494          size_t p, ep;
7495 7495          int ret;
7496 7496          struct vpage *vpp;
7497 7497          page_t *pp;
7498 7498          uint_t start;
7499 7499          struct anon_map *amp;           /* XXX - for locknest */
7500 7500          struct anon *ap;
7501 7501          uint_t attr;
7502 7502          anon_sync_obj_t cookie;
7503 7503  
7504      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     7504 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7505 7505  
7506 7506          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7507 7507          if (svd->amp == NULL && svd->vp == NULL) {
7508 7508                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7509 7509                  bzero(vec, btopr(len));
7510 7510                  return (len);   /* no anonymous pages created yet */
7511 7511          }
7512 7512  
7513 7513          p = seg_page(seg, addr);
7514 7514          ep = seg_page(seg, addr + len);
↓ open down ↓ 155 lines elided ↑ open up ↑
7670 7670          kproject_t      *proj = NULL;
7671 7671          int chargeproc = 1;
7672 7672          size_t locked_bytes = 0;
7673 7673          size_t unlocked_bytes = 0;
7674 7674          int err = 0;
7675 7675  
7676 7676          /*
7677 7677           * Hold write lock on address space because may split or concatenate
7678 7678           * segments
7679 7679           */
7680      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     7680 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
7681 7681  
7682 7682          /*
7683 7683           * If this is a shm, use shm's project and zone, else use
7684 7684           * project and zone of calling process
7685 7685           */
7686 7686  
7687 7687          /* Determine if this segment backs a sysV shm */
7688 7688          if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7689 7689                  ASSERT(svd->type == MAP_SHARED);
7690 7690                  ASSERT(svd->tr_state == SEGVN_TR_OFF);
↓ open down ↓ 395 lines elided ↑ open up ↑
8086 8086          size_t page;
8087 8087          int err = 0;
8088 8088          int already_set;
8089 8089          struct anon_map *amp;
8090 8090          ulong_t anon_index;
8091 8091          struct seg *next;
8092 8092          lgrp_mem_policy_t policy;
8093 8093          struct seg *prev;
8094 8094          struct vnode *vp;
8095 8095  
8096      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     8096 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
8097 8097  
8098 8098          /*
8099 8099           * In case of MADV_FREE, we won't be modifying any segment private
8100 8100           * data structures; so, we only need to grab READER's lock
8101 8101           */
8102 8102          if (behav != MADV_FREE) {
8103 8103                  SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8104 8104                  if (svd->tr_state != SEGVN_TR_OFF) {
8105 8105                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8106 8106                          return (0);
↓ open down ↓ 107 lines elided ↑ open up ↑
8214 8214                          policy = lgrp_madv_to_policy(behav, len, svd->type);
8215 8215                          if (svd->type == MAP_SHARED)
8216 8216                                  already_set = lgrp_shm_policy_set(policy, amp,
8217 8217                                      svd->anon_index, vp, svd->offset, len);
8218 8218                          else {
8219 8219                                  /*
8220 8220                                   * For private memory, need writers lock on
8221 8221                                   * address space because the segment may be
8222 8222                                   * split or concatenated when changing policy
8223 8223                                   */
8224      -                                if (AS_READ_HELD(seg->s_as,
8225      -                                    &seg->s_as->a_lock)) {
     8224 +                                if (AS_READ_HELD(seg->s_as)) {
8226 8225                                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8227 8226                                          return (IE_RETRY);
8228 8227                                  }
8229 8228  
8230 8229                                  already_set = lgrp_privm_policy_set(policy,
8231 8230                                      &svd->policy_info, len);
8232 8231                          }
8233 8232  
8234 8233                          /*
8235 8234                           * If policy set already and it shouldn't be reapplied,
↓ open down ↓ 129 lines elided ↑ open up ↑
8365 8364                          if (already_set &&
8366 8365                              !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8367 8366                                  break;
8368 8367  
8369 8368                          /*
8370 8369                           * For private memory, need writers lock on
8371 8370                           * address space because the segment may be
8372 8371                           * split or concatenated when changing policy
8373 8372                           */
8374 8373                          if (svd->type == MAP_PRIVATE &&
8375      -                            AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
     8374 +                            AS_READ_HELD(seg->s_as)) {
8376 8375                                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8377 8376                                  return (IE_RETRY);
8378 8377                          }
8379 8378  
8380 8379                          /*
8381 8380                           * Mark any existing pages in given range for
8382 8381                           * migration
8383 8382                           */
8384 8383                          page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8385 8384                              vp, svd->offset, 1);
↓ open down ↓ 165 lines elided ↑ open up ↑
8551 8550   *     SEGP_INH_ZERO - Pages should be zeroed in the child
8552 8551   */
8553 8552  static int
8554 8553  segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8555 8554  {
8556 8555          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8557 8556          struct vpage *bvpp, *evpp;
8558 8557          size_t page;
8559 8558          int ret = 0;
8560 8559  
8561      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     8560 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
8562 8561  
8563 8562          /* Can't support something we don't know about */
8564 8563          if (behav != SEGP_INH_ZERO)
8565 8564                  return (ENOTSUP);
8566 8565  
8567 8566          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8568 8567  
8569 8568          /*
8570 8569           * This must be a straightforward anonymous segment that is mapped
8571 8570           * privately and is not backed by a vnode.
↓ open down ↓ 248 lines elided ↑ open up ↑
8820 8819                  }
8821 8820                  if ((ts % segvn_pglock_mtbf) == 1) {
8822 8821                          return (EFAULT);
8823 8822                  }
8824 8823          }
8825 8824  #endif
8826 8825  
8827 8826          TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8828 8827              "segvn_pagelock: start seg %p addr %p", seg, addr);
8829 8828  
8830      -        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     8829 +        ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
8831 8830          ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8832 8831  
8833 8832          SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8834 8833  
8835 8834          /*
8836 8835           * for now we only support pagelock to anon memory. We would have to
8837 8836           * check protections for vnode objects and call into the vnode driver.
8838 8837           * That's too much for a fast path. Let the fault entry point handle
8839 8838           * it.
8840 8839           */
↓ open down ↓ 601 lines elided ↑ open up ↑
9442 9441          struct seg *seg = (struct seg *)ptag;
9443 9442          struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9444 9443          pgcnt_t np, npages;
9445 9444          struct page **pl;
9446 9445  
9447 9446          npages = np = btop(len);
9448 9447          ASSERT(npages);
9449 9448  
9450 9449          ASSERT(svd->vp == NULL && svd->amp != NULL);
9451 9450          ASSERT(svd->softlockcnt >= npages);
9452      -        ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     9451 +        ASSERT(async || AS_LOCK_HELD(seg->s_as));
9453 9452  
9454 9453          pl = pplist;
9455 9454  
9456 9455          ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9457 9456          ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9458 9457  
9459 9458          while (np > (uint_t)0) {
9460 9459                  if (rw == S_WRITE) {
9461 9460                          hat_setrefmod(*pplist);
9462 9461                  } else {
↓ open down ↓ 257 lines elided ↑ open up ↑
9720 9719          uint_t                  szc = seg->s_szc;
9721 9720          ulong_t                 hash = SVNTR_HASH_FUNC(vp);
9722 9721          svntr_t                 *svntrp;
9723 9722          struct vattr            va;
9724 9723          proc_t                  *p = seg->s_as->a_proc;
9725 9724          lgrp_id_t               lgrp_id;
9726 9725          lgrp_id_t               olid;
9727 9726          int                     first;
9728 9727          struct anon_map         *amp;
9729 9728  
9730      -        ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
     9729 +        ASSERT(AS_LOCK_HELD(seg->s_as));
9731 9730          ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9732 9731          ASSERT(p != NULL);
9733 9732          ASSERT(svd->tr_state == SEGVN_TR_INIT);
9734 9733          ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9735 9734          ASSERT(svd->flags & MAP_TEXT);
9736 9735          ASSERT(svd->type == MAP_PRIVATE);
9737 9736          ASSERT(vp != NULL && svd->amp == NULL);
9738 9737          ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9739 9738          ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9740 9739          ASSERT(seg->s_as != &kas);
↓ open down ↓ 260 lines elided ↑ open up ↑
10001 10000          u_offset_t              off = svd->offset;
10002 10001          size_t                  size = seg->s_size;
10003 10002          u_offset_t              eoff = off + size;
10004 10003          uint_t                  szc = seg->s_szc;
10005 10004          ulong_t                 hash = SVNTR_HASH_FUNC(vp);
10006 10005          svntr_t                 *svntrp;
10007 10006          svntr_t                 **prv_svntrp;
10008 10007          lgrp_id_t               lgrp_id = svd->tr_policy_info.mem_lgrpid;
10009 10008          lgrp_id_t               i;
10010 10009  
10011      -        ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
10012      -        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
     10010 +        ASSERT(AS_LOCK_HELD(seg->s_as));
     10011 +        ASSERT(AS_WRITE_HELD(seg->s_as) ||
10013 10012              SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
10014 10013          ASSERT(svd->tr_state == SEGVN_TR_ON);
10015 10014          ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10016 10015          ASSERT(svd->amp != NULL);
10017 10016          ASSERT(svd->amp->refcnt >= 1);
10018 10017          ASSERT(svd->anon_index == 0);
10019 10018          ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
10020 10019          ASSERT(svntr_hashtab != NULL);
10021 10020  
10022 10021          mutex_enter(&svntr_hashtab[hash].tr_lock);
↓ open down ↓ 204 lines elided ↑ open up ↑
10227 10226          }
10228 10227          ASSERT(lgrp_id < NLGRPS_MAX);
10229 10228          if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10230 10229                  return;
10231 10230          }
10232 10231  
10233 10232          /*
10234 10233           * Use tryenter locking since we are locking as/seg and svntr hash
10235 10234           * lock in reverse from synchronous thread order.
10236 10235           */
10237      -        if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
     10236 +        if (!AS_LOCK_TRYENTER(as, RW_READER)) {
10238 10237                  SEGVN_TR_ADDSTAT(nolock);
10239 10238                  if (segvn_lgrp_trthr_migrs_snpsht) {
10240 10239                          segvn_lgrp_trthr_migrs_snpsht = 0;
10241 10240                  }
10242 10241                  return;
10243 10242          }
10244 10243          if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10245      -                AS_LOCK_EXIT(as, &as->a_lock);
     10244 +                AS_LOCK_EXIT(as);
10246 10245                  SEGVN_TR_ADDSTAT(nolock);
10247 10246                  if (segvn_lgrp_trthr_migrs_snpsht) {
10248 10247                          segvn_lgrp_trthr_migrs_snpsht = 0;
10249 10248                  }
10250 10249                  return;
10251 10250          }
10252 10251          size = seg->s_size;
10253 10252          if (svntrp->tr_amp[lgrp_id] == NULL) {
10254 10253                  size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10255 10254                  if (trmem > segvn_textrepl_max_bytes) {
10256 10255                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10257      -                        AS_LOCK_EXIT(as, &as->a_lock);
     10256 +                        AS_LOCK_EXIT(as);
10258 10257                          atomic_add_long(&segvn_textrepl_bytes, -size);
10259 10258                          SEGVN_TR_ADDSTAT(normem);
10260 10259                          return;
10261 10260                  }
10262 10261                  if (anon_try_resv_zone(size, NULL) == 0) {
10263 10262                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10264      -                        AS_LOCK_EXIT(as, &as->a_lock);
     10263 +                        AS_LOCK_EXIT(as);
10265 10264                          atomic_add_long(&segvn_textrepl_bytes, -size);
10266 10265                          SEGVN_TR_ADDSTAT(noanon);
10267 10266                          return;
10268 10267                  }
10269 10268                  amp = anonmap_alloc(size, size, KM_NOSLEEP);
10270 10269                  if (amp == NULL) {
10271 10270                          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10272      -                        AS_LOCK_EXIT(as, &as->a_lock);
     10271 +                        AS_LOCK_EXIT(as);
10273 10272                          atomic_add_long(&segvn_textrepl_bytes, -size);
10274 10273                          anon_unresv_zone(size, NULL);
10275 10274                          SEGVN_TR_ADDSTAT(nokmem);
10276 10275                          return;
10277 10276                  }
10278 10277                  ASSERT(amp->refcnt == 1);
10279 10278                  amp->a_szc = seg->s_szc;
10280 10279                  svntrp->tr_amp[lgrp_id] = amp;
10281 10280          }
10282 10281          /*
↓ open down ↓ 11 lines elided ↑ open up ↑
10294 10293          ASSERT(svd->tr_state == SEGVN_TR_ON);
10295 10294          ASSERT(svd->amp != NULL);
10296 10295          ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10297 10296          ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10298 10297          ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10299 10298  
10300 10299          svd->tr_policy_info.mem_lgrpid = lgrp_id;
10301 10300          svd->amp = svntrp->tr_amp[lgrp_id];
10302 10301          p->p_tr_lgrpid = NLGRPS_MAX;
10303 10302          SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10304      -        AS_LOCK_EXIT(as, &as->a_lock);
     10303 +        AS_LOCK_EXIT(as);
10305 10304  
10306 10305          ASSERT(svntrp->tr_refcnt != 0);
10307 10306          ASSERT(svd->vp == svntrp->tr_vp);
10308 10307          ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10309 10308          ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10310 10309          ASSERT(svd->seg == seg);
10311 10310          ASSERT(svd->tr_state == SEGVN_TR_ON);
10312 10311  
10313 10312          SEGVN_TR_ADDSTAT(asyncrepl);
10314 10313  }
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX