5045 use atomic_{inc,dec}_* instead of atomic_add_*

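The change is mechanical: every call site that passed a literal delta of 1 or -1 to atomic_add_long() becomes the equivalent atomic_inc_ulong() or atomic_dec_ulong(). A minimal userland sketch of the equivalence, assuming an illumos/Solaris <atomic.h> (see atomic_ops(3C)); the counter name is illustrative only:

	#include <atomic.h>
	#include <sys/types.h>
	#include <stdio.h>

	int
	main(void)
	{
		ulong_t cnt = 0;

		atomic_add_long(&cnt, 1);	/* old form: explicit delta of 1 */
		atomic_inc_ulong(&cnt);		/* new form: same effect */

		atomic_add_long(&cnt, -1);	/* old form: explicit delta of -1 */
		atomic_dec_ulong(&cnt);		/* new form: same effect */

		(void) printf("cnt = %lu\n", cnt);	/* prints 0 */
		return (0);
	}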
          --- old/usr/src/uts/common/vm/seg_vn.c
          +++ new/usr/src/uts/common/vm/seg_vn.c
[ ... 2695 lines elided ... ]
2696 2696                  }
2697 2697  
2698 2698                  prot = VPP_PROT(vpage);
2699 2699                  if ((prot & protchk) == 0)
2700 2700                          return (FC_PROT);       /* illegal access type */
2701 2701          } else {
2702 2702                  prot = svd->prot;
2703 2703          }
2704 2704  
2705 2705          if (type == F_SOFTLOCK) {
2706      -                atomic_add_long((ulong_t *)&svd->softlockcnt, 1);
     2706 +                atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2707 2707          }
2708 2708  
2709 2709          /*
2710 2710           * Always acquire the anon array lock to prevent 2 threads from
2711 2711           * allocating separate anon slots for the same "addr".
2712 2712           */
2713 2713  
2714 2714          if ((amp = svd->amp) != NULL) {
2715 2715                  ASSERT(RW_READ_HELD(&amp->a_rwlock));
2716 2716                  anon_index = svd->anon_index + seg_page(seg, addr);
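The comment above is about a classic check-then-allocate race: without one lock covering both the lookup and the allocation, two threads faulting on the same address could each install a separate anon slot. A hypothetical userland miniature of the idiom (mini_amp_t, get_slot(), and the malloc() stand-in are all invented for illustration):

	#include <pthread.h>
	#include <stdlib.h>

	typedef struct mini_amp {
		pthread_mutex_t	lock;		/* plays the anon array lock's role */
		void		*slots[64];	/* one anon slot per page index */
	} mini_amp_t;

	/*
	 * Holding one lock across both the lookup and the allocation
	 * guarantees a single slot per index, no matter how many
	 * threads fault on the same address concurrently.
	 */
	static void *
	get_slot(mini_amp_t *amp, size_t idx)
	{
		void *ap;

		(void) pthread_mutex_lock(&amp->lock);
		if ((ap = amp->slots[idx]) == NULL) {
			ap = malloc(1);		/* stand-in for slot allocation */
			amp->slots[idx] = ap;
		}
		(void) pthread_mutex_unlock(&amp->lock);
		return (ap);
	}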
[ ... 340 lines elided ... ]
3057 3057                  page_unlock(pp);
3058 3058  
3059 3059          ASSERT(anon_lock);
3060 3060          anon_array_exit(&cookie);
3061 3061          return (0);
3062 3062  out:
3063 3063          if (anon_lock)
3064 3064                  anon_array_exit(&cookie);
3065 3065  
3066 3066          if (type == F_SOFTLOCK) {
3067      -                atomic_add_long((ulong_t *)&svd->softlockcnt, -1);
     3067 +                atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3068 3068          }
3069 3069          return (FC_MAKE_ERR(err));
3070 3070  }
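Taken together, the two hunks above show why the counter must be manipulated atomically: softlockcnt is bumped when an F_SOFTLOCK fault begins and dropped again on the error path, with no segment lock held around either update. A hypothetical sketch of that bracket (fault_in_pages() and mini_svd_t are stand-ins, not segvn interfaces), again assuming atomic_ops(3C):

	#include <atomic.h>
	#include <sys/types.h>

	typedef struct mini_svd {
		ulong_t	softlockcnt;	/* outstanding softlocked pages */
	} mini_svd_t;

	static int
	fault_in_pages(void)
	{
		return (0);	/* pretend the fault work succeeded */
	}

	static int
	softlock_fault(mini_svd_t *svd)
	{
		int err;

		atomic_inc_ulong(&svd->softlockcnt);	/* take the reference */
		if ((err = fault_in_pages()) != 0) {
			/* a failed fault must leave no residue in the count */
			atomic_dec_ulong(&svd->softlockcnt);
			return (err);
		}
		return (0);
	}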
3071 3071  
3072 3072  /*
3073 3073   * relocate a bunch of smaller targ pages into one large repl page. all targ
3074 3074   * pages must be complete pages smaller than replacement pages.
3075 3075   * it's assumed that no page's szc can change since they are all PAGESIZE or
3076 3076   * complete large pages locked SHARED.
3077 3077   */
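In shape, the relocation the comment describes is a gather: walk the list of complete target pages and copy each into its slot of the single large replacement page. A heavily simplified, hypothetical sketch (mini_page_t, relocate_targ_pages(), and the fixed 4K PAGESIZE are illustrative; the real routine operates on locked page_t lists, not flat buffers):

	#include <string.h>
	#include <stddef.h>

	#define	PAGESIZE	4096	/* illustrative; platform-defined in reality */

	typedef struct mini_page {
		struct mini_page *p_next;	/* list linkage */
		char		data[PAGESIZE];
	} mini_page_t;

	/*
	 * Copy npages complete small pages, in list order, into
	 * consecutive PAGESIZE slots of one large replacement page.
	 */
	static void
	relocate_targ_pages(mini_page_t *targ, char *repl, size_t npages)
	{
		size_t i;

		for (i = 0; i < npages; i++, targ = targ->p_next)
			(void) memcpy(repl + i * PAGESIZE, targ->data, PAGESIZE);
	}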
[ ... 5807 lines elided ... ]
8885 8885                  }
8886 8886  
8887 8887                  if (pamp != NULL) {
8888 8888                          ASSERT(svd->type == MAP_SHARED);
8889 8889                          ASSERT(svd->softlockcnt >= npages);
8890 8890                          atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8891 8891                  }
8892 8892  
8893 8893                  if (sftlck_sbase) {
8894 8894                          ASSERT(svd->softlockcnt_sbase > 0);
8895      -                        atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, -1);
     8895 +                        atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
8896 8896                  }
8897 8897                  if (sftlck_send) {
8898 8898                          ASSERT(svd->softlockcnt_send > 0);
8899      -                        atomic_add_long((ulong_t *)&svd->softlockcnt_send, -1);
     8899 +                        atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
8900 8900                  }
8901 8901  
8902 8902                  /*
8903 8903                   * If someone is blocked while unmapping, we purge
8904 8904                   * segment page cache and thus reclaim pplist synchronously
8905 8905                   * without waiting for seg_pasync_thread. This speeds up
8906 8906                   * unmapping in cases where munmap(2) is called, while
8907 8907                   * raw async i/o is still in progress or where a thread
8908 8908                   * exits on data fault in a multithreaded application.
8909 8909                   */
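The policy in the comment above is simple once restated: reclaim the cached page lists synchronously only when an unmapper is already blocked waiting on them, and otherwise leave the work to the async reclaim thread. A hypothetical restatement (all three helpers are invented stand-ins, not segvn or pcache interfaces):

	#include <stdbool.h>

	static bool
	unmap_waiters(void)
	{
		return (false);	/* stand-in: is a thread blocked in munmap()? */
	}

	static void
	purge_pcache_sync(void)
	{
		/* stand-in: reclaim cached page lists right now */
	}

	static void
	poke_async_reclaim(void)
	{
		/* stand-in: let the async thread reclaim them later */
	}

	static void
	release_pcache(void)
	{
		if (unmap_waiters())
			purge_pcache_sync();	/* unblock the unmapper promptly */
		else
			poke_async_reclaim();	/* common case: defer */
	}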
[ ... 76 lines elided ... ]
8986 8986           */
8987 8987          pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8988 8988          if (pplist != NULL) {
8989 8989                  if (pamp != NULL) {
8990 8990                          npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8991 8991                          ASSERT(svd->type == MAP_SHARED);
8992 8992                          atomic_add_long((ulong_t *)&svd->softlockcnt,
8993 8993                              npages);
8994 8994                  }
8995 8995                  if (sftlck_sbase) {
8996      -                        atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
     8996 +                        atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
8997 8997                  }
8998 8998                  if (sftlck_send) {
8999      -                        atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
     8999 +                        atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9000 9000                  }
9001 9001                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9002 9002                  *ppp = pplist + adjustpages;
9003 9003                  TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9004 9004                      "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9005 9005                  return (0);
9006 9006          }
9007 9007  
9008 9008          /*
9009 9009           * For MAP_SHARED segments we already verified above that segment
[ ... 169 lines elided ... ]
9179 9179  
9180 9180          if (a >= addr + len) {
9181 9181                  atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9182 9182                  if (pamp != NULL) {
9183 9183                          ASSERT(svd->type == MAP_SHARED);
9184 9184                          atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9185 9185                              npages);
9186 9186                          wlen = len;
9187 9187                  }
9188 9188                  if (sftlck_sbase) {
9189      -                        atomic_add_long((ulong_t *)&svd->softlockcnt_sbase, 1);
     9189 +                        atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9190 9190                  }
9191 9191                  if (sftlck_send) {
9192      -                        atomic_add_long((ulong_t *)&svd->softlockcnt_send, 1);
     9192 +                        atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9193 9193                  }
9194 9194                  if (use_pcache) {
9195 9195                          (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9196 9196                              rw, pflags, preclaim_callback);
9197 9197                  }
9198 9198                  SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9199 9199                  TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9200 9200                      "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9201 9201                  return (0);
9202 9202          }
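Note what the patch deliberately leaves alone: counter updates whose delta is a run-time page count (npages in the hunks above) keep atomic_add_long(); only literal +1/-1 call sites become inc/dec. A short illustration under the same atomic_ops(3C) assumption:

	#include <atomic.h>
	#include <sys/types.h>

	static void
	adjust_counts(ulong_t *cnt, pgcnt_t npages)
	{
		atomic_add_long(cnt, (long)npages);	/* bulk delta: add stays */
		atomic_inc_ulong(cnt);			/* delta of one: becomes inc */
	}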
[ ... 930 lines elided ... ]