Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/fs/sockfs/nl7curi.c
          +++ new/usr/src/uts/common/fs/sockfs/nl7curi.c
↓ open down ↓ 266 lines elided ↑ open up ↑
 267  267  #define URI_HASH_IX(hix, which) (hix) = (hix) % (uri_hash_sz[(which)])
 268  268  
 269  269  #define URI_HASH_MIGRATE(from, hp, to) {                                \
 270  270          uri_desc_t      *_nuri;                                         \
 271  271          uint32_t        _nhix;                                          \
 272  272          uri_hash_t      *_nhp;                                          \
 273  273                                                                          \
 274  274          mutex_enter(&(hp)->lock);                                       \
 275  275          while ((_nuri = (hp)->list) != NULL) {                          \
 276  276                  (hp)->list = _nuri->hash;                               \
 277      -                atomic_add_32(&uri_hash_cnt[(from)], -1);               \
 278      -                atomic_add_32(&uri_hash_cnt[(to)], 1);                  \
      277 +                atomic_dec_32(&uri_hash_cnt[(from)]);           \
      278 +                atomic_inc_32(&uri_hash_cnt[(to)]);                     \
 279  279                  _nhix = _nuri->hvalue;                                  \
 280  280                  URI_HASH_IX(_nhix, to);                                 \
 281  281                  _nhp = &uri_hash_ab[(to)][_nhix];                       \
 282  282                  mutex_enter(&_nhp->lock);                               \
 283  283                  _nuri->hash = _nhp->list;                               \
 284  284                  _nhp->list = _nuri;                                     \
 285  285                  _nuri->hit = 0;                                         \
 286  286                  mutex_exit(&_nhp->lock);                                \
 287  287          }                                                               \
 288  288          mutex_exit(&(hp)->lock);                                        \
 289  289  }
 290  290  
 291  291  #define URI_HASH_UNLINK(cur, new, hp, puri, uri) {                      \
 292  292          if ((puri) != NULL) {                                           \
 293  293                  (puri)->hash = (uri)->hash;                             \
 294  294          } else {                                                        \
 295  295                  (hp)->list = (uri)->hash;                               \
 296  296          }                                                               \
 297      -        if (atomic_add_32_nv(&uri_hash_cnt[(cur)], -1) == 0 &&          \
      297 +        if (atomic_dec_32_nv(&uri_hash_cnt[(cur)]) == 0 &&              \
 298  298              uri_hash_ab[(new)] != NULL) {                               \
 299  299                  kmem_free(uri_hash_ab[cur],                             \
 300  300                      sizeof (uri_hash_t) * uri_hash_sz[cur]);            \
 301  301                  uri_hash_ab[(cur)] = NULL;                              \
 302  302                  uri_hash_lru[(cur)] = NULL;                             \
 303  303                  uri_hash_which = (new);                                 \
 304  304          } else {                                                        \
 305  305                  uri_hash_lru[(cur)] = (hp);                             \
 306  306          }                                                               \
 307  307  }
↓ open down ↓ 282 lines elided ↑ open up ↑
 590  590                  /*
 591  591                   * Easy case, no new hash and current hasn't overflowed,
 592  592                   * add URI to current hash and return.
 593  593                   *
 594  594                   * Note, the checks for uri_hash_cnt[] above aren't done
 595  595                   * atomically, i.e. multiple threads can be in this code
 596  596                   * as RW_READER and update the cnt[], this isn't a problem
 597  597                   * as the check is only advisory.
 598  598                   */
 599  599          fast:
 600      -                atomic_add_32(&uri_hash_cnt[cur], 1);
      600 +                atomic_inc_32(&uri_hash_cnt[cur]);
 601  601                  hp = &uri_hash_ab[cur][hix];
 602  602                  mutex_enter(&hp->lock);
 603  603                  uri->hash = hp->list;
 604  604                  hp->list = uri;
 605  605                  mutex_exit(&hp->lock);
 606  606                  rw_exit(&uri_hash_access);
 607  607                  return;
 608  608          }
 609  609          if (uri_hash_ab[new] == NULL) {
 610  610                  /*
↓ open down ↓ 71 lines elided ↑ open up ↑
 682  682           */
 683  683          hp = &uri_hash_ab[cur][hix];
 684  684          if (hp->list != NULL) {
 685  685                  URI_HASH_MIGRATE(cur, hp, new);
 686  686          }
 687  687          /*
 688  688           * If new hash has overflowed before current hash has been
 689  689           * completely migrated then walk all current hash chains and
 690  690           * migrate list members now.
 691  691           */
 692      -        if (atomic_add_32_nv(&uri_hash_cnt[new], 1) >= uri_hash_overflow[new]) {
      692 +        if (atomic_inc_32_nv(&uri_hash_cnt[new]) >= uri_hash_overflow[new]) {
 693  693                  for (hix = 0; hix < uri_hash_sz[cur]; hix++) {
 694  694                          hp = &uri_hash_ab[cur][hix];
 695  695                          if (hp->list != NULL) {
 696  696                                  URI_HASH_MIGRATE(cur, hp, new);
 697  697                          }
 698  698                  }
 699  699          }
 700  700          /*
 701  701           * Add URI to new hash.
 702  702           */
↓ open down ↓ 127 lines elided ↑ open up ↑
 830  830                           *
 831  831                           * Note, this will cause all variants of the named
 832  832                           * URI to be purged.
 833  833                           */
 834  834                          if (puri != NULL) {
 835  835                                  puri->hash = uri->hash;
 836  836                          } else {
 837  837                                  hp->list = uri->hash;
 838  838                          }
 839  839                          mutex_exit(&hp->lock);
 840      -                        atomic_add_32(&uri_hash_cnt[cur], -1);
      840 +                        atomic_dec_32(&uri_hash_cnt[cur]);
 841  841                          rw_exit(&uri_hash_access);
 842  842                          if (ruri->nocache)
 843  843                                  nl7c_uri_purge++;
 844  844                          else
 845  845                                  nl7c_uri_expire++;
 846  846                          REF_RELE(uri);
 847  847                          goto again;
 848  848                  }
 849  849                  if (uri->scheme != NULL) {
 850  850                          /*
↓ open down ↓ 1314 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX