5045 use atomic_{inc,dec}_* instead of atomic_add_*

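The change in this file is purely mechanical: every atomic_add_32(&x, 1) on a reference count becomes atomic_inc_32(&x), which drops the delta argument and states the intent directly. A minimal sketch of the before/after equivalence, assuming only the <sys/atomic.h> interfaces (struct example_mr is a made-up stand-in for rdsv3_mr, reduced to its refcount; it is not the real structure):

    #include <sys/types.h>
    #include <sys/atomic.h>

    /* Hypothetical stand-in for rdsv3_mr, reduced to its refcount. */
    struct example_mr {
            volatile uint32_t r_refcount;
    };

    static void
    example_mr_hold(struct example_mr *mr)
    {
            /* Before: atomic_add_32(&mr->r_refcount, 1); */
            /* After: same effect, no delta argument to get wrong. */
            atomic_inc_32(&mr->r_refcount);
    }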
          --- old/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
          +++ new/usr/src/uts/common/io/ib/clients/rdsv3/rdma.c
(83 lines elided)
  84   84  static struct rdsv3_mr *
  85   85  rdsv3_mr_tree_walk(struct avl_tree *root, uint32_t key,
  86   86          struct rdsv3_mr *insert)
  87   87  {
  88   88          struct rdsv3_mr *mr;
  89   89          avl_index_t where;
  90   90  
  91   91          mr = avl_find(root, &key, &where);
  92   92          if ((mr == NULL) && (insert != NULL)) {
  93   93                  avl_insert(root, (void *)insert, where);
  94      -                atomic_add_32(&insert->r_refcount, 1);
       94 +                atomic_inc_32(&insert->r_refcount);
  95   95                  return (NULL);
  96   96          }
  97   97  
  98   98          return (mr);
  99   99  }
 100  100  
 101  101  /*
 102  102   * Destroy the transport-specific part of a MR.
 103  103   */
 104  104  static void
(142 lines elided)
 247  247           * Inserting the new MR into the rbtree bumps its
 248  248           * reference count.
 249  249           */
 250  250          mutex_enter(&rs->rs_rdma_lock);
 251  251          found = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
 252  252          mutex_exit(&rs->rs_rdma_lock);
 253  253  
 254  254          ASSERT(!(found && found != mr));
 255  255  
 256  256          if (mr_ret) {
 257      -                atomic_add_32(&mr->r_refcount, 1);
      257 +                atomic_inc_32(&mr->r_refcount);
 258  258                  *mr_ret = mr;
 259  259          }
 260  260  
 261  261          ret = 0;
 262  262  out:
 263  263          if (mr)
 264  264                  rdsv3_mr_put(mr);
 265  265          return (ret);
 266  266  }
 267  267  
(124 lines elided)
 392  392                      "rdsv3: trying to unuse MR with unknown r_key %u!", r_key);
 393  393                  mutex_exit(&rs->rs_rdma_lock);
 394  394                  return;
 395  395          }
 396  396  
 397  397          if (mr->r_use_once || force) {
 398  398                  avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
 399  399                  RB_CLEAR_NODE(&mr->r_rb_node);
 400  400                  zot_me = 1;
 401  401          } else {
 402      -                atomic_add_32(&mr->r_refcount, 1);
      402 +                atomic_inc_32(&mr->r_refcount);
 403  403          }
 404  404          mutex_exit(&rs->rs_rdma_lock);
 405  405  
 406  406          /*
 407  407           * May have to issue a dma_sync on this memory region.
 408  408           * Note we could avoid this if the operation was a RDMA READ,
 409  409           * but at this point we can't tell.
 410  410           */
 411  411          if (mr->r_trans->sync_mr)
 412  412                  mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
(218 lines elided)
 631  631           * flush those writes to RAM. Otherwise the HCA may not see them
 632  632           * when doing a DMA from that buffer.
 633  633           */
 634  634          r_key = rdsv3_rdma_cookie_key(rm->m_rdma_cookie);
 635  635  
 636  636          mutex_enter(&rs->rs_rdma_lock);
 637  637          mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
 638  638          if (!mr)
 639  639                  err = -EINVAL;  /* invalid r_key */
 640  640          else
 641      -                atomic_add_32(&mr->r_refcount, 1);
      641 +                atomic_inc_32(&mr->r_refcount);
 642  642          mutex_exit(&rs->rs_rdma_lock);
 643  643  
 644  644          if (mr) {
 645  645                  mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
 646  646                  rm->m_rdma_mr = mr;
 647  647          }
 648  648          return (err);
 649  649  }
 650  650  
 651  651  /*
(26 lines elided)
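The bug synopsis also covers atomic_dec_*. The put side of the same refcount pattern (rdsv3_mr_put() itself is not shown in the hunks above) would use atomic_dec_32_nv(), which returns the decremented value so the drop-to-zero check needs no separate read. A sketch continuing the struct example_mr stand-in from the note at the top; example_mr_free() and the kmem_free() teardown are illustrative only, not the actual rdsv3 code:

    #include <sys/kmem.h>

    /* Hypothetical destructor standing in for the real MR teardown. */
    static void
    example_mr_free(struct example_mr *mr)
    {
            kmem_free(mr, sizeof (*mr));
    }

    static void
    example_mr_put(struct example_mr *mr)
    {
            /*
             * atomic_dec_32_nv() decrements and returns the new value,
             * so the last-reference check needs no extra load of
             * r_refcount.
             */
            if (atomic_dec_32_nv(&mr->r_refcount) == 0)
                    example_mr_free(mr);
    }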