Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*

*** 89,99 ****
  	avl_index_t where;
  
  	mr = avl_find(root, &key, &where);
  	if ((mr == NULL) && (insert != NULL)) {
  		avl_insert(root, (void *)insert, where);
! 		atomic_add_32(&insert->r_refcount, 1);
  		return (NULL);
  	}
  
  	return (mr);
  }
--- 89,99 ----
  	avl_index_t where;
  
  	mr = avl_find(root, &key, &where);
  	if ((mr == NULL) && (insert != NULL)) {
  		avl_insert(root, (void *)insert, where);
! 		atomic_inc_32(&insert->r_refcount);
  		return (NULL);
  	}
  
  	return (mr);
  }
*** 252,262 ****
  	mutex_exit(&rs->rs_rdma_lock);
  
  	ASSERT(!(found && found != mr));
  
  	if (mr_ret) {
! 		atomic_add_32(&mr->r_refcount, 1);
  		*mr_ret = mr;
  	}
  
  	ret = 0;
  out:
--- 252,262 ----
  	mutex_exit(&rs->rs_rdma_lock);
  
  	ASSERT(!(found && found != mr));
  
  	if (mr_ret) {
! 		atomic_inc_32(&mr->r_refcount);
  		*mr_ret = mr;
  	}
  
  	ret = 0;
  out:
*** 397,407 ****
  	if (mr->r_use_once || force) {
  		avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
  		RB_CLEAR_NODE(&mr->r_rb_node);
  		zot_me = 1;
  	} else {
! 		atomic_add_32(&mr->r_refcount, 1);
  	}
  	mutex_exit(&rs->rs_rdma_lock);
  
  	/*
  	 * May have to issue a dma_sync on this memory region.
--- 397,407 ----
  	if (mr->r_use_once || force) {
  		avl_remove(&rs->rs_rdma_keys, &mr->r_rb_node);
  		RB_CLEAR_NODE(&mr->r_rb_node);
  		zot_me = 1;
  	} else {
! 		atomic_inc_32(&mr->r_refcount);
  	}
  	mutex_exit(&rs->rs_rdma_lock);
  
  	/*
  	 * May have to issue a dma_sync on this memory region.
*** 636,646 ****
  	mutex_enter(&rs->rs_rdma_lock);
  	mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
  	if (!mr)
  		err = -EINVAL;	/* invalid r_key */
  	else
! 		atomic_add_32(&mr->r_refcount, 1);
  	mutex_exit(&rs->rs_rdma_lock);
  
  	if (mr) {
  		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
  		rm->m_rdma_mr = mr;
--- 636,646 ----
  	mutex_enter(&rs->rs_rdma_lock);
  	mr = rdsv3_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
  	if (!mr)
  		err = -EINVAL;	/* invalid r_key */
  	else
! 		atomic_inc_32(&mr->r_refcount);
  	mutex_exit(&rs->rs_rdma_lock);
  
  	if (mr) {
  		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
  		rm->m_rdma_mr = mr;