5045 use atomic_{inc,dec}_* instead of atomic_add_*

*** 1234,1244 ****
  	 * Here we should add dl_cnt before post recv, because
  	 * we would have to make sure dl_cnt is updated before
  	 * the corresponding ibd_rc_process_rx() is called.
  	 */
  	ASSERT(state->rc_srq_rwqe_list.dl_cnt < state->rc_srq_size);
! 	atomic_add_32(&state->rc_srq_rwqe_list.dl_cnt, 1);
  	if (ibt_post_srq(state->rc_srq_hdl, &rwqe->w_rwr, 1, NULL) !=
  	    IBT_SUCCESS) {
  		atomic_dec_32(&state->rc_srq_rwqe_list.dl_cnt);
  		DPRINT(40, "ibd_rc_post_srq : ibt_post_srq() failed");
  		return (DDI_FAILURE);
--- 1234,1244 ----
  	 * Here we should add dl_cnt before post recv, because
  	 * we would have to make sure dl_cnt is updated before
  	 * the corresponding ibd_rc_process_rx() is called.
  	 */
  	ASSERT(state->rc_srq_rwqe_list.dl_cnt < state->rc_srq_size);
! 	atomic_inc_32(&state->rc_srq_rwqe_list.dl_cnt);
  	if (ibt_post_srq(state->rc_srq_hdl, &rwqe->w_rwr, 1, NULL) !=
  	    IBT_SUCCESS) {
  		atomic_dec_32(&state->rc_srq_rwqe_list.dl_cnt);
  		DPRINT(40, "ibd_rc_post_srq : ibt_post_srq() failed");
  		return (DDI_FAILURE);
*** 1256,1266 ****
  	/*
  	 * Here we should add dl_cnt before post recv, because we would
  	 * have to make sure dl_cnt has already updated before
  	 * corresponding ibd_rc_process_rx() is called.
  	 */
! 	atomic_add_32(&chan->rx_wqe_list.dl_cnt, 1);
  	if (ibt_post_recv(chan->chan_hdl, &rwqe->w_rwr, 1, NULL) !=
  	    IBT_SUCCESS) {
  		atomic_dec_32(&chan->rx_wqe_list.dl_cnt);
  		DPRINT(40, "ibd_rc_post_rwqe : failed in ibt_post_recv()");
  		return (DDI_FAILURE);
--- 1256,1266 ----
  	/*
  	 * Here we should add dl_cnt before post recv, because we would
  	 * have to make sure dl_cnt has already updated before
  	 * corresponding ibd_rc_process_rx() is called.
  	 */
! 	atomic_inc_32(&chan->rx_wqe_list.dl_cnt);
  	if (ibt_post_recv(chan->chan_hdl, &rwqe->w_rwr, 1, NULL) !=
  	    IBT_SUCCESS) {
  		atomic_dec_32(&chan->rx_wqe_list.dl_cnt);
  		DPRINT(40, "ibd_rc_post_rwqe : failed in ibt_post_recv()");
  		return (DDI_FAILURE);
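
The two hunks above share a pattern worth noting: the counter is bumped before the buffer is handed to the hardware and decremented again if the post fails, so a completion handler that fires immediately never observes a stale count. A minimal standalone sketch of that pattern using the same illumos atomic_inc_32()/atomic_dec_32() primitives; fake_list_t and fake_post() are made-up stand-ins for the driver's rwqe list and ibt_post_srq()/ibt_post_recv(), not driver code:

	#include <atomic.h>	/* userland header; kernel code uses <sys/atomic.h> */
	#include <stdint.h>

	/* Hypothetical stand-in for the driver's rwqe list and its dl_cnt. */
	typedef struct fake_list {
		volatile uint32_t dl_cnt;
	} fake_list_t;

	/* Stand-in for ibt_post_srq()/ibt_post_recv(); 0 means success. */
	static int
	fake_post(void)
	{
		return (0);
	}

	/*
	 * Increment-before-post: update dl_cnt before posting so the
	 * completion path always sees the new count; undo on failure.
	 */
	int
	post_one(fake_list_t *list)
	{
		atomic_inc_32(&list->dl_cnt);
		if (fake_post() != 0) {
			atomic_dec_32(&list->dl_cnt);
			return (-1);
		}
		return (0);
	}
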
*** 1497,1511 ****
  		/*
  		 * Record how many rwqe has been occupied by upper
  		 * network layer
  		 */
  		if (state->rc_enable_srq) {
! 			atomic_add_32(&state->rc_srq_rwqe_list.
! 			    dl_bufs_outstanding, 1);
  		} else {
! 			atomic_add_32(&chan->rx_wqe_list.
! 			    dl_bufs_outstanding, 1);
  		}
  		mp = rwqe->rwqe_im_mblk;
  	} else {
  		atomic_add_64(&state->rc_rcv_copy_byte, wc->wc_bytes_xfer);
  		atomic_inc_64(&state->rc_rcv_copy_pkt);
--- 1497,1510 ----
  		/*
  		 * Record how many rwqe has been occupied by upper
  		 * network layer
  		 */
  		if (state->rc_enable_srq) {
! 			atomic_inc_32(
! 			    &state->rc_srq_rwqe_list.dl_bufs_outstanding);
  		} else {
! 			atomic_inc_32(&chan->rx_wqe_list.dl_bufs_outstanding);
  		}
  		mp = rwqe->rwqe_im_mblk;
  	} else {
  		atomic_add_64(&state->rc_rcv_copy_byte, wc->wc_bytes_xfer);
  		atomic_inc_64(&state->rc_rcv_copy_pkt);
*** 1667,1677 ****
  	 */
  	if (ibd_rc_post_rwqe(chan, rwqe) == DDI_FAILURE) {
  		ibd_rc_free_rwqe(chan, rwqe);
  		return;
  	}
! 	atomic_add_32(&chan->rx_wqe_list.dl_bufs_outstanding, -1);
  }
  
  /*
   * Common code for interrupt handling as well as for polling
   * for all completed wqe's while detaching.
--- 1666,1676 ----
  	 */
  	if (ibd_rc_post_rwqe(chan, rwqe) == DDI_FAILURE) {
  		ibd_rc_free_rwqe(chan, rwqe);
  		return;
  	}
! 	atomic_dec_32(&chan->rx_wqe_list.dl_bufs_outstanding);
  }
  
  /*
   * Common code for interrupt handling as well as for polling
   * for all completed wqe's while detaching.
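
As for what the substitution itself buys: atomic_inc_32(&x) and atomic_dec_32(&x) behave identically to atomic_add_32(&x, 1) and atomic_add_32(&x, -1); the change only states the intent directly. A throwaway userland check of that equivalence, under the assumption that both spellings are available from <atomic.h>:

	#include <atomic.h>
	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		volatile uint32_t a = 5, b = 5;

		atomic_add_32(&a, -1);	/* old spelling, as in the last hunk */
		atomic_dec_32(&b);	/* new spelling */
		assert(a == 4 && b == 4);
		return (0);
	}
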