Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*

*** 4041,4051 ****
  		if ((p->rsmrc_key == msg->rsmipc_key) &&
  		    (p->rsmrc_node == src_node)) {
  			seg = (rsmseg_t *)p;
  			rsmseglock_acquire(seg);
! 			atomic_add_32(&seg->s_pollevent, 1);
  			if (seg->s_pollflag & RSM_SEGMENT_POLL)
  				pollwakeup(&seg->s_poll, POLLRDNORM);
  			rsmseglock_release(seg);
--- 4041,4051 ----
  		if ((p->rsmrc_key == msg->rsmipc_key) &&
  		    (p->rsmrc_node == src_node)) {
  			seg = (rsmseg_t *)p;
  			rsmseglock_acquire(seg);
! 			atomic_inc_32(&seg->s_pollevent);
  			if (seg->s_pollflag & RSM_SEGMENT_POLL)
  				pollwakeup(&seg->s_poll, POLLRDNORM);
  			rsmseglock_release(seg);
*** 4062,4072 ****
  		return;
  	}
  	ASSERT(rsmseglock_held(seg));
! 	atomic_add_32(&seg->s_pollevent, 1);
  	/*
  	 * We must hold the segment lock here, or else the segment
  	 * can be freed while pollwakeup is using it. This implies
  	 * that we MUST NOT grab the segment lock during rsm_chpoll,
--- 4062,4072 ----
  		return;
  	}
  	ASSERT(rsmseglock_held(seg));
! 	atomic_inc_32(&seg->s_pollevent);
  	/*
  	 * We must hold the segment lock here, or else the segment
  	 * can be freed while pollwakeup is using it. This implies
  	 * that we MUST NOT grab the segment lock during rsm_chpoll,
*** 5500,5510 ****
  			    "rsm: rsmipc_send no reply send"
  			    " err = %d no reply count = %d\n",
  			    e, no_reply_cnt));
  			ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
  			    e != RSMERR_BAD_BARRIER_HNDL);
! 			atomic_add_64(&rsm_ipcsend_errcnt, 1);
  			goto again;
  		} else {
  			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
  			    "rsmipc_send done\n"));
  			return (e);
--- 5500,5510 ----
  			    "rsm: rsmipc_send no reply send"
  			    " err = %d no reply count = %d\n",
  			    e, no_reply_cnt));
  			ASSERT(e != RSMERR_QUEUE_FENCE_UP &&
  			    e != RSMERR_BAD_BARRIER_HNDL);
! 			atomic_inc_64(&rsm_ipcsend_errcnt);
  			goto again;
  		} else {
  			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
  			    "rsmipc_send done\n"));
  			return (e);
*** 5531,5541 ****
  		rele_sendq_token(sendq_token);
  		if (e != RSM_SUCCESS) {
  			DBG_PRINTF((category, RSM_ERR,
  			    "rsm: rsmipc_send reply send"
  			    " err = %d\n", e));
! 			atomic_add_64(&rsm_ipcsend_errcnt, 1);
  			goto again;
  		} else {
  			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
  			    "rsmipc_send done\n"));
  			return (e);
--- 5531,5541 ----
  		rele_sendq_token(sendq_token);
  		if (e != RSM_SUCCESS) {
  			DBG_PRINTF((category, RSM_ERR,
  			    "rsm: rsmipc_send reply send"
  			    " err = %d\n", e));
! 			atomic_inc_64(&rsm_ipcsend_errcnt);
  			goto again;
  		} else {
  			DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
  			    "rsmipc_send done\n"));
  			return (e);
*** 5637,5647 ****
  			DBG_PRINTF((category, RSM_ERR,
  			    "rsm: rsmipc_send rsmpi send err = %d\n", e));
  			RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
  			rsmipc_free(rslot);
  			rele_sendq_token(sendq_token);
! 			atomic_add_64(&rsm_ipcsend_errcnt, 1);
  			goto again;
  		}
  		/* wait for a reply signal, a SIGINT, or 5 sec. timeout */
  		e = cv_reltimedwait_sig(&rslot->rsmipc_cv, &rslot->rsmipc_lock,
--- 5637,5647 ----
  			DBG_PRINTF((category, RSM_ERR,
  			    "rsm: rsmipc_send rsmpi send err = %d\n", e));
  			RSMIPC_CLEAR(rslot, RSMIPC_PENDING);
  			rsmipc_free(rslot);
  			rele_sendq_token(sendq_token);
! 			atomic_inc_64(&rsm_ipcsend_errcnt);
  			goto again;
  		}
  		/* wait for a reply signal, a SIGINT, or 5 sec. timeout */
  		e = cv_reltimedwait_sig(&rslot->rsmipc_cv, &rslot->rsmipc_lock,
*** 5933,5943 ****
  		if (e == RSM_SUCCESS) {
  			break;
  		}
  		/* error counter for statistics */
! 		atomic_add_64(&rsm_ctrlmsg_errcnt, 1);
  		DBG_PRINTF((category, RSM_ERR,
  		    "rsmipc_send_controlmsg:rsm_send error=%d", e));
  		if (++retry_cnt == min_retry_cnt) {	/* backoff before retry */
--- 5933,5943 ----
  		if (e == RSM_SUCCESS) {
  			break;
  		}
  		/* error counter for statistics */
! 		atomic_inc_64(&rsm_ctrlmsg_errcnt);
  		DBG_PRINTF((category, RSM_ERR,
  		    "rsmipc_send_controlmsg:rsm_send error=%d", e));
  		if (++retry_cnt == min_retry_cnt) {	/* backoff before retry */
*** 6447,6457 ****
  	seg->s_flags &= ~RSM_IMPORT_DUMMY;	/* clear dummy flag */
  	if (bar_va) {
  		/* increment generation number on barrier page */
! 		atomic_add_16(bar_va + seg->s_hdr.rsmrc_num, 1);
  		/* return user off into barrier page where status will be */
  		msg->off = (int)seg->s_hdr.rsmrc_num;
  		msg->gnum = bar_va[msg->off];	/* gnum race */
  	} else {
  		msg->off = 0;
--- 6447,6457 ----
  	seg->s_flags &= ~RSM_IMPORT_DUMMY;	/* clear dummy flag */
  	if (bar_va) {
  		/* increment generation number on barrier page */
! 		atomic_inc_16(bar_va + seg->s_hdr.rsmrc_num);
  		/* return user off into barrier page where status will be */
  		msg->off = (int)seg->s_hdr.rsmrc_num;
  		msg->gnum = bar_va[msg->off];	/* gnum race */
  	} else {
  		msg->off = 0;
*** 6683,6693 ****
  		rsmsharelock_release(seg);
  	}
  	/* increment generation number on barrier page */
  	if (bar_va) {
! 		atomic_add_16(bar_va + seg->s_hdr.rsmrc_num, 1);
  	}
  	/*
  	 * The following needs to be done after any
  	 * rsmsharelock calls which use seg->s_share.
--- 6683,6693 ----
  		rsmsharelock_release(seg);
  	}
  	/* increment generation number on barrier page */
  	if (bar_va) {
! 		atomic_inc_16(bar_va + seg->s_hdr.rsmrc_num);
  	}
  	/*
  	 * The following needs to be done after any
  	 * rsmsharelock calls which use seg->s_share.
*** 7282,7292 ****
  		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
  		    "consumeevent_ioctl: rnum(%d) seg(%p)\n", rnum, seg));
  		if (seg->s_pollevent) {
  			/* consume the event */
! 			atomic_add_32(&seg->s_pollevent, -1);
  			event_list[i].revent = POLLRDNORM;
  		}
  		rsmseglock_release(seg);
  	}
  }
--- 7282,7292 ----
  		DBG_PRINTF((category, RSM_DEBUG_VERBOSE,
  		    "consumeevent_ioctl: rnum(%d) seg(%p)\n", rnum, seg));
  		if (seg->s_pollevent) {
  			/* consume the event */
! 			atomic_dec_32(&seg->s_pollevent);
  			event_list[i].revent = POLLRDNORM;
  		}
  		rsmseglock_release(seg);
  	}
  }