5045 use atomic_{inc,dec}_* instead of atomic_add_*
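
The rename is mechanical: every atomic_add_32(&x, 1) becomes atomic_inc_32(&x),
and every atomic_add_32_nv(&x, 1) becomes atomic_inc_32_nv(&x). A minimal
sketch of the substitution, assuming the atomic_ops(3C) interfaces from
<atomic.h> (the counter and function names here are illustrative, not from
this diff):

    #include <atomic.h>	/* atomic_add_32(), atomic_inc_32() */

    static volatile uint32_t counter;

    void
    bump(void)
    {
    	/* Old spelling: an increment written as an addition of 1. */
    	atomic_add_32(&counter, 1);

    	/* New spelling: the dedicated form states the intent directly. */
    	atomic_inc_32(&counter);
    }

Both forms are equivalent; the inc/dec variants simply make the intent
explicit and drop the literal 1.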

@@ -1754,11 +1754,11 @@
                 return;
         }
         ASSERT(sctps->sctps_recvq_tq_list[
             sctps->sctps_recvq_tq_list_cur_sz] == NULL);
         sctps->sctps_recvq_tq_list[sctps->sctps_recvq_tq_list_cur_sz] = tq;
-        atomic_add_32(&sctps->sctps_recvq_tq_list_cur_sz, 1);
+        atomic_inc_32(&sctps->sctps_recvq_tq_list_cur_sz);
         mutex_exit(&sctps->sctps_rq_tq_lock);
 }
 
 #ifdef DEBUG
 uint32_t recvq_loop_cnt = 0;

@@ -1781,11 +1781,11 @@
          * Note that since we don't hold a lock on sctp_rq_tq_lock for
          * performance reason, recvq_ta_list_cur_sz can be changed during
          * this loop.  The problem this will create is that the loop may
          * not have tried all the recvq_tq.  This should be OK.
          */
-        next_tq = atomic_add_32_nv(&sctps->sctps_recvq_tq_list_cur, 1) %
+        next_tq = atomic_inc_32_nv(&sctps->sctps_recvq_tq_list_cur) %
             sctps->sctps_recvq_tq_list_cur_sz;
         for (try = 0; try < sctps->sctps_recvq_tq_list_cur_sz; try++) {
                 tq = sctps->sctps_recvq_tq_list[next_tq];
                 if (taskq_dispatch(tq, sctp_process_recvq, sctp,
                     TQ_NOSLEEP) != NULL) {
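
In the second hunk the return value matters: atomic_inc_32_nv() returns the
value after the increment, so each caller observes a distinct counter value
to reduce modulo the list size, giving lock-free round-robin taskq selection.
A sketch of that pattern under the same assumptions (NUM_TQ, cur, and
next_queue() are hypothetical stand-ins for the sctps fields):

    #include <atomic.h>	/* atomic_inc_32_nv() */

    #define	NUM_TQ	4		/* stand-in for sctps_recvq_tq_list_cur_sz */

    static volatile uint32_t cur;	/* shared round-robin cursor */

    uint32_t
    next_queue(void)
    {
    	/*
    	 * The _nv ("new value") form returns the post-increment value,
    	 * so concurrent callers each get a different index without
    	 * holding a lock.
    	 */
    	return (atomic_inc_32_nv(&cur) % NUM_TQ);
    }

Wrap-around at UINT32_MAX is harmless here: the cursor restarts at zero and
the modulo keeps the index in range.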