Print this page
Code-review diff for bug 5045: use atomic_{inc,dec}_* instead of atomic_add_*
(original hunk shown first, updated hunk below)


 942                         cv_signal(&nca_dcb_wait);                   \
 943                 }                                                       \
 944                 mutex_exit(&nca_dcb_lock);                          \
 945                 mutex_enter(&nca_dcb_readers);                              \
 946                 /*                                                      \
 947                  * We block above waiting for the writer to exit the    \
 948                  * readers lock, if we didn't block then while we were  \
 949                  * away in the nca_dcb_lock enter the writer exited,    \
 950                  * we could optimize for this case by checking USELOCK  \
 951                  * after the decrement, but as this is an exceptional   \
 952                  * case not in the fast-path we'll just take the hit    \
 953                  * of a needless readers enter/exit.                    \
 954                  */                                                     \
 955                 mutex_exit(&nca_dcb_readers);                               \
 956         }                                                               \
 957 }
 958 
 /*
  * DCB_RD_EXIT(cpu): drop the calling CPU's dcb_readers reference.
  *
  * Fixed per bug 5045: use the dedicated atomic_dec_32_nv(rp) rather
  * than atomic_add_32_nv(rp, -1) -- same post-decrement return value,
  * but the primitive states the intent directly.
  *
  * If the new count equals DCB_COUNT_USELOCK, a writer has marked the
  * counter (NOTE(review): presumably set by DCB_WR_ENTER, whose body is
  * not fully visible in this hunk -- confirm), so signal nca_dcb_wait
  * under nca_dcb_lock in case the writer is blocked draining readers.
  * CV_HAS_WAITERS() avoids a needless cv_signal() when nobody waits.
  */
 959 #define DCB_RD_EXIT(cpu) {                                              \
 960         uint32_t *rp = &nca_gv[cpu].dcb_readers;                    \
 961                                                                         \
 962         if (atomic_dec_32_nv(rp) == DCB_COUNT_USELOCK) {                \
 963                 mutex_enter(&nca_dcb_lock);                         \
 964                 if (CV_HAS_WAITERS(&nca_dcb_wait)) {                        \
 965                         /* May be the last reader for this CPU */       \
 966                         cv_signal(&nca_dcb_wait);                   \
 967                 }                                                       \
 968                 mutex_exit(&nca_dcb_lock);                          \
 969         }                                                               \
 970 }
 971 
 972 #define DCB_WR_ENTER() {                                                \
 973         int cpu;                                                        \
 974         int readers;                                                    \
 975                                                                         \
 976         mutex_enter(&nca_dcb_readers);                                      \
 977         mutex_enter(&nca_dcb_lock);                                 \
 978         for (;;) {                                                      \
 979                 readers = 0;                                            \
 980                 for (cpu = 0; cpu < max_ncpus; cpu++) {                      \
 981                         int new;                                        \
 982                         uint32_t *rp = &nca_gv[cpu].dcb_readers;    \




 942                         cv_signal(&nca_dcb_wait);                   \
 943                 }                                                       \
 944                 mutex_exit(&nca_dcb_lock);                          \
 945                 mutex_enter(&nca_dcb_readers);                              \
 946                 /*                                                      \
 947                  * We block above waiting for the writer to exit the    \
 948                  * readers lock, if we didn't block then while we were  \
 949                  * away in the nca_dcb_lock enter the writer exited,    \
 950                  * we could optimize for this case by checking USELOCK  \
 951                  * after the decrement, but as this is an exceptional   \
 952                  * case not in the fast-path we'll just take the hit    \
 953                  * of a needless readers enter/exit.                    \
 954                  */                                                     \
 955                 mutex_exit(&nca_dcb_readers);                               \
 956         }                                                               \
 957 }
 958 
 /*
  * DCB_RD_EXIT(cpu): drop the calling CPU's dcb_readers reference via
  * atomic_dec_32_nv(), which returns the post-decrement value.
  *
  * If the new count equals DCB_COUNT_USELOCK, a writer has marked this
  * per-CPU counter (NOTE(review): presumably DCB_WR_ENTER sets that
  * marker while waiting for readers to drain -- its body is cut off in
  * this hunk, confirm against the full file), so take nca_dcb_lock and
  * signal nca_dcb_wait to wake it. The CV_HAS_WAITERS() check skips a
  * needless cv_signal() when no writer is actually sleeping; the signal
  * must be issued while holding nca_dcb_lock so it cannot race a writer
  * that is about to block on the same condvar.
  */
 959 #define DCB_RD_EXIT(cpu) {                                              \
 960         uint32_t *rp = &nca_gv[cpu].dcb_readers;                    \
 961                                                                         \
 962         if (atomic_dec_32_nv(rp) == DCB_COUNT_USELOCK) {                \
 963                 mutex_enter(&nca_dcb_lock);                         \
 964                 if (CV_HAS_WAITERS(&nca_dcb_wait)) {                        \
 965                         /* May be the last reader for this CPU */       \
 966                         cv_signal(&nca_dcb_wait);                   \
 967                 }                                                       \
 968                 mutex_exit(&nca_dcb_lock);                          \
 969         }                                                               \
 970 }
 971 
 972 #define DCB_WR_ENTER() {                                                \
 973         int cpu;                                                        \
 974         int readers;                                                    \
 975                                                                         \
 976         mutex_enter(&nca_dcb_readers);                                      \
 977         mutex_enter(&nca_dcb_lock);                                 \
 978         for (;;) {                                                      \
 979                 readers = 0;                                            \
 980                 for (cpu = 0; cpu < max_ncpus; cpu++) {                      \
 981                         int new;                                        \
 982                         uint32_t *rp = &nca_gv[cpu].dcb_readers;    \