5045 use atomic_{inc,dec}_* instead of atomic_add_*
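
The conversion is mechanical: each atomic_add_*() call whose second argument is a literal 1 or -1 becomes the corresponding single-operand increment or decrement routine from <sys/atomic.h>. A minimal sketch of the pattern follows; the counter name is a hypothetical stand-in for the kstat and rnode counters touched in the hunks below:

	#include <sys/atomic.h>

	static uint64_t example_count;		/* hypothetical counter */

	/* before: add/subtract a literal 1 */
	atomic_add_64(&example_count, 1);
	atomic_add_64(&example_count, -1);

	/* after: dedicated increment/decrement entry points */
	atomic_inc_64(&example_count);
	atomic_dec_64(&example_count);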

*** 413,423 ****
  	/*
  	 * There weren't any free client handles which fit, so allocate
  	 * a new one and use that.
  	 */
  #ifdef DEBUG
! 	atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, 1);
  #endif
  	mutex_exit(&nfscl->nfscl_chtable_lock);
  
  	nfscl->nfscl_stat.cltoomany.value.ui64++;
  	if (newch != NULL) {
--- 413,423 ----
  	/*
  	 * There weren't any free client handles which fit, so allocate
  	 * a new one and use that.
  	 */
  #ifdef DEBUG
! 	atomic_inc_64(&nfscl->nfscl_stat.clalloc.value.ui64);
  #endif
  	mutex_exit(&nfscl->nfscl_chtable_lock);
  
  	nfscl->nfscl_stat.cltoomany.value.ui64++;
  	if (newch != NULL) {
*** 434,444 ****
  		sigunintr(&smask);
  
  		if (error != 0) {
  			kmem_cache_free(chtab_cache, cp);
  #ifdef DEBUG
! 			atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
  #endif
  			/*
  			 * Warning is unnecessary if error is EINTR.
  			 */
  			if (error != EINTR) {
--- 434,444 ----
  		sigunintr(&smask);
  
  		if (error != 0) {
  			kmem_cache_free(chtab_cache, cp);
  #ifdef DEBUG
! 			atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
  #endif
  			/*
  			 * Warning is unnecessary if error is EINTR.
  			 */
  			if (error != EINTR) {
*** 453,463 ****
  	    &cp->ch_client->cl_auth);
  	if (error || cp->ch_client->cl_auth == NULL) {
  		CLNT_DESTROY(cp->ch_client);
  		kmem_cache_free(chtab_cache, cp);
  #ifdef DEBUG
! 		atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
  #endif
  		return ((error != 0) ? error : EINTR);
  	}
  	ch->ch_timesused++;
  	*newcl = cp->ch_client;
--- 453,463 ----
  	    &cp->ch_client->cl_auth);
  	if (error || cp->ch_client->cl_auth == NULL) {
  		CLNT_DESTROY(cp->ch_client);
  		kmem_cache_free(chtab_cache, cp);
  #ifdef DEBUG
! 		atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
  #endif
  		return ((error != 0) ? error : EINTR);
  	}
  	ch->ch_timesused++;
  	*newcl = cp->ch_client;
*** 2535,2545 ****
  		mutex_exit(&rpfreelist_lock);
  
  		rp = kmem_cache_alloc(rnode_cache, KM_SLEEP);
  		new_vp = vn_alloc(KM_SLEEP);
  
! 		atomic_add_long((ulong_t *)&rnew, 1);
  #ifdef DEBUG
  		clstat_debug.nrnode.value.ui64++;
  #endif
  		vp = new_vp;
  	}
--- 2535,2545 ----
  		mutex_exit(&rpfreelist_lock);
  
  		rp = kmem_cache_alloc(rnode_cache, KM_SLEEP);
  		new_vp = vn_alloc(KM_SLEEP);
  
! 		atomic_inc_ulong((ulong_t *)&rnew);
  #ifdef DEBUG
  		clstat_debug.nrnode.value.ui64++;
  #endif
  		vp = new_vp;
  	}
*** 3017,3027 ****
  	ASSERT(rp->r_count == 0);
  	ASSERT(rp->r_lmpl == NULL);
  	ASSERT(rp->r_mapcnt == 0);
  	ASSERT(!(rp->r_flags & RHASHED));
  	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
! 	atomic_add_long((ulong_t *)&rnew, -1);
  #ifdef DEBUG
  	clstat_debug.nrnode.value.ui64--;
  #endif
  	nfs_rw_destroy(&rp->r_rwlock);
  	nfs_rw_destroy(&rp->r_lkserlock);
--- 3017,3027 ----
  	ASSERT(rp->r_count == 0);
  	ASSERT(rp->r_lmpl == NULL);
  	ASSERT(rp->r_mapcnt == 0);
  	ASSERT(!(rp->r_flags & RHASHED));
  	ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);
! 	atomic_dec_ulong((ulong_t *)&rnew);
  #ifdef DEBUG
  	clstat_debug.nrnode.value.ui64--;
  #endif
  	nfs_rw_destroy(&rp->r_rwlock);
  	nfs_rw_destroy(&rp->r_lkserlock);
*** 3811,3832 ****
  		rc->flags = RDDIR;
  		cv_init(&rc->cv, NULL, CV_DEFAULT, NULL);
  		mutex_init(&rc->lock, NULL, MUTEX_DEFAULT, NULL);
  		rc->count = 1;
  #ifdef DEBUG
! 		atomic_add_64(&clstat_debug.dirent.value.ui64, 1);
  #endif
  	}
  	return (rc);
  }
  
  static void
  rddir_cache_free(rddir_cache *rc)
  {
  
  #ifdef DEBUG
! 	atomic_add_64(&clstat_debug.dirent.value.ui64, -1);
  #endif
  	if (rc->entries != NULL) {
  #ifdef DEBUG
  		rddir_cache_buf_free(rc->entries, rc->buflen);
  #else
--- 3811,3832 ----
  		rc->flags = RDDIR;
  		cv_init(&rc->cv, NULL, CV_DEFAULT, NULL);
  		mutex_init(&rc->lock, NULL, MUTEX_DEFAULT, NULL);
  		rc->count = 1;
  #ifdef DEBUG
! 		atomic_inc_64(&clstat_debug.dirent.value.ui64);
  #endif
  	}
  	return (rc);
  }
  
  static void
  rddir_cache_free(rddir_cache *rc)
  {
  
  #ifdef DEBUG
! 	atomic_dec_64(&clstat_debug.dirent.value.ui64);
  #endif
  	if (rc->entries != NULL) {
  #ifdef DEBUG
  		rddir_cache_buf_free(rc->entries, rc->buflen);
  #else
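
The rnode hunks use the long/ulong flavor of the interface. A hedged note on the cast: atomic_add_long(), atomic_inc_ulong(), and atomic_dec_ulong() all take a volatile ulong_t *, so the existing (ulong_t *)&rnew cast carries over unchanged; only the function name changes and the delta argument is dropped. A sketch, with a hypothetical counter standing in for rnew (whose declaration is outside these hunks):

	#include <sys/atomic.h>

	static long new_rnode_count;		/* hypothetical stand-in for rnew */

	/* before: add/subtract a literal 1 on the long counter */
	atomic_add_long((ulong_t *)&new_rnode_count, 1);
	atomic_add_long((ulong_t *)&new_rnode_count, -1);

	/* after: ulong increment/decrement entry points */
	atomic_inc_ulong((ulong_t *)&new_rnode_count);
	atomic_dec_ulong((ulong_t *)&new_rnode_count);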