Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*
(Review listing: the original NFS client code excerpts using atomic_add_* appear first, followed by the same excerpts updated to use atomic_inc_*/atomic_dec_*.)


 398                 (void) clnt_tli_kinit(cp->ch_client, svp->sv_knconf,
 399                     &svp->sv_addr, ci->cl_readsize, ci->cl_retrans, cr);
 400                 error = sec_clnt_geth(cp->ch_client, svp->sv_secdata, cr,
 401                     &cp->ch_client->cl_auth);
 402                 if (error || cp->ch_client->cl_auth == NULL) {
 403                         CLNT_DESTROY(cp->ch_client);
 404                         kmem_cache_free(chtab_cache, cp);
 405                         return ((error != 0) ? error : EINTR);
 406                 }
 407                 ch->ch_timesused++;
 408                 *newcl = cp->ch_client;
 409                 *chp = cp;
 410                 return (0);
 411         }
 412 
 413         /*
 414          * There weren't any free client handles which fit, so allocate
 415          * a new one and use that.
 416          */
 417 #ifdef DEBUG
 418         atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, 1);
 419 #endif
 420         mutex_exit(&nfscl->nfscl_chtable_lock);
 421 
 422         nfscl->nfscl_stat.cltoomany.value.ui64++;
 423         if (newch != NULL) {
 424                 kmem_free(newch->ch_protofmly, strlen(newch->ch_protofmly) + 1);
 425                 kmem_free(newch, sizeof (*newch));
 426         }
 427 
 428         cp = kmem_cache_alloc(chtab_cache, KM_SLEEP);
 429         cp->ch_head = ch;
 430 
 431         sigintr(&smask, (int)ci->cl_flags & MI_INT);
 432         error = clnt_tli_kcreate(svp->sv_knconf, &svp->sv_addr, ci->cl_prog,
 433             ci->cl_vers, ci->cl_readsize, ci->cl_retrans, cr, &cp->ch_client);
 434         sigunintr(&smask);
 435 
 436         if (error != 0) {
 437                 kmem_cache_free(chtab_cache, cp);
 438 #ifdef DEBUG
 439                 atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
 440 #endif
 441                 /*
 442                  * Warning is unnecessary if error is EINTR.
 443                  */
 444                 if (error != EINTR) {
 445                         nfs_cmn_err(error, CE_WARN,
 446                             "clget: couldn't create handle: %m\n");
 447                 }
 448                 return (error);
 449         }
 450         (void) CLNT_CONTROL(cp->ch_client, CLSET_PROGRESS, NULL);
 451         auth_destroy(cp->ch_client->cl_auth);
 452         error = sec_clnt_geth(cp->ch_client, svp->sv_secdata, cr,
 453             &cp->ch_client->cl_auth);
 454         if (error || cp->ch_client->cl_auth == NULL) {
 455                 CLNT_DESTROY(cp->ch_client);
 456                 kmem_cache_free(chtab_cache, cp);
 457 #ifdef DEBUG
 458                 atomic_add_64(&nfscl->nfscl_stat.clalloc.value.ui64, -1);
 459 #endif
 460                 return ((error != 0) ? error : EINTR);
 461         }
 462         ch->ch_timesused++;
 463         *newcl = cp->ch_client;
 464         ASSERT(cp->ch_client->cl_nosignal == FALSE);
 465         *chp = cp;
 466         return (0);
 467 }
 468 
 469 int
 470 clget(clinfo_t *ci, servinfo_t *svp, cred_t *cr, CLIENT **newcl,
 471     struct chtab **chp)
 472 {
 473         struct nfs_clnt *nfscl;
 474 
 475         nfscl = zone_getspecific(nfsclnt_zone_key, nfs_zone());
 476         ASSERT(nfscl != NULL);
 477 
 478         return (clget_impl(ci, svp, cr, newcl, chp, nfscl));


2520                 mutex_destroy(&rp->r_statelock);
2521                 cv_destroy(&rp->r_cv);
2522                 cv_destroy(&rp->r_commit.c_cv);
2523                 nfs_free_r_path(rp);
2524                 avl_destroy(&rp->r_dir);
2525                 /*
2526                  * Make sure that if rnode is recycled then
2527                  * VFS count is decremented properly before
2528                  * reuse.
2529                  */
2530                 VFS_RELE(vp->v_vfsp);
2531                 vn_reinit(vp);
2532         } else {
2533                 vnode_t *new_vp;
2534 
2535                 mutex_exit(&rpfreelist_lock);
2536 
2537                 rp = kmem_cache_alloc(rnode_cache, KM_SLEEP);
2538                 new_vp = vn_alloc(KM_SLEEP);
2539 
2540                 atomic_add_long((ulong_t *)&rnew, 1);
2541 #ifdef DEBUG
2542                 clstat_debug.nrnode.value.ui64++;
2543 #endif
2544                 vp = new_vp;
2545         }
2546 
2547         bzero(rp, sizeof (*rp));
2548         rp->r_vnode = vp;
2549         nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
2550         nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
2551         mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
2552         cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
2553         cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
2554         rp->r_fh.fh_len = fh->fh_len;
2555         bcopy(fh->fh_buf, rp->r_fh.fh_buf, fh->fh_len);
2556         rp->r_server = mi->mi_curr_serv;
2557         if (FAILOVER_MOUNT(mi)) {
2558                 /*
2559                  * If replicated servers, stash pathnames
2560                  */


3002 
3003 /*
3004  * This routine destroys all the resources associated with the rnode
3005  * and then the rnode itself.
3006  */
3007 static void
3008 destroy_rnode(rnode_t *rp)
3009 {
3010         vnode_t *vp;
3011         vfs_t *vfsp;
3012 
3013         vp = RTOV(rp);
3014         vfsp = vp->v_vfsp;	/* saved: vp is freed before the VFS_RELE below */
3015 
3016         ASSERT(vp->v_count == 1);	/* caller holds the only vnode reference */
3017         ASSERT(rp->r_count == 0);
3018         ASSERT(rp->r_lmpl == NULL);
3019         ASSERT(rp->r_mapcnt == 0);
3020         ASSERT(!(rp->r_flags & RHASHED));	/* must already be off the hash */
3021         ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);	/* not on freelist */
3022         atomic_add_long((ulong_t *)&rnew, -1);	/* old-style decrement; 5045 changes this to atomic_dec_ulong() */
3023 #ifdef DEBUG
3024         clstat_debug.nrnode.value.ui64--;	/* debug-only rnode counter */
3025 #endif
3026         nfs_rw_destroy(&rp->r_rwlock);
3027         nfs_rw_destroy(&rp->r_lkserlock);
3028         mutex_destroy(&rp->r_statelock);
3029         cv_destroy(&rp->r_cv);
3030         cv_destroy(&rp->r_commit.c_cv);
3031         if (rp->r_flags & RDELMAPLIST)	/* delmap list only exists when flagged */
3032                 list_destroy(&rp->r_indelmap);
3033         nfs_free_r_path(rp);
3034         avl_destroy(&rp->r_dir);
3035         vn_invalid(vp);
3036         vn_free(vp);
3037         kmem_cache_free(rnode_cache, rp);
3038         VFS_RELE(vfsp);	/* drop the rnode's hold on its vfs (saved above) */
3039 }
3040 
3041 /*
3042  * Flush all vnodes in this (or every) vfs.


3796                 return (ENXIO);
3797         default:
3798                 return ((int)status);
3799         }
3800 #endif
3801 }
3802 
3803 rddir_cache *
3804 rddir_cache_alloc(int flags)	/* flags is a kmem_alloc() sleep flag */
3805 {
3806         rddir_cache *rc;
3807 
3808         rc = kmem_alloc(sizeof (*rc), flags);
3809         if (rc != NULL) {	/* kmem_alloc() can fail for non-sleeping flags */
3810                 rc->entries = NULL;	/* data buffer attached later by caller */
3811                 rc->flags = RDDIR;
3812                 cv_init(&rc->cv, NULL, CV_DEFAULT, NULL);
3813                 mutex_init(&rc->lock, NULL, MUTEX_DEFAULT, NULL);
3814                 rc->count = 1;	/* caller gets the initial reference */
3815 #ifdef DEBUG
3816                 atomic_add_64(&clstat_debug.dirent.value.ui64, 1);	/* old-style; 5045 changes this to atomic_inc_64() */
3817 #endif
3818         }
3819         return (rc);	/* NULL on allocation failure */
3820 }
3821 
3822 static void
3823 rddir_cache_free(rddir_cache *rc)	/* tear down a rddir_cache entry and its buffer */
3824 {
3825 
3826 #ifdef DEBUG
3827         atomic_add_64(&clstat_debug.dirent.value.ui64, -1);	/* old-style; 5045 changes this to atomic_dec_64() */
3828 #endif
3829         if (rc->entries != NULL) {
3830 #ifdef DEBUG
3831                 rddir_cache_buf_free(rc->entries, rc->buflen);	/* DEBUG builds free via the instrumented wrapper */
3832 #else
3833                 kmem_free(rc->entries, rc->buflen);
3834 #endif
3835         }
3836         cv_destroy(&rc->cv);
3837         mutex_destroy(&rc->lock);
3838         kmem_free(rc, sizeof (*rc));
3839 }
3840 
3841 void
3842 rddir_cache_hold(rddir_cache *rc)
3843 {
3844 
3845         mutex_enter(&rc->lock);
3846         rc->count++;
3847         mutex_exit(&rc->lock);




 398                 (void) clnt_tli_kinit(cp->ch_client, svp->sv_knconf,
 399                     &svp->sv_addr, ci->cl_readsize, ci->cl_retrans, cr);
 400                 error = sec_clnt_geth(cp->ch_client, svp->sv_secdata, cr,
 401                     &cp->ch_client->cl_auth);
 402                 if (error || cp->ch_client->cl_auth == NULL) {
 403                         CLNT_DESTROY(cp->ch_client);
 404                         kmem_cache_free(chtab_cache, cp);
 405                         return ((error != 0) ? error : EINTR);
 406                 }
 407                 ch->ch_timesused++;
 408                 *newcl = cp->ch_client;
 409                 *chp = cp;
 410                 return (0);
 411         }
 412 
 413         /*
 414          * There weren't any free client handles which fit, so allocate
 415          * a new one and use that.
 416          */
 417 #ifdef DEBUG
 418         atomic_inc_64(&nfscl->nfscl_stat.clalloc.value.ui64);
 419 #endif
 420         mutex_exit(&nfscl->nfscl_chtable_lock);
 421 
 422         nfscl->nfscl_stat.cltoomany.value.ui64++;
 423         if (newch != NULL) {
 424                 kmem_free(newch->ch_protofmly, strlen(newch->ch_protofmly) + 1);
 425                 kmem_free(newch, sizeof (*newch));
 426         }
 427 
 428         cp = kmem_cache_alloc(chtab_cache, KM_SLEEP);
 429         cp->ch_head = ch;
 430 
 431         sigintr(&smask, (int)ci->cl_flags & MI_INT);
 432         error = clnt_tli_kcreate(svp->sv_knconf, &svp->sv_addr, ci->cl_prog,
 433             ci->cl_vers, ci->cl_readsize, ci->cl_retrans, cr, &cp->ch_client);
 434         sigunintr(&smask);
 435 
 436         if (error != 0) {
 437                 kmem_cache_free(chtab_cache, cp);
 438 #ifdef DEBUG
 439                 atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
 440 #endif
 441                 /*
 442                  * Warning is unnecessary if error is EINTR.
 443                  */
 444                 if (error != EINTR) {
 445                         nfs_cmn_err(error, CE_WARN,
 446                             "clget: couldn't create handle: %m\n");
 447                 }
 448                 return (error);
 449         }
 450         (void) CLNT_CONTROL(cp->ch_client, CLSET_PROGRESS, NULL);
 451         auth_destroy(cp->ch_client->cl_auth);
 452         error = sec_clnt_geth(cp->ch_client, svp->sv_secdata, cr,
 453             &cp->ch_client->cl_auth);
 454         if (error || cp->ch_client->cl_auth == NULL) {
 455                 CLNT_DESTROY(cp->ch_client);
 456                 kmem_cache_free(chtab_cache, cp);
 457 #ifdef DEBUG
 458                 atomic_dec_64(&nfscl->nfscl_stat.clalloc.value.ui64);
 459 #endif
 460                 return ((error != 0) ? error : EINTR);
 461         }
 462         ch->ch_timesused++;
 463         *newcl = cp->ch_client;
 464         ASSERT(cp->ch_client->cl_nosignal == FALSE);
 465         *chp = cp;
 466         return (0);
 467 }
 468 
 469 int
 470 clget(clinfo_t *ci, servinfo_t *svp, cred_t *cr, CLIENT **newcl,
 471     struct chtab **chp)
 472 {
 473         struct nfs_clnt *nfscl;
 474 
 475         nfscl = zone_getspecific(nfsclnt_zone_key, nfs_zone());
 476         ASSERT(nfscl != NULL);
 477 
 478         return (clget_impl(ci, svp, cr, newcl, chp, nfscl));


2520                 mutex_destroy(&rp->r_statelock);
2521                 cv_destroy(&rp->r_cv);
2522                 cv_destroy(&rp->r_commit.c_cv);
2523                 nfs_free_r_path(rp);
2524                 avl_destroy(&rp->r_dir);
2525                 /*
2526                  * Make sure that if rnode is recycled then
2527                  * VFS count is decremented properly before
2528                  * reuse.
2529                  */
2530                 VFS_RELE(vp->v_vfsp);
2531                 vn_reinit(vp);
2532         } else {
2533                 vnode_t *new_vp;
2534 
2535                 mutex_exit(&rpfreelist_lock);
2536 
2537                 rp = kmem_cache_alloc(rnode_cache, KM_SLEEP);
2538                 new_vp = vn_alloc(KM_SLEEP);
2539 
2540                 atomic_inc_ulong((ulong_t *)&rnew);
2541 #ifdef DEBUG
2542                 clstat_debug.nrnode.value.ui64++;
2543 #endif
2544                 vp = new_vp;
2545         }
2546 
2547         bzero(rp, sizeof (*rp));
2548         rp->r_vnode = vp;
2549         nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
2550         nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
2551         mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
2552         cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
2553         cv_init(&rp->r_commit.c_cv, NULL, CV_DEFAULT, NULL);
2554         rp->r_fh.fh_len = fh->fh_len;
2555         bcopy(fh->fh_buf, rp->r_fh.fh_buf, fh->fh_len);
2556         rp->r_server = mi->mi_curr_serv;
2557         if (FAILOVER_MOUNT(mi)) {
2558                 /*
2559                  * If replicated servers, stash pathnames
2560                  */


3002 
3003 /*
3004  * This routine destroys all the resources associated with the rnode
3005  * and then the rnode itself.
3006  */
3007 static void
3008 destroy_rnode(rnode_t *rp)
3009 {
3010         vnode_t *vp;
3011         vfs_t *vfsp;
3012 
3013         vp = RTOV(rp);
3014         vfsp = vp->v_vfsp;	/* saved: vp is freed before the VFS_RELE below */
3015 
3016         ASSERT(vp->v_count == 1);	/* caller holds the only vnode reference */
3017         ASSERT(rp->r_count == 0);
3018         ASSERT(rp->r_lmpl == NULL);
3019         ASSERT(rp->r_mapcnt == 0);
3020         ASSERT(!(rp->r_flags & RHASHED));	/* must already be off the hash */
3021         ASSERT(rp->r_freef == NULL && rp->r_freeb == NULL);	/* not on freelist */
3022         atomic_dec_ulong((ulong_t *)&rnew);	/* 5045: replaces atomic_add_long(&rnew, -1) */
3023 #ifdef DEBUG
3024         clstat_debug.nrnode.value.ui64--;	/* debug-only rnode counter */
3025 #endif
3026         nfs_rw_destroy(&rp->r_rwlock);
3027         nfs_rw_destroy(&rp->r_lkserlock);
3028         mutex_destroy(&rp->r_statelock);
3029         cv_destroy(&rp->r_cv);
3030         cv_destroy(&rp->r_commit.c_cv);
3031         if (rp->r_flags & RDELMAPLIST)	/* delmap list only exists when flagged */
3032                 list_destroy(&rp->r_indelmap);
3033         nfs_free_r_path(rp);
3034         avl_destroy(&rp->r_dir);
3035         vn_invalid(vp);
3036         vn_free(vp);
3037         kmem_cache_free(rnode_cache, rp);
3038         VFS_RELE(vfsp);	/* drop the rnode's hold on its vfs (saved above) */
3039 }
3040 
3041 /*
3042  * Flush all vnodes in this (or every) vfs.


3796                 return (ENXIO);
3797         default:
3798                 return ((int)status);
3799         }
3800 #endif
3801 }
3802 
3803 rddir_cache *
3804 rddir_cache_alloc(int flags)	/* flags is a kmem_alloc() sleep flag */
3805 {
3806         rddir_cache *rc;
3807 
3808         rc = kmem_alloc(sizeof (*rc), flags);
3809         if (rc != NULL) {	/* kmem_alloc() can fail for non-sleeping flags */
3810                 rc->entries = NULL;	/* data buffer attached later by caller */
3811                 rc->flags = RDDIR;
3812                 cv_init(&rc->cv, NULL, CV_DEFAULT, NULL);
3813                 mutex_init(&rc->lock, NULL, MUTEX_DEFAULT, NULL);
3814                 rc->count = 1;	/* caller gets the initial reference */
3815 #ifdef DEBUG
3816                 atomic_inc_64(&clstat_debug.dirent.value.ui64);	/* 5045: replaces atomic_add_64(.., 1) */
3817 #endif
3818         }
3819         return (rc);	/* NULL on allocation failure */
3820 }
3821 
3822 static void
3823 rddir_cache_free(rddir_cache *rc)	/* tear down a rddir_cache entry and its buffer */
3824 {
3825 
3826 #ifdef DEBUG
3827         atomic_dec_64(&clstat_debug.dirent.value.ui64);	/* 5045: replaces atomic_add_64(.., -1) */
3828 #endif
3829         if (rc->entries != NULL) {
3830 #ifdef DEBUG
3831                 rddir_cache_buf_free(rc->entries, rc->buflen);	/* DEBUG builds free via the instrumented wrapper */
3832 #else
3833                 kmem_free(rc->entries, rc->buflen);
3834 #endif
3835         }
3836         cv_destroy(&rc->cv);
3837         mutex_destroy(&rc->lock);
3838         kmem_free(rc, sizeof (*rc));
3839 }
3840 
3841 void
3842 rddir_cache_hold(rddir_cache *rc)
3843 {
3844 
3845         mutex_enter(&rc->lock);
3846         rc->count++;
3847         mutex_exit(&rc->lock);