Print this page
Bug 5045: use atomic_{inc,dec}_* instead of atomic_add_* when the delta is a constant ±1


 386                 mutex_destroy(&np->r_statelock);
 387                 cv_destroy(&np->r_cv);
 388                 /*
 389                  * Make sure that if smbnode is recycled then
 390                  * VFS count is decremented properly before
 391                  * reuse.
 392                  */
 393                 VFS_RELE(vp->v_vfsp);
 394                 vn_reinit(vp);
 395         } else {
 396                 /*
 397                  * allocate and initialize a new smbnode
 398                  */
 399                 vnode_t *new_vp;
 400 
 401                 mutex_exit(&smbfreelist_lock);
 402 
 403                 np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
 404                 new_vp = vn_alloc(KM_SLEEP);
 405 
 406                 atomic_add_long((ulong_t *)&smbnodenew, 1);
 407                 vp = new_vp;
 408         }
 409 
 410         /*
 411          * Allocate and copy the rpath we'll need below.
 412          */
 413         new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
 414         bcopy(rpath, new_rpath, rplen);
 415         new_rpath[rplen] = '\0';
 416 
 417         /* Initialize smbnode_t */
 418         bzero(np, sizeof (*np));
 419 
 420         smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
 421         smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
 422         mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
 423         cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
 424         /* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */
 425 
 426         np->r_vnode = vp;


1010  *
1011  * NFS: nfs_subr.c:destroy_rnode
1012  */
1013 static void
1014 sn_destroy_node(smbnode_t *np)
1015 {
1016         vnode_t *vp;
1017         vfs_t *vfsp;
1018 
1019         vp = SMBTOV(np);
1020         vfsp = vp->v_vfsp;
1021 
1022         ASSERT(vp->v_count == 1);
1023         ASSERT(np->r_count == 0);
1024         ASSERT(np->r_mapcnt == 0);
1025         ASSERT(np->r_secattr.vsa_aclentp == NULL);
1026         ASSERT(np->r_cred == NULL);
1027         ASSERT(np->n_rpath == NULL);
1028         ASSERT(!(np->r_flags & RHASHED));
1029         ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
1030         atomic_add_long((ulong_t *)&smbnodenew, -1);
1031         vn_invalid(vp);
1032         vn_free(vp);
1033         kmem_cache_free(smbnode_cache, np);
1034         VFS_RELE(vfsp);
1035 }
1036 
1037 /*
1038  * Flush all vnodes in this (or every) vfs.
1039  * Used by nfs_sync and by nfs_unmount.
1040  */
 1041 /*ARGSUSED*/
 1042 void
 1043 smbfs_rflush(struct vfs *vfsp, cred_t *cr)
 1044 {
 1045         /* Todo: mmap support. */  /* intentional stub; both args unused (ARGSUSED) */
 1046 }
1047 
1048 /* access cache */
1049 /* client handles */
1050 




 386                 mutex_destroy(&np->r_statelock);
 387                 cv_destroy(&np->r_cv);
 388                 /*
 389                  * Make sure that if smbnode is recycled then
 390                  * VFS count is decremented properly before
 391                  * reuse.
 392                  */
 393                 VFS_RELE(vp->v_vfsp);
 394                 vn_reinit(vp);
 395         } else {
 396                 /*
 397                  * allocate and initialize a new smbnode
 398                  */
 399                 vnode_t *new_vp;
 400 
 401                 mutex_exit(&smbfreelist_lock);
 402 
 403                 np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
 404                 new_vp = vn_alloc(KM_SLEEP);
 405 
 406                 atomic_inc_ulong((ulong_t *)&smbnodenew);
 407                 vp = new_vp;
 408         }
 409 
 410         /*
 411          * Allocate and copy the rpath we'll need below.
 412          */
 413         new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
 414         bcopy(rpath, new_rpath, rplen);
 415         new_rpath[rplen] = '\0';
 416 
 417         /* Initialize smbnode_t */
 418         bzero(np, sizeof (*np));
 419 
 420         smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
 421         smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
 422         mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
 423         cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
 424         /* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */
 425 
 426         np->r_vnode = vp;


1010  *
1011  * NFS: nfs_subr.c:destroy_rnode
1012  */
 1013 static void
 1014 sn_destroy_node(smbnode_t *np)     /* final teardown of an smbnode and its vnode */
 1015 {
 1016         vnode_t *vp;
 1017         vfs_t *vfsp;
 1018 
 1019         vp = SMBTOV(np);           /* the vnode embedded in/paired with np */
 1020         vfsp = vp->v_vfsp;         /* saved now: vp is freed before VFS_RELE below */
 1021 
 1022         ASSERT(vp->v_count == 1);  /* caller holds the last reference */
 1023         ASSERT(np->r_count == 0);
 1024         ASSERT(np->r_mapcnt == 0);
 1025         ASSERT(np->r_secattr.vsa_aclentp == NULL);
 1026         ASSERT(np->r_cred == NULL);
 1027         ASSERT(np->n_rpath == NULL);
 1028         ASSERT(!(np->r_flags & RHASHED));  /* already removed from the node hash */
 1029         ASSERT(np->r_freef == NULL && np->r_freeb == NULL);  /* not on the free list */
 1030         atomic_dec_ulong((ulong_t *)&smbnodenew);  /* one fewer allocated smbnode */
 1031         vn_invalid(vp);            /* invalidate, then release the vnode */
 1032         vn_free(vp);
 1033         kmem_cache_free(smbnode_cache, np);
 1034         VFS_RELE(vfsp);            /* drop the VFS hold last, via the saved pointer */
 1035 }
1036 
1037 /*
1038  * Flush all vnodes in this (or every) vfs.
1039  * Used by nfs_sync and by nfs_unmount.
1040  */
 1041 /*ARGSUSED*/
 1042 void
 1043 smbfs_rflush(struct vfs *vfsp, cred_t *cr)
 1044 {
 1045         /* Todo: mmap support. */  /* intentional stub; both args unused (ARGSUSED) */
 1046 }
1047 
1048 /* access cache */
1049 /* client handles */
1050