630 * destroy old locks before bzero'ing and
631 * recreating the locks below.
632 */
633 uninit_rnode4(rp);
634
635 /*
636 * Make sure that if rnode is recycled then
637 * VFS count is decremented properly before
638 * reuse.
639 */
640 VFS_RELE(vp->v_vfsp);
641 vn_reinit(vp);
642 } else {
643 vnode_t *new_vp;
644
645 mutex_exit(&rp4freelist_lock);
646
647 rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
648 new_vp = vn_alloc(KM_SLEEP);
649
650 atomic_add_long((ulong_t *)&rnode4_new, 1);
651 #ifdef DEBUG
652 clstat4_debug.nrnode.value.ui64++;
653 #endif
654 vp = new_vp;
655 }
656
657 bzero(rp, sizeof (*rp));
658 rp->r_vnode = vp;
659 nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
660 nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
661 mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
662 mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
663 mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
664 mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
665 rp->created_v4 = 0;
666 list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
667 offsetof(nfs4_open_stream_t, os_node));
668 rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
669 rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
670 cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
1203 rp4_addfree(rp, cr);
1204 }
1205 }
1206
1207 /*
1208 * This routine destroys all the resources of an rnode
1209 * and finally the rnode itself.
1210 */
1211 static void
1212 destroy_rnode4(rnode4_t *rp)
1213 {
1214 vnode_t *vp;
1215 vfs_t *vfsp;
1216
1217 ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);
1218
1219 vp = RTOV4(rp);
1220 vfsp = vp->v_vfsp;
1221
1222 uninit_rnode4(rp);
1223 atomic_add_long((ulong_t *)&rnode4_new, -1);
1224 #ifdef DEBUG
1225 clstat4_debug.nrnode.value.ui64--;
1226 #endif
1227 kmem_cache_free(rnode4_cache, rp);
1228 vn_invalid(vp);
1229 vn_free(vp);
1230 VFS_RELE(vfsp);
1231 }
1232
1233 /*
1234 * Invalidate the attributes on all rnodes forcing the next getattr
1235 * to go over the wire. Used to flush stale uid and gid mappings.
1236 * May be done on a per-vfsp basis (vfsp != NULL), or on all rnodes (vfsp == NULL)
1237 */
1238 void
1239 nfs4_rnode_invalidate(struct vfs *vfsp)
1240 {
1241 int index;
1242 rnode4_t *rp;
1243 vnode_t *vp;
|
630 * destroy old locks before bzero'ing and
631 * recreating the locks below.
632 */
633 uninit_rnode4(rp);
634
635 /*
636 * Make sure that if rnode is recycled then
637 * VFS count is decremented properly before
638 * reuse.
639 */
640 VFS_RELE(vp->v_vfsp);
641 vn_reinit(vp);
642 } else {
643 vnode_t *new_vp;
644
645 mutex_exit(&rp4freelist_lock);
646
647 rp = kmem_cache_alloc(rnode4_cache, KM_SLEEP);
648 new_vp = vn_alloc(KM_SLEEP);
649
650 atomic_inc_ulong((ulong_t *)&rnode4_new);
651 #ifdef DEBUG
652 clstat4_debug.nrnode.value.ui64++;
653 #endif
654 vp = new_vp;
655 }
656
657 bzero(rp, sizeof (*rp));
658 rp->r_vnode = vp;
659 nfs_rw_init(&rp->r_rwlock, NULL, RW_DEFAULT, NULL);
660 nfs_rw_init(&rp->r_lkserlock, NULL, RW_DEFAULT, NULL);
661 mutex_init(&rp->r_svlock, NULL, MUTEX_DEFAULT, NULL);
662 mutex_init(&rp->r_statelock, NULL, MUTEX_DEFAULT, NULL);
663 mutex_init(&rp->r_statev4_lock, NULL, MUTEX_DEFAULT, NULL);
664 mutex_init(&rp->r_os_lock, NULL, MUTEX_DEFAULT, NULL);
665 rp->created_v4 = 0;
666 list_create(&rp->r_open_streams, sizeof (nfs4_open_stream_t),
667 offsetof(nfs4_open_stream_t, os_node));
668 rp->r_lo_head.lo_prev_rnode = &rp->r_lo_head;
669 rp->r_lo_head.lo_next_rnode = &rp->r_lo_head;
670 cv_init(&rp->r_cv, NULL, CV_DEFAULT, NULL);
1203 rp4_addfree(rp, cr);
1204 }
1205 }
1206
1207 /*
1208 * This routine destroys all the resources of an rnode
1209 * and finally the rnode itself.
1210 */
1211 static void
1212 destroy_rnode4(rnode4_t *rp)
1213 {
1214 vnode_t *vp;
1215 vfs_t *vfsp;
1216 
     /* Any delegation must have been returned before teardown. */
1217 ASSERT(rp->r_deleg_type == OPEN_DELEGATE_NONE);
1218 
1219 vp = RTOV4(rp);
     /* Save the vfs pointer: vp is freed below, before the VFS_RELE. */
1220 vfsp = vp->v_vfsp;
1221 
1222 uninit_rnode4(rp);
     /* Drop the global count of allocated rnodes. */
1223 atomic_dec_ulong((ulong_t *)&rnode4_new);
1224 #ifdef DEBUG
1225 clstat4_debug.nrnode.value.ui64--;
1226 #endif
1227 kmem_cache_free(rnode4_cache, rp);
     /* The vnode is invalidated and freed only after the rnode is gone. */
1228 vn_invalid(vp);
1229 vn_free(vp);
1230 VFS_RELE(vfsp);
1231 }
1232
1233 /*
1234 * Invalidate the attributes on all rnodes forcing the next getattr
1235 * to go over the wire. Used to flush stale uid and gid mappings.
1236 * May be done on a per-vfsp basis (vfsp != NULL), or on all rnodes (vfsp == NULL)
1237 */
1238 void
1239 nfs4_rnode_invalidate(struct vfs *vfsp)
1240 {
1241 int index;
1242 rnode4_t *rp;
1243 vnode_t *vp;
|