Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*


10473         /*
10474          * Check to see if the vnode is currently marked as not cachable.
10475          * This means portions of the file are locked (through VOP_FRLOCK).
10476          * In this case the map request must be refused.  We use
10477          * rp->r_lkserlock to avoid a race with concurrent lock requests.
10478          *
10479          * Atomically increment r_inmap after acquiring r_rwlock. The
10480          * idea here is to acquire r_rwlock to block read/write and
10481          * not to protect r_inmap. r_inmap will inform nfs4_read/write()
10482          * that we are in nfs4_map(). Now, r_rwlock is acquired in order
10483          * and we can prevent the deadlock that would have occurred
10484          * when nfs4_addmap() would have acquired it out of order.
10485          *
10486          * Since we are not protecting r_inmap by any lock, we do not
10487          * hold any lock when we decrement it. We atomically decrement
10488          * r_inmap after we release r_lkserlock.
10489          */
10490 
10491         if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR4(vp)))
10492                 return (EINTR);
10493         atomic_add_int(&rp->r_inmap, 1);
10494         nfs_rw_exit(&rp->r_rwlock);
10495 
10496         if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR4(vp))) {
10497                 atomic_add_int(&rp->r_inmap, -1);
10498                 return (EINTR);
10499         }
10500 
10501 
10502         if (vp->v_flag & VNOCACHE) {
10503                 error = EAGAIN;
10504                 goto done;
10505         }
10506 
10507         /*
10508          * Don't allow concurrent locks and mapping if mandatory locking is
10509          * enabled.
10510          */
10511         if (flk_has_remote_locks(vp)) {
10512                 struct vattr va;
10513                 va.va_mask = AT_MODE;
10514                 error = nfs4getattr(vp, &va, cr);
10515                 if (error != 0)
10516                         goto done;
10517                 if (MANDLOCK(vp, va.va_mode)) {


10585                 mutex_exit(&osp->os_sync_lock);
10586                 open_stream_rele(osp, rp);
10587         }
10588 
10589         vn_a.vp = vp;
10590         vn_a.offset = off;
10591         vn_a.type = (flags & MAP_TYPE);
10592         vn_a.prot = (uchar_t)prot;
10593         vn_a.maxprot = (uchar_t)maxprot;
10594         vn_a.flags = (flags & ~MAP_TYPE);
10595         vn_a.cred = cr;
10596         vn_a.amp = NULL;
10597         vn_a.szc = 0;
10598         vn_a.lgrp_mem_policy_flags = 0;
10599 
10600         error = as_map(as, *addrp, len, segvn_create, &vn_a);
10601         as_rangeunlock(as);
10602 
10603 done:
10604         nfs_rw_exit(&rp->r_lkserlock);
10605         atomic_add_int(&rp->r_inmap, -1);
10606         return (error);
10607 }
10608 
10609 /*
10610  * We're most likely dealing with a kernel module that likes to READ
10611  * and mmap without OPENing the file (ie: lookup/read/mmap), so let's
10612  * officially OPEN the file to create the necessary client state
10613  * for bookkeeping of os_mmap_read/write counts.
10614  *
10615  * Since VOP_MAP only passes in a pointer to the vnode rather than
10616  * a double pointer, we can't handle the case where nfs4open_otw()
10617  * returns a different vnode than the one passed into VOP_MAP (since
10618  * VOP_DELMAP will not see the vnode nfs4open_otw used).  In this case,
10619  * we return NULL and let nfs4_map() fail.  Note: the only case where
10620  * this should happen is if the file got removed and replaced with the
10621  * same name on the server (in addition to the fact that we're trying
10622  * to VOP_MAP without VOP_OPENing the file in the first place).
10623  */
10624 static int
10625 open_and_get_osp(vnode_t *map_vp, cred_t *cr, nfs4_open_stream_t **ospp)




10473         /*
10474          * Check to see if the vnode is currently marked as not cachable.
10475          * This means portions of the file are locked (through VOP_FRLOCK).
10476          * In this case the map request must be refused.  We use
10477          * rp->r_lkserlock to avoid a race with concurrent lock requests.
10478          *
10479          * Atomically increment r_inmap after acquiring r_rwlock. The
10480          * idea here is to acquire r_rwlock to block read/write and
10481          * not to protect r_inmap. r_inmap will inform nfs4_read/write()
10482          * that we are in nfs4_map(). Now, r_rwlock is acquired in order
10483          * and we can prevent the deadlock that would have occurred
10484          * when nfs4_addmap() would have acquired it out of order.
10485          *
10486          * Since we are not protecting r_inmap by any lock, we do not
10487          * hold any lock when we decrement it. We atomically decrement
10488          * r_inmap after we release r_lkserlock.
10489          */
10490 
10491         if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR4(vp)))
10492                 return (EINTR);
10493         atomic_inc_uint(&rp->r_inmap);
10494         nfs_rw_exit(&rp->r_rwlock);
10495 
10496         if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR4(vp))) {
10497                 atomic_dec_uint(&rp->r_inmap);
10498                 return (EINTR);
10499         }
10500 
10501 
10502         if (vp->v_flag & VNOCACHE) {
10503                 error = EAGAIN;
10504                 goto done;
10505         }
10506 
10507         /*
10508          * Don't allow concurrent locks and mapping if mandatory locking is
10509          * enabled.
10510          */
10511         if (flk_has_remote_locks(vp)) {
10512                 struct vattr va;
10513                 va.va_mask = AT_MODE;
10514                 error = nfs4getattr(vp, &va, cr);
10515                 if (error != 0)
10516                         goto done;
10517                 if (MANDLOCK(vp, va.va_mode)) {


10585                 mutex_exit(&osp->os_sync_lock);
10586                 open_stream_rele(osp, rp);
10587         }
10588 
10589         vn_a.vp = vp;
10590         vn_a.offset = off;
10591         vn_a.type = (flags & MAP_TYPE);
10592         vn_a.prot = (uchar_t)prot;
10593         vn_a.maxprot = (uchar_t)maxprot;
10594         vn_a.flags = (flags & ~MAP_TYPE);
10595         vn_a.cred = cr;
10596         vn_a.amp = NULL;
10597         vn_a.szc = 0;
10598         vn_a.lgrp_mem_policy_flags = 0;
10599 
10600         error = as_map(as, *addrp, len, segvn_create, &vn_a);
10601         as_rangeunlock(as);
10602 
10603 done:
10604         nfs_rw_exit(&rp->r_lkserlock);
10605         atomic_dec_uint(&rp->r_inmap);
10606         return (error);
10607 }
10608 
10609 /*
10610  * We're most likely dealing with a kernel module that likes to READ
10611  * and mmap without OPENing the file (ie: lookup/read/mmap), so let's
10612  * officially OPEN the file to create the necessary client state
10613  * for bookkeeping of os_mmap_read/write counts.
10614  *
10615  * Since VOP_MAP only passes in a pointer to the vnode rather than
10616  * a double pointer, we can't handle the case where nfs4open_otw()
10617  * returns a different vnode than the one passed into VOP_MAP (since
10618  * VOP_DELMAP will not see the vnode nfs4open_otw used).  In this case,
10619  * we return NULL and let nfs4_map() fail.  Note: the only case where
10620  * this should happen is if the file got removed and replaced with the
10621  * same name on the server (in addition to the fact that we're trying
10622  * to VOP_MAP without VOP_OPENing the file in the first place).
10623  */
10624 static int
10625 open_and_get_osp(vnode_t *map_vp, cred_t *cr, nfs4_open_stream_t **ospp)