	 * In this case the map request must be refused. We use
	 * rp->r_lkserlock to avoid a race with concurrent lock requests.
	 */
	rp = VTOR(vp);

	/*
	 * Atomically increment r_inmap after acquiring r_rwlock. The
	 * idea here is to acquire r_rwlock to block read/write and
	 * not to protect r_inmap. r_inmap informs nfs3_read/write()
	 * that we are in nfs3_map(). Acquiring r_rwlock here, in the
	 * same order as the read/write paths, prevents the deadlock
	 * that nfs3_addmap() would cause by acquiring it out of order.
	 *
	 * Since we are not protecting r_inmap with any lock, we do not
	 * hold any lock when we decrement it. We atomically decrement
	 * r_inmap after we release r_lkserlock.
	 */

	if (nfs_rw_enter_sig(&rp->r_rwlock, RW_WRITER, INTR(vp)))
		return (EINTR);
	atomic_inc_uint(&rp->r_inmap);
	nfs_rw_exit(&rp->r_rwlock);

	if (nfs_rw_enter_sig(&rp->r_lkserlock, RW_READER, INTR(vp))) {
		atomic_dec_uint(&rp->r_inmap);
		return (EINTR);
	}

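	/*
	 * Mapped I/O goes through the page cache, so a vnode for which
	 * caching has been disabled cannot be mapped.
	 */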
	if (vp->v_flag & VNOCACHE) {
		error = EAGAIN;
		goto done;
	}

	/*
	 * Don't allow concurrent locks and mapping if mandatory locking is
	 * enabled.
	 */
	if ((flk_has_remote_locks(vp) || lm_has_sleep(vp)) &&
	    MANDLOCK(vp, va.va_mode)) {
		error = EAGAIN;
		goto done;
	}

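	/*
	 * Hold the address-space range lock while an address is chosen
	 * and the segment is created, so that no other thread can claim
	 * the range in between. choose_addr() honors a MAP_FIXED
	 * request and otherwise picks a suitably aligned address.
	 */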
	as_rangelock(as);
	error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
	if (error != 0) {
		as_rangeunlock(as);
		goto done;
	}

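	/*
	 * Describe the segment for segvn_create(): a vnode-backed
	 * mapping with no anonymous map (amp == NULL) and the default
	 * page size (szc == 0).
	 */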
	vn_a.vp = vp;
	vn_a.offset = off;
	vn_a.type = (flags & MAP_TYPE);
	vn_a.prot = (uchar_t)prot;
	vn_a.maxprot = (uchar_t)maxprot;
	vn_a.flags = (flags & ~MAP_TYPE);
	vn_a.cred = cr;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	error = as_map(as, *addrp, len, segvn_create, &vn_a);
	as_rangeunlock(as);

done:
	nfs_rw_exit(&rp->r_lkserlock);
	atomic_dec_uint(&rp->r_inmap);
	return (error);
}

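/*
 * Called via VOP_ADDMAP() once a mapping to this vnode has been
 * established.
 */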
/* ARGSUSED */
static int
nfs3_addmap(vnode_t *vp, offset_t off, struct as *as, caddr_t addr,
	size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
	cred_t *cr, caller_context_t *ct)
{
	rnode_t *rp;

	if (vp->v_flag & VNOMAP)
		return (ENOSYS);
	if (nfs_zone() != VTOMI(vp)->mi_zone)
		return (EIO);

	rp = VTOR(vp);
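	/*
	 * Track how many pages of this file are mapped; btopr() rounds
	 * the byte length up to whole pages. nfs3_delmap() drops the
	 * count when mappings are removed.
	 */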
	atomic_add_long((ulong_t *)&rp->r_mapcnt, btopr(len));

	return (0);
}