/*
 * NOTE(review): mid-function fragment -- this looks like the mmap entry
 * point of UFS (ufs_map()); the enclosing function, its local declarations
 * (error, as, addrp, ufsvfsp, ulp, smask, sig, ...) and the "out" label
 * are outside this view.  The leading "56xx" tokens on each line are line
 * numbers embedded by the tool that produced this dump, not code.  Only
 * comments were added below; every code token is unchanged.
 */
5636 /*
5637 * Note that if we are retrying (because ufs_lockfs_trybegin failed in
5638 * the previous attempt), some other thread could have grabbed
5639 * the same VA range if MAP_FIXED is set. In that case, choose_addr
5640 * would unmap the valid VA range, that is ok.
5641 */
5642 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
5643 if (error != 0) {
5644 as_rangeunlock(as);
5645 goto out;
5646 }
5647
5648 /*
5649 * a_lock has to be acquired before entering the lockfs protocol
5650 * because that is the order in which pagefault works. Also we cannot
5651 * block on a_lock here because this waiting writer will prevent
5652 * further readers like ufs_read from progressing and could cause
5653 * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
5654 * pending.
5655 */
5656 while (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_WRITER)) {
5657 ufs_map_alock_retry_cnt++;
5658 delay(RETRY_LOCK_DELAY);
5659 }
5660
5661 /*
5662 * We can't hold as->a_lock and wait for lockfs to succeed because
5663 * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
5664 * instead.
5665 */
/* Intentional assignment-in-condition: nonzero error means trybegin failed. */
5666 if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
5667 /*
5668 * ufs_lockfs_trybegin() did not succeed. It is safer to give up
5669 * as->a_lock and wait for ulp->ul_fs_lock status to change.
5670 */
5671 ufs_map_lockfs_retry_cnt++;
5672 AS_LOCK_EXIT(as, &as->a_lock);
5673 as_rangeunlock(as);
5674 if (error == EIO)
5675 goto out;
5676
5677 mutex_enter(&ulp->ul_lock);
5678 while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
5679 if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
/* Hard lock or "nointr" filesystem: wait without signal interruption. */
5680 cv_wait(&ulp->ul_cv, &ulp->ul_lock);
5681 } else {
/*
 * cv_wait_sig() returns 0 when the wait was interrupted by a signal
 * (condvar(9F)); sigintr()/sigunintr() bracket the wait so that
 * signals can break it.  So "!sig" below means "interrupted".
 */
5682 sigintr(&smask, 1);
5683 sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
5684 sigunintr(&smask);
5685 if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
5686 !sig) || ufsvfsp->vfs_dontblock) {
5687 mutex_exit(&ulp->ul_lock);
5688 return (EINTR);
5689 }
5690 }
5691 }
5692 mutex_exit(&ulp->ul_lock);
/*
 * NOTE(review): presumably the enclosing function now loops back to retry
 * from the rangelock/choose_addr step (the header comment above refers to
 * "retrying") -- the retry control flow is outside this view; confirm in
 * the full ufs_map() source.
 */
|
/*
 * NOTE(review): second copy of the same mid-function fragment (ufs_map()
 * mmap path).  It differs from the preceding copy only in the AS lock
 * macro arity: AS_LOCK_TRYENTER()/AS_LOCK_EXIT() take just the as pointer
 * here, with the &as->a_lock argument dropped -- presumably reflecting an
 * illumos change that made the rwlock argument implicit in the macros;
 * confirm against the AS_LOCK_* definitions in <vm/as.h>.  Only comments
 * were added below; every code token is unchanged.
 */
5636 /*
5637 * Note that if we are retrying (because ufs_lockfs_trybegin failed in
5638 * the previous attempt), some other thread could have grabbed
5639 * the same VA range if MAP_FIXED is set. In that case, choose_addr
5640 * would unmap the valid VA range, that is ok.
5641 */
5642 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
5643 if (error != 0) {
5644 as_rangeunlock(as);
5645 goto out;
5646 }
5647
5648 /*
5649 * a_lock has to be acquired before entering the lockfs protocol
5650 * because that is the order in which pagefault works. Also we cannot
5651 * block on a_lock here because this waiting writer will prevent
5652 * further readers like ufs_read from progressing and could cause
5653 * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
5654 * pending.
5655 */
5656 while (!AS_LOCK_TRYENTER(as, RW_WRITER)) {
5657 ufs_map_alock_retry_cnt++;
5658 delay(RETRY_LOCK_DELAY);
5659 }
5660
5661 /*
5662 * We can't hold as->a_lock and wait for lockfs to succeed because
5663 * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
5664 * instead.
5665 */
/* Intentional assignment-in-condition: nonzero error means trybegin failed. */
5666 if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
5667 /*
5668 * ufs_lockfs_trybegin() did not succeed. It is safer to give up
5669 * as->a_lock and wait for ulp->ul_fs_lock status to change.
5670 */
5671 ufs_map_lockfs_retry_cnt++;
5672 AS_LOCK_EXIT(as);
5673 as_rangeunlock(as);
5674 if (error == EIO)
5675 goto out;
5676
5677 mutex_enter(&ulp->ul_lock);
5678 while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
5679 if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
/* Hard lock or "nointr" filesystem: wait without signal interruption. */
5680 cv_wait(&ulp->ul_cv, &ulp->ul_lock);
5681 } else {
/*
 * cv_wait_sig() returns 0 when the wait was interrupted by a signal
 * (condvar(9F)); sigintr()/sigunintr() bracket the wait so that
 * signals can break it.  So "!sig" below means "interrupted".
 */
5682 sigintr(&smask, 1);
5683 sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
5684 sigunintr(&smask);
5685 if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
5686 !sig) || ufsvfsp->vfs_dontblock) {
5687 mutex_exit(&ulp->ul_lock);
5688 return (EINTR);
5689 }
5690 }
5691 }
5692 mutex_exit(&ulp->ul_lock);
/*
 * NOTE(review): presumably the enclosing function now loops back to retry
 * from the rangelock/choose_addr step (the header comment above refers to
 * "retrying") -- the retry control flow is outside this view; confirm in
 * the full ufs_map() source.
 */
|