	 * SE_EXCL. Instead we rely on the fact that a forced umount or
	 * applying a filesystem lock via ufs_fiolfs() will block in the
	 * implicit call to ufs_flush() until we unlock the pages after the
	 * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
	 * above 0 until they are done. We have to be careful not to increment
	 * ul_vnops_cnt here after forceful unmount hlocks the file system.
	 *
	 * If pp is NULL, use ul_lock to make sure we don't increment
	 * ul_vnops_cnt after forceful unmount hlocks the file system.
	 */
	if (vmpss || pp == NULL) {
		ulp = &ufsvfsp->vfs_ulockfs;
		if (pp == NULL)
			mutex_enter(&ulp->ul_lock);
		if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
			if (pp == NULL) {
				mutex_exit(&ulp->ul_lock);
			}
			return (vmpss ? EIO : EINVAL);
		}
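		/*
		 * Advertise an active vnop so ufs_quiesce() waits for us;
		 * re-checking ufs_quiesce_pend after the increment closes
		 * the race with a quiesce that is already in progress.
		 */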
		atomic_inc_ulong(&ulp->ul_vnops_cnt);
		if (pp == NULL)
			mutex_exit(&ulp->ul_lock);
		if (ufs_quiesce_pend) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (vmpss ? EIO : EINVAL);
		}
	}

	if (dolock) {
		/*
		 * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
		 * handle a fault against a segment that maps vnode pages with
		 * large mappings. segvn creates pages and holds them locked
		 * SE_EXCL during the VOP_PAGEIO() call. In this case we have
		 * to use rw_tryenter() to avoid a potential deadlock, since
		 * in the lock order i_contents needs to be taken first.
		 * segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
		 */
		if (!vmpss) {
			rw_enter(&ip->i_contents, RW_READER);
		} else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
			if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
				cv_broadcast(&ulp->ul_cv);
			return (EDEADLK);
		}
	}

	/*
	 * Return an error to segvn because the pagefault request is beyond
	 * the PAGESIZE-rounded EOF.
	 */
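	/* btopr() rounds a byte count up to a whole number of pages. */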
	if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (EFAULT);
	}

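	/*
	 * A NULL page list is a probe rather than a real I/O request:
	 * report ENOSYS if the file has holes (it cannot back a swap
	 * file), EINVAL otherwise.
	 */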
	if (pp == NULL) {
		if (bmap_has_holes(ip)) {
			err = ENOSYS;
		} else {
			err = EINVAL;
		}
		if (dolock)
			rw_exit(&ip->i_contents);
		if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
			cv_broadcast(&ulp->ul_cv);
		return (err);
	}

	/*
	 * Break the I/O request into chunks, one for each contiguous
	 * stretch of disk blocks in the target file.
	 */
	while (done_len < io_len) {
		ASSERT(cpp);
		contig = 0;
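		/*
		 * bmap_read() maps the file offset to its disk block (bn)
		 * and returns the length of the contiguous run it begins
		 * (contig).
		 */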
		if (err = bmap_read(ip, (u_offset_t)(io_off + done_len),
		    &bn, &contig))
			break;

		if (bn == UFS_HOLE) {	/* No holey swapfiles */
			if (vmpss) {
				err = EFAULT;
				break;
			}
	/*
	 * ... (remainder of the per-chunk I/O loop elided in this excerpt;
	 * the error-cleanup tail below resumes mid-fragment, with the
	 * read/write test restored for context.)
	 */
			if (flags & B_READ)
				pvn_read_done(cpp, B_ERROR);
			else
				pvn_write_done(cpp, B_ERROR);
		} else {
			/* Re-assemble list and let caller clean up */
			page_list_concat(&opp, &cpp);
			page_list_concat(&opp, &npp);
		}
	}

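	/*
	 * For faults serviced via VOP_PAGEIO(), mark the access time
	 * unless atime updates are suppressed (NOIACC lockfs, read-only
	 * or noatime mount).
	 */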
	if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
	    ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
		mutex_enter(&ip->i_tlock);
		ip->i_flag |= IACC;
		ITIMES_NOLOCK(ip);
		mutex_exit(&ip->i_tlock);
	}

	if (dolock)
		rw_exit(&ip->i_contents);
	if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
		cv_broadcast(&ulp->ul_cv);
	return (err);
}

/*
 * Called when the kernel is in a frozen state to dump data
 * directly to the device. It uses a private dump data structure,
 * set up by dump_ctl(), to locate the correct disk block to dump to.
 */
/*ARGSUSED*/
static int
ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
    caller_context_t *ct)
{
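	/* ldbn and dblks appear to be in DEV_BSIZE (512-byte) units */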
	u_offset_t file_size;
	struct inode *ip = VTOI(vp);
	struct fs *fs = ip->i_fs;
	daddr_t dbn, lfsbn;
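	/* DEV_BSIZE (512-byte) device blocks per filesystem block */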
	int disk_blks = fs->fs_bsize >> DEV_BSHIFT;
	int error = 0;