/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1984, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/*      Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
/*        All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/ksynch.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/filio.h>
#include <sys/policy.h>

#include <sys/fs/ufs_fs.h>
#include <sys/fs/ufs_lockfs.h>
#include <sys/fs/ufs_filio.h>
#include <sys/fs/ufs_inode.h>
#include <sys/fs/ufs_fsdir.h>
#include <sys/fs/ufs_quota.h>
#include <sys/fs/ufs_log.h>
#include <sys/fs/ufs_snap.h>
#include <sys/fs/ufs_trans.h>
#include <sys/fs/ufs_panic.h>
#include <sys/fs/ufs_bio.h>
#include <sys/dirent.h>           /* must be AFTER <sys/fs/fsdir.h>! */
#include <sys/errno.h>
#include <sys/fssnap_if.h>
#include <sys/unistd.h>
#include <sys/sunddi.h>

#include <sys/filio.h>            /* _FIOIO */

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_vn.h>
#include <vm/seg_kmem.h>
#include <vm/rm.h>
#include <sys/swap.h>

#include <fs/fs_subr.h>

#include <sys/fs/decomp.h>

static struct instats ins;

static  int ufs_getpage_ra(struct vnode *, u_offset_t, struct seg *, caddr_t);
static  int ufs_getpage_miss(struct vnode *, u_offset_t, size_t, struct seg *,
                caddr_t, struct page **, size_t, enum seg_rw, int);
static  int ufs_open(struct vnode **, int, struct cred *, caller_context_t *);
static  int ufs_close(struct vnode *, int, int, offset_t, struct cred *,
                caller_context_t *);
static  int ufs_read(struct vnode *, struct uio *, int, struct cred *,
                struct caller_context *);
static  int ufs_write(struct vnode *, struct uio *, int, struct cred *,
                struct caller_context *);
static  int ufs_ioctl(struct vnode *, int, intptr_t, int, struct cred *,
                int *, caller_context_t *);
static  int ufs_getattr(struct vnode *, struct vattr *, int, struct cred *,
                caller_context_t *);
static  int ufs_setattr(struct vnode *, struct vattr *, int, struct cred *,
                caller_context_t *);
static  int ufs_access(struct vnode *, int, int, struct cred *,
                caller_context_t *);
static  int ufs_lookup(struct vnode *, char *, struct vnode **,
                struct pathname *, int, struct vnode *, struct cred *,
                caller_context_t *, int *, pathname_t *);
static  int ufs_create(struct vnode *, char *, struct vattr *, enum vcexcl,
                int, struct vnode **, struct cred *, int,
                caller_context_t *, vsecattr_t  *);
static  int ufs_remove(struct vnode *, char *, struct cred *,
                caller_context_t *, int);
static  int ufs_link(struct vnode *, struct vnode *, char *, struct cred *,
                caller_context_t *, int);
static  int ufs_rename(struct vnode *, char *, struct vnode *, char *,
                struct cred *, caller_context_t *, int);
static  int ufs_mkdir(struct vnode *, char *, struct vattr *, struct vnode **,
                struct cred *, caller_context_t *, int, vsecattr_t *);
static  int ufs_rmdir(struct vnode *, char *, struct vnode *, struct cred *,
                caller_context_t *, int);
static  int ufs_readdir(struct vnode *, struct uio *, struct cred *, int *,
                caller_context_t *, int);
static  int ufs_symlink(struct vnode *, char *, struct vattr *, char *,
                struct cred *, caller_context_t *, int);
static  int ufs_readlink(struct vnode *, struct uio *, struct cred *,
                caller_context_t *);
static  int ufs_fsync(struct vnode *, int, struct cred *, caller_context_t *);
static  void ufs_inactive(struct vnode *, struct cred *, caller_context_t *);
static  int ufs_fid(struct vnode *, struct fid *, caller_context_t *);
static  int ufs_rwlock(struct vnode *, int, caller_context_t *);
static  void ufs_rwunlock(struct vnode *, int, caller_context_t *);
static  int ufs_seek(struct vnode *, offset_t, offset_t *, caller_context_t *);
static  int ufs_frlock(struct vnode *, int, struct flock64 *, int, offset_t,
                struct flk_callback *, struct cred *,
                caller_context_t *);
static  int ufs_space(struct vnode *, int, struct flock64 *, int, offset_t,
                cred_t *, caller_context_t *);
static  int ufs_getpage(struct vnode *, offset_t, size_t, uint_t *,
                struct page **, size_t, struct seg *, caddr_t,
                enum seg_rw, struct cred *, caller_context_t *);
static  int ufs_putpage(struct vnode *, offset_t, size_t, int, struct cred *,
                caller_context_t *);
static  int ufs_putpages(struct vnode *, offset_t, size_t, int, struct cred *);
static  int ufs_map(struct vnode *, offset_t, struct as *, caddr_t *, size_t,
                uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static  int ufs_addmap(struct vnode *, offset_t, struct as *, caddr_t,  size_t,
                uchar_t, uchar_t, uint_t, struct cred *, caller_context_t *);
static  int ufs_delmap(struct vnode *, offset_t, struct as *, caddr_t,  size_t,
                uint_t, uint_t, uint_t, struct cred *, caller_context_t *);
static  int ufs_poll(vnode_t *, short, int, short *, struct pollhead **,
                caller_context_t *);
static  int ufs_dump(vnode_t *, caddr_t, offset_t, offset_t,
    caller_context_t *);
static  int ufs_l_pathconf(struct vnode *, int, ulong_t *, struct cred *,
                caller_context_t *);
static  int ufs_pageio(struct vnode *, struct page *, u_offset_t, size_t, int,
                struct cred *, caller_context_t *);
static  int ufs_dumpctl(vnode_t *, int, offset_t *, caller_context_t *);
static  daddr32_t *save_dblks(struct inode *, struct ufsvfs *, daddr32_t *,
                daddr32_t *, int, int);
static  int ufs_getsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
                caller_context_t *);
static  int ufs_setsecattr(struct vnode *, vsecattr_t *, int, struct cred *,
                caller_context_t *);
static  int ufs_priv_access(void *, int, struct cred *);
static  int ufs_eventlookup(struct vnode *, char *, struct cred *,
    struct vnode **);
extern int as_map_locked(struct as *, caddr_t, size_t, int ((*)()), void *);

/*
 * For lockfs: ulockfs begin/end is now inlined in the ufs_xxx functions.
 *
 * XXX - ULOCKFS in fs_pathconf and ufs_ioctl is not inlined yet.
 */
struct vnodeops *ufs_vnodeops;

/* NOTE: "not blkd" below means that the operation isn't blocked by lockfs */
const fs_operation_def_t ufs_vnodeops_template[] = {
        VOPNAME_OPEN,           { .vop_open = ufs_open },       /* not blkd */
        VOPNAME_CLOSE,          { .vop_close = ufs_close },     /* not blkd */
        VOPNAME_READ,           { .vop_read = ufs_read },
        VOPNAME_WRITE,          { .vop_write = ufs_write },
        VOPNAME_IOCTL,          { .vop_ioctl = ufs_ioctl },
        VOPNAME_GETATTR,        { .vop_getattr = ufs_getattr },
        VOPNAME_SETATTR,        { .vop_setattr = ufs_setattr },
        VOPNAME_ACCESS,         { .vop_access = ufs_access },
        VOPNAME_LOOKUP,         { .vop_lookup = ufs_lookup },
        VOPNAME_CREATE,         { .vop_create = ufs_create },
        VOPNAME_REMOVE,         { .vop_remove = ufs_remove },
        VOPNAME_LINK,           { .vop_link = ufs_link },
        VOPNAME_RENAME,         { .vop_rename = ufs_rename },
        VOPNAME_MKDIR,          { .vop_mkdir = ufs_mkdir },
        VOPNAME_RMDIR,          { .vop_rmdir = ufs_rmdir },
        VOPNAME_READDIR,        { .vop_readdir = ufs_readdir },
        VOPNAME_SYMLINK,        { .vop_symlink = ufs_symlink },
        VOPNAME_READLINK,       { .vop_readlink = ufs_readlink },
        VOPNAME_FSYNC,          { .vop_fsync = ufs_fsync },
        VOPNAME_INACTIVE,       { .vop_inactive = ufs_inactive }, /* not blkd */
        VOPNAME_FID,            { .vop_fid = ufs_fid },
        VOPNAME_RWLOCK,         { .vop_rwlock = ufs_rwlock },   /* not blkd */
        VOPNAME_RWUNLOCK,       { .vop_rwunlock = ufs_rwunlock }, /* not blkd */
        VOPNAME_SEEK,           { .vop_seek = ufs_seek },
        VOPNAME_FRLOCK,         { .vop_frlock = ufs_frlock },
        VOPNAME_SPACE,          { .vop_space = ufs_space },
        VOPNAME_GETPAGE,        { .vop_getpage = ufs_getpage },
        VOPNAME_PUTPAGE,        { .vop_putpage = ufs_putpage },
        VOPNAME_MAP,            { .vop_map = ufs_map },
        VOPNAME_ADDMAP,         { .vop_addmap = ufs_addmap },   /* not blkd */
        VOPNAME_DELMAP,         { .vop_delmap = ufs_delmap },   /* not blkd */
        VOPNAME_POLL,           { .vop_poll = ufs_poll },       /* not blkd */
        VOPNAME_DUMP,           { .vop_dump = ufs_dump },
        VOPNAME_PATHCONF,       { .vop_pathconf = ufs_l_pathconf },
        VOPNAME_PAGEIO,         { .vop_pageio = ufs_pageio },
        VOPNAME_DUMPCTL,        { .vop_dumpctl = ufs_dumpctl },
        VOPNAME_GETSECATTR,     { .vop_getsecattr = ufs_getsecattr },
        VOPNAME_SETSECATTR,     { .vop_setsecattr = ufs_setsecattr },
        VOPNAME_VNEVENT,        { .vop_vnevent = fs_vnevent_support },
        NULL,                   NULL
};
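
/*
 * The template above is presumably handed to vn_make_ops() when the file
 * system is initialized (outside this file) to construct the live
 * ufs_vnodeops vector declared above.
 */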

#define MAX_BACKFILE_COUNT      9999

/*
 * Created by ufs_dumpctl() to store a file's disk block info into memory.
 * Used by ufs_dump() to dump data to disk directly.
 */
struct dump {
        struct inode    *ip;            /* the file we contain */
        daddr_t         fsbs;           /* number of blocks stored */
        struct timeval32 time;          /* time stamp for the struct */
        daddr32_t       dblk[1];        /* place holder for block info */
};
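
/*
 * dblk[1] is the usual pre-C99 variable-length trailer: ufs_dumpctl() is
 * expected to allocate this structure with enough extra space for one
 * daddr32_t entry per disk block of the file being dumped, with fsbs
 * recording how many entries were actually stored.
 */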

static struct dump *dump_info = NULL;

/*
 * Previously there was no special action required for ordinary files.
 * (Devices are handled through the device file system.)
 * Now that we support large files, the large file API requires open to
 * fail if the file is large and the caller has not asked for large file
 * support.
 * We could prevent data corruption by doing an atomic check of the size
 * and truncating the file when it is opened with the FTRUNC flag set,
 * but traditionally this is done by the vfs/vnode layers.  Handling
 * truncation here would therefore change the existing semantics of
 * VOP_OPEN, so we chose not to implement anything here.  The check for
 * a file size > 2GB is done at the vfs layer in vn_open().
 */

/* ARGSUSED */
static int
ufs_open(struct vnode **vpp, int flag, struct cred *cr, caller_context_t *ct)
{
        return (0);
}

/*ARGSUSED*/
static int
ufs_close(struct vnode *vp, int flag, int count, offset_t offset,
        struct cred *cr, caller_context_t *ct)
{
        cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
        cleanshares(vp, ttoproc(curthread)->p_pid);

        /*
         * Push partially filled cluster at last close.
         * ``last close'' is approximated because the dnlc
         * may have a hold on the vnode.
         * Checking for VBAD here will also act as a forced umount check.
         */
        if (vp->v_count <= 2 && vp->v_type != VBAD) {
                struct inode *ip = VTOI(vp);
                if (ip->i_delaylen) {
                        ins.in_poc.value.ul++;
                        (void) ufs_putpages(vp, ip->i_delayoff, ip->i_delaylen,
                            B_ASYNC | B_FREE, cr);
                        ip->i_delaylen = 0;
                }
        }

        return (0);
}

/*ARGSUSED*/
static int
ufs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cr,
        struct caller_context *ct)
{
        struct inode *ip = VTOI(vp);
        struct ufsvfs *ufsvfsp;
        struct ulockfs *ulp = NULL;
        int error = 0;
        int intrans = 0;

        ASSERT(RW_READ_HELD(&ip->i_rwlock));

        /*
         * Mandatory locking needs to be done before ufs_lockfs_begin()
         * and TRANS_BEGIN_SYNC() calls since mandatory locks can sleep.
         */
        if (MANDLOCK(vp, ip->i_mode)) {
                /*
                 * ufs_getattr ends up being called by chklock
                 */
                error = chklock(vp, FREAD, uiop->uio_loffset,
                    uiop->uio_resid, uiop->uio_fmode, ct);
                if (error)
                        goto out;
        }

        ufsvfsp = ip->i_ufsvfs;
        error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READ_MASK);
        if (error)
                goto out;

        /*
         * In the case that a directory is opened for reading as a file
         * (e.g. "cat .") with the O_RSYNC, O_SYNC or O_DSYNC flags set,
         * the locking order has to be changed to avoid a deadlock with
         * an update taking place on that directory at the same time.
         */
        if ((ip->i_mode & IFMT) == IFDIR) {

                rw_enter(&ip->i_contents, RW_READER);
                error = rdip(ip, uiop, ioflag, cr);
                rw_exit(&ip->i_contents);

                if (error) {
                        if (ulp)
                                ufs_lockfs_end(ulp);
                        goto out;
                }

                if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
                    TRANS_ISTRANS(ufsvfsp)) {
                        rw_exit(&ip->i_rwlock);
                        TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
                            error);
                        ASSERT(!error);
                        TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
                            TOP_READ_SIZE);
                        rw_enter(&ip->i_rwlock, RW_READER);
                }
        } else {
                /*
                 * Only transact reads to files opened for sync-read and
                 * sync-write on a file system that is not write locked.
                 *
                 * The ``not write locked'' check prevents problems with
                 * enabling/disabling logging on a busy file system.  E.g.,
                 * logging exists at the beginning of the read but does not
                 * at the end.
                 *
                 */
                if (ulp && (ioflag & FRSYNC) && (ioflag & (FSYNC | FDSYNC)) &&
                    TRANS_ISTRANS(ufsvfsp)) {
                        TRANS_BEGIN_SYNC(ufsvfsp, TOP_READ_SYNC, TOP_READ_SIZE,
                            error);
                        ASSERT(!error);
                        intrans = 1;
                }

                rw_enter(&ip->i_contents, RW_READER);
                error = rdip(ip, uiop, ioflag, cr);
                rw_exit(&ip->i_contents);

                if (intrans) {
                        TRANS_END_SYNC(ufsvfsp, error, TOP_READ_SYNC,
                            TOP_READ_SIZE);
                }
        }

        if (ulp) {
                ufs_lockfs_end(ulp);
        }
out:

        return (error);
}

extern  int     ufs_HW;         /* high water mark */
extern  int     ufs_LW;         /* low water mark */
int     ufs_WRITES = 1;         /* XXX - enable/disable */
int     ufs_throttles = 0;      /* throttling count */
int     ufs_allow_shared_writes = 1;    /* directio shared writes */

static int
ufs_check_rewrite(struct inode *ip, struct uio *uiop, int ioflag)
{
        int     shared_write;

        /*
         * If the FDSYNC flag is set then ignore the global
         * ufs_allow_shared_writes in this case.
         */
        shared_write = (ioflag & FDSYNC) | ufs_allow_shared_writes;

        /*
         * Filter to determine if this request is suitable as a
         * concurrent rewrite. This write must not allocate blocks
         * by extending the file or filling in holes. There is no point
         * trying this for FSYNC descriptors as the inode will be
         * synchronously updated after the write. The uio structure has
         * not yet been checked for sanity, so assume nothing.
         */
        return (((ip->i_mode & IFMT) == IFREG) && !(ioflag & FAPPEND) &&
            (uiop->uio_loffset >= (offset_t)0) &&
            (uiop->uio_loffset < ip->i_size) && (uiop->uio_resid > 0) &&
            ((ip->i_size - uiop->uio_loffset) >= uiop->uio_resid) &&
            !(ioflag & FSYNC) && !bmap_has_holes(ip) &&
            shared_write);
}

/*ARGSUSED*/
static int
ufs_write(struct vnode *vp, struct uio *uiop, int ioflag, cred_t *cr,
        caller_context_t *ct)
{
        struct inode *ip = VTOI(vp);
        struct ufsvfs *ufsvfsp;
        struct ulockfs *ulp;
        int retry = 1;
        int error, resv, resid = 0;
        int directio_status;
        int exclusive;
        int rewriteflg;
        long start_resid = uiop->uio_resid;

        ASSERT(RW_LOCK_HELD(&ip->i_rwlock));

retry_mandlock:
        /*
         * Mandatory locking needs to be done before ufs_lockfs_begin()
         * and TRANS_BEGIN_[A]SYNC() calls since mandatory locks can sleep.
         * Check for forced unmounts normally done in ufs_lockfs_begin().
         */
        if ((ufsvfsp = ip->i_ufsvfs) == NULL) {
                error = EIO;
                goto out;
        }
        if (MANDLOCK(vp, ip->i_mode)) {

                ASSERT(RW_WRITE_HELD(&ip->i_rwlock));

                /*
                 * ufs_getattr ends up being called by chklock
                 */
                error = chklock(vp, FWRITE, uiop->uio_loffset,
                    uiop->uio_resid, uiop->uio_fmode, ct);
                if (error)
                        goto out;
        }

        /* i_rwlock can change in chklock */
        exclusive = rw_write_held(&ip->i_rwlock);
        rewriteflg = ufs_check_rewrite(ip, uiop, ioflag);

        /*
         * Check for fast-path special case of directio re-writes.
         */
        if ((ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) &&
            !exclusive && rewriteflg) {

                error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
                if (error)
                        goto out;

                rw_enter(&ip->i_contents, RW_READER);
                error = ufs_directio_write(ip, uiop, ioflag, 1, cr,
                    &directio_status);
                if (directio_status == DIRECTIO_SUCCESS) {
                        uint_t i_flag_save;

                        if (start_resid != uiop->uio_resid)
                                error = 0;
                        /*
                         * Special treatment of access times for re-writes.
                         * If IMOD is not already set, then convert it
                         * to IMODACC for this operation. This defers
                         * entering a delta into the log until the inode
                         * is flushed. This mimics what is done for read
                         * operations and inode access time.
                         */
                        mutex_enter(&ip->i_tlock);
                        i_flag_save = ip->i_flag;
                        ip->i_flag |= IUPD | ICHG;
                        ip->i_seq++;
                        ITIMES_NOLOCK(ip);
                        if ((i_flag_save & IMOD) == 0) {
                                ip->i_flag &= ~IMOD;
                                ip->i_flag |= IMODACC;
                        }
                        mutex_exit(&ip->i_tlock);
                        rw_exit(&ip->i_contents);
                        if (ulp)
                                ufs_lockfs_end(ulp);
                        goto out;
                }
                rw_exit(&ip->i_contents);
                if (ulp)
                        ufs_lockfs_end(ulp);
        }

        if (!exclusive && !rw_tryupgrade(&ip->i_rwlock)) {
                rw_exit(&ip->i_rwlock);
                rw_enter(&ip->i_rwlock, RW_WRITER);
                /*
                 * Mandatory locking could have been enabled
                 * after dropping the i_rwlock.
                 */
                if (MANDLOCK(vp, ip->i_mode))
                        goto retry_mandlock;
        }

        error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_WRITE_MASK);
        if (error)
                goto out;

        /*
         * Amount of log space needed for this write
         */
        if (!rewriteflg || !(ioflag & FDSYNC))
                TRANS_WRITE_RESV(ip, uiop, ulp, &resv, &resid);

        /*
         * Throttle writes.
         */
        if (ufs_WRITES && (ip->i_writes > ufs_HW)) {
                mutex_enter(&ip->i_tlock);
                while (ip->i_writes > ufs_HW) {
                        ufs_throttles++;
                        cv_wait(&ip->i_wrcv, &ip->i_tlock);
                }
                mutex_exit(&ip->i_tlock);
        }
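
        /*
         * Note that the matching wakeup is not issued here: once enough of
         * the outstanding write byte count (i_writes) has drained back
         * below the low-water mark (ufs_LW), the write-completion path is
         * expected to cv_broadcast() on i_wrcv and release throttled
         * writers.
         */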

        /*
         * Enter Transaction
         *
         * If the write is a rewrite there is no need to open a transaction
         * if the FDSYNC flag is set and FSYNC is not.  In this case just
         * set the IMODACC flag so the inode update is done at a later time,
         * thus avoiding the overhead of a logging transaction that is not
         * required.
         */
        if (ioflag & (FSYNC|FDSYNC)) {
                if (ulp) {
                        if (rewriteflg) {
                                uint_t i_flag_save;

                                rw_enter(&ip->i_contents, RW_READER);
                                mutex_enter(&ip->i_tlock);
                                i_flag_save = ip->i_flag;
                                ip->i_flag |= IUPD | ICHG;
                                ip->i_seq++;
                                ITIMES_NOLOCK(ip);
                                if ((i_flag_save & IMOD) == 0) {
                                        ip->i_flag &= ~IMOD;
                                        ip->i_flag |= IMODACC;
                                }
                                mutex_exit(&ip->i_tlock);
                                rw_exit(&ip->i_contents);
                        } else {
                                int terr = 0;
                                TRANS_BEGIN_SYNC(ufsvfsp, TOP_WRITE_SYNC, resv,
                                    terr);
                                ASSERT(!terr);
                        }
                }
        } else {
                if (ulp)
                        TRANS_BEGIN_ASYNC(ufsvfsp, TOP_WRITE, resv);
        }

        /*
         * Write the file
         */
        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
        rw_enter(&ip->i_contents, RW_WRITER);
        if ((ioflag & FAPPEND) != 0 && (ip->i_mode & IFMT) == IFREG) {
                /*
                 * In append mode start at end of file.
                 */
                uiop->uio_loffset = ip->i_size;
        }

        /*
         * Mild optimisation: don't call ufs_trans_write() unless we have to.
         * Also, suppress file system full messages if we will retry.
         */
        if (retry)
                ip->i_flag |= IQUIET;
        if (resid) {
                TRANS_WRITE(ip, uiop, ioflag, error, ulp, cr, resv, resid);
        } else {
                error = wrip(ip, uiop, ioflag, cr);
        }
        ip->i_flag &= ~IQUIET;

        rw_exit(&ip->i_contents);
        rw_exit(&ufsvfsp->vfs_dqrwlock);

        /*
         * Leave Transaction
         */
        if (ulp) {
                if (ioflag & (FSYNC|FDSYNC)) {
                        if (!rewriteflg) {
                                int terr = 0;

                                TRANS_END_SYNC(ufsvfsp, terr, TOP_WRITE_SYNC,
                                    resv);
                                if (error == 0)
                                        error = terr;
                        }
                } else {
                        TRANS_END_ASYNC(ufsvfsp, TOP_WRITE, resv);
                }
                ufs_lockfs_end(ulp);
        }
out:
        if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
                /*
                 * Any blocks tied up in pending deletes?
                 */
                ufs_delete_drain_wait(ufsvfsp, 1);
                retry = 0;
                goto retry_mandlock;
        }

        if (error == ENOSPC && (start_resid != uiop->uio_resid))
                error = 0;

        return (error);
}

/*
 * Don't cache write blocks to files with the sticky bit set.
 * Used to keep swap files from blowing the page cache on a server.
 */
int stickyhack = 1;

/*
 * Free behind hacks.  The pager is busted.
 * XXX - need to pass the information down to writedone() in a flag like B_SEQ
 * or B_FREE_IF_TIGHT_ON_MEMORY.
 */
int     freebehind = 1;
int     smallfile = 0;
u_offset_t smallfile64 = 32 * 1024;

/*
 * While we should, in most cases, cache the pages for write, we
 * may also want to cache the pages for read as long as they are
 * frequently re-usable.
 *
 * If cache_read_ahead = 1, the pages for read will go to the tail
 * of the cache list when they are released, otherwise go to the head.
 */
int     cache_read_ahead = 0;

/*
 * Freebehind exists so that as we read large files sequentially we
 * don't consume most of memory with pages from a few files.  It takes
 * longer to re-read multiple small files from disk than it does to read
 * one large file sequentially.  As system memory grows, customers need
 * to retain bigger chunks of files in memory.  The advent of the
 * cachelist opens up the possibility of freeing pages to the head or
 * tail of the list.
 *
 * Not freeing a page is a bet that the page will be read again before
 * its segmap slot is needed for something else.  If we lose the bet,
 * it means some other thread is burdened with the page free we did
 * not do.  If we win, we save a free and reclaim.
 *
 * Freeing a page at the tail vs the head of the cachelist is a bet that
 * the page will survive until the next read.  It is also saying that
 * this page is more likely to be re-used than a page freed some time
 * ago and never reclaimed.
 *
 * Freebehind maintains a range of file offsets [smallfile1; smallfile2]:
 *
 *            0 < offset < smallfile1 : pages are not freed.
 *   smallfile1 < offset < smallfile2 : pages freed to tail of cachelist.
 *   smallfile2 < offset              : pages freed to head of cachelist.
 *
 * The range is computed at most once per second and depends on
 * freemem and ncpus_online.  Both parameters are bounded to be
 * >= smallfile && >= smallfile64.
 *
 * smallfile1 = (free memory / ncpu) / 1000
 * smallfile2 = (free memory / ncpu) / 10
 *
 * A few example values:
 *
 *       Free Mem (in Bytes) [smallfile1; smallfile2]  [smallfile1; smallfile2]
 *                                 ncpus_online = 4          ncpus_online = 64
 *       ------------------  -----------------------   -----------------------
 *             1G                   [256K;  25M]               [32K; 1.5M]
 *            10G                   [2.5M; 250M]              [156K; 15M]
 *           100G                    [25M; 2.5G]              [1.5M; 150M]
 *
 */

#define SMALLFILE1_D 1000
#define SMALLFILE2_D 10
static u_offset_t smallfile1 = 32 * 1024;
static u_offset_t smallfile2 = 32 * 1024;
static clock_t smallfile_update = 0; /* lbolt value of when to recompute */
uint_t smallfile1_d = SMALLFILE1_D;
uint_t smallfile2_d = SMALLFILE2_D;
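
/*
 * A rough sketch (not the in-kernel code itself) of how the range described
 * above follows from these tunables; the real recomputation happens in the
 * read path at most once per second, gated by smallfile_update, and is
 * clamped as noted above:
 *
 *      percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
 *      smallfile1 = MAX(percpufreeb / smallfile1_d, smallfile64);
 *      smallfile2 = MAX(percpufreeb / smallfile2_d, smallfile64);
 */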

/*
 * wrip does the real work of write requests for ufs.
 */
int
wrip(struct inode *ip, struct uio *uio, int ioflag, struct cred *cr)
{
        rlim64_t limit = uio->uio_llimit;
        u_offset_t off;
        u_offset_t old_i_size;
        struct fs *fs;
        struct vnode *vp;
        struct ufsvfs *ufsvfsp;
        caddr_t base;
        long start_resid = uio->uio_resid;   /* save starting resid */
        long premove_resid;                     /* resid before uiomove() */
        uint_t flags;
        int newpage;
        int iupdat_flag, directio_status;
        int n, on, mapon;
        int error, pagecreate;
        int do_dqrwlock;                /* drop/reacquire vfs_dqrwlock */
        int32_t iblocks;
        int     new_iblocks;

        /*
         * ip->i_size is incremented before the uiomove
         * is done on a write.  If the move fails (bad user
         * address) reset ip->i_size.
         * The better way would be to increment ip->i_size
         * only if the uiomove succeeds.
         */
        int i_size_changed = 0;
        o_mode_t type;
        int i_seq_needed = 0;

        vp = ITOV(ip);

        /*
         * check for forced unmount - should not happen as
         * the request passed the lockfs checks.
         */
        if ((ufsvfsp = ip->i_ufsvfs) == NULL)
                return (EIO);

        fs = ip->i_fs;

        ASSERT(RW_WRITE_HELD(&ip->i_contents));

        /* check for valid filetype */
        type = ip->i_mode & IFMT;
        if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
            (type != IFLNK) && (type != IFSHAD)) {
                return (EIO);
        }

        /*
         * the actual limit of UFS file size
         * is UFS_MAXOFFSET_T
         */
        if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
                limit = MAXOFFSET_T;

        if (uio->uio_loffset >= limit) {
                proc_t *p = ttoproc(curthread);

                mutex_enter(&p->p_lock);
                (void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
                    p, RCA_UNSAFE_SIGINFO);
                mutex_exit(&p->p_lock);
                return (EFBIG);
        }

        /*
         * if largefiles are disallowed, the limit is
         * the pre-largefiles value of 2GB
         */
        if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
                limit = MIN(UFS_MAXOFFSET_T, limit);
        else
                limit = MIN(MAXOFF32_T, limit);

        if (uio->uio_loffset < (offset_t)0) {
                return (EINVAL);
        }
        if (uio->uio_resid == 0) {
                return (0);
        }

        if (uio->uio_loffset >= limit)
                return (EFBIG);

        ip->i_flag |= INOACC;        /* don't update ref time in getpage */

        if (ioflag & (FSYNC|FDSYNC)) {
                ip->i_flag |= ISYNC;
                iupdat_flag = 1;
        }
        /*
         * Try to go direct
         */
        if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
                uio->uio_llimit = limit;
                error = ufs_directio_write(ip, uio, ioflag, 0, cr,
                    &directio_status);
                /*
                 * If ufs_directio wrote to the file or set the flags,
                 * we need to update i_seq, but it may be deferred.
                 */
                if (start_resid != uio->uio_resid ||
                    (ip->i_flag & (ICHG|IUPD))) {
                        i_seq_needed = 1;
                        ip->i_flag |= ISEQ;
                }
                if (directio_status == DIRECTIO_SUCCESS)
                        goto out;
        }

        /*
         * Behavior with respect to dropping/reacquiring vfs_dqrwlock:
         *
         * o shadow inodes: vfs_dqrwlock is not held at all
         * o quota updates: vfs_dqrwlock is read or write held
         * o other updates: vfs_dqrwlock is read held
         *
         * The first case is the only one where we do not hold
         * vfs_dqrwlock at all while entering wrip().
         * We must make sure not to downgrade/drop vfs_dqrwlock if we
         * have it as writer, i.e. if we are updating the quota inode.
         * There is no potential deadlock scenario in this case as
         * ufs_getpage() takes care of this and avoids reacquiring
         * vfs_dqrwlock in that case.
         *
         * This check is done here since the above conditions do not change
         * and we possibly loop below, so save a few cycles.
         */
        if ((type == IFSHAD) ||
            (rw_owner(&ufsvfsp->vfs_dqrwlock) == curthread)) {
                do_dqrwlock = 0;
        } else {
                do_dqrwlock = 1;
        }

        /*
         * Large Files: We cast MAXBMASK to offset_t in order to mask out
         * the higher bits.  Since offset_t is a signed value, the high
         * order bit set in the MAXBMASK value makes it do the right thing
         * by having all bits 1 in the higher word.  May be removed for
         * _SOLARIS64_.
         */

        fs = ip->i_fs;
        do {
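                /*
                 * Per-iteration bookkeeping (see the assignments just below):
                 * "off" is the MAXBSIZE-aligned offset of the mapping window
                 * being written, "mapon" is the byte offset within that
                 * window, "on" is the offset within the file system block,
                 * and "n" is the number of bytes to move in this pass (at
                 * most the remainder of the current block).
                 */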
                u_offset_t uoff = uio->uio_loffset;
                off = uoff & (offset_t)MAXBMASK;
                mapon = (int)(uoff & (offset_t)MAXBOFFSET);
                on = (int)blkoff(fs, uoff);
                n = (int)MIN(fs->fs_bsize - on, uio->uio_resid);
                new_iblocks = 1;

                if (type == IFREG && uoff + n >= limit) {
                        if (uoff >= limit) {
                                error = EFBIG;
                                goto out;
                        }
                        /*
                         * since uoff + n >= limit,
                         * therefore n >= limit - uoff, and n is an int
                         * so it is safe to cast it to an int
                         */
                        n = (int)(limit - (rlim64_t)uoff);
                }
                if (uoff + n > ip->i_size) {
                        /*
                         * We are extending the length of the file.
                         * bmap is used so that we are sure that
                         * if we need to allocate new blocks, that it
                         * is done here before we up the file size.
                         */
                        error = bmap_write(ip, uoff, (int)(on + n),
                            mapon == 0, NULL, cr);
                        /*
                         * bmap_write never drops i_contents so if
                         * the flags are set it changed the file.
                         */
                        if (ip->i_flag & (ICHG|IUPD)) {
                                i_seq_needed = 1;
                                ip->i_flag |= ISEQ;
                        }
                        if (error)
                                break;
                        /*
                         * There is a window of vulnerability here.
                         * The sequence of operations: allocate file
                         * system blocks, uiomove the data into pages,
                         * and then update the size of the file in the
                         * inode, must happen atomically.  However, due
                         * to current locking constraints, this can not
                         * be done.
                         */
                        ASSERT(ip->i_writer == NULL);
                        ip->i_writer = curthread;
                        i_size_changed = 1;
                        /*
                         * If we are writing from the beginning of
                         * the mapping, we can just create the
                         * pages without having to read them.
                         */
                        pagecreate = (mapon == 0);
                } else if (n == MAXBSIZE) {
                        /*
                         * Going to do a whole mappings worth,
                         * so we can just create the pages w/o
                         * having to read them in.  But before
                         * we do that, we need to make sure any
                         * needed blocks are allocated first.
                         */
                        iblocks = ip->i_blocks;
                        error = bmap_write(ip, uoff, (int)(on + n),
                            BI_ALLOC_ONLY, NULL, cr);
                        /*
                         * bmap_write never drops i_contents so if
                         * the flags are set it changed the file.
                         */
                        if (ip->i_flag & (ICHG|IUPD)) {
                                i_seq_needed = 1;
                                ip->i_flag |= ISEQ;
                        }
                        if (error)
                                break;
                        pagecreate = 1;
                        /*
                         * Check if the newly created page needed the
                         * allocation of new disk blocks.
                         */
                        if (iblocks == ip->i_blocks)
                                new_iblocks = 0; /* no new blocks allocated */
                } else {
                        pagecreate = 0;
                        /*
                         * In sync mode flush the indirect blocks which
                         * may have been allocated and not written on
                         * disk. In the above cases bmap_write will allocate
                         * in sync mode.
                         */
                        if (ioflag & (FSYNC|FDSYNC)) {
                                error = ufs_indirblk_sync(ip, uoff);
                                if (error)
                                        break;
                        }
                }

                /*
                 * At this point we can enter ufs_getpage() in one
                 * of two ways:
                 * 1) segmap_getmapflt() calls ufs_getpage() when the
                 *    forcefault parameter is true (pagecreate == 0)
                 * 2) uiomove() causes a page fault.
                 *
                 * We have to drop the contents lock to prevent the VM
                 * system from trying to reacquire it in ufs_getpage()
                 * should the uiomove cause a pagefault.
                 *
                 * We have to drop the reader vfs_dqrwlock here as well.
                 */
                rw_exit(&ip->i_contents);
                if (do_dqrwlock) {
                        ASSERT(RW_LOCK_HELD(&ufsvfsp->vfs_dqrwlock));
                        ASSERT(!(RW_WRITE_HELD(&ufsvfsp->vfs_dqrwlock)));
                        rw_exit(&ufsvfsp->vfs_dqrwlock);
                }

                newpage = 0;
                premove_resid = uio->uio_resid;

                /*
                 * Touch the page and fault it in if it is not in core
                 * before segmap_getmapflt or vpm_data_copy can lock it.
                 * This avoids a deadlock when the buffer we want to write
                 * is mapped to the same file through mmap.
                 */
                uio_prefaultpages((long)n, uio);

                if (vpm_enable) {
                        /*
                         * Copy data. If new pages are created, part of
                         * the page that is not written will be initialized
                         * with zeros.
                         */
                        error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
                            uio, !pagecreate, &newpage, 0, S_WRITE);
                } else {

                        base = segmap_getmapflt(segkmap, vp, (off + mapon),
                            (uint_t)n, !pagecreate, S_WRITE);

                        /*
                         * segmap_pagecreate() returns 1 if it calls
                         * page_create_va() to allocate any pages.
                         */

                        if (pagecreate)
                                newpage = segmap_pagecreate(segkmap, base,
                                    (size_t)n, 0);

                        error = uiomove(base + mapon, (long)n, UIO_WRITE, uio);
                }

                /*
                 * If "newpage" is set, then a new page was created and it
                 * does not contain valid data, so it needs to be initialized
                 * at this point.
                 * Otherwise the page contains old data, which was overwritten
                 * partially or as a whole in uiomove.
                 * If there is only one iovec structure within uio, then
                 * on error uiomove will not be able to update uio->uio_loffset
                 * and we would zero the whole page here!
                 *
                 * If uiomove fails because of an error, the old valid data
                 * is kept instead of filling the rest of the page with zeros.
                 */
                if (!vpm_enable && newpage &&
                    uio->uio_loffset < roundup(off + mapon + n, PAGESIZE)) {
                        /*
                         * We created pages w/o initializing them completely,
                         * thus we need to zero the part that wasn't set up.
                         * This happens on most EOF write cases and if
                         * we had some sort of error during the uiomove.
                         */
                        int nzero, nmoved;

                        nmoved = (int)(uio->uio_loffset - (off + mapon));
                        ASSERT(nmoved >= 0 && nmoved <= n);
                        nzero = roundup(on + n, PAGESIZE) - nmoved;
                        ASSERT(nzero > 0 && mapon + nmoved + nzero <= MAXBSIZE);
                        (void) kzero(base + mapon + nmoved, (uint_t)nzero);
                }

                /*
                 * Unlock the pages allocated by page_create_va()
                 * in segmap_pagecreate()
                 */
                if (!vpm_enable && newpage)
                        segmap_pageunlock(segkmap, base, (size_t)n, S_WRITE);

                /*
                 * If the size of the file changed, then update the
                 * size field in the inode now.  This can't be done
                 * before the call to segmap_pageunlock or there is
                 * a potential deadlock with callers to ufs_putpage().
                 * They will be holding i_contents and trying to lock
                 * a page, while this thread is holding a page locked
                 * and trying to acquire i_contents.
                 */
                if (i_size_changed) {
                        rw_enter(&ip->i_contents, RW_WRITER);
                        old_i_size = ip->i_size;
                        UFS_SET_ISIZE(uoff + n, ip);
                        TRANS_INODE(ufsvfsp, ip);
                        /*
                         * file has grown larger than 2GB. Set flag
                         * in superblock to indicate this, if it
                         * is not already set.
                         */
                        if ((ip->i_size > MAXOFF32_T) &&
                            !(fs->fs_flags & FSLARGEFILES)) {
                                ASSERT(ufsvfsp->vfs_lfflags & UFS_LARGEFILES);
                                mutex_enter(&ufsvfsp->vfs_lock);
                                fs->fs_flags |= FSLARGEFILES;
                                ufs_sbwrite(ufsvfsp);
                                mutex_exit(&ufsvfsp->vfs_lock);
                        }
                        mutex_enter(&ip->i_tlock);
                        ip->i_writer = NULL;
                        cv_broadcast(&ip->i_wrcv);
                        mutex_exit(&ip->i_tlock);
                        rw_exit(&ip->i_contents);
                }

                if (error) {
                        /*
                         * If we failed on a write, we may have already
                         * allocated file blocks as well as pages.  It's
                         * hard to undo the block allocation, but we must
                         * be sure to invalidate any pages that may have
                         * been allocated.
                         *
                         * If the page was created without initialization,
                         * then we must check whether it is possible
                         * to destroy the new page and keep the old data
                         * on the disk.
                         *
                         * It is possible to destroy the page without
                         * having to write back its contents only when
                         * - the size of the file is unchanged
                         * - bmap_write() did not allocate new disk blocks
                         *   (it is possible to create big files using "seek"
                         *   and writing to the end of the file; a "write" to
                         *   a position before the end of the file would not
                         *   change the size of the file but it would allocate
                         *   new disk blocks)
                         * - uiomove intended to overwrite the whole page.
                         * - a new page was created (newpage == 1).
                         */

                        if (i_size_changed == 0 && new_iblocks == 0 &&
                            newpage) {

                                /* unwind what uiomove eventually last did */
                                uio->uio_resid = premove_resid;

                                /*
                                 * destroy the page, do not write ambiguous
                                 * data to the disk.
                                 */
                                flags = SM_DESTROY;
                        } else {
                                /*
                                 * write the page back to the disk, if dirty,
                                 * and remove the page from the cache.
                                 */
                                flags = SM_INVAL;
                        }

                        if (vpm_enable) {
                                /*
                                 *  Flush pages.
                                 */
                                (void) vpm_sync_pages(vp, off, n, flags);
                        } else {
                                (void) segmap_release(segkmap, base, flags);
                        }
                } else {
                        flags = 0;
                        /*
                         * Force write back for synchronous write cases.
                         */
                        if ((ioflag & (FSYNC|FDSYNC)) || type == IFDIR) {
                                /*
                                 * If the sticky bit is set but the
                                 * execute bit is not set, we do a
                                 * synchronous write back and free
                                 * the page when done.  We set up swap
                                 * files to be handled this way to
                                 * prevent servers from keeping around
                                 * the client's swap pages too long.
                                 * XXX - there ought to be a better way.
                                 */
                                if (IS_SWAPVP(vp)) {
                                        flags = SM_WRITE | SM_FREE |
                                            SM_DONTNEED;
                                        iupdat_flag = 0;
                                } else {
                                        flags = SM_WRITE;
                                }
                        } else if (n + on == MAXBSIZE || IS_SWAPVP(vp)) {
                                /*
                                 * Have written a whole block.
                                 * Start an asynchronous write and
                                 * mark the buffer to indicate that
                                 * it won't be needed again soon.
                                 */
                                flags = SM_WRITE | SM_ASYNC | SM_DONTNEED;
                        }
                        if (vpm_enable) {
                                /*
                                 * Flush pages.
                                 */
                                error = vpm_sync_pages(vp, off, n, flags);
                        } else {
                                error = segmap_release(segkmap, base, flags);
                        }
                        /*
                         * If the operation failed and is synchronous,
                         * then we need to unwind what uiomove() last
                         * did so we can potentially return an error to
                         * the caller.  If this write operation was
                         * done in two pieces and the first succeeded,
                         * then we won't return an error for the second
                         * piece that failed.  However, we only want to
                         * return a resid value that reflects what was
                         * really done.
                         *
                         * Failures for non-synchronous operations can
                         * be ignored since the page subsystem will
                         * retry the operation until it succeeds or the
                         * file system is unmounted.
                         */
                        if (error) {
                                if ((ioflag & (FSYNC | FDSYNC)) ||
                                    type == IFDIR) {
                                        uio->uio_resid = premove_resid;
                                } else {
                                        error = 0;
                                }
                        }
                }

                /*
                 * Re-acquire contents lock.
                 * If it was dropped, reacquire reader vfs_dqrwlock as well.
                 */
                if (do_dqrwlock)
                        rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
                rw_enter(&ip->i_contents, RW_WRITER);

                /*
                 * If the uiomove() failed or if a synchronous
                 * page push failed, fix up i_size.
                 */
1239                 if (error) {
1240                         if (i_size_changed) {
1241                                 /*
1242                                  * The uiomove failed, and we
1243                                  * allocated blocks, so get rid
1244                                  * of them.
1245                                  */
1246                                 (void) ufs_itrunc(ip, old_i_size, 0, cr);
1247                         }
1248                 } else {
1249                         /*
1250                          * XXX - Can this be out of the loop?
1251                          */
1252                         ip->i_flag |= IUPD | ICHG;
1253                         /*
1254                          * Only do one increase of i_seq for multiple
1255                          * pieces.  Because we drop locks, record
1256                          * the fact that we changed the timestamp and
1257                          * are deferring the increase in case another thread
1258                          * pushes our timestamp update.
1259                          */
1260                         i_seq_needed = 1;
1261                         ip->i_flag |= ISEQ;
1262                         if (i_size_changed)
1263                                 ip->i_flag |= IATTCHG;
1264                         if ((ip->i_mode & (IEXEC | (IEXEC >> 3) |
1265                             (IEXEC >> 6))) != 0 &&
1266                             (ip->i_mode & (ISUID | ISGID)) != 0 &&
1267                             secpolicy_vnode_setid_retain(cr,
1268                             (ip->i_mode & ISUID) != 0 && ip->i_uid == 0) != 0) {
1269                                 /*
1270                                  * Clear Set-UID & Set-GID bits on
1271                                  * successful write if not privileged
1272                                  * and at least one of the execute bits
1273                                  * is set.  If we always clear Set-GID,
1274                                  * mandatory file and record locking is
1275                                  * unusable.
1276                                  */
1277                                 ip->i_mode &= ~(ISUID | ISGID);
1278                         }
1279                 }
1280                 /*
1281                  * In the case the FDSYNC flag is set and this is a
1282                  * "rewrite" we won't log a delta.
1283                  * The FSYNC flag overrides all cases.
1284                  */
1285                 if (!ufs_check_rewrite(ip, uio, ioflag) || !(ioflag & FDSYNC)) {
1286                         TRANS_INODE(ufsvfsp, ip);
1287                 }
1288         } while (error == 0 && uio->uio_resid > 0 && n != 0);
1289 
1290 out:
1291         /*
1292          * Make sure i_seq is increased at least once per write
1293          */
1294         if (i_seq_needed) {
1295                 ip->i_seq++;
1296                 ip->i_flag &= ~ISEQ;     /* no longer deferred */
1297         }
1298 
1299         /*
1300          * Inode is updated according to this table -
1301          *
1302          *   FSYNC        FDSYNC(posix.4)
1303          *   --------------------------
1304          *   always@      IATTCHG|IBDWRITE
1305          *
1306          * @ -  If we are doing a synchronous write, the only time we should
1307          *      not be sync'ing the ip here is if we have the stickyhack
1308          *      activated, the file is marked with the sticky bit and
1309          *      no exec bit, the file length has not been changed and
1310          *      no new blocks have been allocated during this write.
1311          */
1312 
1313         if ((ip->i_flag & ISYNC) != 0) {
1314                 /*
1315                  * we have eliminated nosync
1316                  */
1317                 if ((ip->i_flag & (IATTCHG|IBDWRITE)) ||
1318                     ((ioflag & FSYNC) && iupdat_flag)) {
1319                         ufs_iupdat(ip, 1);
1320                 }
1321         }
1322 
1323         /*
1324          * If we've already done a partial-write, terminate
1325          * the write but return no error unless the error is ENOSPC
1326          * because the caller can detect this and free resources and
1327          * try again.
1328          */
1329         if ((start_resid != uio->uio_resid) && (error != ENOSPC))
1330                 error = 0;
1331 
1332         ip->i_flag &= ~(INOACC | ISYNC);
1333         ITIMES_NOLOCK(ip);
1334         return (error);
1335 }
1336 
1337 /*
1338  * rdip does the real work of read requests for ufs.
1339  */
1340 int
1341 rdip(struct inode *ip, struct uio *uio, int ioflag, cred_t *cr)
1342 {
1343         u_offset_t off;
1344         caddr_t base;
1345         struct fs *fs;
1346         struct ufsvfs *ufsvfsp;
1347         struct vnode *vp;
1348         long oresid = uio->uio_resid;
1349         u_offset_t n, on, mapon;
1350         int error = 0;
1351         int doupdate = 1;
1352         uint_t flags;
1353         int dofree, directio_status;
1354         krw_t rwtype;
1355         o_mode_t type;
1356         clock_t now;
1357 
1358         vp = ITOV(ip);
1359 
1360         ASSERT(RW_LOCK_HELD(&ip->i_contents));
1361 
1362         ufsvfsp = ip->i_ufsvfs;
1363 
1364         if (ufsvfsp == NULL)
1365                 return (EIO);
1366 
1367         fs = ufsvfsp->vfs_fs;
1368 
1369         /* check for valid filetype */
1370         type = ip->i_mode & IFMT;
1371         if ((type != IFREG) && (type != IFDIR) && (type != IFATTRDIR) &&
1372             (type != IFLNK) && (type != IFSHAD)) {
1373                 return (EIO);
1374         }
1375 
1376         if (uio->uio_loffset > UFS_MAXOFFSET_T) {
1377                 error = 0;
1378                 goto out;
1379         }
1380         if (uio->uio_loffset < (offset_t)0) {
1381                 return (EINVAL);
1382         }
1383         if (uio->uio_resid == 0) {
1384                 return (0);
1385         }
1386 
1387         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (fs->fs_ronly == 0) &&
1388             (!ufsvfsp->vfs_noatime)) {
1389                 mutex_enter(&ip->i_tlock);
1390                 ip->i_flag |= IACC;
1391                 mutex_exit(&ip->i_tlock);
1392         }
1393         /*
1394          * Try to go direct
1395          */
1396         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio) {
1397                 error = ufs_directio_read(ip, uio, cr, &directio_status);
1398                 if (directio_status == DIRECTIO_SUCCESS)
1399                         goto out;
1400         }
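             /*
              * If the request could not be handled by direct I/O, fall
              * through to the normal cached read path below.
              */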
1401 
1402         rwtype = (rw_write_held(&ip->i_contents)?RW_WRITER:RW_READER);
1403 
1404         do {
1405                 offset_t diff;
1406                 u_offset_t uoff = uio->uio_loffset;
1407                 off = uoff & (offset_t)MAXBMASK;
1408                 mapon = (u_offset_t)(uoff & (offset_t)MAXBOFFSET);
1409                 on = (u_offset_t)blkoff(fs, uoff);
1410                 n = MIN((u_offset_t)fs->fs_bsize - on,
1411                     (u_offset_t)uio->uio_resid);
1412 
1413                 diff = ip->i_size - uoff;
1414 
1415                 if (diff <= (offset_t)0) {
1416                         error = 0;
1417                         goto out;
1418                 }
1419                 if (diff < (offset_t)n)
1420                         n = (int)diff;
1421 
1422                 /*
1423                  * We update smallfile2 and smallfile1 at most every second.
1424                  */
1425                 now = ddi_get_lbolt();
1426                 if (now >= smallfile_update) {
1427                         uint64_t percpufreeb;
1428                         if (smallfile1_d == 0) smallfile1_d = SMALLFILE1_D;
1429                         if (smallfile2_d == 0) smallfile2_d = SMALLFILE2_D;
1430                         percpufreeb = ptob((uint64_t)freemem) / ncpus_online;
1431                         smallfile1 = percpufreeb / smallfile1_d;
1432                         smallfile2 = percpufreeb / smallfile2_d;
1433                         smallfile1 = MAX(smallfile1, smallfile);
1434                         smallfile1 = MAX(smallfile1, smallfile64);
1435                         smallfile2 = MAX(smallfile1, smallfile2);
1436                         smallfile_update = now + hz;
1437                 }
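                     /*
                      * For example, with 2GB of free memory and 4 online
                      * CPUs, percpufreeb is 512MB and the thresholds become
                      * 512MB / smallfile1_d and 512MB / smallfile2_d (each
                      * clamped below as above); the more free memory per
                      * CPU, the larger the file offset must be before
                      * free-behind is considered.
                      */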
1438 
1439                 dofree = freebehind &&
1440                     ip->i_nextr == (off & PAGEMASK) && off > smallfile1;
1441 
1442                 /*
1443                  * At this point we can enter ufs_getpage() in one of two
1444                  * ways:
1445                  * 1) segmap_getmapflt() calls ufs_getpage() when the
1446                  *    forcefault parameter is true (value of 1 is passed)
1447                  * 2) uiomove() causes a page fault.
1448                  *
1449                  * We cannot hold onto an i_contents reader lock without
1450                  * risking deadlock in ufs_getpage() so drop a reader lock.
1451                  * The ufs_getpage() dolock logic already allows for a
1452                  * thread holding i_contents as writer to work properly
1453                  * so we keep a writer lock.
1454                  */
1455                 if (rwtype == RW_READER)
1456                         rw_exit(&ip->i_contents);
1457 
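                     /*
                      * With vpm_enable set, the vpm interface copies the
                      * data without an explicit segkmap mapping; otherwise
                      * a segkmap mapping is obtained and uiomove() copies
                      * from it.
                      */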
1458                 if (vpm_enable) {
1459                         /*
1460                          * Copy data.
1461                          */
1462                         error = vpm_data_copy(vp, (off + mapon), (uint_t)n,
1463                             uio, 1, NULL, 0, S_READ);
1464                 } else {
1465                         base = segmap_getmapflt(segkmap, vp, (off + mapon),
1466                             (uint_t)n, 1, S_READ);
1467                         error = uiomove(base + mapon, (long)n, UIO_READ, uio);
1468                 }
1469 
1470                 flags = 0;
1471                 if (!error) {
1472                         /*
1473                          * If reading sequentially we won't need this
1474                          * buffer again soon.  For offsets in the range
1475                          * [smallfile1, smallfile2] the pages are released
1476                          * at the tail of the cache list; larger offsets
1477                          * are released at the head.
1478                          */
1479                         if (dofree) {
1480                                 flags = SM_FREE | SM_ASYNC;
1481                                 if ((cache_read_ahead == 0) &&
1482                                     (off > smallfile2))
1483                                         flags |=  SM_DONTNEED;
1484                         }
1485                         /*
1486                          * In POSIX SYNC (FSYNC and FDSYNC) read mode,
1487                          * we want to make sure that the page which has
1488                          * been read is written to disk if it is dirty,
1489                          * and that the corresponding indirect blocks
1490                          * are also flushed out.
1491                          */
1492                         if ((ioflag & FRSYNC) && (ioflag & (FSYNC|FDSYNC))) {
1493                                 flags &= ~SM_ASYNC;
1494                                 flags |= SM_WRITE;
1495                         }
1496                         if (vpm_enable) {
1497                                 error = vpm_sync_pages(vp, off, n, flags);
1498                         } else {
1499                                 error = segmap_release(segkmap, base, flags);
1500                         }
1501                 } else {
1502                         if (vpm_enable) {
1503                                 (void) vpm_sync_pages(vp, off, n, flags);
1504                         } else {
1505                                 (void) segmap_release(segkmap, base, flags);
1506                         }
1507                 }
1508 
1509                 if (rwtype == RW_READER)
1510                         rw_enter(&ip->i_contents, rwtype);
1511         } while (error == 0 && uio->uio_resid > 0 && n != 0);
1512 out:
1513         /*
1514          * Inode is updated according to this table if FRSYNC is set.
1515          *
1516          *   FSYNC        FDSYNC(posix.4)
1517          *   --------------------------
1518          *   always       IATTCHG|IBDWRITE
1519          */
1520         /*
1521          * The inode is not updated if we're logging and the inode is a
1522          * directory with FRSYNC, FSYNC and FDSYNC flags set.
1523          */
1524         if (ioflag & FRSYNC) {
1525                 if (TRANS_ISTRANS(ufsvfsp) && ((ip->i_mode & IFMT) == IFDIR)) {
1526                         doupdate = 0;
1527                 }
1528                 if (doupdate) {
1529                         if ((ioflag & FSYNC) ||
1530                             ((ioflag & FDSYNC) &&
1531                             (ip->i_flag & (IATTCHG|IBDWRITE)))) {
1532                                 ufs_iupdat(ip, 1);
1533                         }
1534                 }
1535         }
1536         /*
1537          * If we've already done a partial read, terminate
1538          * the read but return no error.
1539          */
1540         if (oresid != uio->uio_resid)
1541                 error = 0;
1542         ITIMES(ip);
1543 
1544         return (error);
1545 }
1546 
1547 /* ARGSUSED */
1548 static int
1549 ufs_ioctl(
1550         struct vnode    *vp,
1551         int             cmd,
1552         intptr_t        arg,
1553         int             flag,
1554         struct cred     *cr,
1555         int             *rvalp,
1556         caller_context_t *ct)
1557 {
1558         struct lockfs   lockfs, lockfs_out;
1559         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
1560         char            *comment, *original_comment;
1561         struct fs       *fs;
1562         struct ulockfs  *ulp;
1563         offset_t        off;
1564         extern int      maxphys;
1565         int             error;
1566         int             issync;
1567         int             trans_size;
1568 
1569 
1570         /*
1571          * forcibly unmounted
1572          */
1573         if (ufsvfsp == NULL || vp->v_vfsp == NULL ||
1574             vp->v_vfsp->vfs_flag & VFS_UNMOUNTED)
1575                 return (EIO);
1576         fs = ufsvfsp->vfs_fs;
1577 
1578         if (cmd == Q_QUOTACTL) {
1579                 error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_QUOTA_MASK);
1580                 if (error)
1581                         return (error);
1582 
1583                 if (ulp) {
1584                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_QUOTA,
1585                             TOP_SETQUOTA_SIZE(fs));
1586                 }
1587 
1588                 error = quotactl(vp, arg, flag, cr);
1589 
1590                 if (ulp) {
1591                         TRANS_END_ASYNC(ufsvfsp, TOP_QUOTA,
1592                             TOP_SETQUOTA_SIZE(fs));
1593                         ufs_lockfs_end(ulp);
1594                 }
1595                 return (error);
1596         }
1597 
1598         switch (cmd) {
1599                 case _FIOLFS:
1600                         /*
1601                          * file system locking
1602                          */
1603                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1604                                 return (EPERM);
1605 
1606                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1607                                 if (copyin((caddr_t)arg, &lockfs,
1608                                     sizeof (struct lockfs)))
1609                                         return (EFAULT);
1610                         }
1611 #ifdef _SYSCALL32_IMPL
1612                         else {
1613                                 struct lockfs32 lockfs32;
1614                                 /* Translate ILP32 lockfs to LP64 lockfs */
1615                                 if (copyin((caddr_t)arg, &lockfs32,
1616                                     sizeof (struct lockfs32)))
1617                                         return (EFAULT);
1618                                 lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
1619                                 lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
1620                                 lockfs.lf_key = (ulong_t)lockfs32.lf_key;
1621                                 lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
1622                                 lockfs.lf_comment =
1623                                     (caddr_t)(uintptr_t)lockfs32.lf_comment;
1624                         }
1625 #endif /* _SYSCALL32_IMPL */
1626 
1627                         if (lockfs.lf_comlen) {
1628                                 if (lockfs.lf_comlen > LOCKFS_MAXCOMMENTLEN)
1629                                         return (ENAMETOOLONG);
1630                                 comment =
1631                                     kmem_alloc(lockfs.lf_comlen, KM_SLEEP);
1632                                 if (copyin(lockfs.lf_comment, comment,
1633                                     lockfs.lf_comlen)) {
1634                                         kmem_free(comment, lockfs.lf_comlen);
1635                                         return (EFAULT);
1636                                 }
1637                                 original_comment = lockfs.lf_comment;
1638                                 lockfs.lf_comment = comment;
1639                         }
1640                         if ((error = ufs_fiolfs(vp, &lockfs, 0)) == 0) {
1641                                 lockfs.lf_comment = original_comment;
1642 
1643                                 if ((flag & DATAMODEL_MASK) ==
1644                                     DATAMODEL_NATIVE) {
1645                                         (void) copyout(&lockfs, (caddr_t)arg,
1646                                             sizeof (struct lockfs));
1647                                 }
1648 #ifdef _SYSCALL32_IMPL
1649                                 else {
1650                                         struct lockfs32 lockfs32;
1651                                         /* Translate LP64 to ILP32 lockfs */
1652                                         lockfs32.lf_lock =
1653                                             (uint32_t)lockfs.lf_lock;
1654                                         lockfs32.lf_flags =
1655                                             (uint32_t)lockfs.lf_flags;
1656                                         lockfs32.lf_key =
1657                                             (uint32_t)lockfs.lf_key;
1658                                         lockfs32.lf_comlen =
1659                                             (uint32_t)lockfs.lf_comlen;
1660                                         lockfs32.lf_comment =
1661                                             (uint32_t)(uintptr_t)
1662                                             lockfs.lf_comment;
1663                                         (void) copyout(&lockfs32, (caddr_t)arg,
1664                                             sizeof (struct lockfs32));
1665                                 }
1666 #endif /* _SYSCALL32_IMPL */
1667 
1668                         } else {
1669                                 if (lockfs.lf_comlen)
1670                                         kmem_free(comment, lockfs.lf_comlen);
1671                         }
1672                         return (error);
1673 
1674                 case _FIOLFSS:
1675                         /*
1676                          * get file system locking status
1677                          */
1678 
1679                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1680                                 if (copyin((caddr_t)arg, &lockfs,
1681                                     sizeof (struct lockfs)))
1682                                         return (EFAULT);
1683                         }
1684 #ifdef _SYSCALL32_IMPL
1685                         else {
1686                                 struct lockfs32 lockfs32;
1687                                 /* Translate ILP32 lockfs to LP64 lockfs */
1688                                 if (copyin((caddr_t)arg, &lockfs32,
1689                                     sizeof (struct lockfs32)))
1690                                         return (EFAULT);
1691                                 lockfs.lf_lock = (ulong_t)lockfs32.lf_lock;
1692                                 lockfs.lf_flags = (ulong_t)lockfs32.lf_flags;
1693                                 lockfs.lf_key = (ulong_t)lockfs32.lf_key;
1694                                 lockfs.lf_comlen = (ulong_t)lockfs32.lf_comlen;
1695                                 lockfs.lf_comment =
1696                                     (caddr_t)(uintptr_t)lockfs32.lf_comment;
1697                         }
1698 #endif /* _SYSCALL32_IMPL */
1699 
1700                         if (error =  ufs_fiolfss(vp, &lockfs_out))
1701                                 return (error);
1702                         lockfs.lf_lock = lockfs_out.lf_lock;
1703                         lockfs.lf_key = lockfs_out.lf_key;
1704                         lockfs.lf_flags = lockfs_out.lf_flags;
1705                         lockfs.lf_comlen = MIN(lockfs.lf_comlen,
1706                             lockfs_out.lf_comlen);
1707 
1708                         if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
1709                                 if (copyout(&lockfs, (caddr_t)arg,
1710                                     sizeof (struct lockfs)))
1711                                         return (EFAULT);
1712                         }
1713 #ifdef _SYSCALL32_IMPL
1714                         else {
1715                                 /* Translate LP64 to ILP32 lockfs */
1716                                 struct lockfs32 lockfs32;
1717                                 lockfs32.lf_lock = (uint32_t)lockfs.lf_lock;
1718                                 lockfs32.lf_flags = (uint32_t)lockfs.lf_flags;
1719                                 lockfs32.lf_key = (uint32_t)lockfs.lf_key;
1720                                 lockfs32.lf_comlen = (uint32_t)lockfs.lf_comlen;
1721                                 lockfs32.lf_comment =
1722                                     (uint32_t)(uintptr_t)lockfs.lf_comment;
1723                                 if (copyout(&lockfs32, (caddr_t)arg,
1724                                     sizeof (struct lockfs32)))
1725                                         return (EFAULT);
1726                         }
1727 #endif /* _SYSCALL32_IMPL */
1728 
1729                         if (lockfs.lf_comlen &&
1730                             lockfs.lf_comment && lockfs_out.lf_comment)
1731                                 if (copyout(lockfs_out.lf_comment,
1732                                     lockfs.lf_comment, lockfs.lf_comlen))
1733                                         return (EFAULT);
1734                         return (0);
1735 
1736                 case _FIOSATIME:
1737                         /*
1738                          * set access time
1739                          */
1740 
1741                         /*
1742                          * if mounted w/o atime, return quietly.
1743                          * I briefly thought about returning ENOSYS, but
1744                          * figured that most apps would consider this fatal;
1745                          * the idea is to make this as seamless as possible.
1746                          */
1747                         if (ufsvfsp->vfs_noatime)
1748                                 return (0);
1749 
1750                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1751                             ULOCKFS_SETATTR_MASK);
1752                         if (error)
1753                                 return (error);
1754 
1755                         if (ulp) {
1756                                 trans_size = (int)TOP_SETATTR_SIZE(VTOI(vp));
1757                                 TRANS_BEGIN_CSYNC(ufsvfsp, issync,
1758                                     TOP_SETATTR, trans_size);
1759                         }
1760 
1761                         error = ufs_fiosatime(vp, (struct timeval *)arg,
1762                             flag, cr);
1763 
1764                         if (ulp) {
1765                                 TRANS_END_CSYNC(ufsvfsp, error, issync,
1766                                     TOP_SETATTR, trans_size);
1767                                 ufs_lockfs_end(ulp);
1768                         }
1769                         return (error);
1770 
1771                 case _FIOSDIO:
1772                         /*
1773                          * set delayed-io
1774                          */
1775                         return (ufs_fiosdio(vp, (uint_t *)arg, flag, cr));
1776 
1777                 case _FIOGDIO:
1778                         /*
1779                          * get delayed-io
1780                          */
1781                         return (ufs_fiogdio(vp, (uint_t *)arg, flag, cr));
1782 
1783                 case _FIOIO:
1784                         /*
1785                          * inode open
1786                          */
1787                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1788                             ULOCKFS_VGET_MASK);
1789                         if (error)
1790                                 return (error);
1791 
1792                         error = ufs_fioio(vp, (struct fioio *)arg, flag, cr);
1793 
1794                         if (ulp) {
1795                                 ufs_lockfs_end(ulp);
1796                         }
1797                         return (error);
1798 
1799                 case _FIOFFS:
1800                         /*
1801                          * file system flush (push w/invalidate)
1802                          */
1803                         if ((caddr_t)arg != NULL)
1804                                 return (EINVAL);
1805                         return (ufs_fioffs(vp, NULL, cr));
1806 
1807                 case _FIOISBUSY:
1808                         /*
1809                          * Contract-private interface for Legato.
1810                          * Purge this vnode from the DNLC and decide
1811                          * if this vnode is busy (*arg == 1) or not
1812                          * (*arg == 0).
1813                          */
1814                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1815                                 return (EPERM);
1816                         error = ufs_fioisbusy(vp, (int *)arg, cr);
1817                         return (error);
1818 
1819                 case _FIODIRECTIO:
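                             /*
                              * set the per-file directio state
                              */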
1820                         return (ufs_fiodirectio(vp, (int)arg, cr));
1821 
1822                 case _FIOTUNE:
1823                         /*
1824                          * Tune the file system (aka setting fs attributes)
1825                          */
1826                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1827                             ULOCKFS_SETATTR_MASK);
1828                         if (error)
1829                                 return (error);
1830 
1831                         error = ufs_fiotune(vp, (struct fiotune *)arg, cr);
1832 
1833                         if (ulp)
1834                                 ufs_lockfs_end(ulp);
1835                         return (error);
1836 
1837                 case _FIOLOGENABLE:
1838                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1839                                 return (EPERM);
1840                         return (ufs_fiologenable(vp, (void *)arg, cr, flag));
1841 
1842                 case _FIOLOGDISABLE:
1843                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1844                                 return (EPERM);
1845                         return (ufs_fiologdisable(vp, (void *)arg, cr, flag));
1846 
1847                 case _FIOISLOG:
1848                         return (ufs_fioislog(vp, (void *)arg, cr, flag));
1849 
1850                 case _FIOSNAPSHOTCREATE_MULTI:
1851                 {
1852                         struct fiosnapcreate_multi      fc, *fcp;
1853                         size_t  fcm_size;
1854 
1855                         if (copyin((void *)arg, &fc, sizeof (fc)))
1856                                 return (EFAULT);
1857                         if (fc.backfilecount > MAX_BACKFILE_COUNT)
1858                                 return (EINVAL);
1859                         fcm_size = sizeof (struct fiosnapcreate_multi) +
1860                             (fc.backfilecount - 1) * sizeof (int);
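                             /*
                              * The (backfilecount - 1) adjustment accounts for
                              * the descriptor array element already included
                              * in the base structure.
                              */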
1861                         fcp = (struct fiosnapcreate_multi *)
1862                             kmem_alloc(fcm_size, KM_SLEEP);
1863                         if (copyin((void *)arg, fcp, fcm_size)) {
1864                                 kmem_free(fcp, fcm_size);
1865                                 return (EFAULT);
1866                         }
1867                         error = ufs_snap_create(vp, fcp, cr);
1868                         /*
1869                          * Do copyout even if there is an error because
1870                          * the details of the error are stored in fcp.
1871                          */
1872                         if (copyout(fcp, (void *)arg, fcm_size))
1873                                 error = EFAULT;
1874                         kmem_free(fcp, fcm_size);
1875                         return (error);
1876                 }
1877 
1878                 case _FIOSNAPSHOTDELETE:
1879                 {
1880                         struct fiosnapdelete    fc;
1881 
1882                         if (copyin((void *)arg, &fc, sizeof (fc)))
1883                                 return (EFAULT);
1884                         error = ufs_snap_delete(vp, &fc, cr);
1885                         if (!error && copyout(&fc, (void *)arg, sizeof (fc)))
1886                                 error = EFAULT;
1887                         return (error);
1888                 }
1889 
1890                 case _FIOGETSUPERBLOCK:
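                             /*
                              * copy the in-core superblock out to the caller
                              */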
1891                         if (copyout(fs, (void *)arg, SBSIZE))
1892                                 return (EFAULT);
1893                         return (0);
1894 
1895                 case _FIOGETMAXPHYS:
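                             /*
                              * return the maximum physical I/O transfer size
                              */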
1896                         if (copyout(&maxphys, (void *)arg, sizeof (maxphys)))
1897                                 return (EFAULT);
1898                         return (0);
1899 
1900                 /*
1901                  * The following 3 ioctls are for TSufs support
1902                  * although could potentially be used elsewhere
1903                  */
1904                 case _FIO_SET_LUFS_DEBUG:
1905                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1906                                 return (EPERM);
1907                         lufs_debug = (uint32_t)arg;
1908                         return (0);
1909 
1910                 case _FIO_SET_LUFS_ERROR:
1911                         if (secpolicy_fs_config(cr, ufsvfsp->vfs_vfs) != 0)
1912                                 return (EPERM);
1913                         TRANS_SETERROR(ufsvfsp);
1914                         return (0);
1915 
1916                 case _FIO_GET_TOP_STATS:
1917                 {
1918                         fio_lufs_stats_t *ls;
1919                         ml_unit_t *ul = ufsvfsp->vfs_log;
1920 
1921                         ls = kmem_zalloc(sizeof (*ls), KM_SLEEP);
1922                         ls->ls_debug = ul->un_debug; /* return debug value */
1923                         /* Copy structure if statistics are being kept */
1924                         if (ul->un_logmap->mtm_tops) {
1925                                 ls->ls_topstats = *(ul->un_logmap->mtm_tops);
1926                         }
1927                         error = 0;
1928                         if (copyout(ls, (void *)arg, sizeof (*ls)))
1929                                 error = EFAULT;
1930                         kmem_free(ls, sizeof (*ls));
1931                         return (error);
1932                 }
1933 
1934                 case _FIO_SEEK_DATA:
1935                 case _FIO_SEEK_HOLE:
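                             /*
                              * Find the start of the next data region
                              * (_FIO_SEEK_DATA) or hole (_FIO_SEEK_HOLE) at
                              * or beyond the given offset.
                              */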
1936                         if (ddi_copyin((void *)arg, &off, sizeof (off), flag))
1937                                 return (EFAULT);
1938                         /* offset parameter is in/out */
1939                         error = ufs_fio_holey(vp, cmd, &off);
1940                         if (error)
1941                                 return (error);
1942                         if (ddi_copyout(&off, (void *)arg, sizeof (off), flag))
1943                                 return (EFAULT);
1944                         return (0);
1945 
1946                 case _FIO_COMPRESSED:
1947                 {
1948                         /*
1949                          * This is a project private ufs ioctl() to mark
1950                          * the inode as belonging to a compressed
1951                          * file. This is used to mark individual
1952                          * compressed files in a miniroot archive.
1953                          * The files compressed in this manner are
1954                          * automatically decompressed by the dcfs filesystem
1955                          * (via an interception in ufs_lookup - see decompvp())
1956                          * which is layered on top of ufs on a system running
1957                          * from the archive. See uts/common/fs/dcfs for details.
1958                          * This ioctl only marks the file as compressed - the
1959                          * actual compression is done by fiocompress (a
1960                          * userland utility) which invokes this ioctl().
1961                          */
1962                         struct inode *ip = VTOI(vp);
1963 
1964                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
1965                             ULOCKFS_SETATTR_MASK);
1966                         if (error)
1967                                 return (error);
1968 
1969                         if (ulp) {
1970                                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_IUPDAT,
1971                                     TOP_IUPDAT_SIZE(ip));
1972                         }
1973 
1974                         error = ufs_mark_compressed(vp);
1975 
1976                         if (ulp) {
1977                                 TRANS_END_ASYNC(ufsvfsp, TOP_IUPDAT,
1978                                     TOP_IUPDAT_SIZE(ip));
1979                                 ufs_lockfs_end(ulp);
1980                         }
1981 
1982                         return (error);
1983 
1984                 }
1985 
1986                 default:
1987                         return (ENOTTY);
1988         }
1989 }
1990 
1991 
1992 /* ARGSUSED */
1993 static int
1994 ufs_getattr(struct vnode *vp, struct vattr *vap, int flags,
1995         struct cred *cr, caller_context_t *ct)
1996 {
1997         struct inode *ip = VTOI(vp);
1998         struct ufsvfs *ufsvfsp;
1999         int err;
2000 
2001         if (vap->va_mask == AT_SIZE) {
2002                 /*
2003                  * for performance, if only the size is requested don't bother
2004                  * with anything else.
2005                  */
2006                 UFS_GET_ISIZE(&vap->va_size, ip);
2007                 return (0);
2008         }
2009 
2010         /*
2011          * inlined lockfs checks
2012          */
2013         ufsvfsp = ip->i_ufsvfs;
2014         if ((ufsvfsp == NULL) || ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs)) {
2015                 err = EIO;
2016                 goto out;
2017         }
2018 
2019         rw_enter(&ip->i_contents, RW_READER);
2020         /*
2021          * Return all the attributes.  This should be refined so
2022          * that it only returns what's asked for.
2023          */
2024 
2025         /*
2026          * Copy from inode table.
2027          */
2028         vap->va_type = vp->v_type;
2029         vap->va_mode = ip->i_mode & MODEMASK;
2030         /*
2031          * If there is an ACL and there is a mask entry, then do the
2032          * extra work that completes the equivalent of an acltomode(3)
2033          * call.  According to POSIX P1003.1e, the acl mask should be
2034          * returned in the group permissions field.
2035          *
2036          * - start with the original permission and mode bits (from above)
2037          * - clear the group owner bits
2038          * - add in the mask bits.
2039          */
2040         if (ip->i_ufs_acl && ip->i_ufs_acl->aclass.acl_ismask) {
2041                 vap->va_mode &= ~((VREAD | VWRITE | VEXEC) >> 3);
2042                 vap->va_mode |=
2043                     (ip->i_ufs_acl->aclass.acl_maskbits & PERMMASK) << 3;
2044         }
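             /*
              * For example, with a file mode of 0754 and an ACL mask of
              * rw- (06), the group bits reported here become 06 and
              * va_mode reads back as 0764.
              */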
2045         vap->va_uid = ip->i_uid;
2046         vap->va_gid = ip->i_gid;
2047         vap->va_fsid = ip->i_dev;
2048         vap->va_nodeid = (ino64_t)ip->i_number;
2049         vap->va_nlink = ip->i_nlink;
2050         vap->va_size = ip->i_size;
2051         if (vp->v_type == VCHR || vp->v_type == VBLK)
2052                 vap->va_rdev = ip->i_rdev;
2053         else
2054                 vap->va_rdev = 0;    /* not a block or char special */
2055         mutex_enter(&ip->i_tlock);
2056         ITIMES_NOLOCK(ip);      /* mark correct time in inode */
2057         vap->va_seq = ip->i_seq;
2058         vap->va_atime.tv_sec = (time_t)ip->i_atime.tv_sec;
2059         vap->va_atime.tv_nsec = ip->i_atime.tv_usec*1000;
2060         vap->va_mtime.tv_sec = (time_t)ip->i_mtime.tv_sec;
2061         vap->va_mtime.tv_nsec = ip->i_mtime.tv_usec*1000;
2062         vap->va_ctime.tv_sec = (time_t)ip->i_ctime.tv_sec;
2063         vap->va_ctime.tv_nsec = ip->i_ctime.tv_usec*1000;
2064         mutex_exit(&ip->i_tlock);
2065 
2066         switch (ip->i_mode & IFMT) {
2067 
2068         case IFBLK:
2069                 vap->va_blksize = MAXBSIZE;          /* was BLKDEV_IOSIZE */
2070                 break;
2071 
2072         case IFCHR:
2073                 vap->va_blksize = MAXBSIZE;
2074                 break;
2075 
2076         default:
2077                 vap->va_blksize = ip->i_fs->fs_bsize;
2078                 break;
2079         }
2080         vap->va_nblocks = (fsblkcnt64_t)ip->i_blocks;
2081         rw_exit(&ip->i_contents);
2082         err = 0;
2083 
2084 out:
2085         return (err);
2086 }
2087 
2088 /*
2089  * Special wrapper to provide a callback for secpolicy_vnode_setattr().
2090  * The i_contents lock is already held by the caller and we need to
2091  * declare the inode as 'void *' argument.
2092  */
2093 static int
2094 ufs_priv_access(void *vip, int mode, struct cred *cr)
2095 {
2096         struct inode *ip = vip;
2097 
2098         return (ufs_iaccess(ip, mode, cr, 0));
2099 }
2100 
2101 /*ARGSUSED4*/
2102 static int
2103 ufs_setattr(
2104         struct vnode *vp,
2105         struct vattr *vap,
2106         int flags,
2107         struct cred *cr,
2108         caller_context_t *ct)
2109 {
2110         struct inode *ip = VTOI(vp);
2111         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
2112         struct fs *fs;
2113         struct ulockfs *ulp;
2114         char *errmsg1;
2115         char *errmsg2;
2116         long blocks;
2117         long int mask = vap->va_mask;
2118         size_t len1, len2;
2119         int issync;
2120         int trans_size;
2121         int dotrans;
2122         int dorwlock;
2123         int error;
2124         int owner_change;
2125         int dodqlock;
2126         timestruc_t now;
2127         vattr_t oldva;
2128         int retry = 1;
2129         int indeadlock;
2130 
2131         /*
2132          * Cannot set these attributes.
2133          */
2134         if ((mask & AT_NOSET) || (mask & AT_XVATTR))
2135                 return (EINVAL);
2136 
2137         /*
2138          * check for forced unmount
2139          */
2140         if (ufsvfsp == NULL)
2141                 return (EIO);
2142 
2143         fs = ufsvfsp->vfs_fs;
2144         if (fs->fs_ronly != 0)
2145                 return (EROFS);
2146 
2147 again:
2148         errmsg1 = NULL;
2149         errmsg2 = NULL;
2150         dotrans = 0;
2151         dorwlock = 0;
2152         dodqlock = 0;
2153 
2154         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK);
2155         if (error)
2156                 goto out;
2157 
2158         /*
2159          * Acquire i_rwlock before TRANS_BEGIN_CSYNC() if this is a file.
2160          * This follows the protocol for read()/write().
2161          */
2162         if (vp->v_type != VDIR) {
2163                 /*
2164                  * ufs_tryirwlock uses rw_tryenter and checks for SLOCK
2165                  * to avoid an i_rwlock/ufs_lockfs_begin deadlock.  If a
2166                  * deadlock is possible, the operation is retried.
2167                  */
2168                 ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_file);
2169                 if (indeadlock) {
2170                         if (ulp)
2171                                 ufs_lockfs_end(ulp);
2172                         goto again;
2173                 }
2174                 dorwlock = 1;
2175         }
2176 
2177         /*
2178          * Truncate file.  Must have write permission and not be a directory.
2179          */
2180         if (mask & AT_SIZE) {
2181                 rw_enter(&ip->i_contents, RW_WRITER);
2182                 if (vp->v_type == VDIR) {
2183                         error = EISDIR;
2184                         goto update_inode;
2185                 }
2186                 if (error = ufs_iaccess(ip, IWRITE, cr, 0))
2187                         goto update_inode;
2188 
2189                 rw_exit(&ip->i_contents);
2190                 error = TRANS_ITRUNC(ip, vap->va_size, 0, cr);
2191                 if (error) {
2192                         rw_enter(&ip->i_contents, RW_WRITER);
2193                         goto update_inode;
2194                 }
2195 
2196                 if (error == 0 && vap->va_size)
2197                         vnevent_truncate(vp, ct);
2198         }
2199 
2200         if (ulp) {
2201                 trans_size = (int)TOP_SETATTR_SIZE(ip);
2202                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SETATTR, trans_size);
2203                 ++dotrans;
2204         }
2205 
2206         /*
2207          * Acquire i_rwlock after TRANS_BEGIN_CSYNC() if this is a directory.
2208          * This follows the protocol established by
2209          * ufs_link/create/remove/rename/mkdir/rmdir/symlink.
2210          */
2211         if (vp->v_type == VDIR) {
2212                 ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_SETATTR,
2213                     retry_dir);
2214                 if (indeadlock)
2215                         goto again;
2216                 dorwlock = 1;
2217         }
2218 
2219         /*
2220          * Grab quota lock if we are changing the file's owner.
2221          */
2222         if (mask & AT_UID) {
2223                 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2224                 dodqlock = 1;
2225         }
2226         rw_enter(&ip->i_contents, RW_WRITER);
2227 
2228         oldva.va_mode = ip->i_mode;
2229         oldva.va_uid = ip->i_uid;
2230         oldva.va_gid = ip->i_gid;
2231 
2232         vap->va_mask &= ~AT_SIZE;
2233 
2234         error = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
2235             ufs_priv_access, ip);
2236         if (error)
2237                 goto update_inode;
2238 
2239         mask = vap->va_mask;
2240 
2241         /*
2242          * Change file access modes.
2243          */
2244         if (mask & AT_MODE) {
2245                 ip->i_mode = (ip->i_mode & IFMT) | (vap->va_mode & ~IFMT);
2246                 TRANS_INODE(ufsvfsp, ip);
2247                 ip->i_flag |= ICHG;
2248                 if (stickyhack) {
2249                         mutex_enter(&vp->v_lock);
2250                         if ((ip->i_mode & (ISVTX | IEXEC | IFDIR)) == ISVTX)
2251                                 vp->v_flag |= VSWAPLIKE;
2252                         else
2253                                 vp->v_flag &= ~VSWAPLIKE;
2254                         mutex_exit(&vp->v_lock);
2255                 }
2256         }
2257         if (mask & (AT_UID|AT_GID)) {
2258                 if (mask & AT_UID) {
2259                         /*
2260                          * Don't change ownership of the quota inode.
2261                          */
2262                         if (ufsvfsp->vfs_qinod == ip) {
2263                                 ASSERT(ufsvfsp->vfs_qflags & MQ_ENABLED);
2264                                 error = EINVAL;
2265                                 goto update_inode;
2266                         }
2267 
2268                         /*
2269                          * No real ownership change.
2270                          */
2271                         if (ip->i_uid == vap->va_uid) {
2272                                 blocks = 0;
2273                                 owner_change = 0;
2274                         }
2275                         /*
2276                          * Remove the blocks and the file, from the old user's
2277                          * quota.
2278                          */
2279                         else {
2280                                 blocks = ip->i_blocks;
2281                                 owner_change = 1;
2282 
2283                                 (void) chkdq(ip, -blocks, /* force */ 1, cr,
2284                                     (char **)NULL, (size_t *)NULL);
2285                                 (void) chkiq(ufsvfsp, /* change */ -1, ip,
2286                                     (uid_t)ip->i_uid, /* force */ 1, cr,
2287                                     (char **)NULL, (size_t *)NULL);
2288                                 dqrele(ip->i_dquot);
2289                         }
2290 
2291                         ip->i_uid = vap->va_uid;
2292 
2293                         /*
2294                          * There is a real ownership change.
2295                          */
2296                         if (owner_change) {
2297                                 /*
2298                                  * Add the blocks and the file to the new
2299                                  * user's quota.
2300                                  */
2301                                 ip->i_dquot = getinoquota(ip);
2302                                 (void) chkdq(ip, blocks, /* force */ 1, cr,
2303                                     &errmsg1, &len1);
2304                                 (void) chkiq(ufsvfsp, /* change */ 1,
2305                                     (struct inode *)NULL, (uid_t)ip->i_uid,
2306                                     /* force */ 1, cr, &errmsg2, &len2);
2307                         }
2308                 }
2309                 if (mask & AT_GID) {
2310                         ip->i_gid = vap->va_gid;
2311                 }
2312                 TRANS_INODE(ufsvfsp, ip);
2313                 ip->i_flag |= ICHG;
2314         }
2315         /*
2316          * Change file access or modified times.
2317          */
2318         if (mask & (AT_ATIME|AT_MTIME)) {
2319                 /* Check that the time value is within ufs range */
2320                 if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2321                     ((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2322                         error = EOVERFLOW;
2323                         goto update_inode;
2324                 }
2325 
2326                 /*
2327                  * if the "noaccess" mount option is set and only atime
2328                  * update is requested, do nothing. No error is returned.
2329                  */
2330                 if ((ufsvfsp->vfs_noatime) &&
2331                     ((mask & (AT_ATIME|AT_MTIME)) == AT_ATIME))
2332                         goto skip_atime;
2333 
2334                 if (mask & AT_ATIME) {
2335                         ip->i_atime.tv_sec = vap->va_atime.tv_sec;
2336                         ip->i_atime.tv_usec = vap->va_atime.tv_nsec / 1000;
2337                         ip->i_flag &= ~IACC;
2338                 }
2339                 if (mask & AT_MTIME) {
2340                         ip->i_mtime.tv_sec = vap->va_mtime.tv_sec;
2341                         ip->i_mtime.tv_usec = vap->va_mtime.tv_nsec / 1000;
2342                         gethrestime(&now);
2343                         if (now.tv_sec > TIME32_MAX) {
2344                                 /*
2345                                  * In 2038, ctime sticks forever..
2346                                  */
2347                                 ip->i_ctime.tv_sec = TIME32_MAX;
2348                                 ip->i_ctime.tv_usec = 0;
2349                         } else {
2350                                 ip->i_ctime.tv_sec = now.tv_sec;
2351                                 ip->i_ctime.tv_usec = now.tv_nsec / 1000;
2352                         }
2353                         ip->i_flag &= ~(IUPD|ICHG);
2354                         ip->i_flag |= IMODTIME;
2355                 }
2356                 TRANS_INODE(ufsvfsp, ip);
2357                 ip->i_flag |= IMOD;
2358         }
2359 
2360 skip_atime:
2361         /*
2362          * The presence of a shadow inode may indicate an ACL, but does
2363          * not imply an ACL.  Future FSD types should be handled here too
2364          * and check for the presence of the attribute-specific data
2365          * before referencing it.
2366          */
2367         if (ip->i_shadow) {
2368                 /*
2369                  * XXX if ufs_iupdat is changed to sandbagged write fix
2370                  * ufs_acl_setattr to push ip to keep acls consistent
2371                  *
2372                  * Suppress out of inodes messages if we will retry.
2373                  */
2374                 if (retry)
2375                         ip->i_flag |= IQUIET;
2376                 error = ufs_acl_setattr(ip, vap, cr);
2377                 ip->i_flag &= ~IQUIET;
2378         }
2379 
2380 update_inode:
2381         /*
2382          * Setattr always increases the sequence number
2383          */
2384         ip->i_seq++;
2385 
2386         /*
2387          * if nfsd and not logging; push synchronously
2388          */
2389         if ((curthread->t_flag & T_DONTPEND) && !TRANS_ISTRANS(ufsvfsp)) {
2390                 ufs_iupdat(ip, 1);
2391         } else {
2392                 ITIMES_NOLOCK(ip);
2393         }
2394 
2395         rw_exit(&ip->i_contents);
2396         if (dodqlock) {
2397                 rw_exit(&ufsvfsp->vfs_dqrwlock);
2398         }
2399         if (dorwlock)
2400                 rw_exit(&ip->i_rwlock);
2401 
2402         if (ulp) {
2403                 if (dotrans) {
2404                         int terr = 0;
2405                         TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SETATTR,
2406                             trans_size);
2407                         if (error == 0)
2408                                 error = terr;
2409                 }
2410                 ufs_lockfs_end(ulp);
2411         }
2412 out:
2413         /*
2414          * If out of inodes or blocks, see if we can free something
2415          * up from the delete queue.
2416          */
2417         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
2418                 ufs_delete_drain_wait(ufsvfsp, 1);
2419                 retry = 0;
2420                 if (errmsg1 != NULL)
2421                         kmem_free(errmsg1, len1);
2422                 if (errmsg2 != NULL)
2423                         kmem_free(errmsg2, len2);
2424                 goto again;
2425         }
2426         if (errmsg1 != NULL) {
2427                 uprintf(errmsg1);
2428                 kmem_free(errmsg1, len1);
2429         }
2430         if (errmsg2 != NULL) {
2431                 uprintf(errmsg2);
2432                 kmem_free(errmsg2, len2);
2433         }
2434         return (error);
2435 }
2436 
2437 /*ARGSUSED*/
2438 static int
2439 ufs_access(struct vnode *vp, int mode, int flags, struct cred *cr,
2440         caller_context_t *ct)
2441 {
2442         struct inode *ip = VTOI(vp);
2443 
2444         if (ip->i_ufsvfs == NULL)
2445                 return (EIO);
2446 
2447         /*
2448          * The ufs_iaccess function wants to be called with
2449          * mode bits expressed as "ufs specific" bits.
2450          * I.e., VWRITE|VREAD|VEXEC do not make sense to
2451          * ufs_iaccess() but IWRITE|IREAD|IEXEC do.
2452          * But since they're the same we just pass the vnode mode
2453          * bit but just verify that assumption at compile time.
2454          */
2455 #if IWRITE != VWRITE || IREAD != VREAD || IEXEC != VEXEC
2456 #error "ufs_access needs to map Vmodes to Imodes"
2457 #endif
2458         return (ufs_iaccess(ip, mode, cr, 1));
2459 }
2460 
2461 /* ARGSUSED */
2462 static int
2463 ufs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cr,
2464         caller_context_t *ct)
2465 {
2466         struct inode *ip = VTOI(vp);
2467         struct ufsvfs *ufsvfsp;
2468         struct ulockfs *ulp;
2469         int error;
2470         int fastsymlink;
2471 
2472         if (vp->v_type != VLNK) {
2473                 error = EINVAL;
2474                 goto nolockout;
2475         }
2476 
2477         /*
2478          * If the symbolic link is empty there is nothing to read.
2479          * Fast-track these empty symbolic links
2480          */
2481         if (ip->i_size == 0) {
2482                 error = 0;
2483                 goto nolockout;
2484         }
2485 
2486         ufsvfsp = ip->i_ufsvfs;
2487         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READLINK_MASK);
2488         if (error)
2489                 goto nolockout;
2490         /*
2491          * The ip->i_rwlock protects the data blocks used for FASTSYMLINK
2492          */
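             /*
              * A fast symlink keeps its target in the otherwise unused
              * block-pointer area of the inode (starting at i_db[1]), so
              * no separate data block has to be read.
              */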
2493 again:
2494         fastsymlink = 0;
2495         if (ip->i_flag & IFASTSYMLNK) {
2496                 rw_enter(&ip->i_rwlock, RW_READER);
2497                 rw_enter(&ip->i_contents, RW_READER);
2498                 if (ip->i_flag & IFASTSYMLNK) {
2499                         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
2500                             (ip->i_fs->fs_ronly == 0) &&
2501                             (!ufsvfsp->vfs_noatime)) {
2502                                 mutex_enter(&ip->i_tlock);
2503                                 ip->i_flag |= IACC;
2504                                 mutex_exit(&ip->i_tlock);
2505                         }
2506                         error = uiomove((caddr_t)&ip->i_db[1],
2507                             MIN(ip->i_size, uiop->uio_resid),
2508                             UIO_READ, uiop);
2509                         ITIMES(ip);
2510                         ++fastsymlink;
2511                 }
2512                 rw_exit(&ip->i_contents);
2513                 rw_exit(&ip->i_rwlock);
2514         }
2515         if (!fastsymlink) {
2516                 ssize_t size;   /* number of bytes read  */
2517                 caddr_t basep;  /* pointer to input data */
2518                 ino_t ino;
2519                 long  igen;
2520                 struct uio tuio;        /* temp uio struct */
2521                 struct uio *tuiop;
2522                 iovec_t tiov;           /* temp iovec struct */
2523                 char kbuf[FSL_SIZE];    /* buffer to hold fast symlink */
2524                 int tflag = 0;          /* flag to indicate temp vars used */
2525 
2526                 ino = ip->i_number;
2527                 igen = ip->i_gen;
2528                 size = uiop->uio_resid;
2529                 basep = uiop->uio_iov->iov_base;
2530                 tuiop = uiop;
2531 
2532                 rw_enter(&ip->i_rwlock, RW_WRITER);
2533                 rw_enter(&ip->i_contents, RW_WRITER);
2534                 if (ip->i_flag & IFASTSYMLNK) {
2535                         rw_exit(&ip->i_contents);
2536                         rw_exit(&ip->i_rwlock);
2537                         goto again;
2538                 }
2539 
2540                 /* can this be a fast symlink and is it a user buffer? */
2541                 if (ip->i_size <= FSL_SIZE &&
2542                     (uiop->uio_segflg == UIO_USERSPACE ||
2543                     uiop->uio_segflg == UIO_USERISPACE)) {
2544 
2545                         bzero(&tuio, sizeof (struct uio));
2546                         /*
2547                          * Set up a kernel buffer to read the link into.  This
2548                          * avoids a race in which the user buffer could be
2549                          * modified before its contents are copied into the inode.
2550                          */
2551                         size = ip->i_size;
2552                         tiov.iov_len = size;
2553                         tiov.iov_base = kbuf;
2554                         tuio.uio_iov = &tiov;
2555                         tuio.uio_iovcnt = 1;
2556                         tuio.uio_offset = uiop->uio_offset;
2557                         tuio.uio_segflg = UIO_SYSSPACE;
2558                         tuio.uio_fmode = uiop->uio_fmode;
2559                         tuio.uio_extflg = uiop->uio_extflg;
2560                         tuio.uio_limit = uiop->uio_limit;
2561                         tuio.uio_resid = size;
2562 
2563                         basep = tuio.uio_iov->iov_base;
2564                         tuiop = &tuio;
2565                         tflag = 1;
2566                 }
2567 
2568                 error = rdip(ip, tuiop, 0, cr);
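                     /*
                      * Bail out if the read failed or if the inode identity
                      * (number/generation) changed underneath us.
                      */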
2569                 if (!(error == 0 && ip->i_number == ino && ip->i_gen == igen)) {
2570                         rw_exit(&ip->i_contents);
2571                         rw_exit(&ip->i_rwlock);
2572                         goto out;
2573                 }
2574 
2575                 if (tflag == 0)
2576                         size -= uiop->uio_resid;
2577 
2578                 if ((tflag == 0 && ip->i_size <= FSL_SIZE &&
2579                     ip->i_size == size) || (tflag == 1 &&
2580                     tuio.uio_resid == 0)) {
2581                         error = kcopy(basep, &ip->i_db[1], ip->i_size);
2582                         if (error == 0) {
2583                                 ip->i_flag |= IFASTSYMLNK;
2584                                 /*
2585                                  * free page
2586                                  */
2587                                 (void) VOP_PUTPAGE(ITOV(ip),
2588                                     (offset_t)0, PAGESIZE,
2589                                     (B_DONTNEED | B_FREE | B_FORCE | B_ASYNC),
2590                                     cr, ct);
2591                         } else {
2592                                 int i;
2593                                 /* error, clear garbage left behind */
2594                                 for (i = 1; i < NDADDR; i++)
2595                                         ip->i_db[i] = 0;
2596                                 for (i = 0; i < NIADDR; i++)
2597                                         ip->i_ib[i] = 0;
2598                         }
2599                 }
2600                 if (tflag == 1) {
2601                         /* now, copy it into the user buffer */
2602                         error = uiomove((caddr_t)kbuf,
2603                             MIN(size, uiop->uio_resid),
2604                             UIO_READ, uiop);
2605                 }
2606                 rw_exit(&ip->i_contents);
2607                 rw_exit(&ip->i_rwlock);
2608         }
2609 out:
2610         if (ulp) {
2611                 ufs_lockfs_end(ulp);
2612         }
2613 nolockout:
2614         return (error);
2615 }
2616 
2617 /* ARGSUSED */
2618 static int
2619 ufs_fsync(struct vnode *vp, int syncflag, struct cred *cr,
2620         caller_context_t *ct)
2621 {
2622         struct inode *ip = VTOI(vp);
2623         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
2624         struct ulockfs *ulp;
2625         int error;
2626 
2627         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_FSYNC_MASK);
2628         if (error)
2629                 return (error);
2630 
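             /*
              * For logging file systems, push any dirty pages and then
              * commit the already-recorded inode deltas; otherwise fall
              * through to the traditional synchronous inode/data writes.
              */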
2631         if (TRANS_ISTRANS(ufsvfsp)) {
2632                 /*
2633                  * First push out any data pages
2634                  */
2635                 if (vn_has_cached_data(vp) && !(syncflag & FNODSYNC) &&
2636                     (vp->v_type != VCHR) && !(IS_SWAPVP(vp))) {
2637                         error = VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
2638                             0, CRED(), ct);
2639                         if (error)
2640                                 goto out;
2641                 }
2642 
2643                 /*
2644                  * Delta any delayed inode time updates
2645                  * and push the inode to the log.
2646                  * All other inode deltas will have already been delta'd
2647                  * and will be pushed during the commit.
2648                  */
2649                 if (!(syncflag & FDSYNC) &&
2650                     ((ip->i_flag & (IMOD|IMODACC)) == IMODACC)) {
2651                         if (ulp) {
2652                                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_FSYNC,
2653                                     TOP_SYNCIP_SIZE);
2654                         }
2655                         rw_enter(&ip->i_contents, RW_READER);
2656                         mutex_enter(&ip->i_tlock);
2657                         ip->i_flag &= ~IMODTIME;
2658                         mutex_exit(&ip->i_tlock);
2659                         ufs_iupdat(ip, I_SYNC);
2660                         rw_exit(&ip->i_contents);
2661                         if (ulp) {
2662                                 TRANS_END_ASYNC(ufsvfsp, TOP_FSYNC,
2663                                     TOP_SYNCIP_SIZE);
2664                         }
2665                 }
2666 
2667                 /*
2668                  * Commit the Moby transaction
2669                  *
2670                  * Deltas have already been made so we just need to
2671                  * commit them with a synchronous transaction.
2672                  * TRANS_BEGIN_SYNC() will return an error
2673                  * if there are no deltas to commit (i.e. the
2674                  * transaction would be empty).
2675                  */
2676                 if (ulp) {
2677                         TRANS_BEGIN_SYNC(ufsvfsp, TOP_FSYNC, TOP_COMMIT_SIZE,
2678                             error);
2679                         if (error) {
2680                                 error = 0; /* commit wasn't needed */
2681                                 goto out;
2682                         }
2683                         TRANS_END_SYNC(ufsvfsp, error, TOP_FSYNC,
2684                             TOP_COMMIT_SIZE);
2685                 }
2686         } else {        /* not logging */
2687                 if (!(IS_SWAPVP(vp)))
2688                         if (syncflag & FNODSYNC) {
2689                                 /* Just update the inode only */
2690                                 TRANS_IUPDAT(ip, 1);
2691                                 error = 0;
2692                         } else if (syncflag & FDSYNC)
2693                                 /* Do data-synchronous writes */
2694                                 error = TRANS_SYNCIP(ip, 0, I_DSYNC, TOP_FSYNC);
2695                         else
2696                                 /* Do synchronous writes */
2697                                 error = TRANS_SYNCIP(ip, 0, I_SYNC, TOP_FSYNC);
2698 
2699                 rw_enter(&ip->i_contents, RW_WRITER);
2700                 if (!error)
2701                         error = ufs_sync_indir(ip);
2702                 rw_exit(&ip->i_contents);
2703         }
2704 out:
2705         if (ulp) {
2706                 ufs_lockfs_end(ulp);
2707         }
2708         return (error);
2709 }
2710 
2711 /*ARGSUSED*/
2712 static void
2713 ufs_inactive(struct vnode *vp, struct cred *cr, caller_context_t *ct)
2714 {
2715         ufs_iinactive(VTOI(vp));
2716 }
2717 
2718 /*
2719  * Unix file system operations having to do with directory manipulation.
2720  */
2721 int ufs_lookup_idle_count = 2;  /* Number of inodes to idle each time */
2722 /* ARGSUSED */
2723 static int
2724 ufs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
2725         struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cr,
2726         caller_context_t *ct, int *direntflags, pathname_t *realpnp)
2727 {
2728         struct inode *ip;
2729         struct inode *sip;
2730         struct inode *xip;
2731         struct ufsvfs *ufsvfsp;
2732         struct ulockfs *ulp;
2733         struct vnode *vp;
2734         int error;
2735 
2736         /*
2737          * Check flags for type of lookup (regular file or attribute file)
2738          */
2739 
2740         ip = VTOI(dvp);
2741 
2742         if (flags & LOOKUP_XATTR) {
2743 
2744                 /*
2745                  * If not mounted with XATTR support then return EINVAL
2746                  */
2747 
2748                 if (!(ip->i_ufsvfs->vfs_vfs->vfs_flag & VFS_XATTR))
2749                         return (EINVAL);
2750                 /*
2751                  * We don't allow recursive attributes...
2752                  * Maybe someday we will.
2753                  */
2754                 if ((ip->i_cflags & IXATTR)) {
2755                         return (EINVAL);
2756                 }
2757 
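                     /*
                      * Look for the hidden attribute directory in the DNLC
                      * first; on a miss, fetch (and possibly create) it via
                      * ufs_xattr_getattrdir() and prime the cache.
                      */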
2758                 if ((vp = dnlc_lookup(dvp, XATTR_DIR_NAME)) == NULL) {
2759                         error = ufs_xattr_getattrdir(dvp, &sip, flags, cr);
2760                         if (error) {
2761                                 *vpp = NULL;
2762                                 goto out;
2763                         }
2764 
2765                         vp = ITOV(sip);
2766                         dnlc_update(dvp, XATTR_DIR_NAME, vp);
2767                 }
2768 
2769                 /*
2770                  * Check accessibility of directory.
2771                  */
2772                 if (vp == DNLC_NO_VNODE) {
2773                         VN_RELE(vp);
2774                         error = ENOENT;
2775                         goto out;
2776                 }
2777                 if ((error = ufs_iaccess(VTOI(vp), IEXEC, cr, 1)) != 0) {
2778                         VN_RELE(vp);
2779                         goto out;
2780                 }
2781 
2782                 *vpp = vp;
2783                 return (0);
2784         }
2785 
2786         /*
2787          * Check for a null component, which we should treat as
2788          * looking at dvp from within its parent, so we don't
2789          * need a call to ufs_iaccess(), as it has already been
2790          * done.
2791          */
2792         if (nm[0] == 0) {
2793                 VN_HOLD(dvp);
2794                 error = 0;
2795                 *vpp = dvp;
2796                 goto out;
2797         }
2798 
2799         /*
2800          * Check for ".", i.e. the directory itself.  This is a quick
2801          * check that avoids adding "." entries to the dnlc (which have
2802          * been seen to occupy >10% of the cache).
2803          */
2804         if ((nm[0] == '.') && (nm[1] == 0)) {
2805                 /*
2806                  * Don't return without checking accessibility
2807                  * of the directory. We only need the hold on dvp if
2808                  * we are going to return it.
2809                  */
2810                 if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) == 0) {
2811                         VN_HOLD(dvp);
2812                         *vpp = dvp;
2813                 }
2814                 goto out;
2815         }
2816 
2817         /*
2818          * Fast path: Check the directory name lookup cache.
2819          */
2820         if (vp = dnlc_lookup(dvp, nm)) {
2821                 /*
2822                  * Check accessibility of directory.
2823                  */
2824                 if ((error = ufs_iaccess(ip, IEXEC, cr, 1)) != 0) {
2825                         VN_RELE(vp);
2826                         goto out;
2827                 }
2828                 if (vp == DNLC_NO_VNODE) {
2829                         VN_RELE(vp);
2830                         error = ENOENT;
2831                         goto out;
2832                 }
2833                 xip = VTOI(vp);
2834                 ulp = NULL;
2835                 goto fastpath;
2836         }
2837 
2838         /*
2839          * Keep the idle queue from getting too long by
2840          * idling two inodes before attempting to allocate another.
2841          * This operation must be performed before entering
2842          * lockfs or a transaction.
2843          */
2844         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
2845                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
2846                         ins.in_lidles.value.ul += ufs_lookup_idle_count;
2847                         ufs_idle_some(ufs_lookup_idle_count);
2848                 }
2849 
2850 retry_lookup:
2851         /*
2852          * Check accessibility of directory.
2853          */
2854         if (error = ufs_diraccess(ip, IEXEC, cr))
2855                 goto out;
2856 
2857         ufsvfsp = ip->i_ufsvfs;
2858         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK);
2859         if (error)
2860                 goto out;
2861 
2862         error = ufs_dirlook(ip, nm, &xip, cr, 1, 0);
2863 
2864 fastpath:
2865         if (error == 0) {
2866                 ip = xip;
2867                 *vpp = ITOV(ip);
2868 
2869                 /*
2870                  * If vnode is a device return special vnode instead.
2871                  */
2872                 if (IS_DEVVP(*vpp)) {
2873                         struct vnode *newvp;
2874 
2875                         newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type,
2876                             cr);
2877                         VN_RELE(*vpp);
2878                         if (newvp == NULL)
2879                                 error = ENOSYS;
2880                         else
2881                                 *vpp = newvp;
2882                 } else if (ip->i_cflags & ICOMPRESS) {
2883                         struct vnode *newvp;
2884 
2885                         /*
2886                          * Compressed file, substitute dcfs vnode
2887                          */
2888                         newvp = decompvp(*vpp, cr, ct);
2889                         VN_RELE(*vpp);
2890                         if (newvp == NULL)
2891                                 error = ENOSYS;
2892                         else
2893                                 *vpp = newvp;
2894                 }
2895         }
2896         if (ulp) {
2897                 ufs_lockfs_end(ulp);
2898         }
2899 
2900         if (error == EAGAIN)
2901                 goto retry_lookup;
2902 
2903 out:
2904         return (error);
2905 }
2906 
2907 /*ARGSUSED*/
2908 static int
2909 ufs_create(struct vnode *dvp, char *name, struct vattr *vap, enum vcexcl excl,
2910         int mode, struct vnode **vpp, struct cred *cr, int flag,
2911         caller_context_t *ct, vsecattr_t *vsecp)
2912 {
2913         struct inode *ip;
2914         struct inode *xip;
2915         struct inode *dip;
2916         struct vnode *xvp;
2917         struct ufsvfs *ufsvfsp;
2918         struct ulockfs *ulp;
2919         int error;
2920         int issync;
2921         int truncflag;
2922         int trans_size;
2923         int noentry;
2924         int defer_dip_seq_update = 0;   /* need to defer update of dip->i_seq */
2925         int retry = 1;
2926         int indeadlock;
2927 
2928 again:
2929         ip = VTOI(dvp);
2930         ufsvfsp = ip->i_ufsvfs;
2931         truncflag = 0;
2932 
2933         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_CREATE_MASK);
2934         if (error)
2935                 goto out;
2936 
2937         if (ulp) {
2938                 trans_size = (int)TOP_CREATE_SIZE(ip);
2939                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_CREATE, trans_size);
2940         }
2941 
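             /*
              * Strip the sticky bit from the requested mode unless the
              * caller is privileged to set it.
              */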
2942         if ((vap->va_mode & VSVTX) && secpolicy_vnode_stky_modify(cr) != 0)
2943                 vap->va_mode &= ~VSVTX;
2944 
2945         if (*name == '\0') {
2946                 /*
2947                  * Null component name refers to the directory itself.
2948                  */
2949                 VN_HOLD(dvp);
2950                 /*
2951                  * Even though this is an error case, we need to grab the
2952                  * quota lock since the error handling code below is common.
2953                  */
2954                 rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
2955                 rw_enter(&ip->i_contents, RW_WRITER);
2956                 error = EEXIST;
2957         } else {
2958                 xip = NULL;
2959                 noentry = 0;
2960                 /*
2961                  * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
2962                  * to avoid an i_rwlock vs. ufs_lockfs_begin deadlock. If a
2963                  * deadlock is possible, the operation is retried.
2964                  */
2965                 ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_CREATE,
2966                     retry_dir);
2967                 if (indeadlock)
2968                         goto again;
2969 
2970                 xvp = dnlc_lookup(dvp, name);
2971                 if (xvp == DNLC_NO_VNODE) {
2972                         noentry = 1;
2973                         VN_RELE(xvp);
2974                         xvp = NULL;
2975                 }
2976                 if (xvp) {
2977                         rw_exit(&ip->i_rwlock);
2978                         if (error = ufs_iaccess(ip, IEXEC, cr, 1)) {
2979                                 VN_RELE(xvp);
2980                         } else {
2981                                 error = EEXIST;
2982                                 xip = VTOI(xvp);
2983                         }
2984                 } else {
2985                         /*
2986                          * Suppress file system full message if we will retry
2987                          */
2988                         error = ufs_direnter_cm(ip, name, DE_CREATE,
2989                             vap, &xip, cr, (noentry | (retry ? IQUIET : 0)));
2990                         if (error == EAGAIN) {
2991                                 if (ulp) {
2992                                         TRANS_END_CSYNC(ufsvfsp, error, issync,
2993                                             TOP_CREATE, trans_size);
2994                                         ufs_lockfs_end(ulp);
2995                                 }
2996                                 goto again;
2997                         }
2998                         rw_exit(&ip->i_rwlock);
2999                 }
3000                 ip = xip;
3001                 if (ip != NULL) {
3002                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
3003                         rw_enter(&ip->i_contents, RW_WRITER);
3004                 }
3005         }
3006 
3007         /*
3008          * If the file already exists and this is a non-exclusive create,
3009          * check permissions and allow access for non-directories.
3010          * Read-only create of an existing directory is also allowed.
3011          * We fail an exclusive create of anything which already exists.
3012          */
3013         if (error == EEXIST) {
3014                 dip = VTOI(dvp);
3015                 if (excl == NONEXCL) {
3016                         if ((((ip->i_mode & IFMT) == IFDIR) ||
3017                             ((ip->i_mode & IFMT) == IFATTRDIR)) &&
3018                             (mode & IWRITE))
3019                                 error = EISDIR;
3020                         else if (mode)
3021                                 error = ufs_iaccess(ip, mode, cr, 0);
3022                         else
3023                                 error = 0;
3024                 }
3025                 if (error) {
3026                         rw_exit(&ip->i_contents);
3027                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3028                         VN_RELE(ITOV(ip));
3029                         goto unlock;
3030                 }
3031                 /*
3032                  * If the error EEXIST was set, then i_seq can not
3033                  * have been updated. The sequence number interface
3034                  * is defined such that a non-error VOP_CREATE must
3035                  * increase the dir va_seq by at least one. If we
3036                  * have cleared the error, increase i_seq. Note that
3037                  * we are increasing the dir i_seq and in rare cases
3038                  * ip may actually be from the dvp, so we already have
3039                  * the locks and it will not be subject to truncation.
3040                  * In case we have to update i_seq of the parent
3041                  * directory dip, we have to defer it until we have
3042                  * released our locks on ip due to lock ordering requirements.
3043                  */
3044                 if (ip != dip)
3045                         defer_dip_seq_update = 1;
3046                 else
3047                         ip->i_seq++;
3048 
3049                 if (((ip->i_mode & IFMT) == IFREG) &&
3050                     (vap->va_mask & AT_SIZE) && vap->va_size == 0) {
3051                         /*
3052                          * Truncate regular files, if requested by caller.
3053                          * Grab i_rwlock to make sure no one else is
3054                          * currently writing to the file (we promised
3055                          * bmap we would do this).
3056                          * Must get the locks in the correct order.
3057                          */
3058                         if (ip->i_size == 0) {
3059                                 ip->i_flag |= ICHG | IUPD;
3060                                 ip->i_seq++;
3061                                 TRANS_INODE(ufsvfsp, ip);
3062                         } else {
3063                                 /*
3064                                  * Large Files: Why this check here?
3065                                  * Though we do it in vn_create() we really
3066                                  * want to guarantee that we do not destroy
3067                                  * Large file data by atomically checking
3068                                  * the size while holding the contents
3069                                  * lock.
3070                                  */
3071                                 if (flag && !(flag & FOFFMAX) &&
3072                                     ((ip->i_mode & IFMT) == IFREG) &&
3073                                     (ip->i_size > (offset_t)MAXOFF32_T)) {
3074                                         rw_exit(&ip->i_contents);
3075                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3076                                         error = EOVERFLOW;
3077                                         goto unlock;
3078                                 }
3079                                 if (TRANS_ISTRANS(ufsvfsp))
3080                                         truncflag++;
3081                                 else {
3082                                         rw_exit(&ip->i_contents);
3083                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3084                                         ufs_tryirwlock_trans(&ip->i_rwlock,
3085                                             RW_WRITER, TOP_CREATE,
3086                                             retry_file);
3087                                         if (indeadlock) {
3088                                                 VN_RELE(ITOV(ip));
3089                                                 goto again;
3090                                         }
3091                                         rw_enter(&ufsvfsp->vfs_dqrwlock,
3092                                             RW_READER);
3093                                         rw_enter(&ip->i_contents, RW_WRITER);
3094                                         (void) ufs_itrunc(ip, (u_offset_t)0, 0,
3095                                             cr);
3096                                         rw_exit(&ip->i_rwlock);
3097                                 }
3098 
3099                         }
3100                         if (error == 0) {
3101                                 vnevent_create(ITOV(ip), ct);
3102                         }
3103                 }
3104         }
3105 
3106         if (error) {
3107                 if (ip != NULL) {
3108                         rw_exit(&ufsvfsp->vfs_dqrwlock);
3109                         rw_exit(&ip->i_contents);
3110                 }
3111                 goto unlock;
3112         }
3113 
3114         *vpp = ITOV(ip);
3115         ITIMES(ip);
3116         rw_exit(&ip->i_contents);
3117         rw_exit(&ufsvfsp->vfs_dqrwlock);
3118 
3119         /*
3120          * If vnode is a device return special vnode instead.
3121          */
3122         if (!error && IS_DEVVP(*vpp)) {
3123                 struct vnode *newvp;
3124 
3125                 newvp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
3126                 VN_RELE(*vpp);
3127                 if (newvp == NULL) {
3128                         error = ENOSYS;
3129                         goto unlock;
3130                 }
3131                 truncflag = 0;
3132                 *vpp = newvp;
3133         }
3134 unlock:
3135 
3136         /*
3137          * Do the deferred update of the parent directory's sequence
3138          * number now.
3139          */
3140         if (defer_dip_seq_update == 1) {
3141                 rw_enter(&dip->i_contents, RW_READER);
3142                 mutex_enter(&dip->i_tlock);
3143                 dip->i_seq++;
3144                 mutex_exit(&dip->i_tlock);
3145                 rw_exit(&dip->i_contents);
3146         }
3147 
3148         if (ulp) {
3149                 int terr = 0;
3150 
3151                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_CREATE,
3152                     trans_size);
3153 
3154                 /*
3155                  * If we haven't had a more interesting failure
3156                  * already, then anything that might've happened
3157                  * here should be reported.
3158                  */
3159                 if (error == 0)
3160                         error = terr;
3161         }
3162 
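             /*
              * If a truncate of an existing file was requested on a logging
              * file system, it was deferred above; perform it now, after
              * the create transaction has ended.
              */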
3163         if (!error && truncflag) {
3164                 ufs_tryirwlock(&ip->i_rwlock, RW_WRITER, retry_trunc);
3165                 if (indeadlock) {
3166                         if (ulp)
3167                                 ufs_lockfs_end(ulp);
3168                         VN_RELE(ITOV(ip));
3169                         goto again;
3170                 }
3171                 (void) TRANS_ITRUNC(ip, (u_offset_t)0, 0, cr);
3172                 rw_exit(&ip->i_rwlock);
3173         }
3174 
3175         if (ulp)
3176                 ufs_lockfs_end(ulp);
3177 
3178         /*
3179          * If no inodes available, try to free one up out of the
3180          * pending delete queue.
3181          */
3182         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
3183                 ufs_delete_drain_wait(ufsvfsp, 1);
3184                 retry = 0;
3185                 goto again;
3186         }
3187 
3188 out:
3189         return (error);
3190 }
3191 
3192 extern int ufs_idle_max;
3193 /*ARGSUSED*/
3194 static int
3195 ufs_remove(struct vnode *vp, char *nm, struct cred *cr,
3196         caller_context_t *ct, int flags)
3197 {
3198         struct inode *ip = VTOI(vp);
3199         struct ufsvfs *ufsvfsp  = ip->i_ufsvfs;
3200         struct ulockfs *ulp;
3201         vnode_t *rmvp = NULL;   /* Vnode corresponding to name being removed */
3202         int indeadlock;
3203         int error;
3204         int issync;
3205         int trans_size;
3206 
3207         /*
3208          * don't let the delete queue get too long
3209          */
3210         if (ufsvfsp == NULL) {
3211                 error = EIO;
3212                 goto out;
3213         }
3214         if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
3215                 ufs_delete_drain(vp->v_vfsp, 1, 1);
3216 
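             /*
              * Resolve the name first so the remove event can be delivered
              * on the vnode that is about to be removed.
              */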
3217         error = ufs_eventlookup(vp, nm, cr, &rmvp);
3218         if (rmvp != NULL) {
3219                 /* Only send the event if there were no errors */
3220                 if (error == 0)
3221                         vnevent_remove(rmvp, vp, nm, ct);
3222                 VN_RELE(rmvp);
3223         }
3224 
3225 retry_remove:
3226         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_REMOVE_MASK);
3227         if (error)
3228                 goto out;
3229 
3230         if (ulp)
3231                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_REMOVE,
3232                     trans_size = (int)TOP_REMOVE_SIZE(VTOI(vp)));
3233 
3234         /*
3235          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3236          * to avoid an i_rwlock vs. ufs_lockfs_begin deadlock. If a
3237          * deadlock is possible, the operation is retried.
3238          */
3239         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_REMOVE, retry);
3240         if (indeadlock)
3241                 goto retry_remove;
3242         error = ufs_dirremove(ip, nm, (struct inode *)0, (struct vnode *)0,
3243             DR_REMOVE, cr);
3244         rw_exit(&ip->i_rwlock);
3245 
3246         if (ulp) {
3247                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_REMOVE, trans_size);
3248                 ufs_lockfs_end(ulp);
3249         }
3250 
3251 out:
3252         return (error);
3253 }
3254 
3255 /*
3256  * Link a file or a directory.  Only privileged processes are allowed to
3257  * make links to directories.
3258  */
3259 /*ARGSUSED*/
3260 static int
3261 ufs_link(struct vnode *tdvp, struct vnode *svp, char *tnm, struct cred *cr,
3262         caller_context_t *ct, int flags)
3263 {
3264         struct inode *sip;
3265         struct inode *tdp = VTOI(tdvp);
3266         struct ufsvfs *ufsvfsp = tdp->i_ufsvfs;
3267         struct ulockfs *ulp;
3268         struct vnode *realvp;
3269         int error;
3270         int issync;
3271         int trans_size;
3272         int isdev;
3273         int indeadlock;
3274 
3275 retry_link:
3276         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LINK_MASK);
3277         if (error)
3278                 goto out;
3279 
3280         if (ulp)
3281                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_LINK,
3282                     trans_size = (int)TOP_LINK_SIZE(VTOI(tdvp)));
3283 
3284         if (VOP_REALVP(svp, &realvp, ct) == 0)
3285                 svp = realvp;
3286 
3287         /*
3288          * Make sure link for extended attributes is valid
3289          * We only support hard linking of attr in ATTRDIR to ATTRDIR
3290          *
3291          * Make certain we don't attempt to look at a device node as
3292          * a ufs inode.
3293          */
3294 
3295         isdev = IS_DEVVP(svp);
3296         if (((isdev == 0) && ((VTOI(svp)->i_cflags & IXATTR) == 0) &&
3297             ((tdp->i_mode & IFMT) == IFATTRDIR)) ||
3298             ((isdev == 0) && (VTOI(svp)->i_cflags & IXATTR) &&
3299             ((tdp->i_mode & IFMT) == IFDIR))) {
3300                 error = EINVAL;
3301                 goto unlock;
3302         }
3303 
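             /*
              * Only privileged callers may create a hard link to a
              * directory, and linking to a file owned by another user may
              * also require privilege.
              */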
3304         sip = VTOI(svp);
3305         if ((svp->v_type == VDIR &&
3306             secpolicy_fs_linkdir(cr, ufsvfsp->vfs_vfs) != 0) ||
3307             (sip->i_uid != crgetuid(cr) && secpolicy_basic_link(cr) != 0)) {
3308                 error = EPERM;
3309                 goto unlock;
3310         }
3311 
3312         /*
3313          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3314          * to avoid an i_rwlock vs. ufs_lockfs_begin deadlock. If a
3315          * deadlock is possible, the operation is retried.
3316          */
3317         ufs_tryirwlock_trans(&tdp->i_rwlock, RW_WRITER, TOP_LINK, retry);
3318         if (indeadlock)
3319                 goto retry_link;
3320         error = ufs_direnter_lr(tdp, tnm, DE_LINK, (struct inode *)0,
3321             sip, cr);
3322         rw_exit(&tdp->i_rwlock);
3323 
3324 unlock:
3325         if (ulp) {
3326                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_LINK, trans_size);
3327                 ufs_lockfs_end(ulp);
3328         }
3329 
3330         if (!error) {
3331                 vnevent_link(svp, ct);
3332         }
3333 out:
3334         return (error);
3335 }
3336 
3337 uint64_t ufs_rename_retry_cnt;
3338 uint64_t ufs_rename_upgrade_retry_cnt;
3339 uint64_t ufs_rename_dircheck_retry_cnt;
3340 clock_t  ufs_rename_backoff_delay = 1;
3341 
3342 /*
3343  * Rename a file or directory.
3344  * We are given the vnode and entry string of the source and the
3345  * vnode and entry string of the place we want to move the source
3346  * to (the target). The essential operation is:
3347  *      unlink(target);
3348  *      link(source, target);
3349  *      unlink(source);
3350  * but "atomically".  Can't do full commit without saving state in
3351  * the inode on disk, which isn't feasible at this time.  Best we
3352  * can do is always guarantee that the TARGET exists.
3353  */
3354 
3355 /*ARGSUSED*/
3356 static int
3357 ufs_rename(
3358         struct vnode *sdvp,             /* old (source) parent vnode */
3359         char *snm,                      /* old (source) entry name */
3360         struct vnode *tdvp,             /* new (target) parent vnode */
3361         char *tnm,                      /* new (target) entry name */
3362         struct cred *cr,
3363         caller_context_t *ct,
3364         int flags)
3365 {
3366         struct inode *sip = NULL;       /* source inode */
3367         struct inode *ip = NULL;        /* check inode */
3368         struct inode *sdp;              /* old (source) parent inode */
3369         struct inode *tdp;              /* new (target) parent inode */
3370         struct vnode *svp = NULL;       /* source vnode */
3371         struct vnode *tvp = NULL;       /* target vnode, if it exists */
3372         struct vnode *realvp;
3373         struct ufsvfs *ufsvfsp;
3374         struct ulockfs *ulp;
3375         struct ufs_slot slot;
3376         timestruc_t now;
3377         int error;
3378         int issync;
3379         int trans_size;
3380         krwlock_t *first_lock;
3381         krwlock_t *second_lock;
3382         krwlock_t *reverse_lock;
3383         int serr, terr;
3384 
3385         sdp = VTOI(sdvp);
3386         slot.fbp = NULL;
3387         ufsvfsp = sdp->i_ufsvfs;
3388 
3389         if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3390                 tdvp = realvp;
3391 
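             /*
              * Look up the source and target names here only to deliver the
              * rename vnevents; the rename itself re-resolves them below
              * under the proper locks.
              */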
3392         terr = ufs_eventlookup(tdvp, tnm, cr, &tvp);
3393         serr = ufs_eventlookup(sdvp, snm, cr, &svp);
3394 
3395         if ((serr == 0) && ((terr == 0) || (terr == ENOENT))) {
3396                 if (tvp != NULL)
3397                         vnevent_rename_dest(tvp, tdvp, tnm, ct);
3398 
3399                 /*
3400                  * Notify the target directory of the rename event
3401                  * if source and target directories are not the same.
3402                  */
3403                 if (sdvp != tdvp)
3404                         vnevent_rename_dest_dir(tdvp, ct);
3405 
3406                 if (svp != NULL)
3407                         vnevent_rename_src(svp, sdvp, snm, ct);
3408         }
3409 
3410         if (tvp != NULL)
3411                 VN_RELE(tvp);
3412 
3413         if (svp != NULL)
3414                 VN_RELE(svp);
3415 
3416 retry_rename:
3417         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RENAME_MASK);
3418         if (error)
3419                 goto out;
3420 
3421         if (ulp)
3422                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RENAME,
3423                     trans_size = (int)TOP_RENAME_SIZE(sdp));
3424 
3425         if (VOP_REALVP(tdvp, &realvp, ct) == 0)
3426                 tdvp = realvp;
3427 
3428         tdp = VTOI(tdvp);
3429 
3430         /*
3431          * We only allow renaming of attributes from ATTRDIR to ATTRDIR.
3432          */
3433         if ((tdp->i_mode & IFMT) != (sdp->i_mode & IFMT)) {
3434                 error = EINVAL;
3435                 goto unlock;
3436         }
3437 
3438         /*
3439          * Check accessibility of directory.
3440          */
3441         if (error = ufs_diraccess(sdp, IEXEC, cr))
3442                 goto unlock;
3443 
3444         /*
3445          * Look up inode of file we're supposed to rename.
3446          */
3447         gethrestime(&now);
3448         if (error = ufs_dirlook(sdp, snm, &sip, cr, 0, 0)) {
3449                 if (error == EAGAIN) {
3450                         if (ulp) {
3451                                 TRANS_END_CSYNC(ufsvfsp, error, issync,
3452                                     TOP_RENAME, trans_size);
3453                                 ufs_lockfs_end(ulp);
3454                         }
3455                         goto retry_rename;
3456                 }
3457 
3458                 goto unlock;
3459         }
3460 
3461         /*
3462          * Lock both the source and target directories (they may be
3463          * the same) to provide the atomicity semantics that were
3464          * previously provided by the per file system vfs_rename_lock.
3465          *
3466          * With vfs_rename_lock removed to allow simultaneous renames
3467          * within a file system, ufs_dircheckpath can deadlock while
3468          * traversing back to ensure that source is not a parent directory
3469          * of target parent directory. This is because we get into
3470          * ufs_dircheckpath with the sdp and tdp locks held as RW_WRITER.
3471          * If the tdp and sdp of the simultaneous renames happen to be
3472          * in the path of each other, it can lead to a deadlock. This
3473          * can be avoided by getting the locks as RW_READER here and then
3474          * upgrading to RW_WRITER after completing the ufs_dircheckpath.
3475          *
3476          * We hold the target directory's i_rwlock after calling
3477          * ufs_lockfs_begin but in many other operations (like ufs_readdir)
3478          * VOP_RWLOCK is explicitly called by the filesystem independent code
3479          * before calling the file system operation. In these cases the order
3480          * is reversed (i.e. i_rwlock is taken first and then ufs_lockfs_begin
3481          * is called). This is fine as long as ufs_lockfs_begin acts as a VOP
3482          * counter but with ufs_quiesce setting the SLOCK bit this becomes a
3483          * synchronizing object which might lead to a deadlock. So we use
3484          * rw_tryenter instead of rw_enter. If we fail to get this lock and
3485          * find that SLOCK bit is set, we call ufs_lockfs_end and restart the
3486          * operation.
3487          */
3488 retry:
3489         first_lock = &tdp->i_rwlock;
3490         second_lock = &sdp->i_rwlock;
3491 retry_firstlock:
3492         if (!rw_tryenter(first_lock, RW_READER)) {
3493                 /*
3494                  * We didn't get the lock. Check if the SLOCK is set in the
3495                  * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3496                  * and wait for SLOCK to be cleared.
3497                  */
3498 
3499                 if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
3500                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,
3501                             trans_size);
3502                         ufs_lockfs_end(ulp);
3503                         goto retry_rename;
3504 
3505                 } else {
3506                         /*
3507                          * SLOCK isn't set so this is a genuine synchronization
3508                          * case. Let's try again after giving them a breather.
3509                          */
3510                         delay(RETRY_LOCK_DELAY);
3511                         goto  retry_firstlock;
3512                 }
3513         }
3514         /*
3515          * Need to check whether tdp and sdp are the same.
3516          */
3517         if ((tdp != sdp) && (!rw_tryenter(second_lock, RW_READER))) {
3518                 /*
3519                  * We didn't get the lock. Check if the SLOCK is set in the
3520                  * ufsvfs. If yes, we might be in a deadlock. Safer to give up
3521                  * and wait for SLOCK to be cleared.
3522                  */
3523 
3524                 rw_exit(first_lock);
3525                 if (ulp && ULOCKFS_IS_SLOCK(ulp)) {
3526                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME,
3527                             trans_size);
3528                         ufs_lockfs_end(ulp);
3529                         goto retry_rename;
3530 
3531                 } else {
3532                         /*
3533                          * So we couldn't get the second level peer lock *and*
3534                          * the SLOCK bit isn't set. Too bad we can be
3535                          * the SLOCK bit isn't set. We may be contending with
3536                          * someone who wants these locks the other way
3537                          * round. Reverse the locks in case there is heavy
3538                          * contention for the second level lock.
3539                         reverse_lock = first_lock;
3540                         first_lock = second_lock;
3541                         second_lock = reverse_lock;
3542                         ufs_rename_retry_cnt++;
3543                         goto  retry_firstlock;
3544                 }
3545         }
3546 
3547         if (sip == tdp) {
3548                 error = EINVAL;
3549                 goto errout;
3550         }
3551         /*
3552          * Make sure we can delete the source entry.  This requires
3553          * write permission on the containing directory.
3554          * Check for sticky directories.
3555          */
3556         rw_enter(&sdp->i_contents, RW_READER);
3557         rw_enter(&sip->i_contents, RW_READER);
3558         if ((error = ufs_iaccess(sdp, IWRITE, cr, 0)) != 0 ||
3559             (error = ufs_sticky_remove_access(sdp, sip, cr)) != 0) {
3560                 rw_exit(&sip->i_contents);
3561                 rw_exit(&sdp->i_contents);
3562                 goto errout;
3563         }
3564 
3565         /*
3566          * If this is a rename of a directory and the parent is
3567          * different (".." must be changed), then the source
3568          * directory must not be in the directory hierarchy
3569          * above the target, as this would orphan everything
3570          * below the source directory.  Also the user must have
3571          * write permission in the source so as to be able to
3572          * change "..".
3573          */
3574         if ((((sip->i_mode & IFMT) == IFDIR) ||
3575             ((sip->i_mode & IFMT) == IFATTRDIR)) && sdp != tdp) {
3576                 ino_t   inum;
3577 
3578                 if (error = ufs_iaccess(sip, IWRITE, cr, 0)) {
3579                         rw_exit(&sip->i_contents);
3580                         rw_exit(&sdp->i_contents);
3581                         goto errout;
3582                 }
3583                 inum = sip->i_number;
3584                 rw_exit(&sip->i_contents);
3585                 rw_exit(&sdp->i_contents);
3586                 if ((error = ufs_dircheckpath(inum, tdp, sdp, cr))) {
3587                         /*
3588                          * If we got EAGAIN ufs_dircheckpath detected a
3589                          * potential deadlock and backed out. We need
3590                          * to retry the operation since sdp and tdp have
3591                          * to be released to avoid the deadlock.
3592                          */
3593                         if (error == EAGAIN) {
3594                                 rw_exit(&tdp->i_rwlock);
3595                                 if (tdp != sdp)
3596                                         rw_exit(&sdp->i_rwlock);
3597                                 delay(ufs_rename_backoff_delay);
3598                                 ufs_rename_dircheck_retry_cnt++;
3599                                 goto retry;
3600                         }
3601                         goto errout;
3602                 }
3603         } else {
3604                 rw_exit(&sip->i_contents);
3605                 rw_exit(&sdp->i_contents);
3606         }
3607 
3608 
3609         /*
3610          * Check for renaming '.' or '..' or alias of '.'
3611          */
3612         if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0 || sdp == sip) {
3613                 error = EINVAL;
3614                 goto errout;
3615         }
3616 
3617         /*
3618          * Simultaneous renames can deadlock in ufs_dircheckpath since it
3619          * tries to traverse back the file tree with both tdp and sdp held
3620          * as RW_WRITER. To avoid that we have to hold the tdp and sdp locks
3621          * as RW_READERS until ufs_dircheckpath is done.
3622          * Now that ufs_dircheckpath is done, we can upgrade the locks
3623          * to RW_WRITER.
3624          */
3625         if (!rw_tryupgrade(&tdp->i_rwlock)) {
3626                 /*
3627                  * The upgrade failed. We have to give up the lock
3628                  * so as to avoid deadlocking with someone else who is
3629                  * waiting for the writer lock. With the lock gone, we
3630                  * cannot be sure the checks done above will hold
3631                  * good when we eventually get them back as writer.
3632                  * So if we can't upgrade we drop the locks and retry
3633                  * everything again.
3634                  */
3635                 rw_exit(&tdp->i_rwlock);
3636                 if (tdp != sdp)
3637                         rw_exit(&sdp->i_rwlock);
3638                 delay(ufs_rename_backoff_delay);
3639                 ufs_rename_upgrade_retry_cnt++;
3640                 goto retry;
3641         }
3642         if (tdp != sdp) {
3643                 if (!rw_tryupgrade(&sdp->i_rwlock)) {
3644                         /*
3645                          * The upgrade failed. We have to give up the lock
3646                          * so as to avoid deadlocking with someone else who is
3647                          * waiting for the writer lock. With the lock gone, we
3648                          * cannot be sure the checks done above will hold
3649                          * good when we eventually get them back as writer.
3650                          * So if we can't upgrade we drop the locks and retry
3651                          * everything again.
3652                          */
3653                         rw_exit(&tdp->i_rwlock);
3654                         rw_exit(&sdp->i_rwlock);
3655                         delay(ufs_rename_backoff_delay);
3656                         ufs_rename_upgrade_retry_cnt++;
3657                         goto retry;
3658                 }
3659         }
3660 
3661         /*
3662          * Now that all the locks are held, check to make sure another
3663          * thread didn't slip in and remove or replace the source entry.
3664          */
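             /*
              * If sip's ctime is newer than the time sampled before
              * ufs_dirlook(), something may have changed since the lookup,
              * so re-scan the source directory to confirm snm still exists.
              */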
3665         slot.status = NONE;
3666         if ((sip->i_ctime.tv_usec * 1000) > now.tv_nsec ||
3667             sip->i_ctime.tv_sec > now.tv_sec) {
3668                 rw_enter(&sdp->i_ufsvfs->vfs_dqrwlock, RW_READER);
3669                 rw_enter(&sdp->i_contents, RW_WRITER);
3670                 error = ufs_dircheckforname(sdp, snm, strlen(snm), &slot,
3671                     &ip, cr, 0);
3672                 rw_exit(&sdp->i_contents);
3673                 rw_exit(&sdp->i_ufsvfs->vfs_dqrwlock);
3674                 if (error) {
3675                         goto errout;
3676                 }
3677                 if (ip == NULL) {
3678                         error = ENOENT;
3679                         goto errout;
3680                 } else {
3681                         /*
3682                  * If the inode was found, we need to drop the v_count
3683                          * so as not to keep the filesystem from being
3684                          * unmounted at a later time.
3685                          */
3686                         VN_RELE(ITOV(ip));
3687                 }
3688 
3689                 /*
3690                  * Release the slot.fbp that has the page mapped and
3691                  * locked SE_SHARED, and could be used in
3692                  * ufs_direnter_lr() which needs to get the SE_EXCL lock
3693                  * on said page.
3694                  */
3695                 if (slot.fbp) {
3696                         fbrelse(slot.fbp, S_OTHER);
3697                         slot.fbp = NULL;
3698                 }
3699         }
3700 
3701         /*
3702          * Link source to the target.
3703          */
3704         if (error = ufs_direnter_lr(tdp, tnm, DE_RENAME, sdp, sip, cr)) {
3705                 /*
3706                  * ESAME isn't really an error; it indicates that the
3707                  * operation should not be done because the source and target
3708                  * are the same file, but that no error should be reported.
3709                  */
3710                 if (error == ESAME)
3711                         error = 0;
3712                 goto errout;
3713         }
3714 
3715         /*
3716          * Unlink the source.
3717          * Remove the source entry.  ufs_dirremove() checks that the entry
3718          * still reflects sip, and returns an error if it doesn't.
3719          * If the entry has changed just forget about it.  Release
3720          * the source inode.
3721          */
3722         if ((error = ufs_dirremove(sdp, snm, sip, (struct vnode *)0,
3723             DR_RENAME, cr)) == ENOENT)
3724                 error = 0;
3725 
3726 errout:
3727         if (slot.fbp)
3728                 fbrelse(slot.fbp, S_OTHER);
3729 
3730         rw_exit(&tdp->i_rwlock);
3731         if (sdp != tdp) {
3732                 rw_exit(&sdp->i_rwlock);
3733         }
3734 
3735         VN_RELE(ITOV(sip));
3736 
3737 unlock:
3738         if (ulp) {
3739                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RENAME, trans_size);
3740                 ufs_lockfs_end(ulp);
3741         }
3742 
3743 out:
3744         return (error);
3745 }
3746 
3747 /*ARGSUSED*/
3748 static int
3749 ufs_mkdir(struct vnode *dvp, char *dirname, struct vattr *vap,
3750         struct vnode **vpp, struct cred *cr, caller_context_t *ct, int flags,
3751         vsecattr_t *vsecp)
3752 {
3753         struct inode *ip;
3754         struct inode *xip;
3755         struct ufsvfs *ufsvfsp;
3756         struct ulockfs *ulp;
3757         int error;
3758         int issync;
3759         int trans_size;
3760         int indeadlock;
3761         int retry = 1;
3762 
3763         ASSERT((vap->va_mask & (AT_TYPE|AT_MODE)) == (AT_TYPE|AT_MODE));
3764 
3765         /*
3766          * Can't make directory in attr hidden dir
3767          */
3768         if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
3769                 return (EINVAL);
3770 
3771 again:
3772         ip = VTOI(dvp);
3773         ufsvfsp = ip->i_ufsvfs;
3774         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_MKDIR_MASK);
3775         if (error)
3776                 goto out;
3777         if (ulp)
3778                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_MKDIR,
3779                     trans_size = (int)TOP_MKDIR_SIZE(ip));
3780 
3781         /*
3782          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3783          * to avoid an i_rwlock vs. ufs_lockfs_begin deadlock. If a
3784          * deadlock is possible, the operation is retried.
3785          */
3786         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_MKDIR, retry);
3787         if (indeadlock)
3788                 goto again;
3789 
3790         error = ufs_direnter_cm(ip, dirname, DE_MKDIR, vap, &xip, cr,
3791             (retry ? IQUIET : 0));
3792         if (error == EAGAIN) {
3793                 if (ulp) {
3794                         TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_MKDIR,
3795                             trans_size);
3796                         ufs_lockfs_end(ulp);
3797                 }
3798                 goto again;
3799         }
3800 
3801         rw_exit(&ip->i_rwlock);
3802         if (error == 0) {
3803                 ip = xip;
3804                 *vpp = ITOV(ip);
3805         } else if (error == EEXIST)
3806                 VN_RELE(ITOV(xip));
3807 
3808         if (ulp) {
3809                 int terr = 0;
3810                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_MKDIR, trans_size);
3811                 ufs_lockfs_end(ulp);
3812                 if (error == 0)
3813                         error = terr;
3814         }
3815 out:
3816         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
3817                 ufs_delete_drain_wait(ufsvfsp, 1);
3818                 retry = 0;
3819                 goto again;
3820         }
3821 
3822         return (error);
3823 }
3824 
3825 /*ARGSUSED*/
3826 static int
3827 ufs_rmdir(struct vnode *vp, char *nm, struct vnode *cdir, struct cred *cr,
3828         caller_context_t *ct, int flags)
3829 {
3830         struct inode *ip = VTOI(vp);
3831         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
3832         struct ulockfs *ulp;
3833         vnode_t *rmvp = NULL;   /* Vnode of removed directory */
3834         int error;
3835         int issync;
3836         int trans_size;
3837         int indeadlock;
3838 
3839         /*
3840          * don't let the delete queue get too long
3841          */
3842         if (ufsvfsp == NULL) {
3843                 error = EIO;
3844                 goto out;
3845         }
3846         if (ufsvfsp->vfs_delete.uq_ne > ufs_idle_max)
3847                 ufs_delete_drain(vp->v_vfsp, 1, 1);
3848 
3849         error = ufs_eventlookup(vp, nm, cr, &rmvp);
3850         if (rmvp != NULL) {
3851                 /* Only send the event if there were no errors */
3852                 if (error == 0)
3853                         vnevent_rmdir(rmvp, vp, nm, ct);
3854                 VN_RELE(rmvp);
3855         }
3856 
3857 retry_rmdir:
3858         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_RMDIR_MASK);
3859         if (error)
3860                 goto out;
3861 
3862         if (ulp)
3863                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_RMDIR,
3864                     trans_size = TOP_RMDIR_SIZE);
3865 
3866         /*
3867          * ufs_tryirwlock_trans uses rw_tryenter and checks for SLOCK
3868          * to avoid an i_rwlock vs. ufs_lockfs_begin deadlock. If a
3869          * deadlock is possible, the operation is retried.
3870          */
3871         ufs_tryirwlock_trans(&ip->i_rwlock, RW_WRITER, TOP_RMDIR, retry);
3872         if (indeadlock)
3873                 goto retry_rmdir;
3874         error = ufs_dirremove(ip, nm, (struct inode *)0, cdir, DR_RMDIR, cr);
3875 
3876         rw_exit(&ip->i_rwlock);
3877 
3878         if (ulp) {
3879                 TRANS_END_CSYNC(ufsvfsp, error, issync, TOP_RMDIR,
3880                     trans_size);
3881                 ufs_lockfs_end(ulp);
3882         }
3883 
3884 out:
3885         return (error);
3886 }
3887 
3888 /* ARGSUSED */
3889 static int
3890 ufs_readdir(
3891         struct vnode *vp,
3892         struct uio *uiop,
3893         struct cred *cr,
3894         int *eofp,
3895         caller_context_t *ct,
3896         int flags)
3897 {
3898         struct iovec *iovp;
3899         struct inode *ip;
3900         struct direct *idp;
3901         struct dirent64 *odp;
3902         struct fbuf *fbp;
3903         struct ufsvfs *ufsvfsp;
3904         struct ulockfs *ulp;
3905         caddr_t outbuf;
3906         size_t bufsize;
3907         uint_t offset;
3908         uint_t bytes_wanted, total_bytes_wanted;
3909         int incount = 0;
3910         int outcount = 0;
3911         int error;
3912 
3913         ip = VTOI(vp);
3914         ASSERT(RW_READ_HELD(&ip->i_rwlock));
3915 
3916         if (uiop->uio_loffset >= MAXOFF32_T) {
3917                 if (eofp)
3918                         *eofp = 1;
3919                 return (0);
3920         }
3921 
3922         /*
3923          * Check if we have been called with a valid iov_len
3924          * and bail out if not; otherwise we may potentially loop
3925          * forever further down.
3926          */
3927         if (uiop->uio_iov->iov_len <= 0) {
3928                 error = EINVAL;
3929                 goto out;
3930         }
3931 
3932         /*
3933          * Large Files: When we come here we are guaranteed that
3934          * uio_offset can be used safely. The high word is zero.
3935          */
3936 
3937         ufsvfsp = ip->i_ufsvfs;
3938         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_READDIR_MASK);
3939         if (error)
3940                 goto out;
3941 
3942         iovp = uiop->uio_iov;
3943         total_bytes_wanted = iovp->iov_len;
3944 
3945         /* Large Files: directory files should not be "large" */
3946 
3947         ASSERT(ip->i_size <= MAXOFF32_T);
3948 
3949         /* Force offset to be valid (to guard against bogus lseek() values) */
3950         offset = (uint_t)uiop->uio_offset & ~(DIRBLKSIZ - 1);
3951 
3952         /* Quit if at end of file or link count of zero (posix) */
3953         if (offset >= (uint_t)ip->i_size || ip->i_nlink <= 0) {
3954                 if (eofp)
3955                         *eofp = 1;
3956                 error = 0;
3957                 goto unlock;
3958         }
3959 
3960         /*
3961          * Get space to change directory entries into fs independent format.
	 * Do fast alloc for the most commonly used request size (the
	 * filesystem block size).
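	 * If the request is a single kernel-space iovec, the dirent64
	 * records are built directly in the caller's buffer; otherwise
	 * they are staged in a temporary buffer and copied out with
	 * uiomove() below.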
3964          */
3965         if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1) {
3966                 bufsize = total_bytes_wanted;
3967                 outbuf = kmem_alloc(bufsize, KM_SLEEP);
3968                 odp = (struct dirent64 *)outbuf;
3969         } else {
3970                 bufsize = total_bytes_wanted;
3971                 odp = (struct dirent64 *)iovp->iov_base;
3972         }
3973 
3974 nextblk:
3975         bytes_wanted = total_bytes_wanted;
3976 
3977         /* Truncate request to file size */
3978         if (offset + bytes_wanted > (int)ip->i_size)
3979                 bytes_wanted = (int)(ip->i_size - offset);
3980 
3981         /* Comply with MAXBSIZE boundary restrictions of fbread() */
3982         if ((offset & MAXBOFFSET) + bytes_wanted > MAXBSIZE)
3983                 bytes_wanted = MAXBSIZE - (offset & MAXBOFFSET);
3984 
3985         /*
3986          * Read in the next chunk.
3987          * We are still holding the i_rwlock.
3988          */
3989         error = fbread(vp, (offset_t)offset, bytes_wanted, S_OTHER, &fbp);
3990 
3991         if (error)
3992                 goto update_inode;
3993         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) && (ip->i_fs->fs_ronly == 0) &&
3994             (!ufsvfsp->vfs_noatime)) {
3995                 ip->i_flag |= IACC;
3996         }
3997         incount = 0;
3998         idp = (struct direct *)fbp->fb_addr;
3999         if (idp->d_ino == 0 && idp->d_reclen == 0 && idp->d_namlen == 0) {
4000                 cmn_err(CE_WARN, "ufs_readdir: bad dir, inumber = %llu, "
4001                     "fs = %s\n",
4002                     (u_longlong_t)ip->i_number, ufsvfsp->vfs_fs->fs_fsmnt);
4003                 fbrelse(fbp, S_OTHER);
4004                 error = ENXIO;
4005                 goto update_inode;
4006         }
4007         /* Transform to file-system independent format */
4008         while (incount < bytes_wanted) {
4009                 /*
4010                  * If the current directory entry is mangled, then skip
4011                  * to the next block.  It would be nice to set the FSBAD
4012                  * flag in the super-block so that a fsck is forced on
4013                  * next reboot, but locking is a problem.
4014                  */
4015                 if (idp->d_reclen & 0x3) {
4016                         offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
4017                         break;
4018                 }
4019 
4020                 /* Skip to requested offset and skip empty entries */
4021                 if (idp->d_ino != 0 && offset >= (uint_t)uiop->uio_offset) {
4022                         ushort_t this_reclen =
4023                             DIRENT64_RECLEN(idp->d_namlen);
4024                         /* Buffer too small for any entries */
4025                         if (!outcount && this_reclen > bufsize) {
4026                                 fbrelse(fbp, S_OTHER);
4027                                 error = EINVAL;
4028                                 goto update_inode;
4029                         }
			/* If it would overrun the buffer, quit */
4031                         if (outcount + this_reclen > bufsize) {
4032                                 break;
4033                         }
4034                         /* Take this entry */
4035                         odp->d_ino = (ino64_t)idp->d_ino;
4036                         odp->d_reclen = (ushort_t)this_reclen;
4037                         odp->d_off = (offset_t)(offset + idp->d_reclen);
4038 
4039                         /* use strncpy(9f) to zero out uninitialized bytes */
4040 
4041                         ASSERT(strlen(idp->d_name) + 1 <=
4042                             DIRENT64_NAMELEN(this_reclen));
4043                         (void) strncpy(odp->d_name, idp->d_name,
4044                             DIRENT64_NAMELEN(this_reclen));
4045                         outcount += odp->d_reclen;
4046                         odp = (struct dirent64 *)
4047                             ((intptr_t)odp + odp->d_reclen);
4048                         ASSERT(outcount <= bufsize);
4049                 }
4050                 if (idp->d_reclen) {
4051                         incount += idp->d_reclen;
4052                         offset += idp->d_reclen;
4053                         idp = (struct direct *)((intptr_t)idp + idp->d_reclen);
4054                 } else {
4055                         offset = (offset + DIRBLKSIZ) & ~(DIRBLKSIZ-1);
4056                         break;
4057                 }
4058         }
4059         /* Release the chunk */
4060         fbrelse(fbp, S_OTHER);
4061 
4062         /* Read whole block, but got no entries, read another if not eof */
4063 
4064         /*
4065          * Large Files: casting i_size to int here is not a problem
4066          * because directory sizes are always less than MAXOFF32_T.
4067          * See assertion above.
4068          */
4069 
4070         if (offset < (int)ip->i_size && !outcount)
4071                 goto nextblk;
4072 
4073         /* Copy out the entry data */
4074         if (uiop->uio_segflg == UIO_SYSSPACE && uiop->uio_iovcnt == 1) {
4075                 iovp->iov_base += outcount;
4076                 iovp->iov_len -= outcount;
4077                 uiop->uio_resid -= outcount;
4078                 uiop->uio_offset = offset;
4079         } else if ((error = uiomove(outbuf, (long)outcount, UIO_READ,
4080             uiop)) == 0)
4081                 uiop->uio_offset = offset;
4082 update_inode:
4083         ITIMES(ip);
4084         if (uiop->uio_segflg != UIO_SYSSPACE || uiop->uio_iovcnt != 1)
4085                 kmem_free(outbuf, bufsize);
4086 
4087         if (eofp && error == 0)
4088                 *eofp = (uiop->uio_offset >= (int)ip->i_size);
4089 unlock:
4090         if (ulp) {
4091                 ufs_lockfs_end(ulp);
4092         }
4093 out:
4094         return (error);
4095 }
4096 
4097 /*ARGSUSED*/
4098 static int
4099 ufs_symlink(
4100         struct vnode *dvp,              /* ptr to parent dir vnode */
4101         char *linkname,                 /* name of symbolic link */
4102         struct vattr *vap,              /* attributes */
4103         char *target,                   /* target path */
4104         struct cred *cr,                /* user credentials */
4105         caller_context_t *ct,
4106         int flags)
4107 {
4108         struct inode *ip, *dip = VTOI(dvp);
4109         struct ufsvfs *ufsvfsp = dip->i_ufsvfs;
4110         struct ulockfs *ulp;
4111         int error;
4112         int issync;
4113         int trans_size;
4114         int residual;
4115         int ioflag;
4116         int retry = 1;
4117 
4118         /*
4119          * No symlinks in attrdirs at this time
4120          */
4121         if ((VTOI(dvp)->i_mode & IFMT) == IFATTRDIR)
4122                 return (EINVAL);
4123 
4124 again:
4125         ip = (struct inode *)NULL;
4126         vap->va_type = VLNK;
4127         vap->va_rdev = 0;
4128 
4129         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SYMLINK_MASK);
4130         if (error)
4131                 goto out;
4132 
4133         if (ulp)
4134                 TRANS_BEGIN_CSYNC(ufsvfsp, issync, TOP_SYMLINK,
4135                     trans_size = (int)TOP_SYMLINK_SIZE(dip));
4136 
4137         /*
4138          * We must create the inode before the directory entry, to avoid
4139          * racing with readlink().  ufs_dirmakeinode requires that we
4140          * hold the quota lock as reader, and directory locks as writer.
4141          */
4142 
4143         rw_enter(&dip->i_rwlock, RW_WRITER);
4144         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4145         rw_enter(&dip->i_contents, RW_WRITER);
4146 
4147         /*
	 * Suppress any out-of-inodes messages if we will retry on
	 * ENOSPC.
4150          */
4151         if (retry)
4152                 dip->i_flag |= IQUIET;
4153 
4154         error = ufs_dirmakeinode(dip, &ip, vap, DE_SYMLINK, cr);
4155 
4156         dip->i_flag &= ~IQUIET;
4157 
4158         rw_exit(&dip->i_contents);
4159         rw_exit(&ufsvfsp->vfs_dqrwlock);
4160         rw_exit(&dip->i_rwlock);
4161 
4162         if (error)
4163                 goto unlock;
4164 
4165         /*
4166          * OK.  The inode has been created.  Write out the data of the
4167          * symbolic link.  Since symbolic links are metadata, and should
4168          * remain consistent across a system crash, we need to force the
4169          * data out synchronously.
4170          *
4171          * (This is a change from the semantics in earlier releases, which
4172          * only created symbolic links synchronously if the semi-documented
4173          * 'syncdir' option was set, or if we were being invoked by the NFS
4174          * server, which requires symbolic links to be created synchronously.)
4175          *
4176          * We need to pass in a pointer for the residual length; otherwise
4177          * ufs_rdwri() will always return EIO if it can't write the data,
4178          * even if the error was really ENOSPC or EDQUOT.
4179          */
4180 
4181         ioflag = FWRITE | FDSYNC;
4182         residual = 0;
4183 
4184         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4185         rw_enter(&ip->i_contents, RW_WRITER);
4186 
4187         /*
4188          * Suppress file system full messages if we will retry
4189          */
4190         if (retry)
4191                 ip->i_flag |= IQUIET;
4192 
4193         error = ufs_rdwri(UIO_WRITE, ioflag, ip, target, strlen(target),
4194             (offset_t)0, UIO_SYSSPACE, &residual, cr);
4195 
4196         ip->i_flag &= ~IQUIET;
4197 
4198         if (error) {
4199                 rw_exit(&ip->i_contents);
4200                 rw_exit(&ufsvfsp->vfs_dqrwlock);
4201                 goto remove;
4202         }
4203 
4204         /*
4205          * If the link's data is small enough, we can cache it in the inode.
4206          * This is a "fast symbolic link".  We don't use the first direct
4207          * block because that's actually used to point at the symbolic link's
4208          * contents on disk; but we know that none of the other direct or
4209          * indirect blocks can be used because symbolic links are restricted
4210          * to be smaller than a file system block.
4211          */
4212 
4213         ASSERT(MAXPATHLEN <= VBSIZE(ITOV(ip)));
4214 
4215         if (ip->i_size > 0 && ip->i_size <= FSL_SIZE) {
4216                 if (kcopy(target, &ip->i_db[1], ip->i_size) == 0) {
4217                         ip->i_flag |= IFASTSYMLNK;
4218                 } else {
4219                         int i;
4220                         /* error, clear garbage left behind */
4221                         for (i = 1; i < NDADDR; i++)
4222                                 ip->i_db[i] = 0;
4223                         for (i = 0; i < NIADDR; i++)
4224                                 ip->i_ib[i] = 0;
4225                 }
4226         }
4227 
4228         rw_exit(&ip->i_contents);
4229         rw_exit(&ufsvfsp->vfs_dqrwlock);
4230 
4231         /*
4232          * OK.  We've successfully created the symbolic link.  All that
4233          * remains is to insert it into the appropriate directory.
4234          */
4235 
4236         rw_enter(&dip->i_rwlock, RW_WRITER);
4237         error = ufs_direnter_lr(dip, linkname, DE_SYMLINK, NULL, ip, cr);
4238         rw_exit(&dip->i_rwlock);
4239 
4240         /*
4241          * Fall through into remove-on-error code.  We're either done, or we
4242          * need to remove the inode (if we couldn't insert it).
4243          */
4244 
4245 remove:
4246         if (error && (ip != NULL)) {
4247                 rw_enter(&ip->i_contents, RW_WRITER);
4248                 ip->i_nlink--;
4249                 ip->i_flag |= ICHG;
4250                 ip->i_seq++;
4251                 ufs_setreclaim(ip);
4252                 rw_exit(&ip->i_contents);
4253         }
4254 
4255 unlock:
4256         if (ip != NULL)
4257                 VN_RELE(ITOV(ip));
4258 
4259         if (ulp) {
4260                 int terr = 0;
4261 
4262                 TRANS_END_CSYNC(ufsvfsp, terr, issync, TOP_SYMLINK,
4263                     trans_size);
4264                 ufs_lockfs_end(ulp);
4265                 if (error == 0)
4266                         error = terr;
4267         }
4268 
4269         /*
4270          * We may have failed due to lack of an inode or of a block to
4271          * store the target in.  Try flushing the delete queue to free
4272          * logically-available things up and try again.
4273          */
4274         if ((error == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
4275                 ufs_delete_drain_wait(ufsvfsp, 1);
4276                 retry = 0;
4277                 goto again;
4278         }
4279 
4280 out:
4281         return (error);
4282 }
4283 
4284 /*
4285  * Ufs specific routine used to do ufs io.
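 *
 * For example, ufs_symlink() above writes a symlink's target synchronously
 * with:
 *
 *	error = ufs_rdwri(UIO_WRITE, FWRITE | FDSYNC, ip, target,
 *	    strlen(target), (offset_t)0, UIO_SYSSPACE, &residual, cr);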
4286  */
4287 int
4288 ufs_rdwri(enum uio_rw rw, int ioflag, struct inode *ip, caddr_t base,
4289         ssize_t len, offset_t offset, enum uio_seg seg, int *aresid,
4290         struct cred *cr)
4291 {
4292         struct uio auio;
4293         struct iovec aiov;
4294         int error;
4295 
4296         ASSERT(RW_LOCK_HELD(&ip->i_contents));
4297 
4298         bzero((caddr_t)&auio, sizeof (uio_t));
4299         bzero((caddr_t)&aiov, sizeof (iovec_t));
4300 
4301         aiov.iov_base = base;
4302         aiov.iov_len = len;
4303         auio.uio_iov = &aiov;
4304         auio.uio_iovcnt = 1;
4305         auio.uio_loffset = offset;
4306         auio.uio_segflg = (short)seg;
4307         auio.uio_resid = len;
4308 
4309         if (rw == UIO_WRITE) {
4310                 auio.uio_fmode = FWRITE;
4311                 auio.uio_extflg = UIO_COPY_DEFAULT;
4312                 auio.uio_llimit = curproc->p_fsz_ctl;
4313                 error = wrip(ip, &auio, ioflag, cr);
4314         } else {
4315                 auio.uio_fmode = FREAD;
4316                 auio.uio_extflg = UIO_COPY_CACHED;
4317                 auio.uio_llimit = MAXOFFSET_T;
4318                 error = rdip(ip, &auio, ioflag, cr);
4319         }
4320 
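	/*
	 * Report any residual to the caller if requested; otherwise a
	 * short transfer is folded into an EIO error.
	 */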
4321         if (aresid) {
4322                 *aresid = auio.uio_resid;
4323         } else if (auio.uio_resid) {
4324                 error = EIO;
4325         }
4326         return (error);
4327 }
4328 
4329 /*ARGSUSED*/
4330 static int
4331 ufs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
4332 {
4333         struct ufid *ufid;
4334         struct inode *ip = VTOI(vp);
4335 
4336         if (ip->i_ufsvfs == NULL)
4337                 return (EIO);
4338 
4339         if (fidp->fid_len < (sizeof (struct ufid) - sizeof (ushort_t))) {
4340                 fidp->fid_len = sizeof (struct ufid) - sizeof (ushort_t);
4341                 return (ENOSPC);
4342         }
4343 
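	/*
	 * The file identifier is simply the inode number and generation
	 * count packed into a struct ufid.
	 */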
4344         ufid = (struct ufid *)fidp;
4345         bzero((char *)ufid, sizeof (struct ufid));
4346         ufid->ufid_len = sizeof (struct ufid) - sizeof (ushort_t);
4347         ufid->ufid_ino = ip->i_number;
4348         ufid->ufid_gen = ip->i_gen;
4349 
4350         return (0);
4351 }
4352 
4353 /* ARGSUSED2 */
4354 static int
4355 ufs_rwlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
4356 {
4357         struct inode    *ip = VTOI(vp);
4358         struct ufsvfs   *ufsvfsp;
4359         int             forcedirectio;
4360 
4361         /*
4362          * Read case is easy.
4363          */
4364         if (!write_lock) {
4365                 rw_enter(&ip->i_rwlock, RW_READER);
4366                 return (V_WRITELOCK_FALSE);
4367         }
4368 
4369         /*
4370          * Caller has requested a writer lock, but that inhibits any
4371          * concurrency in the VOPs that follow. Acquire the lock shared
4372          * and defer exclusive access until it is known to be needed in
4373          * other VOP handlers. Some cases can be determined here.
4374          */
4375 
4376         /*
4377          * If directio is not set, there is no chance of concurrency,
4378          * so just acquire the lock exclusive. Beware of a forced
4379          * unmount before looking at the mount option.
4380          */
4381         ufsvfsp = ip->i_ufsvfs;
4382         forcedirectio = ufsvfsp ? ufsvfsp->vfs_forcedirectio : 0;
4383         if (!(ip->i_flag & IDIRECTIO || forcedirectio) ||
4384             !ufs_allow_shared_writes) {
4385                 rw_enter(&ip->i_rwlock, RW_WRITER);
4386                 return (V_WRITELOCK_TRUE);
4387         }
4388 
4389         /*
4390          * Mandatory locking forces acquiring i_rwlock exclusive.
4391          */
4392         if (MANDLOCK(vp, ip->i_mode)) {
4393                 rw_enter(&ip->i_rwlock, RW_WRITER);
4394                 return (V_WRITELOCK_TRUE);
4395         }
4396 
4397         /*
4398          * Acquire the lock shared in case a concurrent write follows.
4399          * Mandatory locking could have become enabled before the lock
4400          * was acquired. Re-check and upgrade if needed.
4401          */
4402         rw_enter(&ip->i_rwlock, RW_READER);
4403         if (MANDLOCK(vp, ip->i_mode)) {
4404                 rw_exit(&ip->i_rwlock);
4405                 rw_enter(&ip->i_rwlock, RW_WRITER);
4406                 return (V_WRITELOCK_TRUE);
4407         }
4408         return (V_WRITELOCK_FALSE);
4409 }
4410 
4411 /*ARGSUSED*/
4412 static void
4413 ufs_rwunlock(struct vnode *vp, int write_lock, caller_context_t *ctp)
4414 {
4415         struct inode    *ip = VTOI(vp);
4416 
4417         rw_exit(&ip->i_rwlock);
4418 }
4419 
4420 /* ARGSUSED */
4421 static int
4422 ufs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
4423         caller_context_t *ct)
4424 {
4425         return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
4426 }
4427 
4428 /* ARGSUSED */
4429 static int
4430 ufs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
4431         offset_t offset, struct flk_callback *flk_cbp, struct cred *cr,
4432         caller_context_t *ct)
4433 {
4434         struct inode *ip = VTOI(vp);
4435 
4436         if (ip->i_ufsvfs == NULL)
4437                 return (EIO);
4438 
4439         /*
4440          * If file is being mapped, disallow frlock.
4441          * XXX I am not holding tlock while checking i_mapcnt because the
4442          * current locking strategy drops all locks before calling fs_frlock.
	 * So, mapcnt could change before we enter fs_frlock, making it
4444          * meaningless to have held tlock in the first place.
4445          */
4446         if (ip->i_mapcnt > 0 && MANDLOCK(vp, ip->i_mode))
4447                 return (EAGAIN);
4448         return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
4449 }
4450 
4451 /* ARGSUSED */
4452 static int
4453 ufs_space(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
4454         offset_t offset, cred_t *cr, caller_context_t *ct)
4455 {
4456         struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
4457         struct ulockfs *ulp;
4458         int error;
4459 
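	/*
	 * Normalize the flock64 to an absolute (SEEK_SET-relative) range
	 * before dispatching on the command.
	 */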
4460         if ((error = convoff(vp, bfp, 0, offset)) == 0) {
4461                 if (cmd == F_FREESP) {
4462                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
4463                             ULOCKFS_SPACE_MASK);
4464                         if (error)
4465                                 return (error);
4466                         error = ufs_freesp(vp, bfp, flag, cr);
4467 
4468                         if (error == 0 && bfp->l_start == 0)
4469                                 vnevent_truncate(vp, ct);
4470                 } else if (cmd == F_ALLOCSP) {
4471                         error = ufs_lockfs_begin(ufsvfsp, &ulp,
4472                             ULOCKFS_FALLOCATE_MASK);
4473                         if (error)
4474                                 return (error);
4475                         error = ufs_allocsp(vp, bfp, cr);
4476                 } else
4477                         return (EINVAL); /* Command not handled here */
4478 
4479                 if (ulp)
4480                         ufs_lockfs_end(ulp);
4481 
4482         }
4483         return (error);
4484 }
4485 
4486 /*
4487  * Used to determine if read ahead should be done. Also used to
 * determine when write back occurs.
4489  */
4490 #define CLUSTSZ(ip)             ((ip)->i_ufsvfs->vfs_ioclustsz)
4491 
4492 /*
4493  * A faster version of ufs_getpage.
4494  *
4495  * We optimize by inlining the pvn_getpages iterator, eliminating
 * calls to bmap_read if the file doesn't have UFS holes, and avoiding
4497  * the overhead of page_exists().
4498  *
 * When a file has UFS holes and ufs_getpage is called with S_READ,
 * we remove PROT_WRITE from *protp to avoid calling bmap_read.  This
 * approach penalizes performance when a file with UFS holes is faulted
4502  * first in the S_READ mode, and then in the S_WRITE mode. We will get
4503  * two MMU faults in this case.
4504  *
4505  * XXX - the inode fields which control the sequential mode are not
4506  *       protected by any mutex. The read ahead will act wild if
4507  *       multiple processes will access the file concurrently and
 *       some of them in sequential mode. One particularly bad case
4509  *       is if another thread will change the value of i_nextrio between
4510  *       the time this thread tests the i_nextrio value and then reads it
4511  *       again to use it as the offset for the read ahead.
4512  */
4513 /*ARGSUSED*/
4514 static int
4515 ufs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
4516         page_t *plarr[], size_t plsz, struct seg *seg, caddr_t addr,
4517         enum seg_rw rw, struct cred *cr, caller_context_t *ct)
4518 {
4519         u_offset_t      uoff = (u_offset_t)off; /* type conversion */
4520         u_offset_t      pgoff;
4521         u_offset_t      eoff;
4522         struct inode    *ip = VTOI(vp);
4523         struct ufsvfs   *ufsvfsp = ip->i_ufsvfs;
4524         struct fs       *fs;
4525         struct ulockfs  *ulp;
4526         page_t          **pl;
4527         caddr_t         pgaddr;
4528         krw_t           rwtype;
4529         int             err;
4530         int             has_holes;
4531         int             beyond_eof;
4532         int             seqmode;
4533         int             pgsize = PAGESIZE;
4534         int             dolock;
4535         int             do_qlock;
4536         int             trans_size;
4537 
4538         ASSERT((uoff & PAGEOFFSET) == 0);
4539 
4540         if (protp)
4541                 *protp = PROT_ALL;
4542 
4543         /*
4544          * Obey the lockfs protocol
4545          */
4546         err = ufs_lockfs_begin_getpage(ufsvfsp, &ulp, seg,
4547             rw == S_READ || rw == S_EXEC, protp);
4548         if (err)
4549                 goto out;
4550 
4551         fs = ufsvfsp->vfs_fs;
4552 
4553         if (ulp && (rw == S_CREATE || rw == S_WRITE) &&
4554             !(vp->v_flag & VISSWAP)) {
4555                 /*
		 * Try to start a transaction; bail out rather than block
		 * if blocking is expected to occur and the address space
		 * is not the kernel address space.
4559                  */
4560                 trans_size = TOP_GETPAGE_SIZE(ip);
4561                 if (seg->s_as != &kas) {
4562                         TRANS_TRY_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE,
4563                             trans_size, err)
4564                         if (err == EWOULDBLOCK) {
4565                                 /*
4566                                  * Use EDEADLK here because the VM code
4567                                  * can normally never see this error.
4568                                  */
4569                                 err = EDEADLK;
4570                                 ufs_lockfs_end(ulp);
4571                                 goto out;
4572                         }
4573                 } else {
4574                         TRANS_BEGIN_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
4575                 }
4576         }
4577 
4578         if (vp->v_flag & VNOMAP) {
4579                 err = ENOSYS;
4580                 goto unlock;
4581         }
4582 
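	/*
	 * Treat the access as sequential if this request starts where the
	 * previous one left off (and we are not creating pages).
	 */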
4583         seqmode = ip->i_nextr == uoff && rw != S_CREATE;
4584 
4585         rwtype = RW_READER;             /* start as a reader */
4586         dolock = (rw_owner(&ip->i_contents) != curthread);
4587         /*
4588          * If this thread owns the lock, i.e., this thread grabbed it
4589          * as writer somewhere above, then we don't need to grab the
4590          * lock as reader in this routine.
4591          */
4592         do_qlock = (rw_owner(&ufsvfsp->vfs_dqrwlock) != curthread);
4593 
4594 retrylock:
4595         if (dolock) {
4596                 /*
4597                  * Grab the quota lock if we need to call
4598                  * bmap_write() below (with i_contents as writer).
4599                  */
4600                 if (do_qlock && rwtype == RW_WRITER)
4601                         rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
4602                 rw_enter(&ip->i_contents, rwtype);
4603         }
4604 
4605         /*
4606          * We may be getting called as a side effect of a bmap using
4607          * fbread() when the blocks might be being allocated and the
	 * size has not yet been updated.  In this case we want to be
4609          * able to return zero pages if we get back UFS_HOLE from
4610          * calling bmap for a non write case here.  We also might have
4611          * to read some frags from the disk into a page if we are
4612          * extending the number of frags for a given lbn in bmap().
4613          * Large Files: The read of i_size here is atomic because
4614          * i_contents is held here. If dolock is zero, the lock
4615          * is held in bmap routines.
4616          */
4617         beyond_eof = uoff + len >
4618             P2ROUNDUP_TYPED(ip->i_size, PAGESIZE, u_offset_t);
4619         if (beyond_eof && seg != segkmap) {
4620                 if (dolock) {
4621                         rw_exit(&ip->i_contents);
4622                         if (do_qlock && rwtype == RW_WRITER)
4623                                 rw_exit(&ufsvfsp->vfs_dqrwlock);
4624                 }
4625                 err = EFAULT;
4626                 goto unlock;
4627         }
4628 
4629         /*
4630          * Must hold i_contents lock throughout the call to pvn_getpages
4631          * since locked pages are returned from each call to ufs_getapage.
4632          * Must *not* return locked pages and then try for contents lock
4633          * due to lock ordering requirements (inode > page)
4634          */
4635 
4636         has_holes = bmap_has_holes(ip);
4637 
4638         if ((rw == S_WRITE || rw == S_CREATE) && has_holes && !beyond_eof) {
4639                 int     blk_size;
4640                 u_offset_t offset;
4641 
4642                 /*
4643                  * We must acquire the RW_WRITER lock in order to
4644                  * call bmap_write().
4645                  */
4646                 if (dolock && rwtype == RW_READER) {
4647                         rwtype = RW_WRITER;
4648 
4649                         /*
4650                          * Grab the quota lock before
4651                          * upgrading i_contents, but if we can't grab it
4652                          * don't wait here due to lock order:
4653                          * vfs_dqrwlock > i_contents.
4654                          */
4655                         if (do_qlock &&
4656                             rw_tryenter(&ufsvfsp->vfs_dqrwlock, RW_READER)
4657                             == 0) {
4658                                 rw_exit(&ip->i_contents);
4659                                 goto retrylock;
4660                         }
4661                         if (!rw_tryupgrade(&ip->i_contents)) {
4662                                 rw_exit(&ip->i_contents);
4663                                 if (do_qlock)
4664                                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4665                                 goto retrylock;
4666                         }
4667                 }
4668 
4669                 /*
4670                  * May be allocating disk blocks for holes here as
4671                  * a result of mmap faults. write(2) does the bmap_write
4672                  * in rdip/wrip, not here. We are not dealing with frags
4673                  * in this case.
4674                  */
4675                 /*
4676                  * Large Files: We cast fs_bmask field to offset_t
4677                  * just as we do for MAXBMASK because uoff is a 64-bit
4678                  * data type. fs_bmask will still be a 32-bit type
4679                  * as we cannot change any ondisk data structures.
4680                  */
4681 
4682                 offset = uoff & (offset_t)fs->fs_bmask;
4683                 while (offset < uoff + len) {
4684                         blk_size = (int)blksize(fs, ip, lblkno(fs, offset));
4685                         err = bmap_write(ip, offset, blk_size,
4686                             BI_NORMAL, NULL, cr);
4687                         if (ip->i_flag & (ICHG|IUPD))
4688                                 ip->i_seq++;
4689                         if (err)
4690                                 goto update_inode;
4691                         offset += blk_size; /* XXX - make this contig */
4692                 }
4693         }
4694 
4695         /*
4696          * Can be a reader from now on.
4697          */
4698         if (dolock && rwtype == RW_WRITER) {
4699                 rw_downgrade(&ip->i_contents);
4700                 /*
4701                  * We can release vfs_dqrwlock early so do it, but make
4702                  * sure we don't try to release it again at the bottom.
4703                  */
4704                 if (do_qlock) {
4705                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4706                         do_qlock = 0;
4707                 }
4708         }
4709 
4710         /*
4711          * We remove PROT_WRITE in cases when the file has UFS holes
	 * because we don't want to call bmap_read() to check each
4713          * page if it is backed with a disk block.
4714          */
4715         if (protp && has_holes && rw != S_WRITE && rw != S_CREATE)
4716                 *protp &= ~PROT_WRITE;
4717 
4718         err = 0;
4719 
4720         /*
4721          * The loop looks up pages in the range [off, off + len).
4722          * For each page, we first check if we should initiate an asynchronous
4723          * read ahead before we call page_lookup (we may sleep in page_lookup
4724          * for a previously initiated disk read).
4725          */
4726         eoff = (uoff + len);
4727         for (pgoff = uoff, pgaddr = addr, pl = plarr;
4728             pgoff < eoff; /* empty */) {
4729                 page_t  *pp;
4730                 u_offset_t      nextrio;
4731                 se_t    se;
4732                 int retval;
4733 
4734                 se = ((rw == S_CREATE || rw == S_OTHER) ? SE_EXCL : SE_SHARED);
4735 
4736                 /* Handle async getpage (faultahead) */
4737                 if (plarr == NULL) {
4738                         ip->i_nextrio = pgoff;
4739                         (void) ufs_getpage_ra(vp, pgoff, seg, pgaddr);
4740                         pgoff += pgsize;
4741                         pgaddr += pgsize;
4742                         continue;
4743                 }
4744                 /*
4745                  * Check if we should initiate read ahead of next cluster.
4746                  * We call page_exists only when we need to confirm that
4747                  * we have the current page before we initiate the read ahead.
4748                  */
4749                 nextrio = ip->i_nextrio;
4750                 if (seqmode &&
4751                     pgoff + CLUSTSZ(ip) >= nextrio && pgoff <= nextrio &&
4752                     nextrio < ip->i_size && page_exists(vp, pgoff)) {
4753                         retval = ufs_getpage_ra(vp, pgoff, seg, pgaddr);
4754                         /*
4755                          * We always read ahead the next cluster of data
4756                          * starting from i_nextrio. If the page (vp,nextrio)
4757                          * is actually in core at this point, the routine
4758                          * ufs_getpage_ra() will stop pre-fetching data
4759                          * until we read that page in a synchronized manner
4760                          * through ufs_getpage_miss(). So, we should increase
4761                          * i_nextrio if the page (vp, nextrio) exists.
4762                          */
4763                         if ((retval == 0) && page_exists(vp, nextrio)) {
4764                                 ip->i_nextrio = nextrio + pgsize;
4765                         }
4766                 }
4767 
4768                 if ((pp = page_lookup(vp, pgoff, se)) != NULL) {
4769                         /*
4770                          * We found the page in the page cache.
4771                          */
4772                         *pl++ = pp;
4773                         pgoff += pgsize;
4774                         pgaddr += pgsize;
4775                         len -= pgsize;
4776                         plsz -= pgsize;
4777                 } else  {
4778                         /*
4779                          * We have to create the page, or read it from disk.
4780                          */
4781                         if (err = ufs_getpage_miss(vp, pgoff, len, seg, pgaddr,
4782                             pl, plsz, rw, seqmode))
4783                                 goto error;
4784 
4785                         while (*pl != NULL) {
4786                                 pl++;
4787                                 pgoff += pgsize;
4788                                 pgaddr += pgsize;
4789                                 len -= pgsize;
4790                                 plsz -= pgsize;
4791                         }
4792                 }
4793         }
4794 
4795         /*
4796          * Return pages up to plsz if they are in the page cache.
4797          * We cannot return pages if there is a chance that they are
4798          * backed with a UFS hole and rw is S_WRITE or S_CREATE.
4799          */
4800         if (plarr && !(has_holes && (rw == S_WRITE || rw == S_CREATE))) {
4801 
4802                 ASSERT((protp == NULL) ||
4803                     !(has_holes && (*protp & PROT_WRITE)));
4804 
4805                 eoff = pgoff + plsz;
4806                 while (pgoff < eoff) {
4807                         page_t          *pp;
4808 
4809                         if ((pp = page_lookup_nowait(vp, pgoff,
4810                             SE_SHARED)) == NULL)
4811                                 break;
4812 
4813                         *pl++ = pp;
4814                         pgoff += pgsize;
4815                         plsz -= pgsize;
4816                 }
4817         }
4818 
4819         if (plarr)
4820                 *pl = NULL;                     /* Terminate page list */
4821         ip->i_nextr = pgoff;
4822 
4823 error:
4824         if (err && plarr) {
4825                 /*
4826                  * Release any pages we have locked.
4827                  */
4828                 while (pl > &plarr[0])
4829                         page_unlock(*--pl);
4830 
4831                 plarr[0] = NULL;
4832         }
4833 
4834 update_inode:
4835         /*
4836          * If the inode is not already marked for IACC (in rdip() for read)
4837          * and the inode is not marked for no access time update (in wrip()
4838          * for write) then update the inode access time and mod time now.
4839          */
4840         if ((ip->i_flag & (IACC | INOACC)) == 0) {
4841                 if ((rw != S_OTHER) && (ip->i_mode & IFMT) != IFDIR) {
4842                         if (!ULOCKFS_IS_NOIACC(ITOUL(ip)) &&
4843                             (fs->fs_ronly == 0) &&
4844                             (!ufsvfsp->vfs_noatime)) {
4845                                 mutex_enter(&ip->i_tlock);
4846                                 ip->i_flag |= IACC;
4847                                 ITIMES_NOLOCK(ip);
4848                                 mutex_exit(&ip->i_tlock);
4849                         }
4850                 }
4851         }
4852 
4853         if (dolock) {
4854                 rw_exit(&ip->i_contents);
4855                 if (do_qlock && rwtype == RW_WRITER)
4856                         rw_exit(&ufsvfsp->vfs_dqrwlock);
4857         }
4858 
4859 unlock:
4860         if (ulp) {
4861                 if ((rw == S_CREATE || rw == S_WRITE) &&
4862                     !(vp->v_flag & VISSWAP)) {
4863                         TRANS_END_ASYNC(ufsvfsp, TOP_GETPAGE, trans_size);
4864                 }
4865                 ufs_lockfs_end(ulp);
4866         }
4867 out:
4868         return (err);
4869 }
4870 
4871 /*
4872  * ufs_getpage_miss is called when ufs_getpage missed the page in the page
4873  * cache. The page is either read from the disk, or it's created.
4874  * A page is created (without disk read) if rw == S_CREATE, or if
4875  * the page is not backed with a real disk block (UFS hole).
4876  */
4877 /* ARGSUSED */
4878 static int
4879 ufs_getpage_miss(struct vnode *vp, u_offset_t off, size_t len, struct seg *seg,
4880         caddr_t addr, page_t *pl[], size_t plsz, enum seg_rw rw, int seq)
4881 {
4882         struct inode    *ip = VTOI(vp);
4883         page_t          *pp;
4884         daddr_t         bn;
4885         size_t          io_len;
4886         int             crpage = 0;
4887         int             err;
4888         int             contig;
4889         int             bsize = ip->i_fs->fs_bsize;
4890 
4891         /*
	 * Figure out whether the page can be created, or must be
	 * read from the disk.
4894          */
4895         if (rw == S_CREATE)
4896                 crpage = 1;
4897         else {
4898                 contig = 0;
4899                 if (err = bmap_read(ip, off, &bn, &contig))
4900                         return (err);
4901 
4902                 crpage = (bn == UFS_HOLE);
4903 
4904                 /*
		 * If it's also a fallocated block that hasn't been written to
		 * yet, we will treat it just like a UFS_HOLE and create
		 * a zero page for it.
4908                  */
4909                 if (ISFALLOCBLK(ip, bn))
4910                         crpage = 1;
4911         }
4912 
4913         if (crpage) {
4914                 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT, seg,
4915                     addr)) == NULL) {
4916                         return (ufs_fault(vp,
4917                             "ufs_getpage_miss: page_create == NULL"));
4918                 }
4919 
4920                 if (rw != S_CREATE)
4921                         pagezero(pp, 0, PAGESIZE);
4922 
4923                 io_len = PAGESIZE;
4924         } else {
4925                 u_offset_t      io_off;
4926                 uint_t  xlen;
4927                 struct buf      *bp;
4928                 ufsvfs_t        *ufsvfsp = ip->i_ufsvfs;
4929 
4930                 /*
4931                  * If access is not in sequential order, we read from disk
4932                  * in bsize units.
4933                  *
4934                  * We limit the size of the transfer to bsize if we are reading
4935                  * from the beginning of the file. Note in this situation we
4936                  * will hedge our bets and initiate an async read ahead of
4937                  * the second block.
4938                  */
4939                 if (!seq || off == 0)
4940                         contig = MIN(contig, bsize);
4941 
4942                 pp = pvn_read_kluster(vp, off, seg, addr, &io_off,
4943                     &io_len, off, contig, 0);
4944 
4945                 /*
4946                  * Some other thread has entered the page.
4947                  * ufs_getpage will retry page_lookup.
4948                  */
4949                 if (pp == NULL) {
4950                         pl[0] = NULL;
4951                         return (0);
4952                 }
4953 
4954                 /*
4955                  * Zero part of the page which we are not
4956                  * going to read from the disk.
4957                  */
4958                 xlen = io_len & PAGEOFFSET;
4959                 if (xlen != 0)
4960                         pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
4961 
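		/*
		 * Set up the page I/O and issue it, routing through the
		 * log or snapshot driver when one is present.
		 */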
4962                 bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ);
4963                 bp->b_edev = ip->i_dev;
4964                 bp->b_dev = cmpdev(ip->i_dev);
4965                 bp->b_blkno = bn;
4966                 bp->b_un.b_addr = (caddr_t)0;
4967                 bp->b_file = ip->i_vnode;
4968                 bp->b_offset = off;
4969 
4970                 if (ufsvfsp->vfs_log) {
4971                         lufs_read_strategy(ufsvfsp->vfs_log, bp);
4972                 } else if (ufsvfsp->vfs_snapshot) {
4973                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
4974                 } else {
4975                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
4976                         ub.ub_getpages.value.ul++;
4977                         (void) bdev_strategy(bp);
4978                         lwp_stat_update(LWP_STAT_INBLK, 1);
4979                 }
4980 
4981                 ip->i_nextrio = off + ((io_len + PAGESIZE - 1) & PAGEMASK);
4982 
4983                 /*
4984                  * If the file access is sequential, initiate read ahead
4985                  * of the next cluster.
4986                  */
4987                 if (seq && ip->i_nextrio < ip->i_size)
4988                         (void) ufs_getpage_ra(vp, off, seg, addr);
4989                 err = biowait(bp);
4990                 pageio_done(bp);
4991 
4992                 if (err) {
4993                         pvn_read_done(pp, B_ERROR);
4994                         return (err);
4995                 }
4996         }
4997 
4998         pvn_plist_init(pp, pl, plsz, off, io_len, rw);
4999         return (0);
5000 }
5001 
5002 /*
5003  * Read ahead a cluster from the disk. Returns the length in bytes.
5004  */
5005 static int
5006 ufs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg, caddr_t addr)
5007 {
5008         struct inode    *ip = VTOI(vp);
5009         page_t          *pp;
5010         u_offset_t      io_off = ip->i_nextrio;
5011         ufsvfs_t        *ufsvfsp;
5012         caddr_t         addr2 = addr + (io_off - off);
5013         struct buf      *bp;
5014         daddr_t         bn;
5015         size_t          io_len;
5016         int             err;
5017         int             contig;
5018         int             xlen;
5019         int             bsize = ip->i_fs->fs_bsize;
5020 
5021         /*
5022          * If the directio advisory is in effect on this file,
5023          * then do not do buffered read ahead. Read ahead makes
5024          * it more difficult on threads using directio as they
5025          * will be forced to flush the pages from this vnode.
5026          */
5027         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
5028                 return (0);
5029         if (ip->i_flag & IDIRECTIO || ufsvfsp->vfs_forcedirectio)
5030                 return (0);
5031 
5032         /*
5033          * Is this test needed?
5034          */
5035         if (addr2 >= seg->s_base + seg->s_size)
5036                 return (0);
5037 
5038         contig = 0;
5039         err = bmap_read(ip, io_off, &bn, &contig);
5040         /*
	 * If it's a UFS_HOLE or a fallocated block, do not perform
	 * any read aheads since there probably is nothing to read ahead.
5043          */
5044         if (err || bn == UFS_HOLE || ISFALLOCBLK(ip, bn))
5045                 return (0);
5046 
5047         /*
5048          * Limit the transfer size to bsize if this is the 2nd block.
5049          */
5050         if (io_off == (u_offset_t)bsize)
5051                 contig = MIN(contig, bsize);
5052 
5053         if ((pp = pvn_read_kluster(vp, io_off, seg, addr2, &io_off,
5054             &io_len, io_off, contig, 1)) == NULL)
5055                 return (0);
5056 
5057         /*
5058          * Zero part of page which we are not going to read from disk
5059          */
5060         if ((xlen = (io_len & PAGEOFFSET)) > 0)
5061                 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
5062 
5063         ip->i_nextrio = (io_off + io_len + PAGESIZE - 1) & PAGEMASK;
5064 
5065         bp = pageio_setup(pp, io_len, ip->i_devvp, B_READ | B_ASYNC);
5066         bp->b_edev = ip->i_dev;
5067         bp->b_dev = cmpdev(ip->i_dev);
5068         bp->b_blkno = bn;
5069         bp->b_un.b_addr = (caddr_t)0;
5070         bp->b_file = ip->i_vnode;
5071         bp->b_offset = off;
5072 
5073         if (ufsvfsp->vfs_log) {
5074                 lufs_read_strategy(ufsvfsp->vfs_log, bp);
5075         } else if (ufsvfsp->vfs_snapshot) {
5076                 fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5077         } else {
5078                 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5079                 ub.ub_getras.value.ul++;
5080                 (void) bdev_strategy(bp);
5081                 lwp_stat_update(LWP_STAT_INBLK, 1);
5082         }
5083 
5084         return (io_len);
5085 }
5086 
5087 int     ufs_delay = 1;
5088 /*
5089  * Flags are composed of {B_INVAL, B_FREE, B_DONTNEED, B_FORCE, B_ASYNC}
5090  *
5091  * LMXXX - the inode really ought to contain a pointer to one of these
5092  * async args.  Stuff gunk in there and just hand the whole mess off.
5093  * This would replace i_delaylen, i_delayoff.
5094  */
5095 /*ARGSUSED*/
5096 static int
5097 ufs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
5098         struct cred *cr, caller_context_t *ct)
5099 {
5100         struct inode *ip = VTOI(vp);
5101         int err = 0;
5102 
5103         if (vp->v_count == 0) {
5104                 return (ufs_fault(vp, "ufs_putpage: bad v_count == 0"));
5105         }
5106 
5107         /*
5108          * XXX - Why should this check be made here?
5109          */
5110         if (vp->v_flag & VNOMAP) {
5111                 err = ENOSYS;
5112                 goto errout;
5113         }
5114 
5115         if (ip->i_ufsvfs == NULL) {
5116                 err = EIO;
5117                 goto errout;
5118         }
5119 
5120         if (flags & B_ASYNC) {
5121                 if (ufs_delay && len &&
5122                     (flags & ~(B_ASYNC|B_DONTNEED|B_FREE)) == 0) {
5123                         mutex_enter(&ip->i_tlock);
5124                         /*
5125                          * If nobody stalled, start a new cluster.
5126                          */
5127                         if (ip->i_delaylen == 0) {
5128                                 ip->i_delayoff = off;
5129                                 ip->i_delaylen = len;
5130                                 mutex_exit(&ip->i_tlock);
5131                                 goto errout;
5132                         }
5133                         /*
5134                          * If we have a full cluster or they are not contig,
5135                          * then push last cluster and start over.
5136                          */
5137                         if (ip->i_delaylen >= CLUSTSZ(ip) ||
5138                             ip->i_delayoff + ip->i_delaylen != off) {
5139                                 u_offset_t doff;
5140                                 size_t dlen;
5141 
5142                                 doff = ip->i_delayoff;
5143                                 dlen = ip->i_delaylen;
5144                                 ip->i_delayoff = off;
5145                                 ip->i_delaylen = len;
5146                                 mutex_exit(&ip->i_tlock);
5147                                 err = ufs_putpages(vp, doff, dlen,
5148                                     flags, cr);
5149                                 /* LMXXX - flags are new val, not old */
5150                                 goto errout;
5151                         }
5152                         /*
5153                          * There is something there, it's not full, and
5154                          * it is contig.
5155                          */
5156                         ip->i_delaylen += len;
5157                         mutex_exit(&ip->i_tlock);
5158                         goto errout;
5159                 }
5160                 /*
5161                  * Must have weird flags or we are not clustering.
5162                  */
5163         }
5164 
5165         err = ufs_putpages(vp, off, len, flags, cr);
5166 
5167 errout:
5168         return (err);
5169 }
5170 
5171 /*
5172  * If len == 0, do from off to EOF.
5173  *
5174  * The normal cases should be len == 0 & off == 0 (entire vp list),
5175  * len == MAXBSIZE (from segmap_release actions), and len == PAGESIZE
5176  * (from pageout).
5177  */
5178 /*ARGSUSED*/
5179 static int
5180 ufs_putpages(
5181         struct vnode *vp,
5182         offset_t off,
5183         size_t len,
5184         int flags,
5185         struct cred *cr)
5186 {
5187         u_offset_t io_off;
5188         u_offset_t eoff;
5189         struct inode *ip = VTOI(vp);
5190         page_t *pp;
5191         size_t io_len;
5192         int err = 0;
5193         int dolock;
5194 
5195         if (vp->v_count == 0)
5196                 return (ufs_fault(vp, "ufs_putpages: v_count == 0"));
5197         /*
5198          * Acquire the readers/write inode lock before locking
5199          * any pages in this inode.
5200          * The inode lock is held during i/o.
5201          */
5202         if (len == 0) {
5203                 mutex_enter(&ip->i_tlock);
5204                 ip->i_delayoff = ip->i_delaylen = 0;
5205                 mutex_exit(&ip->i_tlock);
5206         }
5207         dolock = (rw_owner(&ip->i_contents) != curthread);
5208         if (dolock) {
5209                 /*
5210                  * Must synchronize this thread and any possible thread
5211                  * operating in the window of vulnerability in wrip().
5212                  * It is dangerous to allow both a thread doing a putpage
5213                  * and a thread writing, so serialize them.  The exception
5214                  * is when the thread in wrip() does something which causes
5215                  * a putpage operation.  Then, the thread must be allowed
5216                  * to continue.  It may encounter a bmap_read problem in
5217                  * ufs_putapage, but that is handled in ufs_putapage.
5218                  * Allow async writers to proceed, we don't want to block
5219                  * the pageout daemon.
5220                  */
5221                 if (ip->i_writer == curthread)
5222                         rw_enter(&ip->i_contents, RW_READER);
5223                 else {
5224                         for (;;) {
5225                                 rw_enter(&ip->i_contents, RW_READER);
5226                                 mutex_enter(&ip->i_tlock);
5227                                 /*
5228                                  * If there is no thread in the critical
5229                                  * section of wrip(), then proceed.
5230                                  * Otherwise, wait until there isn't one.
5231                                  */
5232                                 if (ip->i_writer == NULL) {
5233                                         mutex_exit(&ip->i_tlock);
5234                                         break;
5235                                 }
5236                                 rw_exit(&ip->i_contents);
5237                                 /*
5238                                  * Bounce async writers when we have a writer
5239                                  * working on this file so we don't deadlock
5240                                  * the pageout daemon.
5241                                  */
5242                                 if (flags & B_ASYNC) {
5243                                         mutex_exit(&ip->i_tlock);
5244                                         return (0);
5245                                 }
5246                                 cv_wait(&ip->i_wrcv, &ip->i_tlock);
5247                                 mutex_exit(&ip->i_tlock);
5248                         }
5249                 }
5250         }
5251 
5252         if (!vn_has_cached_data(vp)) {
5253                 if (dolock)
5254                         rw_exit(&ip->i_contents);
5255                 return (0);
5256         }
5257 
5258         if (len == 0) {
5259                 /*
5260                  * Search the entire vp list for pages >= off.
5261                  */
5262                 err = pvn_vplist_dirty(vp, (u_offset_t)off, ufs_putapage,
5263                     flags, cr);
5264         } else {
5265                 /*
5266                  * Loop over all offsets in the range looking for
5267                  * pages to deal with.
5268                  */
5269                 if ((eoff = blkroundup(ip->i_fs, ip->i_size)) != 0)
5270                         eoff = MIN(off + len, eoff);
5271                 else
5272                         eoff = off + len;
5273 
5274                 for (io_off = off; io_off < eoff; io_off += io_len) {
5275                         /*
5276                          * If we are not invalidating, synchronously
5277                          * freeing or writing pages, use the routine
5278                          * page_lookup_nowait() to prevent reclaiming
5279                          * them from the free list.
5280                          */
5281                         if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
5282                                 pp = page_lookup(vp, io_off,
5283                                     (flags & (B_INVAL | B_FREE)) ?
5284                                     SE_EXCL : SE_SHARED);
5285                         } else {
5286                                 pp = page_lookup_nowait(vp, io_off,
5287                                     (flags & B_FREE) ? SE_EXCL : SE_SHARED);
5288                         }
5289 
5290                         if (pp == NULL || pvn_getdirty(pp, flags) == 0)
5291                                 io_len = PAGESIZE;
5292                         else {
5293                                 u_offset_t *io_offp = &io_off;
5294 
5295                                 err = ufs_putapage(vp, pp, io_offp, &io_len,
5296                                     flags, cr);
5297                                 if (err != 0)
5298                                         break;
5299                                 /*
5300                                  * "io_off" and "io_len" are returned as
5301                                  * the range of pages we actually wrote.
5302                                  * This allows us to skip ahead more quickly
5303                                  * since several pages may've been dealt
5304                                  * with by this iteration of the loop.
5305                                  */
5306                         }
5307                 }
5308         }
5309         if (err == 0 && off == 0 && (len == 0 || len >= ip->i_size)) {
5310                 /*
5311                  * We have just sync'ed back all the pages on
5312                  * the inode, turn off the IMODTIME flag.
5313                  */
5314                 mutex_enter(&ip->i_tlock);
5315                 ip->i_flag &= ~IMODTIME;
5316                 mutex_exit(&ip->i_tlock);
5317         }
5318         if (dolock)
5319                 rw_exit(&ip->i_contents);
5320         return (err);
5321 }
5322 
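     /*
      * I/O completion routine for page writes issued by ufs_putapage().
      * Credit the completed bytes back against the inode's count of
      * writes in flight and, if that count has dropped to or below the
      * write-throttle low-water mark (ufs_LW), wake any throttled
      * writers waiting on i_wrcv.
      */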
5323 static void
5324 ufs_iodone(buf_t *bp)
5325 {
5326         struct inode *ip;
5327 
5328         ASSERT((bp->b_pages->p_vnode != NULL) && !(bp->b_flags & B_READ));
5329 
5330         bp->b_iodone = NULL;
5331 
5332         ip = VTOI(bp->b_pages->p_vnode);
5333 
5334         mutex_enter(&ip->i_tlock);
5335         if (ip->i_writes >= ufs_LW) {
5336                 if ((ip->i_writes -= bp->b_bcount) <= ufs_LW)
5337                         if (ufs_WRITES)
5338                                 cv_broadcast(&ip->i_wrcv); /* wake all up */
5339         } else {
5340                 ip->i_writes -= bp->b_bcount;
5341         }
5342 
5343         mutex_exit(&ip->i_tlock);
5344         iodone(bp);
5345 }
5346 
5347 /*
5348  * Write out a single page, possibly klustering adjacent
5349  * dirty pages.  The inode lock must be held.
5350  *
5351  * LMXXX - bsize < pagesize not done.
5352  */
5353 /*ARGSUSED*/
5354 int
5355 ufs_putapage(
5356         struct vnode *vp,
5357         page_t *pp,
5358         u_offset_t *offp,
5359         size_t *lenp,           /* return values */
5360         int flags,
5361         struct cred *cr)
5362 {
5363         u_offset_t io_off;
5364         u_offset_t off;
5365         struct inode *ip = VTOI(vp);
5366         struct ufsvfs *ufsvfsp = ip->i_ufsvfs;
5367         struct fs *fs;
5368         struct buf *bp;
5369         size_t io_len;
5370         daddr_t bn;
5371         int err;
5372         int contig;
5373         int dotrans;
5374 
5375         ASSERT(RW_LOCK_HELD(&ip->i_contents));
5376 
5377         if (ufsvfsp == NULL) {
5378                 err = EIO;
5379                 goto out_trace;
5380         }
5381 
5382         fs = ip->i_fs;
5383         ASSERT(fs->fs_ronly == 0);
5384 
5385         /*
5386          * If the modified time on the inode has not already been
5387          * set elsewhere (e.g. for write/setattr) we set the time now.
5388          * This gives us approximate modified times for mmap'ed files
5389          * which are modified via stores in the user address space.
5390          */
5391         if ((ip->i_flag & IMODTIME) == 0) {
5392                 mutex_enter(&ip->i_tlock);
5393                 ip->i_flag |= IUPD;
5394                 ip->i_seq++;
5395                 ITIMES_NOLOCK(ip);
5396                 mutex_exit(&ip->i_tlock);
5397         }
5398 
5399         /*
5400          * Align the request to a block boundary (for old file systems),
5401          * and go ask bmap() how contiguous things are for this file.
5402          */
5403         off = pp->p_offset & (offset_t)fs->fs_bmask;  /* block align it */
5404         contig = 0;
5405         err = bmap_read(ip, off, &bn, &contig);
5406         if (err)
5407                 goto out;
5408         if (bn == UFS_HOLE) {                   /* putpage never allocates */
5409                 /*
5410                  * logging device is in error mode; simply return EIO
5411                  */
5412                 if (TRANS_ISERROR(ufsvfsp)) {
5413                         err = EIO;
5414                         goto out;
5415                 }
5416                 /*
5417                  * Oops, the thread in the window in wrip() did some
5418                  * sort of operation which caused a putpage in the bad
5419                  * range.  In this case, just return an error; this leaves
5420                  * the software modified bit set on the page so it will be
5421                  * written out again later.
5422                  */
5423                 if (ip->i_writer == curthread) {
5424                         err = EIO;
5425                         goto out;
5426                 }
5427                 /*
5428                  * If the pager is trying to push a page in the bad range,
5429                  * just tell it to try again later when things are better.
5430                  */
5431                 if (flags & B_ASYNC) {
5432                         err = EAGAIN;
5433                         goto out;
5434                 }
5435                 err = ufs_fault(ITOV(ip), "ufs_putapage: bn == UFS_HOLE");
5436                 goto out;
5437         }
5438 
5439         /*
5440          * If it is a fallocate'd block (stored as a negative block
5441          * number), flip the sign since we are now writing to it.
5442          */
5443         if (ISFALLOCBLK(ip, bn)) {
5444                 err = bmap_set_bn(vp, off, dbtofsb(fs, -bn));
5445                 if (err)
5446                         goto out;
5447 
5448                 bn = -bn;
5449         }
5450 
5451         /*
5452          * Take the length (of contiguous bytes) passed back from bmap()
5453          * and _try_ to get a set of pages covering that extent.
5454          */
5455         pp = pvn_write_kluster(vp, pp, &io_off, &io_len, off, contig, flags);
5456 
5457         /*
5458          * May have run out of memory and not clustered backwards.
5459          * off          p_offset
5460          * [  pp - 1  ][   pp   ]
5461          * [    block           ]
5462          * We told bmap off, so we have to adjust the bn accordingly.
5463          */
5464         if (io_off > off) {
5465                 bn += btod(io_off - off);
5466                 contig -= (io_off - off);
5467         }
5468 
5469         /*
5470          * bmap was careful to tell us the right size, so use that.
5471          * There might be unallocated frags at the end.
5472          * LMXXX - bzero the end of the page?  We must be writing after EOF.
5473          */
5474         if (io_len > contig) {
5475                 ASSERT(io_len - contig < fs->fs_bsize);
5476                 io_len -= (io_len - contig);
5477         }
5478 
5479         /*
5480          * Handle the case where we are writing the last page after EOF.
5481          *
5482          * XXX - just a patch for i-mt3.
5483          */
5484         if (io_len == 0) {
5485                 ASSERT(pp->p_offset >=
5486                     (u_offset_t)(roundup(ip->i_size, PAGESIZE)));
5487                 io_len = PAGESIZE;
5488         }
5489 
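             /*
              * Set up a buf for the klustered page list and point it at
              * the device and starting disk block for the write.
              */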
5490         bp = pageio_setup(pp, io_len, ip->i_devvp, B_WRITE | flags);
5491 
5492         ULOCKFS_SET_MOD(ITOUL(ip));
5493 
5494         bp->b_edev = ip->i_dev;
5495         bp->b_dev = cmpdev(ip->i_dev);
5496         bp->b_blkno = bn;
5497         bp->b_un.b_addr = (caddr_t)0;
5498         bp->b_file = ip->i_vnode;
5499 
5500         /*
5501          * File contents of shadow or quota inodes are metadata, and updates
5502          * to these need to be put into a logging transaction. All direct
5503          * callers in UFS do that, but fsflush can come here _before_ the
5504          * normal codepath. An example would be updating ACL information, for
5505          * which the normal codepath would be:
5506          *      ufs_si_store()
5507          *      ufs_rdwri()
5508          *      wrip()
5509          *      segmap_release()
5510          *      VOP_PUTPAGE()
5511          * Here, fsflush can pick up the dirty page before segmap_release()
5512          * forces it out. If that happens, there's no transaction.
5513          * We therefore need to test whether a transaction exists, and if not
5514          * create one - for fsflush.
5515          */
5516         dotrans =
5517             (((ip->i_mode & IFMT) == IFSHAD || ufsvfsp->vfs_qinod == ip) &&
5518             ((curthread->t_flag & T_DONTBLOCK) == 0) &&
5519             (TRANS_ISTRANS(ufsvfsp)));
5520 
5521         if (dotrans) {
5522                 curthread->t_flag |= T_DONTBLOCK;
5523                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
5524         }
5525         if (TRANS_ISTRANS(ufsvfsp)) {
5526                 if ((ip->i_mode & IFMT) == IFSHAD) {
5527                         TRANS_BUF(ufsvfsp, 0, io_len, bp, DT_SHAD);
5528                 } else if (ufsvfsp->vfs_qinod == ip) {
5529                         TRANS_DELTA(ufsvfsp, ldbtob(bn), bp->b_bcount, DT_QR,
5530                             0, 0);
5531                 }
5532         }
5533         if (dotrans) {
5534                 TRANS_END_ASYNC(ufsvfsp, TOP_PUTPAGE, TOP_PUTPAGE_SIZE(ip));
5535                 curthread->t_flag &= ~T_DONTBLOCK;
5536         }
5537 
5538         /* write throttle: count bytes in flight (see ufs_iodone()) */
5539 
5540         ASSERT(bp->b_iodone == NULL);
5541         bp->b_iodone = (int (*)())ufs_iodone;
5542         mutex_enter(&ip->i_tlock);
5543         ip->i_writes += bp->b_bcount;
5544         mutex_exit(&ip->i_tlock);
5545 
5546         if (bp->b_flags & B_ASYNC) {
5547                 if (ufsvfsp->vfs_log) {
5548                         lufs_write_strategy(ufsvfsp->vfs_log, bp);
5549                 } else if (ufsvfsp->vfs_snapshot) {
5550                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5551                 } else {
5552                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5553                         ub.ub_putasyncs.value.ul++;
5554                         (void) bdev_strategy(bp);
5555                         lwp_stat_update(LWP_STAT_OUBLK, 1);
5556                 }
5557         } else {
5558                 if (ufsvfsp->vfs_log) {
5559                         lufs_write_strategy(ufsvfsp->vfs_log, bp);
5560                 } else if (ufsvfsp->vfs_snapshot) {
5561                         fssnap_strategy(&ufsvfsp->vfs_snapshot, bp);
5562                 } else {
5563                         ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
5564                         ub.ub_putsyncs.value.ul++;
5565                         (void) bdev_strategy(bp);
5566                         lwp_stat_update(LWP_STAT_OUBLK, 1);
5567                 }
5568                 err = biowait(bp);
5569                 pageio_done(bp);
5570                 pvn_write_done(pp, ((err) ? B_ERROR : 0) | B_WRITE | flags);
5571         }
5572 
5573         pp = NULL;
5574 
5575 out:
5576         if (err != 0 && pp != NULL)
5577                 pvn_write_done(pp, B_ERROR | B_WRITE | flags);
5578 
5579         if (offp)
5580                 *offp = io_off;
5581         if (lenp)
5582                 *lenp = io_len;
5583 out_trace:
5584         return (err);
5585 }
5586 
5587 uint64_t ufs_map_alock_retry_cnt;
5588 uint64_t ufs_map_lockfs_retry_cnt;
5589 
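     /*
      * mmap(2) support: create a segvn mapping of a regular file.  Both
      * the address-space lock and the lockfs protocol are entered with
      * try/retry loops (counted by the variables above) so that we never
      * block in a way that could deadlock against pagefaults or a
      * pending file system quiesce; see the comments in the body below.
      */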
5590 /* ARGSUSED */
5591 static int
5592 ufs_map(struct vnode *vp,
5593         offset_t off,
5594         struct as *as,
5595         caddr_t *addrp,
5596         size_t len,
5597         uchar_t prot,
5598         uchar_t maxprot,
5599         uint_t flags,
5600         struct cred *cr,
5601         caller_context_t *ct)
5602 {
5603         struct segvn_crargs vn_a;
5604         struct ufsvfs *ufsvfsp = VTOI(vp)->i_ufsvfs;
5605         struct ulockfs *ulp;
5606         int error, sig;
5607         k_sigset_t smask;
5608         caddr_t hint = *addrp;
5609 
5610         if (vp->v_flag & VNOMAP) {
5611                 error = ENOSYS;
5612                 goto out;
5613         }
5614 
5615         if (off < (offset_t)0 || (offset_t)(off + len) < (offset_t)0) {
5616                 error = ENXIO;
5617                 goto out;
5618         }
5619 
5620         if (vp->v_type != VREG) {
5621                 error = ENODEV;
5622                 goto out;
5623         }
5624 
5625 retry_map:
5626         *addrp = hint;
5627         /*
5628          * If file is being locked, disallow mapping.
5629          */
5630         if (vn_has_mandatory_locks(vp, VTOI(vp)->i_mode)) {
5631                 error = EAGAIN;
5632                 goto out;
5633         }
5634 
5635         as_rangelock(as);
5636         /*
5637          * Note that if we are retrying (because ufs_lockfs_trybegin failed in
5638          * the previous attempt), some other thread could have grabbed
5639          * the same VA range if MAP_FIXED is set. In that case, choose_addr
5640          * would unmap the valid VA range, which is OK.
5641          */
5642         error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
5643         if (error != 0) {
5644                 as_rangeunlock(as);
5645                 goto out;
5646         }
5647 
5648         /*
5649          * a_lock has to be acquired before entering the lockfs protocol
5650          * because that is the order in which pagefault works. Also we cannot
5651          * block on a_lock here because this waiting writer will prevent
5652          * further readers like ufs_read from progressing and could cause
5653          * deadlock between ufs_read/ufs_map/pagefault when a quiesce is
5654          * pending.
5655          */
5656         while (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_WRITER)) {
5657                 ufs_map_alock_retry_cnt++;
5658                 delay(RETRY_LOCK_DELAY);
5659         }
5660 
5661         /*
5662          * We can't hold as->a_lock and wait for lockfs to succeed because
5663          * the proc tools might hang on a_lock, so call ufs_lockfs_trybegin()
5664          * instead.
5665          */
5666         if (error = ufs_lockfs_trybegin(ufsvfsp, &ulp, ULOCKFS_MAP_MASK)) {
5667                 /*
5668                  * ufs_lockfs_trybegin() did not succeed. It is safer to give up
5669                  * as->a_lock and wait for ulp->ul_fs_lock status to change.
5670                  */
5671                 ufs_map_lockfs_retry_cnt++;
5672                 AS_LOCK_EXIT(as, &as->a_lock);
5673                 as_rangeunlock(as);
5674                 if (error == EIO)
5675                         goto out;
5676 
5677                 mutex_enter(&ulp->ul_lock);
5678                 while (ulp->ul_fs_lock & ULOCKFS_MAP_MASK) {
5679                         if (ULOCKFS_IS_SLOCK(ulp) || ufsvfsp->vfs_nointr) {
5680                                 cv_wait(&ulp->ul_cv, &ulp->ul_lock);
5681                         } else {
5682                                 sigintr(&smask, 1);
5683                                 sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
5684                                 sigunintr(&smask);
5685                                 if (((ulp->ul_fs_lock & ULOCKFS_MAP_MASK) &&
5686                                     !sig) || ufsvfsp->vfs_dontblock) {
5687                                         mutex_exit(&ulp->ul_lock);
5688                                         return (EINTR);
5689                                 }
5690                         }
5691                 }
5692                 mutex_exit(&ulp->ul_lock);
5693                 goto retry_map;
5694         }
5695 
5696         vn_a.vp = vp;
5697         vn_a.offset = (u_offset_t)off;
5698         vn_a.type = flags & MAP_TYPE;
5699         vn_a.prot = prot;
5700         vn_a.maxprot = maxprot;
5701         vn_a.cred = cr;
5702         vn_a.amp = NULL;
5703         vn_a.flags = flags & ~MAP_TYPE;
5704         vn_a.szc = 0;
5705         vn_a.lgrp_mem_policy_flags = 0;
5706 
5707         error = as_map_locked(as, *addrp, len, segvn_create, &vn_a);
5708         if (ulp)
5709                 ufs_lockfs_end(ulp);
5710         as_rangeunlock(as);
5711 out:
5712         return (error);
5713 }
5714 
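     /*
      * Called as mappings of this vnode are added: bump the inode's
      * mapped-page count by the number of pages in the new mapping.
      */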
5715 /* ARGSUSED */
5716 static int
5717 ufs_addmap(struct vnode *vp,
5718         offset_t off,
5719         struct as *as,
5720         caddr_t addr,
5721         size_t  len,
5722         uchar_t  prot,
5723         uchar_t  maxprot,
5724         uint_t    flags,
5725         struct cred *cr,
5726         caller_context_t *ct)
5727 {
5728         struct inode *ip = VTOI(vp);
5729 
5730         if (vp->v_flag & VNOMAP) {
5731                 return (ENOSYS);
5732         }
5733 
5734         mutex_enter(&ip->i_tlock);
5735         ip->i_mapcnt += btopr(len);
5736         mutex_exit(&ip->i_tlock);
5737         return (0);
5738 }
5739 
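     /*
      * Called as mappings of this vnode are removed: drop the inode's
      * mapped-page count by the number of pages being unmapped.
      */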
5740 /*ARGSUSED*/
5741 static int
5742 ufs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
5743         size_t len, uint_t prot,  uint_t maxprot,  uint_t flags,
5744         struct cred *cr, caller_context_t *ct)
5745 {
5746         struct inode *ip = VTOI(vp);
5747 
5748         if (vp->v_flag & VNOMAP) {
5749                 return (ENOSYS);
5750         }
5751 
5752         mutex_enter(&ip->i_tlock);
5753         ip->i_mapcnt -= btopr(len);  /* Count released mappings */
5754         ASSERT(ip->i_mapcnt >= 0);
5755         mutex_exit(&ip->i_tlock);
5756         return (0);
5757 }
5758 /*
5759  * Return the answer requested by poll() for non-device files
5760  */
5761 struct pollhead ufs_pollhd;
5762 
5763 /* ARGSUSED */
5764 int
5765 ufs_poll(vnode_t *vp, short ev, int any, short *revp, struct pollhead **phpp,
5766         caller_context_t *ct)
5767 {
5768         struct ufsvfs   *ufsvfsp;
5769 
5770         *revp = 0;
5771         ufsvfsp = VTOI(vp)->i_ufsvfs;
5772 
5773         if (!ufsvfsp) {
5774                 *revp = POLLHUP;
5775                 goto out;
5776         }
5777 
5778         if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs) ||
5779             ULOCKFS_IS_ELOCK(&ufsvfsp->vfs_ulockfs)) {
5780                 *revp |= POLLERR;
5781 
5782         } else {
5783                 if ((ev & POLLOUT) && !ufsvfsp->vfs_fs->fs_ronly &&
5784                     !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
5785                         *revp |= POLLOUT;
5786 
5787                 if ((ev & POLLWRBAND) && !ufsvfsp->vfs_fs->fs_ronly &&
5788                     !ULOCKFS_IS_WLOCK(&ufsvfsp->vfs_ulockfs))
5789                         *revp |= POLLWRBAND;
5790 
5791                 if (ev & POLLIN)
5792                         *revp |= POLLIN;
5793 
5794                 if (ev & POLLRDNORM)
5795                         *revp |= POLLRDNORM;
5796 
5797                 if (ev & POLLRDBAND)
5798                         *revp |= POLLRDBAND;
5799         }
5800 
5801         if ((ev & POLLPRI) && (*revp & (POLLERR|POLLHUP)))
5802                 *revp |= POLLPRI;
5803 out:
5804         *phpp = !any && !*revp ? &ufs_pollhd : (struct pollhead *)NULL;
5805 
5806         return (0);
5807 }
5808 
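     /*
      * Handle pathconf(2)/fpathconf(2) queries.  Values UFS can answer
      * directly come from the superblock or inode; anything not handled
      * here falls through to the generic fs_pathconf().
      */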
5809 /* ARGSUSED */
5810 static int
5811 ufs_l_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
5812         caller_context_t *ct)
5813 {
5814         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
5815         struct ulockfs  *ulp = NULL;
5816         struct inode    *sip = NULL;
5817         int             error;
5818         struct inode    *ip = VTOI(vp);
5819         int             issync;
5820 
5821         error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_PATHCONF_MASK);
5822         if (error)
5823                 return (error);
5824 
5825         switch (cmd) {
5826                 /*
5827                  * Have to handle _PC_NAME_MAX here, because the normal way
5828                  * [fs_pathconf() -> VOP_STATVFS() -> ufs_statvfs()]
5829                  * results in a lock ordering reversal between
5830                  * ufs_lockfs_{begin,end}() and
5831                  * ufs_thread_{suspend,continue}().
5832                  *
5833                  * Keep in sync with ufs_statvfs().
5834                  */
5835         case _PC_NAME_MAX:
5836                 *valp = MAXNAMLEN;
5837                 break;
5838 
5839         case _PC_FILESIZEBITS:
5840                 if (ufsvfsp->vfs_lfflags & UFS_LARGEFILES)
5841                         *valp = UFS_FILESIZE_BITS;
5842                 else
5843                         *valp = 32;
5844                 break;
5845 
5846         case _PC_XATTR_EXISTS:
5847                 if (vp->v_vfsp->vfs_flag & VFS_XATTR) {
5848 
5849                         error =
5850                             ufs_xattr_getattrdir(vp, &sip, LOOKUP_XATTR, cr);
5851                         if (error ==  0 && sip != NULL) {
5852                                 /* Start transaction */
5853                                 if (ulp) {
5854                                         TRANS_BEGIN_CSYNC(ufsvfsp, issync,
5855                                             TOP_RMDIR, TOP_RMDIR_SIZE);
5856                                 }
5857                                 /*
5858                                  * Is the extended attribute directory empty?
5859                                  */
5860                                 rw_enter(&sip->i_rwlock, RW_WRITER);
5861                                 rw_enter(&sip->i_contents, RW_WRITER);
5862                                 if (ufs_xattrdirempty(sip,
5863                                     sip->i_number, CRED())) {
5864                                         rw_enter(&ip->i_contents, RW_WRITER);
5865                                         ufs_unhook_shadow(ip, sip);
5866                                         rw_exit(&ip->i_contents);
5867 
5868                                         *valp = 0;
5869 
5870                                 } else
5871                                         *valp = 1;
5872                                 rw_exit(&sip->i_contents);
5873                                 rw_exit(&sip->i_rwlock);
5874                                 if (ulp) {
5875                                         TRANS_END_CSYNC(ufsvfsp, error, issync,
5876                                             TOP_RMDIR, TOP_RMDIR_SIZE);
5877                                 }
5878                                 VN_RELE(ITOV(sip));
5879                         } else if (error == ENOENT) {
5880                                 *valp = 0;
5881                                 error = 0;
5882                         }
5883                 } else {
5884                         error = fs_pathconf(vp, cmd, valp, cr, ct);
5885                 }
5886                 break;
5887 
5888         case _PC_ACL_ENABLED:
5889                 *valp = _ACL_ACLENT_ENABLED;
5890                 break;
5891 
5892         case _PC_MIN_HOLE_SIZE:
5893                 *valp = (ulong_t)ip->i_fs->fs_bsize;
5894                 break;
5895 
5896         case _PC_SATTR_ENABLED:
5897         case _PC_SATTR_EXISTS:
5898                 *valp = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
5899                     (vp->v_type == VREG || vp->v_type == VDIR);
5900                 break;
5901 
5902         case _PC_TIMESTAMP_RESOLUTION:
5903                 /*
5904                  * UFS keeps only microsecond timestamp resolution.
5905                  * This is historical and will probably never change.
5906                  */
5907                 *valp = 1000L;
5908                 break;
5909 
5910         default:
5911                 error = fs_pathconf(vp, cmd, valp, cr, ct);
5912                 break;
5913         }
5914 
5915         if (ulp != NULL) {
5916                 ufs_lockfs_end(ulp);
5917         }
5918         return (error);
5919 }
5920 
5921 int ufs_pageio_writes, ufs_pageio_reads;
5922 
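     /*
      * Do I/O directly to or from the caller-supplied page list,
      * bypassing getpage/putpage.  Used for swap files and, when segvn
      * has already locked the pages SE_EXCL (the "vmpss" case), for
      * faults against segments that map this vnode with large pages.
      */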
5923 /*ARGSUSED*/
5924 static int
5925 ufs_pageio(struct vnode *vp, page_t *pp, u_offset_t io_off, size_t io_len,
5926         int flags, struct cred *cr, caller_context_t *ct)
5927 {
5928         struct inode *ip = VTOI(vp);
5929         struct ufsvfs *ufsvfsp;
5930         page_t *npp = NULL, *opp = NULL, *cpp = pp;
5931         struct buf *bp;
5932         daddr_t bn;
5933         size_t done_len = 0, cur_len = 0;
5934         int err = 0;
5935         int contig = 0;
5936         int dolock;
5937         int vmpss = 0;
5938         struct ulockfs *ulp;
5939 
5940         if ((flags & B_READ) && pp != NULL && pp->p_vnode == vp &&
5941             vp->v_mpssdata != NULL) {
5942                 vmpss = 1;
5943         }
5944 
5945         dolock = (rw_owner(&ip->i_contents) != curthread);
5946         /*
5947          * We need a better check.  Ideally, we would use a separate
5948          * vnodeops vector so that hlocked and forcibly unmounted file
5949          * systems would return EIO where appropriate, without the
5950          * need for these checks.
5951          */
5952         if ((ufsvfsp = ip->i_ufsvfs) == NULL)
5953                 return (EIO);
5954 
5955         /*
5956          * For vmpss (pp can be NULL) case respect the quiesce protocol.
5957          * ul_lock must be taken before locking pages so we can't use it here
5958          * if pp is non NULL because segvn already locked pages
5959          * SE_EXCL. Instead we rely on the fact that a forced umount or
5960          * applying a filesystem lock via ufs_fiolfs() will block in the
5961          * implicit call to ufs_flush() until we unlock the pages after the
5962          * return to segvn. Other ufs_quiesce() callers keep ufs_quiesce_pend
5963          * above 0 until they are done. We have to be careful not to increment
5964          * ul_vnops_cnt here after forceful unmount hlocks the file system.
5965          *
5966          * If pp is NULL use ul_lock to make sure we don't increment
5967          * ul_vnops_cnt after forceful unmount hlocks the file system.
5968          */
5969         if (vmpss || pp == NULL) {
5970                 ulp = &ufsvfsp->vfs_ulockfs;
5971                 if (pp == NULL)
5972                         mutex_enter(&ulp->ul_lock);
5973                 if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
5974                         if (pp == NULL) {
5975                                 mutex_exit(&ulp->ul_lock);
5976                         }
5977                         return (vmpss ? EIO : EINVAL);
5978                 }
5979                 atomic_inc_ulong(&ulp->ul_vnops_cnt);
5980                 if (pp == NULL)
5981                         mutex_exit(&ulp->ul_lock);
5982                 if (ufs_quiesce_pend) {
5983                         if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
5984                                 cv_broadcast(&ulp->ul_cv);
5985                         return (vmpss ? EIO : EINVAL);
5986                 }
5987         }
5988 
5989         if (dolock) {
5990                 /*
5991                  * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
5992                  * handle a fault against a segment that maps vnode pages with
5993                  * large mappings.  Segvn creates pages and holds them locked
5994                  * SE_EXCL during VOP_PAGEIO() call. In this case we have to
5995                  * use rw_tryenter() to avoid a potential deadlock since in
5996                  * lock order i_contents needs to be taken first.
5997                  * Segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
5998                  */
5999                 if (!vmpss) {
6000                         rw_enter(&ip->i_contents, RW_READER);
6001                 } else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
6002                         if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6003                                 cv_broadcast(&ulp->ul_cv);
6004                         return (EDEADLK);
6005                 }
6006         }
6007 
6008         /*
6009          * Return an error to segvn because the pagefault request is beyond
6010          * PAGESIZE rounded EOF.
6011          */
6012         if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
6013                 if (dolock)
6014                         rw_exit(&ip->i_contents);
6015                 if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6016                         cv_broadcast(&ulp->ul_cv);
6017                 return (EFAULT);
6018         }
6019 
6020         if (pp == NULL) {
6021                 if (bmap_has_holes(ip)) {
6022                         err = ENOSYS;
6023                 } else {
6024                         err = EINVAL;
6025                 }
6026                 if (dolock)
6027                         rw_exit(&ip->i_contents);
6028                 if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6029                         cv_broadcast(&ulp->ul_cv);
6030                 return (err);
6031         }
6032 
6033         /*
6034          * Break the io request into chunks, one for each contiguous
6035          * stretch of disk blocks in the target file.
6036          */
6037         while (done_len < io_len) {
6038                 ASSERT(cpp);
6039                 contig = 0;
6040                 if (err = bmap_read(ip, (u_offset_t)(io_off + done_len),
6041                     &bn, &contig))
6042                         break;
6043 
6044                 if (bn == UFS_HOLE) {   /* No holey swapfiles */
6045                         if (vmpss) {
6046                                 err = EFAULT;
6047                                 break;
6048                         }
6049                         err = ufs_fault(ITOV(ip), "ufs_pageio: bn == UFS_HOLE");
6050                         break;
6051                 }
6052 
6053                 cur_len = MIN(io_len - done_len, contig);
6054                 /*
6055                  * Zero out a page beyond EOF, when the last block of
6056                  * a file is a UFS fragment so that ufs_pageio() can be used
6057                  * instead of ufs_getpage() to handle faults against
6058                  * segvn segments that use large pages.
6059                  */
6060                 page_list_break(&cpp, &npp, btopr(cur_len));
6061                 if ((flags & B_READ) && (cur_len & PAGEOFFSET)) {
6062                         size_t xlen = cur_len & PAGEOFFSET;
6063                         pagezero(cpp->p_prev, xlen, PAGESIZE - xlen);
6064                 }
6065 
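                     /*
                      * Set up a buf for this chunk, point it at the device
                      * and starting disk block, and issue the transfer.
                      */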
6066                 bp = pageio_setup(cpp, cur_len, ip->i_devvp, flags);
6067                 ASSERT(bp != NULL);
6068 
6069                 bp->b_edev = ip->i_dev;
6070                 bp->b_dev = cmpdev(ip->i_dev);
6071                 bp->b_blkno = bn;
6072                 bp->b_un.b_addr = (caddr_t)0;
6073                 bp->b_file = ip->i_vnode;
6074 
6075                 ufsvfsp->vfs_iotstamp = ddi_get_lbolt();
6076                 ub.ub_pageios.value.ul++;
6077                 if (ufsvfsp->vfs_snapshot)
6078                         fssnap_strategy(&(ufsvfsp->vfs_snapshot), bp);
6079                 else
6080                         (void) bdev_strategy(bp);
6081 
6082                 if (flags & B_READ)
6083                         ufs_pageio_reads++;
6084                 else
6085                         ufs_pageio_writes++;
6086                 if (flags & B_READ)
6087                         lwp_stat_update(LWP_STAT_INBLK, 1);
6088                 else
6089                         lwp_stat_update(LWP_STAT_OUBLK, 1);
6090                 /*
6091                  * If the request is not B_ASYNC, wait for i/o to complete
6092                  * and re-assemble the page list to return to the caller.
6093                  * If it is B_ASYNC we leave the page list in pieces and
6094                  * cleanup() will dispose of them.
6095                  */
6096                 if ((flags & B_ASYNC) == 0) {
6097                         err = biowait(bp);
6098                         pageio_done(bp);
6099                         if (err)
6100                                 break;
6101                         page_list_concat(&opp, &cpp);
6102                 }
6103                 cpp = npp;
6104                 npp = NULL;
6105                 if (flags & B_READ)
6106                         cur_len = P2ROUNDUP_TYPED(cur_len, PAGESIZE, size_t);
6107                 done_len += cur_len;
6108         }
6109         ASSERT(err || (cpp == NULL && npp == NULL && done_len == io_len));
6110         if (err) {
6111                 if (flags & B_ASYNC) {
6112                         /* Cleanup unprocessed parts of list */
6113                         page_list_concat(&cpp, &npp);
6114                         if (flags & B_READ)
6115                                 pvn_read_done(cpp, B_ERROR);
6116                         else
6117                                 pvn_write_done(cpp, B_ERROR);
6118                 } else {
6119                         /* Re-assemble list and let caller clean up */
6120                         page_list_concat(&opp, &cpp);
6121                         page_list_concat(&opp, &npp);
6122                 }
6123         }
6124 
6125         if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
6126             ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
6127                 mutex_enter(&ip->i_tlock);
6128                 ip->i_flag |= IACC;
6129                 ITIMES_NOLOCK(ip);
6130                 mutex_exit(&ip->i_tlock);
6131         }
6132 
6133         if (dolock)
6134                 rw_exit(&ip->i_contents);
6135         if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6136                 cv_broadcast(&ulp->ul_cv);
6137         return (err);
6138 }
6139 
6140 /*
6141  * Called when the kernel is in a frozen state to dump data
6142  * directly to the device. It uses a private dump data structure,
6143  * set up by ufs_dumpctl(), to locate the correct disk block to which to dump.
6144  */
6145 /*ARGSUSED*/
6146 static int
6147 ufs_dump(vnode_t *vp, caddr_t addr, offset_t ldbn, offset_t dblks,
6148     caller_context_t *ct)
6149 {
6150         u_offset_t      file_size;
6151         struct inode    *ip = VTOI(vp);
6152         struct fs       *fs = ip->i_fs;
6153         daddr_t         dbn, lfsbn;
6154         int             disk_blks = fs->fs_bsize >> DEV_BSHIFT;
6155         int             error = 0;
6156         int             ndbs, nfsbs;
6157 
6158         /*
6159          * forced unmount case
6160          */
6161         if (ip->i_ufsvfs == NULL)
6162                 return (EIO);
6163         /*
6164          * Validate that the inode has not been modified since
6165          * the dump structure was allocated.
6166          */
6167         mutex_enter(&ip->i_tlock);
6168         if ((dump_info == NULL) ||
6169             (dump_info->ip != ip) ||
6170             (dump_info->time.tv_sec != ip->i_mtime.tv_sec) ||
6171             (dump_info->time.tv_usec != ip->i_mtime.tv_usec)) {
6172                 mutex_exit(&ip->i_tlock);
6173                 return (-1);
6174         }
6175         mutex_exit(&ip->i_tlock);
6176 
6177         /*
6178          * See that the file has room for this write
6179          */
6180         UFS_GET_ISIZE(&file_size, ip);
6181 
6182         if (ldbtob(ldbn + dblks) > file_size)
6183                 return (ENOSPC);
6184 
6185         /*
6186          * Find the physical disk block numbers from the dump
6187          * private data structure directly and write out the data
6188          * in contiguous block lumps
6189          */
6190         while (dblks > 0 && !error) {
6191                 lfsbn = (daddr_t)lblkno(fs, ldbtob(ldbn));
6192                 dbn = fsbtodb(fs, dump_info->dblk[lfsbn]) + ldbn % disk_blks;
6193                 nfsbs = 1;
6194                 ndbs = disk_blks - ldbn % disk_blks;
6195                 while (ndbs < dblks && fsbtodb(fs, dump_info->dblk[lfsbn +
6196                     nfsbs]) == dbn + ndbs) {
6197                         nfsbs++;
6198                         ndbs += disk_blks;
6199                 }
6200                 if (ndbs > dblks)
6201                         ndbs = dblks;
6202                 error = bdev_dump(ip->i_dev, addr, dbn, ndbs);
6203                 addr += ldbtob((offset_t)ndbs);
6204                 dblks -= ndbs;
6205                 ldbn += ndbs;
6206         }
6207         return (error);
6208 
6209 }
6210 
6211 /*
6212  * Prepare the file system before and after the dump operation.
6213  *
6214  * action = DUMP_ALLOC:
6215  * Preparation before dump, allocate dump private data structure
6216  * to hold all the direct and indirect block info for dump.
6217  *
6218  * action = DUMP_FREE:
6219  * Clean up after dump, deallocate the dump private data structure.
6220  *
6221  * action = DUMP_SCAN:
6222  * Scan dump_info for *blkp DEV_BSIZE blocks of contig fs space;
6223  * if found, the starting file-relative DEV_BSIZE lbn is written
6224  * to *blkp; that lbn is intended for use with VOP_DUMP()
6225  */
6226 /*ARGSUSED*/
6227 static int
6228 ufs_dumpctl(vnode_t *vp, int action, offset_t *blkp, caller_context_t *ct)
6229 {
6230         struct inode    *ip = VTOI(vp);
6231         ufsvfs_t        *ufsvfsp = ip->i_ufsvfs;
6232         struct fs       *fs;
6233         daddr32_t       *dblk, *storeblk;
6234         daddr32_t       *nextblk, *endblk;
6235         struct buf      *bp;
6236         int             i, entry, entries;
6237         int             n, ncontig;
6238 
6239         /*
6240          * check for forced unmount
6241          */
6242         if (ufsvfsp == NULL)
6243                 return (EIO);
6244 
6245         if (action == DUMP_ALLOC) {
6246                 /*
6247                  * alloc and record dump_info
6248                  */
6249                 if (dump_info != NULL)
6250                         return (EINVAL);
6251 
6252                 ASSERT(vp->v_type == VREG);
6253                 fs = ufsvfsp->vfs_fs;
6254 
6255                 rw_enter(&ip->i_contents, RW_READER);
6256 
6257                 if (bmap_has_holes(ip)) {
6258                         rw_exit(&ip->i_contents);
6259                         return (EFAULT);
6260                 }
6261 
6262                 /*
6263                  * calculate and allocate space needed according to i_size
6264                  */
6265                 entries = (int)lblkno(fs, blkroundup(fs, ip->i_size));
6266                 dump_info = kmem_alloc(sizeof (struct dump) +
6267                     (entries - 1) * sizeof (daddr32_t), KM_NOSLEEP);
6268                 if (dump_info == NULL) {
6269                         rw_exit(&ip->i_contents);
6270                         return (ENOMEM);
6271                 }
6272 
6273                 /* Start saving the info */
6274                 dump_info->fsbs = entries;
6275                 dump_info->ip = ip;
6276                 storeblk = &dump_info->dblk[0];
6277 
6278                 /* Direct Blocks */
6279                 for (entry = 0; entry < NDADDR && entry < entries; entry++)
6280                         *storeblk++ = ip->i_db[entry];
6281 
6282                 /* Indirect Blocks */
6283                 for (i = 0; i < NIADDR; i++) {
6284                         int error = 0;
6285 
6286                         bp = UFS_BREAD(ufsvfsp,
6287                             ip->i_dev, fsbtodb(fs, ip->i_ib[i]), fs->fs_bsize);
6288                         if (bp->b_flags & B_ERROR)
6289                                 error = EIO;
6290                         else {
6291                                 dblk = bp->b_un.b_daddr;
6292                                 if ((storeblk = save_dblks(ip, ufsvfsp,
6293                                     storeblk, dblk, i, entries)) == NULL)
6294                                         error = EIO;
6295                         }
6296 
6297                         brelse(bp);
6298 
6299                         if (error != 0) {
6300                                 kmem_free(dump_info, sizeof (struct dump) +
6301                                     (entries - 1) * sizeof (daddr32_t));
6302                                 rw_exit(&ip->i_contents);
6303                                 dump_info = NULL;
6304                                 return (error);
6305                         }
6306                 }
6307                 /* and time stamp the information */
6308                 mutex_enter(&ip->i_tlock);
6309                 dump_info->time = ip->i_mtime;
6310                 mutex_exit(&ip->i_tlock);
6311 
6312                 rw_exit(&ip->i_contents);
6313         } else if (action == DUMP_FREE) {
6314                 /*
6315                  * free dump_info
6316                  */
6317                 if (dump_info == NULL)
6318                         return (EINVAL);
6319                 entries = dump_info->fsbs - 1;
6320                 kmem_free(dump_info, sizeof (struct dump) +
6321                     entries * sizeof (daddr32_t));
6322                 dump_info = NULL;
6323         } else if (action == DUMP_SCAN) {
6324                 /*
6325                  * scan dump_info
6326                  */
6327                 if (dump_info == NULL)
6328                         return (EINVAL);
6329 
6330                 dblk = dump_info->dblk;
6331                 nextblk = dblk + 1;
6332                 endblk = dblk + dump_info->fsbs - 1;
6333                 fs = ufsvfsp->vfs_fs;
6334                 ncontig = *blkp >> (fs->fs_bshift - DEV_BSHIFT);
6335 
6336                 /*
6337                  * scan dblk[] entries; contig fs space is found when:
6338                  * ((current blkno + frags per block) == next blkno)
6339                  */
6340                 n = 0;
6341                 while (n < ncontig && dblk < endblk) {
6342                         if ((*dblk + fs->fs_frag) == *nextblk)
6343                                 n++;
6344                         else
6345                                 n = 0;
6346                         dblk++;
6347                         nextblk++;
6348                 }
6349 
6350                 /*
6351                  * index is where size bytes of contig space begins;
6352                  * conversion from index to the file's DEV_BSIZE lbn
6353                  * is equivalent to:  (index * fs_bsize) / DEV_BSIZE
6354                  */
6355                 if (n == ncontig) {
6356                         i = (dblk - dump_info->dblk) - ncontig;
6357                         *blkp = i << (fs->fs_bshift - DEV_BSHIFT);
6358                 } else
6359                         return (EFAULT);
6360         }
6361         return (0);
6362 }
6363 
6364 /*
6365  * Recursive helper function for ufs_dumpctl().  It follows the indirect file
6366  * system blocks until it reaches the disk block addresses, which are
6367  * then stored into the given buffer, storeblk.
6368  */
6369 static daddr32_t *
6370 save_dblks(struct inode *ip, struct ufsvfs *ufsvfsp,  daddr32_t *storeblk,
6371     daddr32_t *dblk, int level, int entries)
6372 {
6373         struct fs       *fs = ufsvfsp->vfs_fs;
6374         struct buf      *bp;
6375         int             i;
6376 
6377         if (level == 0) {
6378                 for (i = 0; i < NINDIR(fs); i++) {
6379                         if (storeblk - dump_info->dblk >= entries)
6380                                 break;
6381                         *storeblk++ = dblk[i];
6382                 }
6383                 return (storeblk);
6384         }
6385         for (i = 0; i < NINDIR(fs); i++) {
6386                 if (storeblk - dump_info->dblk >= entries)
6387                         break;
6388                 bp = UFS_BREAD(ufsvfsp,
6389                     ip->i_dev, fsbtodb(fs, dblk[i]), fs->fs_bsize);
6390                 if (bp->b_flags & B_ERROR) {
6391                         brelse(bp);
6392                         return (NULL);
6393                 }
6394                 storeblk = save_dblks(ip, ufsvfsp, storeblk, bp->b_un.b_daddr,
6395                     level - 1, entries);
6396                 brelse(bp);
6397 
6398                 if (storeblk == NULL)
6399                         return (NULL);
6400         }
6401         return (storeblk);
6402 }
6403 
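     /*
      * Retrieve the ACL for this inode via ufs_acl_get(), holding
      * i_contents as a reader under the lockfs protocol.
      */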
6404 /* ARGSUSED */
6405 static int
6406 ufs_getsecattr(struct vnode *vp, vsecattr_t *vsap, int flag,
6407         struct cred *cr, caller_context_t *ct)
6408 {
6409         struct inode    *ip = VTOI(vp);
6410         struct ulockfs  *ulp;
6411         struct ufsvfs   *ufsvfsp = ip->i_ufsvfs;
6412         ulong_t         vsa_mask = vsap->vsa_mask;
6413         int             err = EINVAL;
6414 
6415         vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
6416 
6417         /*
6418          * Only grab locks if needed: none are required just to check
6419          * vsa_mask, or when the mask contains no ACL flags.
6420          */
6421         if (vsa_mask != 0) {
6422                 if (err = ufs_lockfs_begin(ufsvfsp, &ulp,
6423                     ULOCKFS_GETATTR_MASK))
6424                         return (err);
6425 
6426                 rw_enter(&ip->i_contents, RW_READER);
6427                 err = ufs_acl_get(ip, vsap, flag, cr);
6428                 rw_exit(&ip->i_contents);
6429 
6430                 if (ulp)
6431                         ufs_lockfs_end(ulp);
6432         }
6433         return (err);
6434 }
6435 
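     /*
      * Set the ACL on this inode via ufs_acl_set().  Called with
      * i_rwlock held; the lock juggling below follows the lock-ordering
      * rules for directories described in the comments.
      */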
6436 /* ARGSUSED */
6437 static int
6438 ufs_setsecattr(struct vnode *vp, vsecattr_t *vsap, int flag, struct cred *cr,
6439         caller_context_t *ct)
6440 {
6441         struct inode    *ip = VTOI(vp);
6442         struct ulockfs  *ulp = NULL;
6443         struct ufsvfs   *ufsvfsp = VTOI(vp)->i_ufsvfs;
6444         ulong_t         vsa_mask = vsap->vsa_mask;
6445         int             err;
6446         int             haverwlock = 1;
6447         int             trans_size;
6448         int             donetrans = 0;
6449         int             retry = 1;
6450 
6451         ASSERT(RW_LOCK_HELD(&ip->i_rwlock));
6452 
6453         /* Abort now if the request is either empty or invalid. */
6454         vsa_mask &= (VSA_ACL | VSA_ACLCNT | VSA_DFACL | VSA_DFACLCNT);
6455         if ((vsa_mask == 0) ||
6456             ((vsap->vsa_aclentp == NULL) &&
6457             (vsap->vsa_dfaclentp == NULL))) {
6458                 err = EINVAL;
6459                 goto out;
6460         }
6461 
6462         /*
6463          * Following convention, if this is a directory then we acquire the
6464          * inode's i_rwlock after starting a UFS logging transaction;
6465          * otherwise, we acquire it beforehand. Since we were called (and
6466          * must therefore return) with the lock held, we will have to drop it,
6467          * and later reacquire it, if operating on a directory.
6468          */
6469         if (vp->v_type == VDIR) {
6470                 rw_exit(&ip->i_rwlock);
6471                 haverwlock = 0;
6472         } else {
6473                 /* Upgrade the lock if required. */
6474                 if (!rw_write_held(&ip->i_rwlock)) {
6475                         rw_exit(&ip->i_rwlock);
6476                         rw_enter(&ip->i_rwlock, RW_WRITER);
6477                 }
6478         }
6479 
6480 again:
6481         ASSERT(!(vp->v_type == VDIR && haverwlock));
6482         if (err = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_SETATTR_MASK)) {
6483                 ulp = NULL;
6484                 retry = 0;
6485                 goto out;
6486         }
6487 
6488         /*
6489          * Check that the file system supports this operation. Note that
6490          * ufs_lockfs_begin() will have checked that the file system had
6491          * not been forcibly unmounted.
6492          */
6493         if (ufsvfsp->vfs_fs->fs_ronly) {
6494                 err = EROFS;
6495                 goto out;
6496         }
6497         if (ufsvfsp->vfs_nosetsec) {
6498                 err = ENOSYS;
6499                 goto out;
6500         }
6501 
6502         if (ulp) {
6503                 TRANS_BEGIN_ASYNC(ufsvfsp, TOP_SETSECATTR,
6504                     trans_size = TOP_SETSECATTR_SIZE(VTOI(vp)));
6505                 donetrans = 1;
6506         }
6507 
6508         if (vp->v_type == VDIR) {
6509                 rw_enter(&ip->i_rwlock, RW_WRITER);
6510                 haverwlock = 1;
6511         }
6512 
6513         ASSERT(haverwlock);
6514 
6515         /* Do the actual work. */
6516         rw_enter(&ip->i_contents, RW_WRITER);
6517         /*
6518          * Suppress out of inodes messages if we will retry.
6519          */
6520         if (retry)
6521                 ip->i_flag |= IQUIET;
6522         err = ufs_acl_set(ip, vsap, flag, cr);
6523         ip->i_flag &= ~IQUIET;
6524         rw_exit(&ip->i_contents);
6525 
6526 out:
6527         if (ulp) {
6528                 if (donetrans) {
6529                         /*
6530                          * top_end_async() can eventually call
6531                          * top_end_sync(), which can block. We must
6532                          * therefore observe the lock-ordering protocol
6533                          * here as well.
6534                          */
6535                         if (vp->v_type == VDIR) {
6536                                 rw_exit(&ip->i_rwlock);
6537                                 haverwlock = 0;
6538                         }
6539                         TRANS_END_ASYNC(ufsvfsp, TOP_SETSECATTR, trans_size);
6540                 }
6541                 ufs_lockfs_end(ulp);
6542         }
6543         /*
6544          * If no inodes available, try scaring a logically-
6545          * free one out of the delete queue to someplace
6546          * that we can find it.
6547          */
6548         if ((err == ENOSPC) && retry && TRANS_ISTRANS(ufsvfsp)) {
6549                 ufs_delete_drain_wait(ufsvfsp, 1);
6550                 retry = 0;
6551                 if (vp->v_type == VDIR && haverwlock) {
6552                         rw_exit(&ip->i_rwlock);
6553                         haverwlock = 0;
6554                 }
6555                 goto again;
6556         }
6557         /*
6558          * If we need to reacquire the lock then it is safe to do so
6559          * as a reader. This is because ufs_rwunlock(), which will be
6560          * called by our caller after we return, does not differentiate
6561          * between shared and exclusive locks.
6562          */
6563         if (!haverwlock) {
6564                 ASSERT(vp->v_type == VDIR);
6565                 rw_enter(&ip->i_rwlock, RW_READER);
6566         }
6567 
6568         return (err);
6569 }
6570 
6571 /*
6572  * Locate the vnode to be used for an event notification. As this will
6573  * be called prior to the name space change, perform basic verification
6574  * that the change will be allowed.
6575  */
6576 
6577 static int
6578 ufs_eventlookup(struct vnode *dvp, char *nm, struct cred *cr,
6579     struct vnode **vpp)
6580 {
6581         int     namlen;
6582         int     error;
6583         struct vnode    *vp;
6584         struct inode    *ip;
6585         struct inode    *xip;
6586         struct ufsvfs   *ufsvfsp;
6587         struct ulockfs  *ulp;
6588 
6589         ip = VTOI(dvp);
6590         *vpp = NULL;
6591 
6592         if ((namlen = strlen(nm)) == 0)
6593                 return (EINVAL);
6594 
6595         if (nm[0] == '.') {
6596                 if (namlen == 1)
6597                         return (EINVAL);
6598                 else if ((namlen == 2) && nm[1] == '.') {
6599                         return (EEXIST);
6600                 }
6601         }
6602 
6603         /*
6604          * Check accessibility and write access of parent directory as we
6605          * only want to post the event if we're able to make a change.
6606          */
6607         if (error = ufs_diraccess(ip, IEXEC|IWRITE, cr))
6608                 return (error);
6609 
6610         if (vp = dnlc_lookup(dvp, nm)) {
6611                 if (vp == DNLC_NO_VNODE) {
6612                         VN_RELE(vp);
6613                         return (ENOENT);
6614                 }
6615 
6616                 *vpp = vp;
6617                 return (0);
6618         }
6619 
6620         /*
6621          * Keep the idle queue from getting too long by idling two
6622          * inodes before attempting to allocate another.
6623          * This operation must be performed before entering lockfs
6624          * or a transaction.
6625          */
6626         if (ufs_idle_q.uq_ne > ufs_idle_q.uq_hiwat)
6627                 if ((curthread->t_flag & T_DONTBLOCK) == 0) {
6628                         ins.in_lidles.value.ul += ufs_lookup_idle_count;
6629                         ufs_idle_some(ufs_lookup_idle_count);
6630                 }
6631 
6632         ufsvfsp = ip->i_ufsvfs;
6633 
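             /*
              * Look the name up in the directory itself, retrying if the
              * lookup reports EAGAIN.
              */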
6634 retry_lookup:
6635         if (error = ufs_lockfs_begin(ufsvfsp, &ulp, ULOCKFS_LOOKUP_MASK))
6636                 return (error);
6637 
6638         if ((error = ufs_dirlook(ip, nm, &xip, cr, 1, 1)) == 0) {
6639                 vp = ITOV(xip);
6640                 *vpp = vp;
6641         }
6642 
6643         if (ulp) {
6644                 ufs_lockfs_end(ulp);
6645         }
6646 
6647         if (error == EAGAIN)
6648                 goto retry_lookup;
6649 
6650         return (error);
6651 }