597 * can happen after dropping contents.
598 */
/*
 * Upgrade to an exclusive hold: drop i_contents and re-take it as
 * writer (rw locks here have no in-place upgrade; the comment above
 * notes what can happen in the window).
 */
599 rw_exit(&ip->i_contents);
600 rw_enter(&ip->i_contents, RW_WRITER);
601 }
/*
 * Flush and invalidate every cached page for this vnode before going
 * direct (offset/len of 0 — presumably "whole file"; see the
 * VOP_PUTPAGE contract).
 */
602 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
603 B_INVAL, cr, NULL);
/*
 * Pages survived the invalidation (e.g. still mapped — TODO confirm):
 * direct I/O cannot proceed safely, take the error path.
 */
604 if (vn_has_cached_data(vp))
605 goto errout;
/* Shared (non-exclusive) callers keep only a reader hold from here on. */
606 if (!exclusive)
607 rw_downgrade(&ip->i_contents);
608 ufs_directio_kstats.nflushes.value.ui64++;
609 }
610
611 /*
612 * Direct Writes
613 */
614
/*
 * Account a concurrent shared (non-exclusive) direct writer.
 * Use the modern atomic_inc_32_nv() instead of the legacy
 * atomic_add_32_nv(&x, 1) spelling, matching the atomic_dec_32()
 * release used elsewhere in this file.
 * NOTE(review): the ufs_maxcur_writes high-water update below is not
 * atomic; presumably acceptable since it is only a statistic.
 */
615 if (!exclusive) {
616 ufs_shared_writes++;
617 ncur = atomic_inc_32_nv(&ufs_cur_writes);
618 if (ncur > ufs_maxcur_writes)
619 ufs_maxcur_writes = ncur;
620 }
621
622 /*
623 * proc and as are for VM operations in directio_start()
624 */
/*
 * User-space buffers: page-lock operations target the calling
 * process's address space.  Kernel buffers: use the kernel address
 * space (kas) and no proc.
 */
625 if (uio->uio_segflg == UIO_USERSPACE) {
626 procp = ttoproc(curthread);
627 as = procp->p_as;
628 } else {
629 procp = NULL;
630 as = &kas;
631 }
/* Assume success up front; error/byte counters accumulate as we go. */
632 *statusp = DIRECTIO_SUCCESS;
633 error = 0;
634 newerror = 0;
635 resid = uio->uio_resid;
636 bytes_written = 0;
637 ufs_directio_kstats.logical_writes.value.ui64++;
/* Advance the iovec and uio cursors past the nbytes just issued. */
710 iov->iov_len -= nbytes;
711 iov->iov_base += nbytes;
712 uio->uio_loffset += nbytes;
713 resid -= nbytes;
714 pglck_len -= nbytes;
715 }
716
717 /*
718 * Wait for outstanding requests
719 */
/*
 * Blocks until the queued direct I/O requests complete; presumably
 * accumulates into bytes_written and returns the first error —
 * confirm against directio_wait().
 */
720 newerror = directio_wait(tail, &bytes_written);
721
722 /*
723 * Release VM resources
724 */
/* Unlock the pages locked for this chunk (matching as_pagelock not in view). */
725 as_pageunlock(as, pplist, pglck_base, pglck_size, S_READ);
726
727 }
728
729 if (!exclusive) {
/*
 * Drop the concurrent shared-writer count taken when this write
 * started.  atomic_dec_32() is the modern equivalent of the legacy
 * atomic_add_32(&x, -1) and matches the increment side.
 */
730 atomic_dec_32(&ufs_cur_writes);
731 /*
732 * If this write was done shared, readers may
733 * have pulled in unmodified pages. Get rid of
734 * these potentially stale pages.
735 */
736 if (vn_has_cached_data(vp)) {
/* Invalidation needs the writer lock; downgrade back afterwards. */
737 rw_exit(&ip->i_contents);
738 rw_enter(&ip->i_contents, RW_WRITER);
739 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
740 B_INVAL, cr, NULL);
741 ufs_directio_kstats.nflushes.value.ui64++;
742 rw_downgrade(&ip->i_contents);
743 }
744 }
745
746 /*
747 * If error, adjust resid to begin at the first
748 * un-writable byte.
749 */
750 if (error == 0)
|
597 * can happen after dropping contents.
598 */
/*
 * Upgrade to an exclusive hold: drop i_contents and re-take it as
 * writer (rw locks here have no in-place upgrade; the comment above
 * notes what can happen in the window).
 */
599 rw_exit(&ip->i_contents);
600 rw_enter(&ip->i_contents, RW_WRITER);
601 }
/*
 * Flush and invalidate every cached page for this vnode before going
 * direct (offset/len of 0 — presumably "whole file"; see the
 * VOP_PUTPAGE contract).
 */
602 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
603 B_INVAL, cr, NULL);
/*
 * Pages survived the invalidation (e.g. still mapped — TODO confirm):
 * direct I/O cannot proceed safely, take the error path.
 */
604 if (vn_has_cached_data(vp))
605 goto errout;
/* Shared (non-exclusive) callers keep only a reader hold from here on. */
606 if (!exclusive)
607 rw_downgrade(&ip->i_contents);
608 ufs_directio_kstats.nflushes.value.ui64++;
609 }
610
611 /*
612 * Direct Writes
613 */
614
/*
 * Account a concurrent shared (non-exclusive) direct writer; released
 * with atomic_dec_32() when the write completes.
 * NOTE(review): the ufs_maxcur_writes high-water update is not atomic;
 * presumably acceptable since it is only a statistic.
 */
615 if (!exclusive) {
616 ufs_shared_writes++;
617 ncur = atomic_inc_32_nv(&ufs_cur_writes);
618 if (ncur > ufs_maxcur_writes)
619 ufs_maxcur_writes = ncur;
620 }
621
622 /*
623 * proc and as are for VM operations in directio_start()
624 */
/*
 * User-space buffers: page-lock operations target the calling
 * process's address space.  Kernel buffers: use the kernel address
 * space (kas) and no proc.
 */
625 if (uio->uio_segflg == UIO_USERSPACE) {
626 procp = ttoproc(curthread);
627 as = procp->p_as;
628 } else {
629 procp = NULL;
630 as = &kas;
631 }
/* Assume success up front; error/byte counters accumulate as we go. */
632 *statusp = DIRECTIO_SUCCESS;
633 error = 0;
634 newerror = 0;
635 resid = uio->uio_resid;
636 bytes_written = 0;
637 ufs_directio_kstats.logical_writes.value.ui64++;
/* Advance the iovec and uio cursors past the nbytes just issued. */
710 iov->iov_len -= nbytes;
711 iov->iov_base += nbytes;
712 uio->uio_loffset += nbytes;
713 resid -= nbytes;
714 pglck_len -= nbytes;
715 }
716
717 /*
718 * Wait for outstanding requests
719 */
/*
 * Blocks until the queued direct I/O requests complete; presumably
 * accumulates into bytes_written and returns the first error —
 * confirm against directio_wait().
 */
720 newerror = directio_wait(tail, &bytes_written);
721
722 /*
723 * Release VM resources
724 */
/* Unlock the pages locked for this chunk (matching as_pagelock not in view). */
725 as_pageunlock(as, pplist, pglck_base, pglck_size, S_READ);
726
727 }
728
729 if (!exclusive) {
/* Drop the concurrent shared-writer count taken when the write started. */
730 atomic_dec_32(&ufs_cur_writes);
731 /*
732 * If this write was done shared, readers may
733 * have pulled in unmodified pages. Get rid of
734 * these potentially stale pages.
735 */
736 if (vn_has_cached_data(vp)) {
/* Invalidation needs the writer lock; downgrade back afterwards. */
737 rw_exit(&ip->i_contents);
738 rw_enter(&ip->i_contents, RW_WRITER);
739 (void) VOP_PUTPAGE(vp, (offset_t)0, (size_t)0,
740 B_INVAL, cr, NULL);
741 ufs_directio_kstats.nflushes.value.ui64++;
742 rw_downgrade(&ip->i_contents);
743 }
744 }
745
746 /*
747 * If error, adjust resid to begin at the first
748 * un-writable byte.
749 */
750 if (error == 0)
|