327 return (EFAULT);
328
329 if (dio > 1)
330 return (EINVAL);
331
332 /* file system has been forcibly unmounted */
333 if (VTOI(vp)->i_ufsvfs == NULL)
334 return (EIO);
335
336 ip = VTOI(vp);
337 ufsvfsp = ip->i_ufsvfs;
338 ulp = &ufsvfsp->vfs_ulockfs;
339
340 /* logging file system; dio ignored */
341 if (TRANS_ISTRANS(ufsvfsp))
342 return (error);
343
344 /* hold the mutex to prevent race with a lockfs request */
345 vfs_lock_wait(vp->v_vfsp);
346 mutex_enter(&ulp->ul_lock);
347 atomic_add_long(&ufs_quiesce_pend, 1);
348
349 if (ULOCKFS_IS_HLOCK(ulp)) {
350 error = EIO;
351 goto out;
352 }
353
354 if (ULOCKFS_IS_ELOCK(ulp)) {
355 error = EBUSY;
356 goto out;
357 }
358 /* wait for outstanding accesses to finish */
359 if (error = ufs_quiesce(ulp))
360 goto out;
361
362 /* flush w/invalidate */
363 if (error = ufs_flush(vp->v_vfsp))
364 goto out;
365
366 /*
367 * update dio
371
372 /*
373 * enable/disable clean flag processing
374 */
375 fs = ip->i_fs;
376 if (fs->fs_ronly == 0 &&
377 fs->fs_clean != FSBAD &&
378 fs->fs_clean != FSLOG) {
379 if (dio)
380 fs->fs_clean = FSSUSPEND;
381 else
382 fs->fs_clean = FSACTIVE;
383 ufs_sbwrite(ufsvfsp);
384 mutex_exit(&ufsvfsp->vfs_lock);
385 } else
386 mutex_exit(&ufsvfsp->vfs_lock);
387 out:
388 /*
389 * we need this broadcast because of the ufs_quiesce call above
390 */
391 atomic_add_long(&ufs_quiesce_pend, -1);
392 cv_broadcast(&ulp->ul_cv);
393 mutex_exit(&ulp->ul_lock);
394 vfs_unlock(vp->v_vfsp);
395 return (error);
396 }
397
398 /*
399 * ufs_fioffs - ioctl handler for flushing file system
400 */
401 /* ARGSUSED */
402 int
403 ufs_fioffs(
404 struct vnode *vp,
405 char *vap, /* must be NULL - reserved */
406 struct cred *cr) /* credentials from ufs_ioctl */
407 {
/*
 * ufs_fioffs: ioctl handler that quiesces the file system and then
 * synchronously flushes all dirty data and metadata via ufs_flush().
 * Returns 0 on success; EIO if the fs is hard-locked or has been
 * forcibly unmounted, EBUSY if error-locked, or the error from
 * ufs_quiesce()/ufs_flush().
 */
408 int error;
409 struct ufsvfs *ufsvfsp;
410 struct ulockfs *ulp;
411
412 /* file system has been forcibly unmounted */
413 ufsvfsp = VTOI(vp)->i_ufsvfs;
414 if (ufsvfsp == NULL)
415 return (EIO);
416
417 ulp = &ufsvfsp->vfs_ulockfs;
418
419 /*
420 * suspend the delete thread
421 * this must be done outside the lockfs locking protocol
422 */
423 vfs_lock_wait(vp->v_vfsp);
424 ufs_thread_suspend(&ufsvfsp->vfs_delete);
425
426 /* hold the mutex to prevent race with a lockfs request */
427 mutex_enter(&ulp->ul_lock);
/* advertise a pending quiesce; paired with the decrement at "out:" */
428 atomic_add_long(&ufs_quiesce_pend, 1);
429
430 if (ULOCKFS_IS_HLOCK(ulp)) {
431 error = EIO;
432 goto out;
433 }
434 if (ULOCKFS_IS_ELOCK(ulp)) {
435 error = EBUSY;
436 goto out;
437 }
438 /* wait for outstanding accesses to finish */
/* assignment-in-condition is the established idiom in this file */
439 if (error = ufs_quiesce(ulp))
440 goto out;
441
442 /*
443 * If logging, and the logmap was marked as not rollable,
444 * make it rollable now, and start the trans_roll thread and
445 * the reclaim thread. The log at this point is safe to write to.
446 */
447 if (ufsvfsp->vfs_log) {
448 ml_unit_t *ul = ufsvfsp->vfs_log;
/*
 * NOTE(review): original source lines 449-468 are missing from this
 * excerpt.  The elided code evidently performed the logmap-roll and a
 * superblock fs_reclaim update (whose failure the cmn_err below
 * reports), and looked up the mount point refstr `mntpt` and error
 * `err` used below.  The run of closing braces after refstr_rele()
 * matches conditionals opened in that gap.  Do not modify this region
 * without the complete source file.
 */
469 cmn_err(CE_NOTE,
470 "Filesystem Flush "
471 "Failed to update "
472 "Reclaim Status for "
473 " %s, Write failed to "
474 "update superblock, "
475 "error %d",
476 refstr_value(mntpt),
477 err);
478 refstr_rele(mntpt);
479 }
480 }
481 }
482 }
483 }
484
485 /* synchronously flush dirty data and metadata */
486 error = ufs_flush(vp->v_vfsp);
487
488 out:
/* drop the quiesce hold and wake any lockfs waiters, then unwind locks */
489 atomic_add_long(&ufs_quiesce_pend, -1);
490 cv_broadcast(&ulp->ul_cv);
491 mutex_exit(&ulp->ul_lock);
492 vfs_unlock(vp->v_vfsp);
493
494 /*
495 * allow the delete thread to continue
496 */
497 ufs_thread_continue(&ufsvfsp->vfs_delete);
498 return (error);
499 }
500
501 /*
502 * ufs_fioisbusy
503 * Get number of references on this vnode.
504 * Contract-private interface for Legato's NetWorker product.
505 */
506 /* ARGSUSED */
507 int
508 ufs_fioisbusy(struct vnode *vp, int *isbusy, struct cred *cr)
509 {
|
327 return (EFAULT);
328
329 if (dio > 1)
330 return (EINVAL);
331
332 /* file system has been forcibly unmounted */
333 if (VTOI(vp)->i_ufsvfs == NULL)
334 return (EIO);
335
336 ip = VTOI(vp);
337 ufsvfsp = ip->i_ufsvfs;
338 ulp = &ufsvfsp->vfs_ulockfs;
339
340 /* logging file system; dio ignored */
341 if (TRANS_ISTRANS(ufsvfsp))
342 return (error);
343
344 /* hold the mutex to prevent race with a lockfs request */
345 vfs_lock_wait(vp->v_vfsp);
346 mutex_enter(&ulp->ul_lock);
347 atomic_inc_ulong(&ufs_quiesce_pend);
348
349 if (ULOCKFS_IS_HLOCK(ulp)) {
350 error = EIO;
351 goto out;
352 }
353
354 if (ULOCKFS_IS_ELOCK(ulp)) {
355 error = EBUSY;
356 goto out;
357 }
358 /* wait for outstanding accesses to finish */
359 if (error = ufs_quiesce(ulp))
360 goto out;
361
362 /* flush w/invalidate */
363 if (error = ufs_flush(vp->v_vfsp))
364 goto out;
365
366 /*
367 * update dio
371
372 /*
373 * enable/disable clean flag processing
374 */
375 fs = ip->i_fs;
376 if (fs->fs_ronly == 0 &&
377 fs->fs_clean != FSBAD &&
378 fs->fs_clean != FSLOG) {
379 if (dio)
380 fs->fs_clean = FSSUSPEND;
381 else
382 fs->fs_clean = FSACTIVE;
383 ufs_sbwrite(ufsvfsp);
384 mutex_exit(&ufsvfsp->vfs_lock);
385 } else
386 mutex_exit(&ufsvfsp->vfs_lock);
387 out:
388 /*
389 * we need this broadcast because of the ufs_quiesce call above
390 */
391 atomic_dec_ulong(&ufs_quiesce_pend);
392 cv_broadcast(&ulp->ul_cv);
393 mutex_exit(&ulp->ul_lock);
394 vfs_unlock(vp->v_vfsp);
395 return (error);
396 }
397
398 /*
399 * ufs_fioffs - ioctl handler for flushing file system
400 */
401 /* ARGSUSED */
402 int
403 ufs_fioffs(
404 struct vnode *vp,
405 char *vap, /* must be NULL - reserved */
406 struct cred *cr) /* credentials from ufs_ioctl */
407 {
/*
 * ufs_fioffs: ioctl handler that quiesces the file system and then
 * synchronously flushes all dirty data and metadata via ufs_flush().
 * Returns 0 on success; EIO if the fs is hard-locked or has been
 * forcibly unmounted, EBUSY if error-locked, or the error from
 * ufs_quiesce()/ufs_flush().
 */
408 int error;
409 struct ufsvfs *ufsvfsp;
410 struct ulockfs *ulp;
411
412 /* file system has been forcibly unmounted */
413 ufsvfsp = VTOI(vp)->i_ufsvfs;
414 if (ufsvfsp == NULL)
415 return (EIO);
416
417 ulp = &ufsvfsp->vfs_ulockfs;
418
419 /*
420 * suspend the delete thread
421 * this must be done outside the lockfs locking protocol
422 */
423 vfs_lock_wait(vp->v_vfsp);
424 ufs_thread_suspend(&ufsvfsp->vfs_delete);
425
426 /* hold the mutex to prevent race with a lockfs request */
427 mutex_enter(&ulp->ul_lock);
/* advertise a pending quiesce; paired with atomic_dec_ulong at "out:" */
428 atomic_inc_ulong(&ufs_quiesce_pend);
429
430 if (ULOCKFS_IS_HLOCK(ulp)) {
431 error = EIO;
432 goto out;
433 }
434 if (ULOCKFS_IS_ELOCK(ulp)) {
435 error = EBUSY;
436 goto out;
437 }
438 /* wait for outstanding accesses to finish */
/* assignment-in-condition is the established idiom in this file */
439 if (error = ufs_quiesce(ulp))
440 goto out;
441
442 /*
443 * If logging, and the logmap was marked as not rollable,
444 * make it rollable now, and start the trans_roll thread and
445 * the reclaim thread. The log at this point is safe to write to.
446 */
447 if (ufsvfsp->vfs_log) {
448 ml_unit_t *ul = ufsvfsp->vfs_log;
/*
 * NOTE(review): original source lines 449-468 are missing from this
 * excerpt.  The elided code evidently performed the logmap-roll and a
 * superblock fs_reclaim update (whose failure the cmn_err below
 * reports), and looked up the mount point refstr `mntpt` and error
 * `err` used below.  The run of closing braces after refstr_rele()
 * matches conditionals opened in that gap.  Do not modify this region
 * without the complete source file.
 */
469 cmn_err(CE_NOTE,
470 "Filesystem Flush "
471 "Failed to update "
472 "Reclaim Status for "
473 " %s, Write failed to "
474 "update superblock, "
475 "error %d",
476 refstr_value(mntpt),
477 err);
478 refstr_rele(mntpt);
479 }
480 }
481 }
482 }
483 }
484
485 /* synchronously flush dirty data and metadata */
486 error = ufs_flush(vp->v_vfsp);
487
488 out:
/* drop the quiesce hold and wake any lockfs waiters, then unwind locks */
489 atomic_dec_ulong(&ufs_quiesce_pend);
490 cv_broadcast(&ulp->ul_cv);
491 mutex_exit(&ulp->ul_lock);
492 vfs_unlock(vp->v_vfsp);
493
494 /*
495 * allow the delete thread to continue
496 */
497 ufs_thread_continue(&ufsvfsp->vfs_delete);
498 return (error);
499 }
500
501 /*
502 * ufs_fioisbusy
503 * Get number of references on this vnode.
504 * Contract-private interface for Legato's NetWorker product.
505 */
506 /* ARGSUSED */
507 int
508 ufs_fioisbusy(struct vnode *vp, int *isbusy, struct cred *cr)
509 {
|