5045 use atomic_{inc,dec}_* instead of atomic_add_*

--- old/usr/src/uts/common/fs/ufs/ufs_vnops.c
+++ new/usr/src/uts/common/fs/ufs/ufs_vnops.c
(5968 lines elided)
5969 5969          if (vmpss || pp == NULL) {
5970 5970                  ulp = &ufsvfsp->vfs_ulockfs;
5971 5971                  if (pp == NULL)
5972 5972                          mutex_enter(&ulp->ul_lock);
5973 5973                  if (ulp->ul_fs_lock & ULOCKFS_GETREAD_MASK) {
5974 5974                          if (pp == NULL) {
5975 5975                                  mutex_exit(&ulp->ul_lock);
5976 5976                          }
5977 5977                          return (vmpss ? EIO : EINVAL);
5978 5978                  }
5979      -                atomic_add_long(&ulp->ul_vnops_cnt, 1);
     5979 +                atomic_inc_ulong(&ulp->ul_vnops_cnt);
5980 5980                  if (pp == NULL)
5981 5981                          mutex_exit(&ulp->ul_lock);
5982 5982                  if (ufs_quiesce_pend) {
5983      -                        if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     5983 +                        if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
5984 5984                                  cv_broadcast(&ulp->ul_cv);
5985 5985                          return (vmpss ? EIO : EINVAL);
5986 5986                  }
5987 5987          }
5988 5988  
5989 5989          if (dolock) {
5990 5990                  /*
5991 5991                   * segvn may call VOP_PAGEIO() instead of VOP_GETPAGE() to
5992 5992                   * handle a fault against a segment that maps vnode pages with
5993 5993                   * large mappings.  Segvn creates pages and holds them locked
5994 5994                   * SE_EXCL during VOP_PAGEIO() call. In this case we have to
5995 5995                   * use rw_tryenter() to avoid a potential deadlock since in
5996 5996                   * lock order i_contents needs to be taken first.
5997 5997                   * Segvn will retry via VOP_GETPAGE() if VOP_PAGEIO() fails.
5998 5998                   */
5999 5999                  if (!vmpss) {
6000 6000                          rw_enter(&ip->i_contents, RW_READER);
6001 6001                  } else if (!rw_tryenter(&ip->i_contents, RW_READER)) {
6002      -                        if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     6002 +                        if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6003 6003                                  cv_broadcast(&ulp->ul_cv);
6004 6004                          return (EDEADLK);
6005 6005                  }
6006 6006          }
6007 6007  
6008 6008          /*
6009 6009           * Return an error to segvn because the pagefault request is beyond
6010 6010           * PAGESIZE rounded EOF.
6011 6011           */
6012 6012          if (vmpss && btopr(io_off + io_len) > btopr(ip->i_size)) {
6013 6013                  if (dolock)
6014 6014                          rw_exit(&ip->i_contents);
6015      -                if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     6015 +                if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6016 6016                          cv_broadcast(&ulp->ul_cv);
6017 6017                  return (EFAULT);
6018 6018          }
6019 6019  
6020 6020          if (pp == NULL) {
6021 6021                  if (bmap_has_holes(ip)) {
6022 6022                          err = ENOSYS;
6023 6023                  } else {
6024 6024                          err = EINVAL;
6025 6025                  }
6026 6026                  if (dolock)
6027 6027                          rw_exit(&ip->i_contents);
6028      -                if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     6028 +                if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6029 6029                          cv_broadcast(&ulp->ul_cv);
6030 6030                  return (err);
6031 6031          }
6032 6032  
6033 6033          /*
6034 6034           * Break the io request into chunks, one for each contiguous
6035 6035           * stretch of disk blocks in the target file.
6036 6036           */
6037 6037          while (done_len < io_len) {
6038 6038                  ASSERT(cpp);
(86 lines elided)
6125 6125          if (vmpss && !(ip->i_flag & IACC) && !ULOCKFS_IS_NOIACC(ulp) &&
6126 6126              ufsvfsp->vfs_fs->fs_ronly == 0 && !ufsvfsp->vfs_noatime) {
6127 6127                  mutex_enter(&ip->i_tlock);
6128 6128                  ip->i_flag |= IACC;
6129 6129                  ITIMES_NOLOCK(ip);
6130 6130                  mutex_exit(&ip->i_tlock);
6131 6131          }
6132 6132  
6133 6133          if (dolock)
6134 6134                  rw_exit(&ip->i_contents);
6135      -        if (vmpss && !atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     6135 +        if (vmpss && !atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
6136 6136                  cv_broadcast(&ulp->ul_cv);
6137 6137          return (err);
6138 6138  }
6139 6139  
6140 6140  /*
6141 6141   * Called when the kernel is in a frozen state to dump data
6142 6142   * directly to the device. It uses a private dump data structure,
6143 6143   * set up by dump_ctl, to locate the correct disk block to which to dump.
6144 6144   */
6145 6145  /*ARGSUSED*/
(506 lines elided)
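
The pattern the patch touches is UFS's quiesce accounting: ul_vnops_cnt counts in-flight vnode operations, and whichever decrement drops it to zero must cv_broadcast() so a pending quiesce (ufs_quiesce_pend) can proceed. The _nv ("new value") variants are used precisely because the zero test needs the post-decrement value, while the bare increment needs no return value at all. Below is a minimal standalone model of that pattern, using C11 atomics and POSIX threads in place of the kernel's atomic_inc_ulong()/atomic_dec_ulong_nv(), mutex_enter() and cv_broadcast(); the names vnops_cnt, quiesce_lock, op_enter, op_exit and quiesce_wait are illustrative only, not from the source.

#include <pthread.h>
#include <stdatomic.h>

static atomic_ulong vnops_cnt;		/* in-flight vnode ops */
static pthread_mutex_t quiesce_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t quiesce_cv = PTHREAD_COND_INITIALIZER;

/* Models atomic_inc_ulong(): the new value is not needed here. */
static void
op_enter(void)
{
	atomic_fetch_add(&vnops_cnt, 1);
}

/*
 * Models atomic_dec_ulong_nv(): atomic_fetch_sub() returns the old
 * value, so old == 1 means the new value is zero and the last
 * in-flight operation just drained; wake any waiting quiescer.
 */
static void
op_exit(void)
{
	if (atomic_fetch_sub(&vnops_cnt, 1) == 1) {
		pthread_mutex_lock(&quiesce_lock);
		pthread_cond_broadcast(&quiesce_cv);
		pthread_mutex_unlock(&quiesce_lock);
	}
}

/* The quiescer's side: block until every in-flight op has drained. */
static void
quiesce_wait(void)
{
	pthread_mutex_lock(&quiesce_lock);
	while (atomic_load(&vnops_cnt) != 0)
		pthread_cond_wait(&quiesce_cv, &quiesce_lock);
	pthread_mutex_unlock(&quiesce_lock);
}

Broadcasting while holding quiesce_lock keeps this model free of lost wakeups against the recheck loop in quiesce_wait(); it is a sketch of the counting idiom, not of the full ul_lock protocol the kernel code relies on.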
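The rw_tryenter() comment in the dolock block above is also worth a sketch. When segvn enters through VOP_PAGEIO() (the vmpss case), it already holds the target pages locked SE_EXCL, and since lock order puts i_contents first, blocking on the reader lock here could deadlock; failing fast with EDEADLK lets segvn retry through VOP_GETPAGE() instead. Modeled here with POSIX rwlocks; enter_contents and the vmpss flag are illustrative stand-ins, not the kernel API.

#include <errno.h>
#include <pthread.h>

static pthread_rwlock_t i_contents = PTHREAD_RWLOCK_INITIALIZER;

static int
enter_contents(int vmpss)
{
	if (!vmpss) {
		/* Normal path: safe to block for the reader lock. */
		pthread_rwlock_rdlock(&i_contents);
		return (0);
	}
	/*
	 * Caller already holds resources that a writer of i_contents
	 * may be waiting on, so only try the lock; on failure the
	 * caller backs off and retries via the blocking path.
	 */
	if (pthread_rwlock_tryrdlock(&i_contents) != 0)
		return (EDEADLK);
	return (0);
}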