5045 use atomic_{inc,dec}_* instead of atomic_add_*
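
The change is mechanical: every atomic_add_long(&x, 1) and
atomic_add_long(&x, -1) on a ulong_t counter becomes atomic_inc_ulong(&x)
and atomic_dec_ulong(&x), and the value-returning atomic_add_long_nv(&x, -1)
becomes atomic_dec_ulong_nv(&x). A minimal userland sketch of the mapping,
assuming an illumos/Solaris build against atomic_ops(3C) (the counter name
here is illustrative, not from the diff):

    #include <sys/types.h>
    #include <atomic.h>             /* atomic_ops(3C) */
    #include <stdio.h>

    static volatile ulong_t pend;   /* stand-in for a counter such as ufs_quiesce_pend */

    int
    main(void)
    {
            /* Before: general-purpose add/subtract with a literal 1. */
            atomic_add_long(&pend, 1);
            atomic_add_long(&pend, -1);

            /* After: the intent-revealing inc/dec forms. */
            atomic_inc_ulong(&pend);

            /*
             * The _nv ("new value") variants return the post-operation
             * value, so "did this decrement reach zero?" remains a single
             * atomic step, exactly as with atomic_add_long_nv(ctr, -1).
             */
            if (atomic_dec_ulong_nv(&pend) == 0)
                    (void) printf("counter reached zero\n");
            return (0);
    }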

          --- old/usr/src/uts/common/fs/ufs/ufs_lockfs.c
          +++ new/usr/src/uts/common/fs/ufs/ufs_lockfs.c
(925 lines elided)
 926  926          SEARCH_ULOCKFSP(head, ulp, info);
 927  927  
 928  928          /*
 929  929           * Suspend both the reclaim thread and the delete thread.
 930  930           * This must be done outside the lockfs locking protocol.
 931  931           */
 932  932          ufs_thread_suspend(&ufsvfsp->vfs_reclaim);
 933  933          ufs_thread_suspend(&ufsvfsp->vfs_delete);
 934  934  
 935  935          mutex_enter(&ulp->ul_lock);
 936      -        atomic_add_long(&ufs_quiesce_pend, 1);
      936 +        atomic_inc_ulong(&ufs_quiesce_pend);
 937  937  
 938  938          /*
 939  939           * Quit if there is another lockfs request in progress
 940  940           * that is waiting for existing ufs_vnops to complete.
 941  941           */
 942  942          if (ULOCKFS_IS_BUSY(ulp)) {
 943  943                  error = EBUSY;
 944  944                  goto errexit;
 945  945          }
 946  946  
(209 lines elided)
1156 1156          else if (errlck == RE_ERRLCK)
1157 1157                  ufsfx_lockfs(ufsvfsp);
1158 1158  
1159 1159          /* don't allow error lock from user to invoke panic */
1160 1160          else if (from_user && errlck == SET_ERRLCK &&
1161 1161              !(ufsvfsp->vfs_fsfx.fx_flags & (UFSMNT_ONERROR_PANIC >> 4)))
1162 1162                  (void) ufs_fault(ufsvfsp->vfs_root,
1163 1163                      ulp->ul_lockfs.lf_comment && ulp->ul_lockfs.lf_comlen > 0 ?
1164 1164                      ulp->ul_lockfs.lf_comment: "user-applied error lock");
1165 1165  
1166      -        atomic_add_long(&ufs_quiesce_pend, -1);
     1166 +        atomic_dec_ulong(&ufs_quiesce_pend);
1167 1167          mutex_exit(&ulp->ul_lock);
1168 1168          vfs_unlock(vfsp);
1169 1169  
1170 1170          if (ULOCKFS_IS_HLOCK(&ufsvfsp->vfs_ulockfs))
1171 1171                  poll_events |= POLLERR;
1172 1172  
1173 1173          pollwakeup(&ufs_pollhd, poll_events);
1174 1174  
1175 1175          /*
1176 1176           * Allow both the delete thread and the reclaim thread to
(18 lines elided)
1195 1195           * ufs quiesce operation as it can lead to deadlock
1196 1196           * with getpage.
1197 1197           */
1198 1198          if (signal == 0)
1199 1199                  (void) ufs_thaw(vfsp, ufsvfsp, ulp);
1200 1200  
1201 1201          ULOCKFS_CLR_BUSY(ulp);
1202 1202          LOCKFS_CLR_BUSY(&ulp->ul_lockfs);
1203 1203  
1204 1204  errexit:
1205      -        atomic_add_long(&ufs_quiesce_pend, -1);
     1205 +        atomic_dec_ulong(&ufs_quiesce_pend);
1206 1206          mutex_exit(&ulp->ul_lock);
1207 1207          vfs_unlock(vfsp);
1208 1208  
1209 1209          /*
1210 1210           * Allow both the delete thread and the reclaim thread to
1211 1211           * continue.
1212 1212           */
1213 1213          ufs_thread_continue(&ufsvfsp->vfs_delete);
1214 1214          ufs_thread_continue(&ufsvfsp->vfs_reclaim);
1215 1215  
(76 lines elided)
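
The quiesce path above brackets its work with the pending-quiesce counter:
raise it before any state checks, and drop it on every exit, the errexit
path included. A hedged sketch of that shape (names are illustrative; the
real code also serializes on ulp->ul_lock, and decrements separately on
the success and error paths):

    #include <sys/types.h>
    #include <atomic.h>
    #include <errno.h>

    static volatile ulong_t quiesce_pend;   /* like ufs_quiesce_pend */
    static int busy;                        /* like ULOCKFS_IS_BUSY(ulp) */

    /*
     * Increment-first bracket: the counter is visible to concurrent
     * ufs_lockfs_begin() callers before we test for a competing
     * request, and both the success and error paths decrement it.
     */
    static int
    quiesce_op(void)
    {
            int error = 0;

            atomic_inc_ulong(&quiesce_pend);        /* was atomic_add_long(.., 1) */
            if (busy) {
                    error = EBUSY;
                    goto errexit;
            }
            /* ... quiesce work ... */
    errexit:
            atomic_dec_ulong(&quiesce_pend);        /* was atomic_add_long(.., -1) */
            return (error);
    }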
1292 1292                          sigintr(&smask, 1);
1293 1293                          sig = cv_wait_sig(&ulp->ul_cv, &ulp->ul_lock);
1294 1294                          sigunintr(&smask);
1295 1295                          if ((!sig && (ulp->ul_fs_lock & mask)) ||
1296 1296                              ufsvfsp->vfs_dontblock)
1297 1297                                  return (EINTR);
1298 1298                  }
1299 1299          }
1300 1300  
1301 1301          if (mask & ULOCKFS_FWLOCK) {
1302      -                atomic_add_long(&ulp->ul_falloc_cnt, 1);
     1302 +                atomic_inc_ulong(&ulp->ul_falloc_cnt);
1303 1303                  ULOCKFS_SET_FALLOC(ulp);
1304 1304          } else {
1305      -                atomic_add_long(&ulp->ul_vnops_cnt, 1);
     1305 +                atomic_inc_ulong(&ulp->ul_vnops_cnt);
1306 1306          }
1307 1307  
1308 1308          return (0);
1309 1309  }
1310 1310  
1311 1311  /*
1312 1312   * Check whether we came across the handcrafted lockfs protocol path. We can't
1313 1313   * simply check for T_DONTBLOCK here as one would assume since this can also
1314 1314   * falsely catch recursive VOP's going to a different filesystem, instead we
1315 1315   * check if we already hold the ulockfs->ul_lock mutex.
(57 lines elided)
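
Once the wait loop in ufs_check_lockfs() above clears, the operation
registers itself on one of two counters selected by the caller's mask, so
a quiesce can drain fallocate threads and ordinary vnode operations
independently. A small sketch of that selection (the mask value and names
are illustrative):

    #include <sys/types.h>
    #include <atomic.h>

    #define FWLOCK  0x1                     /* illustrative, like ULOCKFS_FWLOCK */

    static volatile ulong_t falloc_cnt;     /* like ulp->ul_falloc_cnt */
    static volatile ulong_t vnops_cnt;      /* like ulp->ul_vnops_cnt */

    /* Register an operation on whichever counter the mask selects. */
    static void
    register_op(ulong_t mask)
    {
            if (mask & FWLOCK)
                    atomic_inc_ulong(&falloc_cnt);  /* was atomic_add_long(.., 1) */
            else
                    atomic_inc_ulong(&vnops_cnt);
    }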
1373 1373           * Increment the ctr irrespective of the lockfs state. If the lockfs
1374 1374           * state is not ULOCKFS_ULOCK, we can decrement it later. However,
1375 1375           * before incrementing we need to check if there is a pending quiesce
1376 1376           * request because if we have a continuous stream of ufs_lockfs_begin
1377 1377           * requests pounding on a few cpu's then the ufs_quiesce thread might
1378 1378           * never see the value of zero for ctr - a livelock kind of scenario.
1379 1379           */
1380 1380          ctr = (mask & ULOCKFS_FWLOCK) ?
1381 1381              &ulp->ul_falloc_cnt : &ulp->ul_vnops_cnt;
1382 1382          if (!ULOCKFS_IS_SLOCK(ulp)) {
1383      -                atomic_add_long(ctr, 1);
     1383 +                atomic_inc_ulong(ctr);
1384 1384                  op_cnt_incremented++;
1385 1385          }
1386 1386  
1387 1387          /*
1388 1388           * If the lockfs state (indicated by ul_fs_lock) is not just
1389 1389           * ULOCKFS_ULOCK, then we will be routed through ufs_check_lockfs
1390 1390           * where there is a check with an appropriate mask to selectively allow
1391 1391           * operations permitted for that kind of lockfs state.
1392 1392           *
1393 1393           * Even these selective operations should not be allowed to go through
1394 1394           * if a lockfs request is in progress because that could result in inode
1395 1395           * modifications during a quiesce and could hence result in inode
1396 1396           * reconciliation failures. ULOCKFS_SLOCK alone would not be sufficient,
1397 1397           * so make use of ufs_quiesce_pend to disallow vnode operations when a
1398 1398           * quiesce is in progress.
1399 1399           */
1400 1400          if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
1401 1401                  if (op_cnt_incremented)
1402      -                        if (!atomic_add_long_nv(ctr, -1))
     1402 +                        if (!atomic_dec_ulong_nv(ctr))
1403 1403                                  cv_broadcast(&ulp->ul_cv);
1404 1404                  mutex_enter(&ulp->ul_lock);
1405 1405                  error = ufs_check_lockfs(ufsvfsp, ulp, mask);
1406 1406                  mutex_exit(&ulp->ul_lock);
1407 1407                  if (error) {
1408 1408                          if (ulockfs_info_free == NULL)
1409 1409                                  kmem_free(ulockfs_info_temp,
1410 1410                                      sizeof (ulockfs_info_t));
1411 1411                          return (error);
1412 1412                  }
(94 lines elided)
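
The block above is the heart of the begin protocol: increment the counter
optimistically (unless a quiesce is already pending, which would otherwise
never see it reach zero), re-check the lockfs state, and on the slow path
undo the increment, waking the quiescer if this was the last reference.
A hedged userland sketch of that gate, with pthreads standing in for the
kernel mutex/condvar (all names illustrative):

    #include <sys/types.h>
    #include <atomic.h>
    #include <pthread.h>

    static volatile ulong_t ctr;            /* like ul_vnops_cnt / ul_falloc_cnt */
    static volatile ulong_t quiesce_pend;   /* like ufs_quiesce_pend */
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int slocked;                     /* like ULOCKFS_IS_SLOCK(ulp) */

    /* Returns 0 on the fast path, -1 when the caller must take the slow path. */
    static int
    lockfs_begin(void)
    {
            int incremented = 0;

            if (!slocked) {
                    atomic_inc_ulong(&ctr);         /* was atomic_add_long(ctr, 1) */
                    incremented = 1;
            }
            if (quiesce_pend) {
                    /* was: if (!atomic_add_long_nv(ctr, -1)) */
                    if (incremented && atomic_dec_ulong_nv(&ctr) == 0)
                            (void) pthread_cond_broadcast(&cv);
                    return (-1);
            }
            return (0);
    }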
1507 1507           */
1508 1508          info->ulp = NULL;
1509 1509  
1510 1510          if (ufs_lockfs_top_vop_return(head))
1511 1511                  curthread->t_flag &= ~T_DONTBLOCK;
1512 1512  
1513 1513          /* fallocate thread */
1514 1514          if (ULOCKFS_IS_FALLOC(ulp) && info->flags & ULOCK_INFO_FALLOCATE) {
1515 1515                  /* Clear the thread's fallocate state */
1516 1516                  info->flags &= ~ULOCK_INFO_FALLOCATE;
1517      -                if (!atomic_add_long_nv(&ulp->ul_falloc_cnt, -1)) {
     1517 +                if (!atomic_dec_ulong_nv(&ulp->ul_falloc_cnt)) {
1518 1518                          mutex_enter(&ulp->ul_lock);
1519 1519                          ULOCKFS_CLR_FALLOC(ulp);
1520 1520                          cv_broadcast(&ulp->ul_cv);
1521 1521                          mutex_exit(&ulp->ul_lock);
1522 1522                  }
1523 1523          } else  { /* normal thread */
1524      -                if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     1524 +                if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
1525 1525                          cv_broadcast(&ulp->ul_cv);
1526 1526          }
1527 1527  }
1528 1528  
1529 1529  /*
1530 1530   * ufs_lockfs_trybegin - try to start the lockfs locking protocol without
1531 1531   * blocking.
1532 1532   */
1533 1533  int
1534 1534  ufs_lockfs_trybegin(struct ufsvfs *ufsvfsp, struct ulockfs **ulpp, ulong_t mask)
(45 lines elided)
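
The teardown path shown above leans on the "new value" form: dropping the
reference and detecting zero is one atomic step, so exactly one thread,
the last one out, wakes the quiescer waiting on the condvar.
atomic_dec_ulong_nv() preserves the !atomic_add_long_nv(ctr, -1) semantics
exactly. A minimal sketch (pthreads again standing in for cv_broadcast):

    #include <sys/types.h>
    #include <atomic.h>
    #include <pthread.h>

    static volatile ulong_t vnops_cnt;      /* like ulp->ul_vnops_cnt */
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

    static void
    lockfs_end(void)
    {
            /* was: if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1)) */
            if (atomic_dec_ulong_nv(&vnops_cnt) == 0)
                    (void) pthread_cond_broadcast(&cv);
    }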
1580 1580           * Increment the ctr irrespective of the lockfs state. If the lockfs
1581 1581           * state is not ULOCKFS_ULOCK, we can decrement it later. However,
1582 1582           * before incrementing we need to check if there is a pending quiesce
1583 1583           * request because if we have a continuous stream of ufs_lockfs_begin
1584 1584           * requests pounding on a few cpu's then the ufs_quiesce thread might
1585 1585           * never see the value of zero for ctr - a livelock kind of scenario.
1586 1586           */
1587 1587          ctr = (mask & ULOCKFS_FWLOCK) ?
1588 1588              &ulp->ul_falloc_cnt : &ulp->ul_vnops_cnt;
1589 1589          if (!ULOCKFS_IS_SLOCK(ulp)) {
1590      -                atomic_add_long(ctr, 1);
     1590 +                atomic_inc_ulong(ctr);
1591 1591                  op_cnt_incremented++;
1592 1592          }
1593 1593  
1594 1594          if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
1595 1595                  /*
1596 1596                   * Non-blocking version of ufs_check_lockfs() code.
1597 1597                   *
1598 1598                   * If the file system is not hard locked or error locked
1599 1599                   * and if ulp->ul_fs_lock allows this operation, increment
1600 1600                   * the appropriate counter and proceed (For eg., In case the
1601 1601                   * file system is delete locked, a mmap can still go through).
1602 1602                   */
1603 1603                  if (op_cnt_incremented)
1604      -                        if (!atomic_add_long_nv(ctr, -1))
     1604 +                        if (!atomic_dec_ulong_nv(ctr))
1605 1605                                  cv_broadcast(&ulp->ul_cv);
1606 1606                  mutex_enter(&ulp->ul_lock);
1607 1607                  if (ULOCKFS_IS_HLOCK(ulp) ||
1608 1608                      (ULOCKFS_IS_ELOCK(ulp) && ufsvfsp->vfs_dontblock))
1609 1609                          error = EIO;
1610 1610                  else if (ulp->ul_fs_lock & mask)
1611 1611                          error = EAGAIN;
1612 1612  
1613 1613                  if (error) {
1614 1614                          mutex_exit(&ulp->ul_lock);
1615 1615                          if (ulockfs_info_free == NULL)
1616 1616                                  kmem_free(ulockfs_info_temp,
1617 1617                                      sizeof (ulockfs_info_t));
1618 1618                          return (error);
1619 1619                  }
1620      -                atomic_add_long(ctr, 1);
     1620 +                atomic_inc_ulong(ctr);
1621 1621                  if (mask & ULOCKFS_FWLOCK)
1622 1622                          ULOCKFS_SET_FALLOC(ulp);
1623 1623                  mutex_exit(&ulp->ul_lock);
1624 1624          } else {
1625 1625                  /*
1626 1626                   * This is the common case of file system in a unlocked state.
1627 1627                   *
1628 1628                   * If a file system is unlocked, we would expect the ctr to have
1629 1629                   * been incremented by now. But this will not be true when a
1630 1630                   * quiesce is winding up - SLOCK was set when we checked before
(10 lines elided)
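
ufs_lockfs_trybegin() above is the same gate without the sleep: where the
blocking path would cv_wait_sig(), it reports EIO (hard or error locked)
or EAGAIN (operation masked off) and only re-increments the counter once
the state is known good. A sketch of the non-blocking check (the state
variables are illustrative):

    #include <sys/types.h>
    #include <atomic.h>
    #include <errno.h>

    static volatile ulong_t ctr;
    static int hlocked;             /* like ULOCKFS_IS_HLOCK(ulp) */
    static ulong_t fs_lock;         /* like ulp->ul_fs_lock */

    static int
    lockfs_trybegin(ulong_t mask)
    {
            if (hlocked)
                    return (EIO);
            if (fs_lock & mask)
                    return (EAGAIN);
            atomic_inc_ulong(&ctr);         /* was atomic_add_long(ctr, 1) */
            return (0);
    }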
1641 1641                          else if (ulp->ul_fs_lock & mask)
1642 1642                                  error = EAGAIN;
1643 1643  
1644 1644                          if (error) {
1645 1645                                  mutex_exit(&ulp->ul_lock);
1646 1646                                  if (ulockfs_info_free == NULL)
1647 1647                                          kmem_free(ulockfs_info_temp,
1648 1648                                              sizeof (ulockfs_info_t));
1649 1649                                  return (error);
1650 1650                          }
1651      -                        atomic_add_long(ctr, 1);
     1651 +                        atomic_inc_ulong(ctr);
1652 1652                          if (mask & ULOCKFS_FWLOCK)
1653 1653                                  ULOCKFS_SET_FALLOC(ulp);
1654 1654                          mutex_exit(&ulp->ul_lock);
1655 1655                  } else if (mask & ULOCKFS_FWLOCK) {
1656 1656                          mutex_enter(&ulp->ul_lock);
1657 1657                          ULOCKFS_SET_FALLOC(ulp);
1658 1658                          mutex_exit(&ulp->ul_lock);
1659 1659                  }
1660 1660          }
1661 1661  
(61 lines elided)
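
The unlocked fast path still has to cope with a quiesce that was winding
up when the first, lock-free test ran: SLOCK was set, so the counter was
never bumped. The remedy above is a double check, retest under ul_lock and
increment only then. Sketched with a pthread mutex (names illustrative):

    #include <sys/types.h>
    #include <atomic.h>
    #include <errno.h>
    #include <pthread.h>

    static volatile ulong_t ctr;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static ulong_t fs_lock;         /* like ulp->ul_fs_lock */

    static int
    fastpath_recheck(ulong_t mask)
    {
            int error = 0;

            (void) pthread_mutex_lock(&lock);
            if (fs_lock & mask)
                    error = EAGAIN;
            else
                    atomic_inc_ulong(&ctr); /* was atomic_add_long(ctr, 1) */
            (void) pthread_mutex_unlock(&lock);
            return (error);
    }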
1723 1723                              KM_NOSLEEP)) == NULL) {
1724 1724                                  *ulpp = NULL;
1725 1725                                  return (ENOMEM);
1726 1726                          }
1727 1727                  }
1728 1728          }
1729 1729  
1730 1730          /*
1731 1731           * First time VOP call
1732 1732           */
1733      -        atomic_add_long(&ulp->ul_vnops_cnt, 1);
     1733 +        atomic_inc_ulong(&ulp->ul_vnops_cnt);
1734 1734          if (!ULOCKFS_IS_JUSTULOCK(ulp) || ufs_quiesce_pend) {
1735      -                if (!atomic_add_long_nv(&ulp->ul_vnops_cnt, -1))
     1735 +                if (!atomic_dec_ulong_nv(&ulp->ul_vnops_cnt))
1736 1736                          cv_broadcast(&ulp->ul_cv);
1737 1737                  mutex_enter(&ulp->ul_lock);
1738 1738                  if (seg->s_ops == &segvn_ops &&
1739 1739                      ((struct segvn_data *)seg->s_data)->type != MAP_SHARED) {
1740 1740                          mask = (ulong_t)ULOCKFS_GETREAD_MASK;
1741 1741                  } else if (protp && read_access) {
1742 1742                          /*
1743 1743                           * Restrict the mapping to readonly.
1744 1744                           * Writes to this mapping will cause
1745 1745                           * another fault which will then
(52 lines elided)
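
The getpage variant of the begin protocol, in the final hunk above,
registers the fault as a first-time VOP with the same optimistic
increment, backing it out (and waking any quiescer) before falling into
the slow path that may downgrade the mapping to read-only. A brief sketch
of just that counter handshake (names illustrative):

    #include <sys/types.h>
    #include <atomic.h>
    #include <pthread.h>

    static volatile ulong_t vnops_cnt;      /* like ulp->ul_vnops_cnt */
    static volatile ulong_t quiesce_pend;   /* like ufs_quiesce_pend */
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int justulock = 1;               /* like ULOCKFS_IS_JUSTULOCK(ulp) */

    /* Returns 0 on the fast path, -1 when the slow path must run. */
    static int
    getpage_begin(void)
    {
            atomic_inc_ulong(&vnops_cnt);   /* was atomic_add_long(.., 1) */
            if (!justulock || quiesce_pend) {
                    /* was: if (!atomic_add_long_nv(.., -1)) */
                    if (atomic_dec_ulong_nv(&vnops_cnt) == 0)
                            (void) pthread_cond_broadcast(&cv);
                    return (-1);
            }
            return (0);
    }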