Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/os/errorq.c
          +++ new/usr/src/uts/common/os/errorq.c
↓ open down ↓ 516 lines elided ↑ open up ↑
 517  517   * smaller than the queue element size, the remainder of the queue element is
 518  518   * filled with zeroes.  This function may be called from any context subject
 519  519   * to the Platform Considerations described above.
 520  520   */
 521  521  void
 522  522  errorq_dispatch(errorq_t *eqp, const void *data, size_t len, uint_t flag)
 523  523  {
 524  524          errorq_elem_t *eep, *old;
 525  525  
 526  526          if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
 527      -                atomic_add_64(&errorq_lost, 1);
      527 +                atomic_inc_64(&errorq_lost);
 528  528                  return; /* drop error if queue is uninitialized or disabled */
 529  529          }
 530  530  
 531  531          for (;;) {
 532  532                  int i, rval;
 533  533  
 534  534                  if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
 535  535                      eqp->eq_rotor)) == -1) {
 536      -                        atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
      536 +                        atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
 537  537                          return;
 538  538                  }
 539  539                  BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
 540  540                  if (rval == 0) {
 541  541                          eqp->eq_rotor = i;
 542  542                          eep = &eqp->eq_elems[i];
 543  543                          break;
 544  544                  }
 545  545          }
 546  546  
↓ open down ↓ 5 lines elided ↑ open up ↑
 552  552  
 553  553          for (;;) {
 554  554                  old = eqp->eq_pend;
 555  555                  eep->eqe_prev = old;
 556  556                  membar_producer();
 557  557  
 558  558                  if (atomic_cas_ptr(&eqp->eq_pend, old, eep) == old)
 559  559                          break;
 560  560          }
 561  561  
 562      -        atomic_add_64(&eqp->eq_kstat.eqk_dispatched.value.ui64, 1);
      562 +        atomic_inc_64(&eqp->eq_kstat.eqk_dispatched.value.ui64);
 563  563  
 564  564          if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
 565  565                  ddi_trigger_softintr(eqp->eq_id);
 566  566  }
 567  567  
 568  568  /*
 569  569   * Drain the specified error queue by calling eq_func() for each pending error.
 570  570   * This function must be called at or below LOCK_LEVEL or from panic context.
 571  571   * In order to synchronize with other attempts to drain the queue, we acquire
 572  572   * the adaptive eq_lock, blocking other consumers.  Once this lock is held,
↓ open down ↓ 287 lines elided ↑ open up ↑
 860  860   * element.  The element is returned to the free pool when either
 861  861   * errorq_commit() is called and the element asynchronously processed
 862  862   * or immediately when errorq_cancel() is called.
 863  863   */
 864  864  errorq_elem_t *
 865  865  errorq_reserve(errorq_t *eqp)
 866  866  {
 867  867          errorq_elem_t *eqep;
 868  868  
 869  869          if (eqp == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
 870      -                atomic_add_64(&errorq_lost, 1);
      870 +                atomic_inc_64(&errorq_lost);
 871  871                  return (NULL);
 872  872          }
 873  873  
 874  874          for (;;) {
 875  875                  int i, rval;
 876  876  
 877  877                  if ((i = errorq_availbit(eqp->eq_bitmap, eqp->eq_qlen,
 878  878                      eqp->eq_rotor)) == -1) {
 879      -                        atomic_add_64(&eqp->eq_kstat.eqk_dropped.value.ui64, 1);
      879 +                        atomic_inc_64(&eqp->eq_kstat.eqk_dropped.value.ui64);
 880  880                          return (NULL);
 881  881                  }
 882  882                  BT_ATOMIC_SET_EXCL(eqp->eq_bitmap, i, rval);
 883  883                  if (rval == 0) {
 884  884                          eqp->eq_rotor = i;
 885  885                          eqep = &eqp->eq_elems[i];
 886  886                          break;
 887  887                  }
 888  888          }
 889  889  
 890  890          if (eqp->eq_flags & ERRORQ_NVLIST) {
 891  891                  errorq_nvelem_t *eqnp = eqep->eqe_data;
 892  892                  nv_alloc_reset(eqnp->eqn_nva);
 893  893                  eqnp->eqn_nvl = fm_nvlist_create(eqnp->eqn_nva);
 894  894          }
 895  895  
 896      -        atomic_add_64(&eqp->eq_kstat.eqk_reserved.value.ui64, 1);
      896 +        atomic_inc_64(&eqp->eq_kstat.eqk_reserved.value.ui64);
 897  897          return (eqep);
 898  898  }
 899  899  
 900  900  /*
 901  901   * Commit an errorq element (eqep) for dispatching.
 902  902   * This function may be called from any context subject
 903  903   * to the Platform Considerations described above.
 904  904   */
 905  905  void
 906  906  errorq_commit(errorq_t *eqp, errorq_elem_t *eqep, uint_t flag)
 907  907  {
 908  908          errorq_elem_t *old;
 909  909  
 910  910          if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE)) {
 911      -                atomic_add_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64, 1);
      911 +                atomic_inc_64(&eqp->eq_kstat.eqk_commit_fail.value.ui64);
 912  912                  return;
 913  913          }
 914  914  
 915  915          for (;;) {
 916  916                  old = eqp->eq_pend;
 917  917                  eqep->eqe_prev = old;
 918  918                  membar_producer();
 919  919  
 920  920                  if (atomic_cas_ptr(&eqp->eq_pend, old, eqep) == old)
 921  921                          break;
 922  922          }
 923  923  
 924      -        atomic_add_64(&eqp->eq_kstat.eqk_committed.value.ui64, 1);
      924 +        atomic_inc_64(&eqp->eq_kstat.eqk_committed.value.ui64);
 925  925  
 926  926          if (flag == ERRORQ_ASYNC && eqp->eq_id != NULL)
 927  927                  ddi_trigger_softintr(eqp->eq_id);
 928  928  }
 929  929  
 930  930  /*
 931  931   * Cancel an errorq element reservation by returning the specified element
 932  932   * to the free pool.  Duplicate or invalid frees are not supported.
 933  933   */
 934  934  void
 935  935  errorq_cancel(errorq_t *eqp, errorq_elem_t *eqep)
 936  936  {
 937  937          if (eqep == NULL || !(eqp->eq_flags & ERRORQ_ACTIVE))
 938  938                  return;
 939  939  
 940  940          BT_ATOMIC_CLEAR(eqp->eq_bitmap, eqep - eqp->eq_elems);
 941  941  
 942      -        atomic_add_64(&eqp->eq_kstat.eqk_cancelled.value.ui64, 1);
      942 +        atomic_inc_64(&eqp->eq_kstat.eqk_cancelled.value.ui64);
 943  943  }
 944  944  
 945  945  /*
 946  946   * Write elements on the dump list of each nvlist errorq to the dump device.
 947  947   * Upon reboot, fmd(1M) will extract and replay them for diagnosis.
 948  948   */
 949  949  void
 950  950  errorq_dump(void)
 951  951  {
 952  952          errorq_elem_t *eep;
↓ open down ↓ 85 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX