5045 use atomic_{inc,dec}_* instead of atomic_add_*
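The change is mechanical: illumos's <sys/atomic.h> provides atomic_inc_32()/atomic_dec_32() (plus _nv variants that return the new value) as clearer spellings of atomic_add_32() with a constant +1 or -1 delta. Below is a minimal userland sketch of the substitution, assuming the illumos <atomic.h> interfaces; the counter and output are illustrative only, not part of this changeset.

	#include <atomic.h>
	#include <stdio.h>

	int
	main(void)
	{
		volatile uint32_t cnt = 0;

		atomic_add_32(&cnt, 1);		/* old spelling: add the constant 1 */
		atomic_inc_32(&cnt);		/* new spelling: same effect */

		atomic_add_32(&cnt, -1);	/* old spelling: add the constant -1 */

		/* The _nv variants return the new value; the refcount macros rely on this. */
		if (atomic_dec_32_nv(&cnt) == 0)
			(void) printf("counter back to zero\n");

		return (0);
	}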

--- old/usr/src/uts/common/sys/crypto/sched_impl.h
+++ new/usr/src/uts/common/sys/crypto/sched_impl.h
[... 109 lines elided ...]
         (tlist != NULL && is_in_triedlist(pd, tlist))

 #define IS_RECOVERABLE(error)                   \
         (error == CRYPTO_BUFFER_TOO_BIG ||      \
         error == CRYPTO_BUSY ||                 \
         error == CRYPTO_DEVICE_ERROR ||         \
         error == CRYPTO_DEVICE_MEMORY ||        \
         error == CRYPTO_KEY_SIZE_RANGE ||       \
         error == CRYPTO_NO_PERMISSION)

-#define KCF_ATOMIC_INCR(x)      atomic_add_32(&(x), 1)
-#define KCF_ATOMIC_DECR(x)      atomic_add_32(&(x), -1)
+#define KCF_ATOMIC_INCR(x)      atomic_inc_32(&(x))
+#define KCF_ATOMIC_DECR(x)      atomic_dec_32(&(x))

 /*
  * Node structure for synchronous requests.
  */
 typedef struct kcf_sreq_node {
         /* Should always be the first field in this structure */
         kcf_call_type_t         sn_type;
         /*
          * sn_cv and sr_lock are used to wait for the
          * operation to complete. sn_lock also protects
[... 71 lines elided ...]
         kcf_prov_cpu_t          *an_mp;
         kcf_prov_tried_t        *an_tried_plist;

         struct kcf_areq_node    *an_idnext;     /* Next in ID hash */
         struct kcf_areq_node    *an_idprev;     /* Prev in ID hash */
         kcondvar_t              an_done;        /* Signal request completion */
         uint_t                  an_refcnt;
 } kcf_areq_node_t;

 #define KCF_AREQ_REFHOLD(areq) {                \
-        atomic_add_32(&(areq)->an_refcnt, 1);   \
+        atomic_inc_32(&(areq)->an_refcnt);      \
         ASSERT((areq)->an_refcnt != 0);         \
 }

 #define KCF_AREQ_REFRELE(areq) {                                \
         ASSERT((areq)->an_refcnt != 0);                         \
         membar_exit();                                          \
-        if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0)      \
+        if (atomic_dec_32_nv(&(areq)->an_refcnt) == 0)  \
                 kcf_free_req(areq);                             \
 }

 #define GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))

 #define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
         (areq)->an_reqarg.cr_callback_arg, err);

 /* For internally generated call requests for dual operations */
 typedef struct kcf_call_req {
[... 76 lines elided ...]
         kcf_mech_entry_t        *kc_mech;
         struct kcf_context      *kc_secondctx;  /* for dual contexts */
 } kcf_context_t;

 /*
  * Bump up the reference count on the framework private context. A
  * global context or a request that references this structure should
  * do a hold.
  */
 #define KCF_CONTEXT_REFHOLD(ictx) {             \
-        atomic_add_32(&(ictx)->kc_refcnt, 1);   \
+        atomic_inc_32(&(ictx)->kc_refcnt);      \
         ASSERT((ictx)->kc_refcnt != 0);         \
 }

 /*
  * Decrement the reference count on the framework private context.
  * When the last reference is released, the framework private
  * context structure is freed along with the global context.
  */
 #define KCF_CONTEXT_REFRELE(ictx) {                             \
         ASSERT((ictx)->kc_refcnt != 0);                         \
         membar_exit();                                          \
-        if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0)      \
+        if (atomic_dec_32_nv(&(ictx)->kc_refcnt) == 0)  \
                 kcf_free_context(ictx);                         \
 }

 /*
  * Check if we can release the context now. In case of CRYPTO_QUEUED
  * we do not release it as we can do it only after the provider notified
  * us. In case of CRYPTO_BUSY, the client can retry the request using
  * the context, so we do not release the context.
  *
  * This macro should be called only from the final routine in
[... 183 lines elided ...]
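For context on the REFRELE macros in this file: they pair the decrement with membar_exit() so that all stores made while a reference was held become visible before the count drops, ensuring the thread that releases the last reference frees a fully published object. A standalone sketch of that pattern follows; node_t and node_rele() are hypothetical names for illustration, not from the KCF code.

	#include <atomic.h>
	#include <stdlib.h>

	typedef struct node {
		uint32_t	n_refcnt;	/* manipulated only via atomics */
	} node_t;

	static void
	node_rele(node_t *np)
	{
		/* Make prior stores visible before the decrement. */
		membar_exit();

		/* atomic_dec_32_nv() returns the new value; 0 means last reference. */
		if (atomic_dec_32_nv(&np->n_refcnt) == 0)
			free(np);
	}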