5045 use atomic_{inc,dec}_* instead of atomic_add_*
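
The conversion in the hunks below is mechanical: add-with-constant calls become the
dedicated increment/decrement primitives, and the value-returning
atomic_add_32_nv(..., -1) calls become atomic_dec_32_nv(...). A minimal, standalone
sketch of the before/after pattern, assuming an illumos/Solaris environment where
these interfaces are available (the example_cnt counter and main() harness are
illustrative only, not part of the change):

	#include <sys/types.h>
	#include <atomic.h>	/* userland header; kernel code uses <sys/atomic.h> */
	#include <stdio.h>

	static volatile uint32_t example_cnt = 0;	/* hypothetical counter */

	int
	main(void)
	{
		/* Old style: add or subtract an explicit constant. */
		atomic_add_32(&example_cnt, 1);
		atomic_add_32(&example_cnt, -1);

		/* New style: say what is meant. */
		atomic_inc_32(&example_cnt);
		atomic_dec_32(&example_cnt);

		/* The *_nv forms return the new value, e.g. to detect zero. */
		atomic_inc_32(&example_cnt);
		if (atomic_dec_32_nv(&example_cnt) == 0)
			(void) printf("count dropped to zero\n");

		return (0);
	}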

*** 115,126 ****
  	error == CRYPTO_DEVICE_ERROR || \
  	error == CRYPTO_DEVICE_MEMORY || \
  	error == CRYPTO_KEY_SIZE_RANGE || \
  	error == CRYPTO_NO_PERMISSION)
  
! #define	KCF_ATOMIC_INCR(x)	atomic_add_32(&(x), 1)
! #define	KCF_ATOMIC_DECR(x)	atomic_add_32(&(x), -1)
  
  /*
   * Node structure for synchronous requests.
   */
  typedef struct kcf_sreq_node {
--- 115,126 ----
  	error == CRYPTO_DEVICE_ERROR || \
  	error == CRYPTO_DEVICE_MEMORY || \
  	error == CRYPTO_KEY_SIZE_RANGE || \
  	error == CRYPTO_NO_PERMISSION)
  
! #define	KCF_ATOMIC_INCR(x)	atomic_inc_32(&(x))
! #define	KCF_ATOMIC_DECR(x)	atomic_dec_32(&(x))
  
  /*
   * Node structure for synchronous requests.
   */
  typedef struct kcf_sreq_node {
*** 208,225 ****
  	kcondvar_t	an_done;	/* Signal request completion */
  	uint_t		an_refcnt;
  } kcf_areq_node_t;
  
  #define	KCF_AREQ_REFHOLD(areq) {		\
! 	atomic_add_32(&(areq)->an_refcnt, 1);	\
  	ASSERT((areq)->an_refcnt != 0);		\
  }
  
  #define	KCF_AREQ_REFRELE(areq) {				\
  	ASSERT((areq)->an_refcnt != 0);				\
  	membar_exit();						\
! 	if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0)	\
  		kcf_free_req(areq);				\
  }
  
  #define	GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))
  
--- 208,225 ----
  	kcondvar_t	an_done;	/* Signal request completion */
  	uint_t		an_refcnt;
  } kcf_areq_node_t;
  
  #define	KCF_AREQ_REFHOLD(areq) {		\
! 	atomic_inc_32(&(areq)->an_refcnt);	\
  	ASSERT((areq)->an_refcnt != 0);		\
  }
  
  #define	KCF_AREQ_REFRELE(areq) {				\
  	ASSERT((areq)->an_refcnt != 0);				\
  	membar_exit();						\
! 	if (atomic_dec_32_nv(&(areq)->an_refcnt) == 0)		\
  		kcf_free_req(areq);				\
  }
  
  #define	GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))
  
*** 312,322 ****
   * Bump up the reference count on the framework private context. A
   * global context or a request that references this structure should
   * do a hold.
   */
  #define	KCF_CONTEXT_REFHOLD(ictx) {		\
! 	atomic_add_32(&(ictx)->kc_refcnt, 1);	\
  	ASSERT((ictx)->kc_refcnt != 0);		\
  }
  
  /*
   * Decrement the reference count on the framework private context.
--- 312,322 ----
   * Bump up the reference count on the framework private context. A
   * global context or a request that references this structure should
   * do a hold.
   */
  #define	KCF_CONTEXT_REFHOLD(ictx) {		\
! 	atomic_inc_32(&(ictx)->kc_refcnt);	\
  	ASSERT((ictx)->kc_refcnt != 0);		\
  }
  
  /*
   * Decrement the reference count on the framework private context.
*** 324,334 ****
   * context structure is freed along with the global context.
   */
  #define	KCF_CONTEXT_REFRELE(ictx) {				\
  	ASSERT((ictx)->kc_refcnt != 0);				\
  	membar_exit();						\
! 	if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0)	\
  		kcf_free_context(ictx);				\
  }
  
  /*
   * Check if we can release the context now. In case of CRYPTO_QUEUED
--- 324,334 ----
   * context structure is freed along with the global context.
   */
  #define	KCF_CONTEXT_REFRELE(ictx) {				\
  	ASSERT((ictx)->kc_refcnt != 0);				\
  	membar_exit();						\
! 	if (atomic_dec_32_nv(&(ictx)->kc_refcnt) == 0)		\
  		kcf_free_context(ictx);				\
  }
  
  /*
   * Check if we can release the context now. In case of CRYPTO_QUEUED
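
Both REFRELE macros above follow the standard release-side reference-count idiom:
issue a release barrier with membar_exit() so that all prior stores to the object
are visible before the count is dropped, then use the value-returning decrement so
that the thread which observes the count reach zero (and only that thread) frees
the object. A hedged, self-contained sketch of the same idiom outside the KCF
code, assuming the illumos/Solaris atomic interfaces and a hypothetical obj_t type:

	#include <sys/types.h>
	#include <atomic.h>	/* kernel code would use <sys/atomic.h> */
	#include <stdlib.h>

	typedef struct obj {
		volatile uint32_t o_refcnt;
		/* ... payload ... */
	} obj_t;

	static void
	obj_hold(obj_t *op)
	{
		atomic_inc_32(&op->o_refcnt);		/* take a reference */
	}

	static void
	obj_rele(obj_t *op)
	{
		membar_exit();				/* order prior stores before the drop */
		if (atomic_dec_32_nv(&op->o_refcnt) == 0)
			free(op);			/* last reference: free exactly once */
	}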