5045 use atomic_{inc,dec}_* instead of atomic_add_*

Old version (atomic_add_*):

 100         struct kcf_prov_tried   *pt_next;
 101 } kcf_prov_tried_t;
 102 
 103 /* Must be different from KM_SLEEP and KM_NOSLEEP */
 104 #define KCF_HOLD_PROV   0x1000
 105 
 106 #define IS_FG_SUPPORTED(mdesc, fg)              \
 107         (((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)
 108 
 109 #define IS_PROVIDER_TRIED(pd, tlist)            \
 110         (tlist != NULL && is_in_triedlist(pd, tlist))
 111 
 112 #define IS_RECOVERABLE(error)                   \
 113         (error == CRYPTO_BUFFER_TOO_BIG ||      \
 114         error == CRYPTO_BUSY ||                 \
 115         error == CRYPTO_DEVICE_ERROR ||         \
 116         error == CRYPTO_DEVICE_MEMORY ||        \
 117         error == CRYPTO_KEY_SIZE_RANGE ||       \
 118         error == CRYPTO_NO_PERMISSION)
 119 
 120 #define KCF_ATOMIC_INCR(x)      atomic_add_32(&(x), 1)
 121 #define KCF_ATOMIC_DECR(x)      atomic_add_32(&(x), -1)
 122 
 123 /*
 124  * Node structure for synchronous requests.
 125  */
 126 typedef struct kcf_sreq_node {
 127         /* Should always be the first field in this structure */
 128         kcf_call_type_t         sn_type;
 129         /*
 130          * sn_cv and sn_lock are used to wait for the
 131          * operation to complete. sn_lock also protects
 132          * the sn_state field.
 133          */
 134         kcondvar_t              sn_cv;
 135         kmutex_t                sn_lock;
 136         kcf_req_status_t        sn_state;
 137 
 138         /*
 139          * Return value from the operation. This will be
 140          * one of the CRYPTO_* errors defined in common.h.
 141          */


 193         /*
 194          * Next and previous nodes in the global software
 195          * queue. These fields are NULL for a hardware
 196          * provider since we use a taskq there.
 197          */
 198         struct kcf_areq_node    *an_next;
 199         struct kcf_areq_node    *an_prev;
 200 
 201         /* Provider handling this request */
 202         kcf_provider_desc_t     *an_provider;
 203         kcf_prov_cpu_t          *an_mp;
 204         kcf_prov_tried_t        *an_tried_plist;
 205 
 206         struct kcf_areq_node    *an_idnext;     /* Next in ID hash */
 207         struct kcf_areq_node    *an_idprev;     /* Prev in ID hash */
 208         kcondvar_t              an_done;        /* Signal request completion */
 209         uint_t                  an_refcnt;
 210 } kcf_areq_node_t;
 211 
 212 #define KCF_AREQ_REFHOLD(areq) {                \
 213         atomic_add_32(&(areq)->an_refcnt, 1);   \
 214         ASSERT((areq)->an_refcnt != 0);         \
 215 }
 216 
 217 #define KCF_AREQ_REFRELE(areq) {                                \
 218         ASSERT((areq)->an_refcnt != 0);                         \
 219         membar_exit();                                          \
 220         if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0)      \
 221                 kcf_free_req(areq);                             \
 222 }
 223 
 224 #define GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))
 225 
 226 #define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
 227         (areq)->an_reqarg.cr_callback_arg, err);
 228 
 229 /* For internally generated call requests for dual operations */
 230 typedef struct kcf_call_req {
 231         crypto_call_req_t       kr_callreq;     /* external client call req */
 232         kcf_req_params_t        kr_params;      /* Params saved for next call */
 233         kcf_areq_node_t         *kr_areq;       /* Use this areq */
 234         off_t                   kr_saveoffset;
 235         size_t                  kr_savelen;
 236 } kcf_dual_req_t;
 237 
 238 /*
 239  * The following are somewhat similar to macros in callo.h, which implement
 240  * callout tables.


 297         kmutex_t                kc_in_use_lock;
 298         /*
 299          * kc_req_chain_first and kc_req_chain_last are used to chain
 300          * multiple async requests using the same context. They should be
 301          * NULL for sync requests.
 302          */
 303         kcf_areq_node_t         *kc_req_chain_first;
 304         kcf_areq_node_t         *kc_req_chain_last;
 305         kcf_provider_desc_t     *kc_prov_desc;  /* Prov. descriptor */
 306         kcf_provider_desc_t     *kc_sw_prov_desc;       /* SW prov. descriptor */
 307         kcf_mech_entry_t        *kc_mech;
 308         struct kcf_context      *kc_secondctx;  /* for dual contexts */
 309 } kcf_context_t;
 310 
 311 /*
 312  * Bump up the reference count on the framework private context. A
 313  * global context or a request that references this structure should
 314  * do a hold.
 315  */
 316 #define KCF_CONTEXT_REFHOLD(ictx) {             \
 317         atomic_add_32(&(ictx)->kc_refcnt, 1);   \
 318         ASSERT((ictx)->kc_refcnt != 0);         \
 319 }
 320 
 321 /*
 322  * Decrement the reference count on the framework private context.
 323  * When the last reference is released, the framework private
 324  * context structure is freed along with the global context.
 325  */
 326 #define KCF_CONTEXT_REFRELE(ictx) {                             \
 327         ASSERT((ictx)->kc_refcnt != 0);                         \
 328         membar_exit();                                          \
 329         if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0)      \
 330                 kcf_free_context(ictx);                         \
 331 }
 332 
 333 /*
 334  * Check if we can release the context now. In case of CRYPTO_QUEUED
 335  * we do not release it, as we can do so only after the provider has notified
 336  * us. In case of CRYPTO_BUSY, the client can retry the request using
 337  * the context, so we do not release the context.
 338  *
 339  * This macro should be called only from the final routine in
 340  * an init/update/final sequence. We do not release the context in case
 341  * of update operations. We require the consumer to free it
 342  * explicitly, in case it wants to abandon the operation. This is done
 343  * as there may be mechanisms in ECB mode that can continue even if
 344  * an operation on a block fails.
 345  */
 346 #define KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) {                 \
 347         if (KCF_CONTEXT_DONE(rv))                               \
 348                 KCF_CONTEXT_REFRELE(kcf_ctx);                   \
 349 }


New version (atomic_{inc,dec}_*):

 100         struct kcf_prov_tried   *pt_next;
 101 } kcf_prov_tried_t;
 102 
 103 /* Must be different from KM_SLEEP and KM_NOSLEEP */
 104 #define KCF_HOLD_PROV   0x1000
 105 
 106 #define IS_FG_SUPPORTED(mdesc, fg)              \
 107         (((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)
 108 
 109 #define IS_PROVIDER_TRIED(pd, tlist)            \
 110         (tlist != NULL && is_in_triedlist(pd, tlist))
 111 
 112 #define IS_RECOVERABLE(error)                   \
 113         (error == CRYPTO_BUFFER_TOO_BIG ||      \
 114         error == CRYPTO_BUSY ||                 \
 115         error == CRYPTO_DEVICE_ERROR ||         \
 116         error == CRYPTO_DEVICE_MEMORY ||        \
 117         error == CRYPTO_KEY_SIZE_RANGE ||       \
 118         error == CRYPTO_NO_PERMISSION)
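
IS_RECOVERABLE() drives provider failover: a request that fails with one of these transient errors can be resubmitted to another provider instead of failing outright, with IS_PROVIDER_TRIED() keeping the framework from revisiting a provider that already failed. A minimal sketch of that dispatch pattern, assuming hypothetical next_provider(), submit_to_provider(), and remember_tried() helpers (none of them are part of this change):

extern kcf_provider_desc_t *next_provider(kcf_prov_tried_t *);
extern int submit_to_provider(kcf_provider_desc_t *, kcf_req_params_t *);
extern kcf_prov_tried_t *remember_tried(kcf_provider_desc_t *, kcf_prov_tried_t *);

/*
 * Hypothetical dispatch loop: try each candidate provider in turn,
 * failing over on recoverable errors and remembering the failures.
 * next_provider() is assumed to skip providers already on tlist
 * (compare IS_PROVIDER_TRIED above) and to return NULL when the
 * candidates are exhausted.
 */
static int
try_providers(kcf_req_params_t *params)
{
        kcf_prov_tried_t *tlist = NULL;
        kcf_provider_desc_t *pd;
        int error = CRYPTO_MECH_NOT_SUPPORTED;

        while ((pd = next_provider(tlist)) != NULL) {
                error = submit_to_provider(pd, params);
                if (error == CRYPTO_SUCCESS || !IS_RECOVERABLE(error))
                        break;          /* done, or a hard failure */
                tlist = remember_tried(pd, tlist);
        }
        return (error);
}
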
 119 
 120 #define KCF_ATOMIC_INCR(x)      atomic_inc_32(&(x))
 121 #define KCF_ATOMIC_DECR(x)      atomic_dec_32(&(x))
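
This is the whole of the change: atomic_inc_32() and atomic_dec_32() are the dedicated counterparts in <sys/atomic.h>, equivalent to atomic_add_32() with a delta of 1 or -1. A standalone sketch of the equivalence (the demo function and counters are illustrative only):

#include <sys/atomic.h>

static uint32_t a, b;

static void
demo(void)
{
        /* Each pair leaves its counter at the same value. */
        atomic_add_32(&a, 1);           /* old spelling */
        atomic_inc_32(&b);              /* new spelling */

        atomic_add_32(&a, -1);
        atomic_dec_32(&b);
}
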
 122 
 123 /*
 124  * Node structure for synchronous requests.
 125  */
 126 typedef struct kcf_sreq_node {
 127         /* Should always be the first field in this structure */
 128         kcf_call_type_t         sn_type;
 129         /*
 130          * sn_cv and sn_lock are used to wait for the
 131          * operation to complete. sn_lock also protects
 132          * the sn_state field.
 133          */
 134         kcondvar_t              sn_cv;
 135         kmutex_t                sn_lock;
 136         kcf_req_status_t        sn_state;
 137 
 138         /*
 139          * Return value from the operation. This will be
 140          * one of the CRYPTO_* errors defined in common.h.
 141          */


 193         /*
 194          * Next and previous nodes in the global software
 195          * queue. These fields are NULL for a hardware
 196          * provider since we use a taskq there.
 197          */
 198         struct kcf_areq_node    *an_next;
 199         struct kcf_areq_node    *an_prev;
 200 
 201         /* Provider handling this request */
 202         kcf_provider_desc_t     *an_provider;
 203         kcf_prov_cpu_t          *an_mp;
 204         kcf_prov_tried_t        *an_tried_plist;
 205 
 206         struct kcf_areq_node    *an_idnext;     /* Next in ID hash */
 207         struct kcf_areq_node    *an_idprev;     /* Prev in ID hash */
 208         kcondvar_t              an_done;        /* Signal request completion */
 209         uint_t                  an_refcnt;
 210 } kcf_areq_node_t;
 211 
 212 #define KCF_AREQ_REFHOLD(areq) {                \
 213         atomic_inc_32(&(areq)->an_refcnt);      \
 214         ASSERT((areq)->an_refcnt != 0);         \
 215 }
 216 
 217 #define KCF_AREQ_REFRELE(areq) {                                \
 218         ASSERT((areq)->an_refcnt != 0);                         \
 219         membar_exit();                                          \
 220         if (atomic_dec_32_nv(&(areq)->an_refcnt) == 0)          \
 221                 kcf_free_req(areq);                             \
 222 }
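
The release side pairs membar_exit() with the _nv ("new value") variant: the barrier orders all prior stores to the object before the drop, and the returned count lets exactly one thread observe the transition to zero and free the node. The same pattern on a hypothetical standalone object (obj_t and obj_free() are illustrative, not from this change):

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/debug.h>

typedef struct obj {
        uint32_t        o_refcnt;
} obj_t;

extern void obj_free(obj_t *);          /* hypothetical destructor */

static void
obj_rele(obj_t *op)
{
        ASSERT(op->o_refcnt != 0);
        membar_exit();                  /* order prior stores before the drop */
        if (atomic_dec_32_nv(&op->o_refcnt) == 0)
                obj_free(op);           /* last reference: free exactly once */
}
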
 223 
 224 #define GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))
 225 
 226 #define NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
 227         (areq)->an_reqarg.cr_callback_arg, err);
 228 
 229 /* For internally generated call requests for dual operations */
 230 typedef struct kcf_call_req {
 231         crypto_call_req_t       kr_callreq;     /* external client call req */
 232         kcf_req_params_t        kr_params;      /* Params saved for next call */
 233         kcf_areq_node_t         *kr_areq;       /* Use this areq */
 234         off_t                   kr_saveoffset;
 235         size_t                  kr_savelen;
 236 } kcf_dual_req_t;
 237 
 238 /*
 239  * The following are somewhat similar to macros in callo.h, which implement
 240  * callout tables.


 297         kmutex_t                kc_in_use_lock;
 298         /*
 299          * kc_req_chain_first and kc_req_chain_last are used to chain
 300          * multiple async requests using the same context. They should be
 301          * NULL for sync requests.
 302          */
 303         kcf_areq_node_t         *kc_req_chain_first;
 304         kcf_areq_node_t         *kc_req_chain_last;
 305         kcf_provider_desc_t     *kc_prov_desc;  /* Prov. descriptor */
 306         kcf_provider_desc_t     *kc_sw_prov_desc;       /* SW prov. descriptor */
 307         kcf_mech_entry_t        *kc_mech;
 308         struct kcf_context      *kc_secondctx;  /* for dual contexts */
 309 } kcf_context_t;
 310 
 311 /*
 312  * Bump up the reference count on the framework private context. A
 313  * global context or a request that references this structure should
 314  * do a hold.
 315  */
 316 #define KCF_CONTEXT_REFHOLD(ictx) {             \
 317         atomic_inc_32(&(ictx)->kc_refcnt);      \
 318         ASSERT((ictx)->kc_refcnt != 0);         \
 319 }
 320 
 321 /*
 322  * Decrement the reference count on the framework private context.
 323  * When the last reference is released, the framework private
 324  * context structure is freed along with the global context.
 325  */
 326 #define KCF_CONTEXT_REFRELE(ictx) {                             \
 327         ASSERT((ictx)->kc_refcnt != 0);                         \
 328         membar_exit();                                          \
 329         if (atomic_dec_32_nv(&(ictx)->kc_refcnt) == 0)          \
 330                 kcf_free_context(ictx);                         \
 331 }
 332 
 333 /*
 334  * Check if we can release the context now. In case of CRYPTO_QUEUED
 335  * we do not release it, as we can do so only after the provider has notified
 336  * us. In case of CRYPTO_BUSY, the client can retry the request using
 337  * the context, so we do not release the context.
 338  *
 339  * This macro should be called only from the final routine in
 340  * an init/update/final sequence. We do not release the context in case
 341  * of update operations. We require the consumer to free it
 342  * explicitly, in case it wants to abandon the operation. This is done
 343  * as there may be mechanisms in ECB mode that can continue even if
 344  * an operation on a block fails.
 345  */
 346 #define KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) {                 \
 347         if (KCF_CONTEXT_DONE(rv))                               \
 348                 KCF_CONTEXT_REFRELE(kcf_ctx);                   \
 349 }
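
As the block comment above says, this macro belongs only at the tail of a final routine. A hedged sketch of such a caller, assuming KCF_CONTEXT_DONE(rv) is false for CRYPTO_QUEUED and CRYPTO_BUSY (the dispatch helper below is hypothetical, not part of this change):

extern int dispatch_final(kcf_context_t *);     /* hypothetical provider call */

static int
sample_final(kcf_context_t *kcf_ctx)
{
        int rv = dispatch_final(kcf_ctx);

        /*
         * Queued or busy results keep the context alive for the retry
         * or for the provider's completion notification; anything else
         * is final and drops our reference.
         */
        KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
        return (rv);
}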