5045 use atomic_{inc,dec}_* instead of atomic_add_*

*** 1624,1634 ****
  	vmem_free(vmp, slab, slabsize);
  
  vmem_alloc_failure:
  
  	kmem_log_event(kmem_failure_log, cp, NULL, NULL);
! 	atomic_add_64(&cp->cache_alloc_fail, 1);
  
  	return (NULL);
  }
  
  /*
--- 1624,1634 ----
  	vmem_free(vmp, slab, slabsize);
  
  vmem_alloc_failure:
  
  	kmem_log_event(kmem_failure_log, cp, NULL, NULL);
! 	atomic_inc_64(&cp->cache_alloc_fail);
  
  	return (NULL);
  }
  
  /*
*** 1993,2003 ****
  		mtbf = 0;
  	}
  
  	if (mtbf || (construct && cp->cache_constructor != NULL &&
  	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
! 		atomic_add_64(&cp->cache_alloc_fail, 1);
  		btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
  		if (cp->cache_flags & KMF_DEADBEEF)
  			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
  		kmem_slab_free(cp, buf);
  		return (1);
--- 1993,2003 ----
  		mtbf = 0;
  	}
  
  	if (mtbf || (construct && cp->cache_constructor != NULL &&
  	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
! 		atomic_inc_64(&cp->cache_alloc_fail);
  		btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
  		if (cp->cache_flags & KMF_DEADBEEF)
  			copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
  		kmem_slab_free(cp, buf);
  		return (1);
*** 2601,2611 ****
  		return (buf);
  	}
  
  	if (cp->cache_constructor != NULL &&
  	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
! 		atomic_add_64(&cp->cache_alloc_fail, 1);
  		kmem_slab_free(cp, buf);
  		return (NULL);
  	}
  
  	return (buf);
--- 2601,2611 ----
  		return (buf);
  	}
  
  	if (cp->cache_constructor != NULL &&
  	    cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
! 		atomic_inc_64(&cp->cache_alloc_fail);
  		kmem_slab_free(cp, buf);
  		return (NULL);
  	}
  
  	return (buf);
*** 4875,4885 ****
  			return;
  		}
  	} else if (cp->cache_constructor != NULL &&
  	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
  	    KM_NOSLEEP) != 0) {
! 		atomic_add_64(&cp->cache_alloc_fail, 1);
  		KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
  		kmem_slab_free(cp, callback->kmm_to_buf);
  		kmem_move_end(cp, callback);
  		return;
  	}
--- 4875,4885 ----
  			return;
  		}
  	} else if (cp->cache_constructor != NULL &&
  	    cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
  	    KM_NOSLEEP) != 0) {
! 		atomic_inc_64(&cp->cache_alloc_fail);
  		KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
  		kmem_slab_free(cp, callback->kmm_to_buf);
  		kmem_move_end(cp, callback);
  		return;
  	}
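
Every hunk above applies the same substitution: the 64-bit allocation-failure counter that was bumped with atomic_add_64(&cp->cache_alloc_fail, 1) is now bumped with atomic_inc_64(&cp->cache_alloc_fail). The following is a minimal standalone sketch, not part of the change, assuming an illumos/Solaris build environment where <sys/atomic.h> and the atomic_ops(3C) interfaces are available (the kernel code uses the same functions via atomic_ops(9F)); the counter name here is purely illustrative.

/*
 * Sketch of the idiom replaced throughout kmem.c: both calls bump a
 * 64-bit counter atomically, but atomic_inc_64() states the intent
 * (increment by one) directly instead of passing an explicit delta.
 */
#include <sys/atomic.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	/* stand-in for a statistics counter such as cp->cache_alloc_fail */
	volatile uint64_t alloc_fail = 0;

	atomic_add_64(&alloc_fail, 1);	/* old idiom: add a delta of 1 */
	atomic_inc_64(&alloc_fail);	/* new idiom: increment by one */

	(void) printf("alloc_fail = %" PRIu64 "\n", (uint64_t)alloc_fail);
	return (0);
}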