5045 use atomic_{inc,dec}_* instead of atomic_add_*
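The rewrite is mechanical: each atomic_add_64(&counter, 1) below becomes
atomic_inc_64(&counter), and per the synopsis the same treatment turns
atomic_add_*(..., -1) into atomic_dec_*(...), though only increments appear
in these hunks. A minimal sketch of the two spellings, assuming the illumos
<sys/atomic.h> interfaces; alloc_fail is a stand-in counter for illustration,
not the real kmem_cache_t member:

    #include <sys/types.h>
    #include <sys/atomic.h>

    /* Stand-in statistics counter (cache_alloc_fail in the hunks below). */
    static volatile uint64_t alloc_fail;

    static void
    count_failure(void)
    {
            /* Old form: general-purpose add with an explicit delta of 1. */
            atomic_add_64(&alloc_fail, 1);

            /* New form: dedicated increment, same atomicity guarantees. */
            atomic_inc_64(&alloc_fail);

            /*
             * The decrement side of the rename works the same way:
             * atomic_add_64(&alloc_fail, -1) becomes
             * atomic_dec_64(&alloc_fail).
             */
    }

Nothing changes for consumers of the counter; only the call-site spelling does.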

Before (atomic_add_*):

1609 
1610         kmem_log_event(kmem_slab_log, cp, sp, slab);
1611 
1612         return (sp);
1613 
1614 bufctl_alloc_failure:
1615 
1616         while ((bcp = sp->slab_head) != NULL) {
1617                 sp->slab_head = bcp->bc_next;
1618                 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1619         }
1620         kmem_cache_free(kmem_slab_cache, sp);
1621 
1622 slab_alloc_failure:
1623 
1624         vmem_free(vmp, slab, slabsize);
1625 
1626 vmem_alloc_failure:
1627 
1628         kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1629         atomic_add_64(&cp->cache_alloc_fail, 1);
1630 
1631         return (NULL);
1632 }
1633 
1634 /*
1635  * Destroy a slab.
1636  */
1637 static void
1638 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1639 {
1640         vmem_t *vmp = cp->cache_arena;
1641         void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1642 
1643         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1644         ASSERT(sp->slab_refcnt == 0);
1645 
1646         if (cp->cache_flags & KMF_HASH) {
1647                 kmem_bufctl_t *bcp;
1648                 while ((bcp = sp->slab_head) != NULL) {
1649                         sp->slab_head = bcp->bc_next;
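The failure labels in the hunk above (bufctl_alloc_failure,
slab_alloc_failure, vmem_alloc_failure) are the classic C unwind ladder:
each label releases exactly the resources acquired before the failing step,
in reverse order of acquisition, and the labels fall through so the final
label's bookkeeping (the failure log and the cache_alloc_fail bump) runs on
every path. A generic sketch of the idiom, using hypothetical acquire/release
pairs rather than the kmem or vmem interfaces:

    #include <stddef.h>

    /* Hypothetical resources; each alloc_* has a matching free_*. */
    struct obj { void *o_buf; };
    extern struct obj *alloc_header(void);
    extern void *alloc_buffer(void);
    extern int register_obj(struct obj *);
    extern void free_buffer(void *);
    extern void free_header(struct obj *);

    struct obj *
    obj_create(void)
    {
            struct obj *op;

            if ((op = alloc_header()) == NULL)
                    goto header_failure;
            if ((op->o_buf = alloc_buffer()) == NULL)
                    goto buffer_failure;
            if (register_obj(op) != 0)
                    goto register_failure;

            return (op);

    register_failure:                       /* undo in reverse order */
            free_buffer(op->o_buf);
    buffer_failure:
            free_header(op);
    header_failure:
            return (NULL);
    }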


1978                             cp->cache_verify)) {
1979                                 kmem_error(KMERR_MODIFIED, cp, buf);
1980                                 return (-1);
1981                         }
1982                 }
1983         }
1984         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1985 
1986         if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1987             gethrtime() % mtbf == 0 &&
1988             (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1989                 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1990                 if (!construct && cp->cache_destructor != NULL)
1991                         cp->cache_destructor(buf, cp->cache_private);
1992         } else {
1993                 mtbf = 0;
1994         }
1995 
1996         if (mtbf || (construct && cp->cache_constructor != NULL &&
1997             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1998                 atomic_add_64(&cp->cache_alloc_fail, 1);
1999                 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2000                 if (cp->cache_flags & KMF_DEADBEEF)
2001                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2002                 kmem_slab_free(cp, buf);
2003                 return (1);
2004         }
2005 
2006         if (cp->cache_flags & KMF_AUDIT) {
2007                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2008         }
2009 
2010         if ((cp->cache_flags & KMF_LITE) &&
2011             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2012                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2013         }
2014 
2015         return (0);
2016 }
2017 
2018 static int
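For context on the hunk above: kmem_mtbf (global) and cache_mtbf (per-cache)
implement fault injection, forcing roughly one in mtbf KM_NOSLEEP allocations
to fail so that callers' error paths get exercised; the synthetic failure is
counted through the same cache_alloc_fail counter this patch touches. A
user-land sketch of the idiom, with a hypothetical now_ns() clock standing in
for gethrtime():

    #include <stdint.h>

    /* Tunable mean-time-between-failures; 0 disables injection. */
    static uint64_t mtbf = 10000;

    /* Hypothetical high-resolution clock (gethrtime() in the kernel). */
    extern uint64_t now_ns(void);

    /*
     * Decide whether to inject a synthetic failure. Keying off the clock
     * modulo mtbf fails an unpredictable but tunable fraction of calls,
     * as the KM_NOSLEEP test above does.
     */
    static int
    inject_failure(void)
    {
            return (mtbf != 0 && now_ns() % mtbf == 0);
    }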


2586                  * Make kmem_cache_alloc_debug() apply the constructor for us.
2587                  */
2588                 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2589                 if (rc != 0) {
2590                         if (kmflag & KM_NOSLEEP)
2591                                 return (NULL);
2592                         /*
2593                          * kmem_cache_alloc_debug() detected corruption
2594                          * but didn't panic (kmem_panic <= 0). We should not be
2595                          * here because the constructor failed (indicated by a
2596                          * return code of 1). Try again.
2597                          */
2598                         ASSERT(rc == -1);
2599                         return (kmem_cache_alloc(cp, kmflag));
2600                 }
2601                 return (buf);
2602         }
2603 
2604         if (cp->cache_constructor != NULL &&
2605             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2606                 atomic_add_64(&cp->cache_alloc_fail, 1);
2607                 kmem_slab_free(cp, buf);
2608                 return (NULL);
2609         }
2610 
2611         return (buf);
2612 }
2613 
2614 /*
2615  * The freed argument tells whether or not kmem_cache_free_debug() has already
2616  * been called so that we can avoid the duplicate free error. For example, a
2617  * buffer on a magazine has already been freed by the client but is still
2618  * constructed.
2619  */
2620 static void
2621 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2622 {
2623         if (!freed && (cp->cache_flags & KMF_BUFTAG))
2624                 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2625                         return;
2626 
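The freed argument discussed in the comment above exists because the magazine
layer caches objects in their constructed state: a client's free can run the
debug free-check long before the buffer finally drops back to the slab layer,
so running the check again here would be reported as a duplicate free. A
minimal sketch of the guard, with hypothetical stand-ins (cache_t,
debug_check_free()) for the kmem internals:

    typedef struct cache cache_t;

    /* Hypothetical debug hook; returns -1 if the buffer is corrupt. */
    extern int debug_check_free(cache_t *, void *);

    static void
    slab_free_constructed(cache_t *cp, void *buf, int already_checked)
    {
            /*
             * Skip the check for buffers whose client-visible free
             * already ran, e.g. while the buffer sat in a magazine.
             */
            if (!already_checked && debug_check_free(cp, buf) == -1)
                    return;         /* abandon the corrupt buffer */
            /* ... destruct and return buf to its slab ... */
    }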


4860         if (free_on_slab) {
4861                 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4862                 kmem_slab_free(cp, callback->kmm_to_buf);
4863                 kmem_move_end(cp, callback);
4864                 return;
4865         }
4866 
4867         if (cp->cache_flags & KMF_BUFTAG) {
4868                 /*
4869                  * Make kmem_cache_alloc_debug() apply the constructor for us.
4870                  */
4871                 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4872                     KM_NOSLEEP, 1, caller()) != 0) {
4873                         KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4874                         kmem_move_end(cp, callback);
4875                         return;
4876                 }
4877         } else if (cp->cache_constructor != NULL &&
4878             cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4879             KM_NOSLEEP) != 0) {
4880                 atomic_add_64(&cp->cache_alloc_fail, 1);
4881                 KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4882                 kmem_slab_free(cp, callback->kmm_to_buf);
4883                 kmem_move_end(cp, callback);
4884                 return;
4885         }
4886 
4887         KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4888         KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4889             kmem_move_stats.kms_notify_callbacks);
4890         cp->cache_defrag->kmd_callbacks++;
4891         cp->cache_defrag->kmd_thread = curthread;
4892         cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4893         cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4894         DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4895             callback);
4896 
4897         response = cp->cache_move(callback->kmm_from_buf,
4898             callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4899 
4900         DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,


After (atomic_{inc,dec}_*):

1609 
1610         kmem_log_event(kmem_slab_log, cp, sp, slab);
1611 
1612         return (sp);
1613 
1614 bufctl_alloc_failure:
1615 
1616         while ((bcp = sp->slab_head) != NULL) {
1617                 sp->slab_head = bcp->bc_next;
1618                 kmem_cache_free(cp->cache_bufctl_cache, bcp);
1619         }
1620         kmem_cache_free(kmem_slab_cache, sp);
1621 
1622 slab_alloc_failure:
1623 
1624         vmem_free(vmp, slab, slabsize);
1625 
1626 vmem_alloc_failure:
1627 
1628         kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1629         atomic_inc_64(&cp->cache_alloc_fail);
1630 
1631         return (NULL);
1632 }
1633 
1634 /*
1635  * Destroy a slab.
1636  */
1637 static void
1638 kmem_slab_destroy(kmem_cache_t *cp, kmem_slab_t *sp)
1639 {
1640         vmem_t *vmp = cp->cache_arena;
1641         void *slab = (void *)P2ALIGN((uintptr_t)sp->slab_base, vmp->vm_quantum);
1642 
1643         ASSERT(MUTEX_NOT_HELD(&cp->cache_lock));
1644         ASSERT(sp->slab_refcnt == 0);
1645 
1646         if (cp->cache_flags & KMF_HASH) {
1647                 kmem_bufctl_t *bcp;
1648                 while ((bcp = sp->slab_head) != NULL) {
1649                         sp->slab_head = bcp->bc_next;


1978                             cp->cache_verify)) {
1979                                 kmem_error(KMERR_MODIFIED, cp, buf);
1980                                 return (-1);
1981                         }
1982                 }
1983         }
1984         btp->bt_redzone = KMEM_REDZONE_PATTERN;
1985 
1986         if ((mtbf = kmem_mtbf | cp->cache_mtbf) != 0 &&
1987             gethrtime() % mtbf == 0 &&
1988             (kmflag & (KM_NOSLEEP | KM_PANIC)) == KM_NOSLEEP) {
1989                 kmem_log_event(kmem_failure_log, cp, NULL, NULL);
1990                 if (!construct && cp->cache_destructor != NULL)
1991                         cp->cache_destructor(buf, cp->cache_private);
1992         } else {
1993                 mtbf = 0;
1994         }
1995 
1996         if (mtbf || (construct && cp->cache_constructor != NULL &&
1997             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0)) {
1998                 atomic_inc_64(&cp->cache_alloc_fail);
1999                 btp->bt_bxstat = (intptr_t)bcp ^ KMEM_BUFTAG_FREE;
2000                 if (cp->cache_flags & KMF_DEADBEEF)
2001                         copy_pattern(KMEM_FREE_PATTERN, buf, cp->cache_verify);
2002                 kmem_slab_free(cp, buf);
2003                 return (1);
2004         }
2005 
2006         if (cp->cache_flags & KMF_AUDIT) {
2007                 KMEM_AUDIT(kmem_transaction_log, cp, bcp);
2008         }
2009 
2010         if ((cp->cache_flags & KMF_LITE) &&
2011             !(cp->cache_cflags & KMC_KMEM_ALLOC)) {
2012                 KMEM_BUFTAG_LITE_ENTER(btp, kmem_lite_count, caller);
2013         }
2014 
2015         return (0);
2016 }
2017 
2018 static int


2586                  * Make kmem_cache_alloc_debug() apply the constructor for us.
2587                  */
2588                 int rc = kmem_cache_alloc_debug(cp, buf, kmflag, 1, caller());
2589                 if (rc != 0) {
2590                         if (kmflag & KM_NOSLEEP)
2591                                 return (NULL);
2592                         /*
2593                          * kmem_cache_alloc_debug() detected corruption
2594                          * but didn't panic (kmem_panic <= 0). We should not be
2595                          * here because the constructor failed (indicated by a
2596                          * return code of 1). Try again.
2597                          */
2598                         ASSERT(rc == -1);
2599                         return (kmem_cache_alloc(cp, kmflag));
2600                 }
2601                 return (buf);
2602         }
2603 
2604         if (cp->cache_constructor != NULL &&
2605             cp->cache_constructor(buf, cp->cache_private, kmflag) != 0) {
2606                 atomic_inc_64(&cp->cache_alloc_fail);
2607                 kmem_slab_free(cp, buf);
2608                 return (NULL);
2609         }
2610 
2611         return (buf);
2612 }
2613 
2614 /*
2615  * The freed argument tells whether or not kmem_cache_free_debug() has already
2616  * been called so that we can avoid the duplicate free error. For example, a
2617  * buffer on a magazine has already been freed by the client but is still
2618  * constructed.
2619  */
2620 static void
2621 kmem_slab_free_constructed(kmem_cache_t *cp, void *buf, boolean_t freed)
2622 {
2623         if (!freed && (cp->cache_flags & KMF_BUFTAG))
2624                 if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2625                         return;
2626 


4860         if (free_on_slab) {
4861                 KMEM_STAT_ADD(kmem_move_stats.kms_hunt_found_slab);
4862                 kmem_slab_free(cp, callback->kmm_to_buf);
4863                 kmem_move_end(cp, callback);
4864                 return;
4865         }
4866 
4867         if (cp->cache_flags & KMF_BUFTAG) {
4868                 /*
4869                  * Make kmem_cache_alloc_debug() apply the constructor for us.
4870                  */
4871                 if (kmem_cache_alloc_debug(cp, callback->kmm_to_buf,
4872                     KM_NOSLEEP, 1, caller()) != 0) {
4873                         KMEM_STAT_ADD(kmem_move_stats.kms_alloc_fail);
4874                         kmem_move_end(cp, callback);
4875                         return;
4876                 }
4877         } else if (cp->cache_constructor != NULL &&
4878             cp->cache_constructor(callback->kmm_to_buf, cp->cache_private,
4879             KM_NOSLEEP) != 0) {
4880                 atomic_inc_64(&cp->cache_alloc_fail);
4881                 KMEM_STAT_ADD(kmem_move_stats.kms_constructor_fail);
4882                 kmem_slab_free(cp, callback->kmm_to_buf);
4883                 kmem_move_end(cp, callback);
4884                 return;
4885         }
4886 
4887         KMEM_STAT_ADD(kmem_move_stats.kms_callbacks);
4888         KMEM_STAT_COND_ADD((callback->kmm_flags & KMM_NOTIFY),
4889             kmem_move_stats.kms_notify_callbacks);
4890         cp->cache_defrag->kmd_callbacks++;
4891         cp->cache_defrag->kmd_thread = curthread;
4892         cp->cache_defrag->kmd_from_buf = callback->kmm_from_buf;
4893         cp->cache_defrag->kmd_to_buf = callback->kmm_to_buf;
4894         DTRACE_PROBE2(kmem__move__start, kmem_cache_t *, cp, kmem_move_t *,
4895             callback);
4896 
4897         response = cp->cache_move(callback->kmm_from_buf,
4898             callback->kmm_to_buf, cp->cache_bufsize, cp->cache_private);
4899 
4900         DTRACE_PROBE3(kmem__move__end, kmem_cache_t *, cp, kmem_move_t *,