5042 stop using deprecated atomic functions


old (before the change):

        }
        else
                kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);

        /*
         * We use taskq_dispatch() to schedule a timeout to clear
         * the flag so that kmem_reap() becomes self-throttling:
         * we won't reap again until the current reap completes *and*
         * at least kmem_reap_interval ticks have elapsed.
         */
        if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
                kmem_reap_done(flag);
}

static void
kmem_reap_common(void *flag_arg)
{
        uint32_t *flag = (uint32_t *)flag_arg;

        if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
            cas32(flag, 0, 1) != 0)
                return;

        /*
         * It may not be kosher to do memory allocation when a reap is
         * called (for example, if vmem_populate() is in the call chain).
         * So we start the reap going with a TQ_NOALLOC dispatch.  If the
         * dispatch fails, we reset the flag, and the next reap will try again.
         */
        if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
                *flag = 0;
}

/*
 * Reclaim all unused memory from all caches.  Called from the VM system
 * when memory gets tight.
 */
void
kmem_reap(void)
{
        kmem_reap_common(&kmem_reaping);
}




new (after the change; only the compare-and-swap call differs):

        }
        else
                kmem_cache_applyall_id(kmem_cache_reap, kmem_taskq, TQ_NOSLEEP);

        /*
         * We use taskq_dispatch() to schedule a timeout to clear
         * the flag so that kmem_reap() becomes self-throttling:
         * we won't reap again until the current reap completes *and*
         * at least kmem_reap_interval ticks have elapsed.
         */
        if (!taskq_dispatch(kmem_taskq, kmem_reap_done, flag, TQ_NOSLEEP))
                kmem_reap_done(flag);
}

static void
kmem_reap_common(void *flag_arg)
{
        uint32_t *flag = (uint32_t *)flag_arg;

        if (MUTEX_HELD(&kmem_cache_lock) || kmem_taskq == NULL ||
            atomic_cas_32(flag, 0, 1) != 0)
                return;

        /*
         * It may not be kosher to do memory allocation when a reap is
         * called (for example, if vmem_populate() is in the call chain).
         * So we start the reap going with a TQ_NOALLOC dispatch.  If the
         * dispatch fails, we reset the flag, and the next reap will try again.
         */
        if (!taskq_dispatch(kmem_taskq, kmem_reap_start, flag, TQ_NOALLOC))
                *flag = 0;
}

/*
 * Reclaim all unused memory from all caches.  Called from the VM system
 * when memory gets tight.
 */
void
kmem_reap(void)
{
        kmem_reap_common(&kmem_reaping);
}
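
For context: cas32() is one of the old, undocumented compare-and-swap routines that issue 5042 retires, and atomic_cas_32() is its documented replacement from <sys/atomic.h> (exposed to userland via <atomic.h>; see atomic_cas(3C)).  Both atomically compare *target against cmp, store newval only on a match, and return the value previously in *target, so the change above is a pure rename with no change in behavior.  Below is a minimal userland sketch of the CAS-gated one-shot idiom that kmem_reap_common() builds on that call; reap_common(), reap_work(), and reap_done() are hypothetical stand-ins for the kernel functions and are not part of this change.

/*
 * Minimal sketch (userland, illumos <atomic.h>) of the CAS-gated
 * one-shot pattern used by kmem_reap_common().  reap_work() and
 * reap_done() are hypothetical stand-ins for the taskq callbacks
 * kmem_reap_start() and kmem_reap_done(); they are not kernel code.
 */
#include <sys/types.h>
#include <atomic.h>
#include <stdio.h>

static volatile uint32_t reaping;       /* 0 = idle, 1 = reap in flight */

static void
reap_done(volatile uint32_t *flag)
{
        /* In the kernel this runs from a timeout, kmem_reap_interval later. */
        *flag = 0;
}

static void
reap_work(volatile uint32_t *flag)
{
        (void) printf("reaping\n");
        reap_done(flag);
}

static void
reap_common(volatile uint32_t *flag)
{
        /*
         * atomic_cas_32() returns the old value: 0 means we swung the
         * flag from 0 to 1 and own this reap; anything else means a
         * reap is already in flight, so back off.
         */
        if (atomic_cas_32(flag, 0, 1) != 0)
                return;
        reap_work(flag);
}

int
main(void)
{
        reap_common(&reaping);          /* wins the CAS and reaps */
        reaping = 1;                    /* pretend a reap is still in flight */
        reap_common(&reaping);          /* loses the CAS and returns early */
        return (0);
}

The kernel version adds two wrinkles the sketch leaves out, both spelled out in the comments above: the flag is cleared only from a timeout, so reaps are throttled to at most one per kmem_reap_interval, and the initial dispatch uses TQ_NOALLOC because a reap may be triggered from inside an allocation path such as vmem_populate().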