/*
 * Get a kmem_cache structure.  We arrange that cp->cache_cpu[]
 * is aligned on a KMEM_CPU_CACHE_SIZE boundary to prevent
 * false sharing of per-CPU data.
 */
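/*
 * The phase argument below makes the end of the allocation, rather than
 * its start, fall on a KMEM_CPU_CACHE_SIZE boundary: vmem_xalloc() hands
 * back an address whose offset within a KMEM_CPU_CACHE_SIZE chunk equals
 * P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), so addr + csize is a multiple of
 * KMEM_CPU_CACHE_SIZE and the cache_cpu[] array at the tail of the
 * structure (whose elements are each KMEM_CPU_CACHE_SIZE) starts aligned.
 */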
cp = vmem_xalloc(kmem_cache_arena, csize, KMEM_CPU_CACHE_SIZE,
    P2NPHASE(csize, KMEM_CPU_CACHE_SIZE), 0, NULL, NULL, VM_SLEEP);
bzero(cp, csize);
list_link_init(&cp->cache_link);

if (align == 0)
    align = KMEM_ALIGN;

/*
 * If we're not at least KMEM_ALIGN aligned, we can't use free
 * memory to hold bufctl information (because we can't safely
 * perform word loads and stores on it).
 */
if (align < KMEM_ALIGN)
    cflags |= KMC_NOTOUCH;

if (!ISP2(align) || align > vmp->vm_quantum)
    panic("kmem_cache_create: bad alignment %lu", align);

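/*
 * kmem_flags is sampled under kmem_flags_lock.  When KMF_RANDOMIZE is
 * set, the KMF_RANDOM bits are treated as a counter: ORing in ~KMF_RANDOM
 * lets the +1 carry across the non-random bit positions, the result is
 * masked back to KMF_RANDOM, and KMF_RANDOMIZE is re-set, so successive
 * caches pick up different combinations of the random debugging flags.
 */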
mutex_enter(&kmem_flags_lock);
if (kmem_flags & KMF_RANDOMIZE)
    kmem_flags = (((kmem_flags | ~KMF_RANDOM) + 1) & KMF_RANDOM) |
        KMF_RANDOMIZE;
cp->cache_flags = (kmem_flags | cflags) & KMF_DEBUG;
mutex_exit(&kmem_flags_lock);

/*
 * Make sure all the various flags are reasonable.
 */
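/*
 * KMC_NOHASH means per-buffer control data lives in the free buffers
 * themselves rather than in an external hash table, which requires
 * writing to free memory; KMC_NOTOUCH forbids exactly that, so the two
 * flags are mutually exclusive.
 */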
ASSERT(!(cflags & KMC_NOHASH) || !(cflags & KMC_NOTOUCH));

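/*
 * KMF_LITE selects lightweight debugging: when the buffer size and
 * requested alignment fall within the kmem_lite_minsize/kmem_lite_maxalign
 * bounds checked below, only a buftag is appended for basic integrity
 * checking and the heavier audit and firewall flags are dropped.
 */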
if (cp->cache_flags & KMF_LITE) {
    if (bufsize >= kmem_lite_minsize &&
        align <= kmem_lite_maxalign &&
        P2PHASE(bufsize, kmem_lite_maxalign) != 0) {
        cp->cache_flags |= KMF_BUFTAG;
        cp->cache_flags &= ~(KMF_AUDIT | KMF_FIREWALL);