/* ... tail of vmem_nextfit_alloc() ... */
	/*
	 * Advance the rotor to right after the newly-allocated segment.
	 * That's where the next VM_NEXTFIT allocation will begin searching.
	 */
	vmem_advance(vmp, rotor, vsp);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}

/*
 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
 * freelist.  If size is not a power of 2, it may return a false negative.
 *
 * Used to decide if a newly imported span is superfluous after re-acquiring
 * the arena lock.
 */
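/*
 * Example: for size = 96, highbit(size) is 7, so only freelists holding
 * segments of at least 128 bytes (index 7 and up) are consulted.  A free
 * segment of exactly 96 bytes lives on freelist 6 and goes unnoticed,
 * hence the possible false negative for non-power-of-2 sizes.
 */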
static int
vmem_canalloc(vmem_t *vmp, size_t size)
{
	int hb;
	int flist = 0;
	ASSERT(MUTEX_HELD(&vmp->vm_lock));

	if (ISP2(size))
		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
	else if ((hb = highbit(size)) < VMEM_FREELISTS)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));

	return (flist);
}

/*
 * Allocate size bytes at offset phase from an align boundary such that the
 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
 * that does not straddle a nocross-aligned boundary.
 */
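/*
 * Example (hypothetical caller): a 1K buffer that is 256-byte aligned and
 * does not cross a 4K boundary, from anywhere in the arena (assuming the
 * arena's quantum divides these values):
 *
 *	buf = vmem_xalloc(vmp, 1024, 256, 0, 4096, NULL, NULL, VM_SLEEP);
 *
 * Constrained allocations must be released with vmem_xfree(vmp, buf, 1024).
 */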
void *
vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
    size_t nocross, void *minaddr, void *maxaddr, int vmflag)
{
	vmem_seg_t *vsp;
	vmem_seg_t *vbest = NULL;
	uintptr_t addr, taddr, start, end;
	uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
	void *vaddr, *xvaddr = NULL;
	size_t xsize;
	int hb, flist, resv;
	uint32_t mtbf;

	if ((align | phase | nocross) & (vmp->vm_quantum - 1))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters not vm_quantum aligned",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (nocross != 0 &&
	    (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "overconstrained allocation",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

	if (phase >= align || !ISP2(align) || !ISP2(nocross))
		panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
		    "parameters inconsistent or invalid",
		    (void *)vmp, size, align_arg, phase, nocross,
		    minaddr, maxaddr, vmflag);

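	/*
	 * Fault injection: when a mean-time-between-failures value is set
	 * (globally via vmem_mtbf or per-arena via vm_mtbf), periodically
	 * fail allocations that are VM_NOSLEEP but not VM_PANIC.
	 */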
	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	mutex_enter(&vmp->vm_lock);
	for (;;) {
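		/*
		 * Keep a minimum reserve of free segment structures on
		 * hand before attempting the allocation; vmem_populate()
		 * may drop and re-acquire vm_lock, and can fail for
		 * VM_NOSLEEP callers.
		 */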
		if (vmp->vm_nsegfree < VMEM_MINFREE &&
		    !vmem_populate(vmp, vmflag))
			break;
do_alloc:
		/*
		 * highbit() returns the highest bit + 1, which is exactly
		 * what we want: we want to search the first freelist whose
		 * members are *definitely* large enough to satisfy our
		 * allocation.  However, there are certain cases in which we
		 * want to look at the next-smallest freelist (which *might*
		 * be able to satisfy the allocation):
		 *
		 * (1)	The size is exactly a power of 2, in which case
		 *	the smaller freelist is always big enough;
		 *
		 * (2)	All other freelists are empty;
		 *
		 * (3)	We're in the highest possible freelist, which is
		 *	always empty (e.g. the 4GB freelist on 32-bit systems);
		 *
		 * (4)	We're doing a best-fit or first-fit allocation.
		 */
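		/*
		 * Example: size = 1536 gives highbit(size) = 11, so
		 * freelist 11 (segments of [2K, 4K)) is guaranteed big
		 * enough.  Under VM_BESTFIT we drop to hb = 10 so that
		 * freelist 10's [1K, 2K) segments, which might still
		 * fit, are searched as well.
		 */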
		if (ISP2(size)) {
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		} else {
			hb = highbit(size);
			if ((vmp->vm_freemap >> hb) == 0 ||
			    hb == VMEM_FREELISTS ||
			    (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
				hb--;
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
		}

		for (vbest = NULL, vsp = (flist == 0) ? NULL :
		    vmp->vm_freelist[flist - 1].vs_knext;
		    vsp != NULL; vsp = vsp->vs_knext) {
			vmp->vm_kstat.vk_search.value.ui64++;
			if (vsp->vs_start == 0) {
				/*
				 * We're moving up to a larger freelist,
				 * so if we've already found a candidate,
				 * the fit can't possibly get any better.
				 */
				if (vbest != NULL)
					break;

/* ... remainder of vmem_xalloc() elided ... */

/*
 * Allocate size bytes from arena vmp.  Returns the allocated address
 * on success, NULL on failure.  vmflag specifies VM_SLEEP or VM_NOSLEEP,
 * and may also specify best-fit, first-fit, or next-fit allocation policy
 * instead of the default instant-fit policy.  VM_SLEEP allocations are
 * guaranteed to succeed.
 */
void *
vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
{
	vmem_seg_t *vsp;
	uintptr_t addr;
	int hb;
	int flist = 0;
	uint32_t mtbf;

	if (size - 1 < vmp->vm_qcache_max)
		return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
		    vmp->vm_qshift], vmflag & VM_KMFLAGS));

	if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
	    (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
		return (NULL);

	if (vmflag & VM_NEXTFIT)
		return (vmem_nextfit_alloc(vmp, size, vmflag));

	if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
		return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
		    NULL, NULL, vmflag));

	/*
	 * Unconstrained instant-fit allocation from the segment list.
	 */
	mutex_enter(&vmp->vm_lock);

	if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
		if (ISP2(size))
			flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
		else if ((hb = highbit(size)) < VMEM_FREELISTS)
			flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
	}

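	/*
	 * flist == 0 means no freelist is guaranteed to satisfy the
	 * request (or the segment reserve couldn't be populated); fall
	 * back to vmem_xalloc(), which searches harder and can import
	 * new spans from the arena's source.
	 */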
	if (flist-- == 0) {
		mutex_exit(&vmp->vm_lock);
		return (vmem_xalloc(vmp, size, vmp->vm_quantum,
		    0, 0, NULL, NULL, vmflag));
	}

	ASSERT(size <= (1UL << flist));
	vsp = vmp->vm_freelist[flist].vs_knext;
	addr = vsp->vs_start;
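	/*
	 * VM_ENDALLOC places the allocation at the high end of the
	 * chosen free segment instead of the low end.
	 */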
	if (vmflag & VM_ENDALLOC) {
		addr += vsp->vs_end - (addr + size);
	}
	(void) vmem_seg_alloc(vmp, vsp, addr, size);
	mutex_exit(&vmp->vm_lock);
	return ((void *)addr);
}
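
/*
 * Usage sketch (my_arena is a hypothetical page-granular arena): an
 * instant-fit allocation of two pages, sleeping until space is available:
 *
 *	void *p = vmem_alloc(my_arena, 2 * PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(my_arena, p, 2 * PAGESIZE);
 */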