5045 use atomic_{inc,dec}_* instead of atomic_add_*
--- old/usr/src/uts/common/os/vmem.c
+++ new/usr/src/uts/common/os/vmem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2012 by Delphix. All rights reserved.
28 28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 29 */
30 30
31 31 /*
32 32 * Big Theory Statement for the virtual memory allocator.
33 33 *
34 34 * For a more complete description of the main ideas, see:
35 35 *
36 36 * Jeff Bonwick and Jonathan Adams,
37 37 *
38 38 * Magazines and vmem: Extending the Slab Allocator to Many CPUs and
39 39 * Arbitrary Resources.
40 40 *
41 41 * Proceedings of the 2001 Usenix Conference.
42 42 * Available as http://www.usenix.org/event/usenix01/bonwick.html
43 43 *
44 44 *
45 45 * 1. General Concepts
46 46 * -------------------
47 47 *
48 48 * 1.1 Overview
49 49 * ------------
50 50 * We divide the kernel address space into a number of logically distinct
51 51 * pieces, or *arenas*: text, data, heap, stack, and so on. Within these
52 52 * arenas we often subdivide further; for example, we use heap addresses
53 53 * not only for the kernel heap (kmem_alloc() space), but also for DVMA,
54 54 * bp_mapin(), /dev/kmem, and even some device mappings like the TOD chip.
55 55 * The kernel address space, therefore, is most accurately described as
56 56 * a tree of arenas in which each node of the tree *imports* some subset
57 57 * of its parent. The virtual memory allocator manages these arenas and
58 58 * supports their natural hierarchical structure.
59 59 *
60 60 * 1.2 Arenas
61 61 * ----------
62 62 * An arena is nothing more than a set of integers. These integers most
63 63 * commonly represent virtual addresses, but in fact they can represent
64 64 * anything at all. For example, we could use an arena containing the
65 65 * integers minpid through maxpid to allocate process IDs. vmem_create()
66 66 * and vmem_destroy() create and destroy vmem arenas. In order to
67 67 * differentiate between arenas used for addresses and arenas used for
68 68 * identifiers, the VMC_IDENTIFIER flag is passed to vmem_create(). This
69 69 * prevents identifier exhaustion from being diagnosed as general memory
70 70 * failure.
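As a concrete illustration of an identifier arena, here is a minimal sketch (not part of this file; the arena name and the minpid/maxpid bounds are the hypothetical values mentioned above) of allocating process IDs:

	vmem_t *pid_arena = vmem_create("pid", (void *)(uintptr_t)minpid,
	    maxpid - minpid, 1,		/* quantum of 1: plain integers */
	    NULL, NULL, NULL, 0,	/* no source arena, no quantum caching */
	    VM_SLEEP | VMC_IDENTIFIER);	/* identifier, not address, arena */

	pid_t pid = (pid_t)(uintptr_t)vmem_alloc(pid_arena, 1, VM_SLEEP);
	/* ... use pid ... */
	vmem_free(pid_arena, (void *)(uintptr_t)pid, 1);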
71 71 *
72 72 * 1.3 Spans
73 73 * ---------
74 74 * We represent the integers in an arena as a collection of *spans*, or
75 75 * contiguous ranges of integers. For example, the kernel heap consists
76 76 * of just one span: [kernelheap, ekernelheap). Spans can be added to an
77 77 * arena in two ways: explicitly, by vmem_add(), or implicitly, by
78 78 * importing, as described in Section 1.5 below.
79 79 *
80 80 * 1.4 Segments
81 81 * ------------
82 82 * Spans are subdivided into *segments*, each of which is either allocated
83 83 * or free. A segment, like a span, is a contiguous range of integers.
84 84 * Each allocated segment [addr, addr + size) represents exactly one
85 85 * vmem_alloc(size) that returned addr. Free segments represent the space
86 86 * between allocated segments. If two free segments are adjacent, we
87 87 * coalesce them into one larger segment; that is, if segments [a, b) and
88 88 * [b, c) are both free, we merge them into a single segment [a, c).
89 89 * The segments within a span are linked together in increasing-address order
90 90 * so we can easily determine whether coalescing is possible.
91 91 *
92 92 * Segments never cross span boundaries. When all segments within
93 93 * an imported span become free, we return the span to its source.
94 94 *
95 95 * 1.5 Imported Memory
96 96 * -------------------
97 97 * As mentioned in the overview, some arenas are logical subsets of
98 98 * other arenas. For example, kmem_va_arena (a virtual address cache
99 99 * that satisfies most kmem_slab_create() requests) is just a subset
100 100 * of heap_arena (the kernel heap) that provides caching for the most
101 101 * common slab sizes. When kmem_va_arena runs out of virtual memory,
102 102 * it *imports* more from the heap; we say that heap_arena is the
103 103 * *vmem source* for kmem_va_arena. vmem_create() allows you to
104 104 * specify any existing vmem arena as the source for your new arena.
105 105 * Topologically, since every arena is a child of at most one source,
106 106 * the set of all arenas forms a collection of trees.
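Here is a minimal sketch, loosely modeled on how kmem_va_arena is built on top of heap_arena (the arena name and qcache_max value are illustrative, not taken from this file):

	/*
	 * A sub-arena that imports page-sized spans of kernel virtual
	 * addresses from heap_arena; vmem_alloc()/vmem_free() serve as the
	 * import (afunc) and release (ffunc) routines.
	 */
	vmem_t *my_va_arena = vmem_create("my_va", NULL, 0, PAGESIZE,
	    vmem_alloc, vmem_free, heap_arena, 8 * PAGESIZE, VM_SLEEP);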
107 107 *
108 108 * 1.6 Constrained Allocations
109 109 * ---------------------------
110 110 * Some vmem clients are quite picky about the kind of address they want.
111 111 * For example, the DVMA code may need an address that is at a particular
112 112 * phase with respect to some alignment (to get good cache coloring), or
113 113 * that lies within certain limits (the addressable range of a device),
114 114 * or that doesn't cross some boundary (a DMA counter restriction) --
115 115 * or all of the above. vmem_xalloc() allows the client to specify any
116 116 * or all of these constraints.
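A minimal sketch of a constrained request (the arena and the constraint values are hypothetical):

	/*
	 * 64K of DVMA space, 8K-aligned, entirely below 16MB, and not
	 * crossing a 1MB boundary.
	 */
	void *dvma = vmem_xalloc(dvma_arena, 64 * 1024,	/* size */
	    8 * 1024,					/* align */
	    0,						/* phase */
	    1024 * 1024,				/* nocross */
	    NULL,					/* minaddr */
	    (void *)(uintptr_t)(16 * 1024 * 1024),	/* maxaddr */
	    VM_SLEEP);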
117 117 *
118 118 * 1.7 The Vmem Quantum
119 119 * --------------------
120 120 * Every arena has a notion of 'quantum', specified at vmem_create() time,
121 121 * that defines the arena's minimum unit of currency. Most commonly the
122 122 * quantum is either 1 or PAGESIZE, but any power of 2 is legal.
123 123 * All vmem allocations are guaranteed to be quantum-aligned.
124 124 *
125 125 * 1.8 Quantum Caching
126 126 * -------------------
127 127 * A vmem arena may be so hot (frequently used) that the scalability of vmem
128 128 * allocation is a significant concern. We address this by allowing the most
129 129 * common allocation sizes to be serviced by the kernel memory allocator,
130 130 * which provides low-latency per-cpu caching. The qcache_max argument to
131 131 * vmem_create() specifies the largest allocation size to cache.
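For example (hypothetical arena, assuming quantum == PAGESIZE and qcache_max == 8 * PAGESIZE), small allocations never touch the arena's segment lists:

	/* Served from a per-CPU kmem quantum cache, not under the arena lock. */
	void *p = vmem_alloc(my_va_arena, 2 * PAGESIZE, VM_SLEEP);
	vmem_free(my_va_arena, p, 2 * PAGESIZE);	/* size must match */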
132 132 *
133 133 * 1.9 Relationship to Kernel Memory Allocator
134 134 * -------------------------------------------
135 135 * Every kmem cache has a vmem arena as its slab supplier. The kernel memory
136 136 * allocator uses vmem_alloc() and vmem_free() to create and destroy slabs.
137 137 *
138 138 *
139 139 * 2. Implementation
140 140 * -----------------
141 141 *
142 142 * 2.1 Segment lists and markers
143 143 * -----------------------------
144 144 * The segment structure (vmem_seg_t) contains two doubly-linked lists.
145 145 *
146 146 * The arena list (vs_anext/vs_aprev) links all segments in the arena.
147 147 * In addition to the allocated and free segments, the arena contains
148 148 * special marker segments at span boundaries. Span markers simplify
149 149 * coalescing and importing logic by making it easy to tell both when
150 150 * we're at a span boundary (so we don't coalesce across it), and when
151 151 * a span is completely free (its neighbors will both be span markers).
152 152 *
153 153 * Imported spans will have vs_import set.
154 154 *
155 155 * The next-of-kin list (vs_knext/vs_kprev) links segments of the same type:
156 156 * (1) for allocated segments, vs_knext is the hash chain linkage;
157 157 * (2) for free segments, vs_knext is the freelist linkage;
158 158 * (3) for span marker segments, vs_knext is the next span marker.
159 159 *
160 160 * 2.2 Allocation hashing
161 161 * ----------------------
162 162 * We maintain a hash table of all allocated segments, hashed by address.
163 163 * This allows vmem_free() to discover the target segment in constant time.
164 164 * vmem_update() periodically resizes hash tables to keep hash chains short.
165 165 *
166 166 * 2.3 Freelist management
167 167 * -----------------------
168 168 * We maintain power-of-2 freelists for free segments, i.e. free segments
169 169 * of size >= 2^n reside in vmp->vm_freelist[n]. To ensure constant-time
170 170 * allocation, vmem_xalloc() looks not in the first freelist that *might*
171 171 * satisfy the allocation, but in the first freelist that *definitely*
172 172 * satisfies the allocation (unless VM_BESTFIT is specified, or all larger
173 173 * freelists are empty). For example, a 1000-byte allocation will be
174 174 * satisfied not from the 512..1023-byte freelist, whose members *might*
175 175 * contain a 1000-byte segment, but from a 1024-byte or larger freelist,
176 176 * the first member of which will *definitely* satisfy the allocation.
177 177 * This ensures that vmem_xalloc() works in constant time.
178 178 *
179 179 * We maintain a bit map to determine quickly which freelists are non-empty.
180 180 * vmp->vm_freemap & (1 << n) is non-zero iff vmp->vm_freelist[n] is non-empty.
181 181 *
182 182 * The different freelists are linked together into one large freelist,
183 183 * with the freelist heads serving as markers. Freelist markers simplify
184 184 * the maintenance of vm_freemap by making it easy to tell when we're taking
185 185 * the last member of a freelist (both of its neighbors will be markers).
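A condensed sketch of the instant-fit freelist choice, mirroring the logic in vmem_alloc() below: for a 1000-byte request, highbit(1000) is 10, so the search starts at the first non-empty freelist whose segments are at least 1024 bytes.

	if ((size & (size - 1)) == 0)		/* power of 2: own list suffices */
		flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
	else if ((hb = highbit(size)) < VMEM_FREELISTS)
		flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));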
186 186 *
187 187 * 2.4 Vmem Locking
188 188 * ----------------
189 189 * For simplicity, all arena state is protected by a per-arena lock.
190 190 * For very hot arenas, use quantum caching for scalability.
191 191 *
192 192 * 2.5 Vmem Population
193 193 * -------------------
194 194 * Any internal vmem routine that might need to allocate new segment
195 195 * structures must prepare in advance by calling vmem_populate(), which
196 196 * will preallocate enough vmem_seg_t's to get it through the entire
197 197 * operation without dropping the arena lock.
198 198 *
199 199 * 2.6 Auditing
200 200 * ------------
201 201 * If KMF_AUDIT is set in kmem_flags, we audit vmem allocations as well.
202 202 * Since virtual addresses cannot be scribbled on, there is no equivalent
203 203 * in vmem to redzone checking, deadbeef, or other kmem debugging features.
204 204 * Moreover, we do not audit frees because segment coalescing destroys the
205 205 * association between an address and its segment structure. Auditing is
206 206 * thus intended primarily to keep track of who's consuming the arena.
207 207 * Debugging support could certainly be extended in the future if it proves
208 208 * necessary, but we do so much live checking via the allocation hash table
209 209 * that even non-DEBUG systems get quite a bit of sanity checking already.
210 210 */
211 211
212 212 #include <sys/vmem_impl.h>
213 213 #include <sys/kmem.h>
214 214 #include <sys/kstat.h>
215 215 #include <sys/param.h>
216 216 #include <sys/systm.h>
217 217 #include <sys/atomic.h>
218 218 #include <sys/bitmap.h>
219 219 #include <sys/sysmacros.h>
220 220 #include <sys/cmn_err.h>
221 221 #include <sys/debug.h>
222 222 #include <sys/panic.h>
223 223
224 224 #define VMEM_INITIAL 10 /* early vmem arenas */
225 225 #define VMEM_SEG_INITIAL 200 /* early segments */
226 226
227 227 /*
228 228 * Adding a new span to an arena requires two segment structures: one to
229 229 * represent the span, and one to represent the free segment it contains.
230 230 */
231 231 #define VMEM_SEGS_PER_SPAN_CREATE 2
232 232
233 233 /*
234 234 * Allocating a piece of an existing segment requires 0-2 segment structures
235 235 * depending on how much of the segment we're allocating.
236 236 *
237 237 * To allocate the entire segment, no new segment structures are needed; we
238 238 * simply move the existing segment structure from the freelist to the
239 239 * allocation hash table.
240 240 *
241 241 * To allocate a piece from the left or right end of the segment, we must
242 242 * split the segment into two pieces (allocated part and remainder), so we
243 243 * need one new segment structure to represent the remainder.
244 244 *
245 245 * To allocate from the middle of a segment, we need two new segment structures
246 246 * to represent the remainders on either side of the allocated part.
247 247 */
248 248 #define VMEM_SEGS_PER_EXACT_ALLOC 0
249 249 #define VMEM_SEGS_PER_LEFT_ALLOC 1
250 250 #define VMEM_SEGS_PER_RIGHT_ALLOC 1
251 251 #define VMEM_SEGS_PER_MIDDLE_ALLOC 2
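To illustrate the middle-allocation case with hypothetical addresses: carving [b, c) out of a free segment [a, d) reuses the existing structure for the allocated piece and requires two new structures for the remainders.

	/*
	 *   before:  [a .............................. d)    one free segment
	 *   after:   [a ... b)[b ..... c)[c ........... d)    free|alloc|free
	 */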
252 252
253 253 /*
254 254 * vmem_populate() preallocates segment structures for vmem to do its work.
255 255 * It must preallocate enough for the worst case, which is when we must import
256 256 * a new span and then allocate from the middle of it.
257 257 */
258 258 #define VMEM_SEGS_PER_ALLOC_MAX \
259 259 (VMEM_SEGS_PER_SPAN_CREATE + VMEM_SEGS_PER_MIDDLE_ALLOC)
260 260
261 261 /*
262 262 * The segment structures themselves are allocated from vmem_seg_arena, so
263 263 * we have a recursion problem when vmem_seg_arena needs to populate itself.
264 264 * We address this by working out the maximum number of segment structures
265 265 * this act will require, and multiplying by the maximum number of threads
266 266 * that we'll allow to do it simultaneously.
267 267 *
268 268 * The worst-case segment consumption to populate vmem_seg_arena is as
269 269 * follows (depicted as a stack trace to indicate why events are occurring):
270 270 *
271 271 * (In order to lower the fragmentation in the heap_arena, we specify a
272 272 * minimum import size for the vmem_metadata_arena which is the same size
273 273 * as the kmem_va quantum cache allocations. This causes the worst-case
274 274 * allocation from the vmem_metadata_arena to be 3 segments.)
275 275 *
276 276 * vmem_alloc(vmem_seg_arena) -> 2 segs (span create + exact alloc)
277 277 * segkmem_alloc(vmem_metadata_arena)
278 278 * vmem_alloc(vmem_metadata_arena) -> 3 segs (span create + left alloc)
279 279 * vmem_alloc(heap_arena) -> 1 seg (left alloc)
280 280 * page_create()
281 281 * hat_memload()
282 282 * kmem_cache_alloc()
283 283 * kmem_slab_create()
284 284 * vmem_alloc(hat_memload_arena) -> 2 segs (span create + exact alloc)
285 285 * segkmem_alloc(heap_arena)
286 286 * vmem_alloc(heap_arena) -> 1 seg (left alloc)
287 287 * page_create()
288 288 * hat_memload() -> (hat layer won't recurse further)
289 289 *
290 290 * The worst-case consumption for each arena is 3 segment structures.
291 291 * Of course, a 3-seg reserve could easily be blown by multiple threads.
292 292 * Therefore, we serialize all allocations from vmem_seg_arena (which is OK
293 293 * because they're rare). We cannot allow a non-blocking allocation to get
294 294 * tied up behind a blocking allocation, however, so we use separate locks
295 295 * for VM_SLEEP and VM_NOSLEEP allocations. Similarly, VM_PUSHPAGE allocations
296 296 * must not block behind ordinary VM_SLEEPs. In addition, if the system is
297 297 * panicking then we must keep enough resources for panic_thread to do its
298 298 * work. Thus we have at most four threads trying to allocate from
299 299 * vmem_seg_arena, and each thread consumes at most three segment structures,
300 300 * so we must maintain a 12-seg reserve.
301 301 */
302 302 #define VMEM_POPULATE_RESERVE 12
303 303
304 304 /*
305 305 * vmem_populate() ensures that each arena has VMEM_MINFREE seg structures
306 306 * so that it can satisfy the worst-case allocation *and* participate in
307 307 * worst-case allocation from vmem_seg_arena.
308 308 */
309 309 #define VMEM_MINFREE (VMEM_POPULATE_RESERVE + VMEM_SEGS_PER_ALLOC_MAX)
310 310
311 311 static vmem_t vmem0[VMEM_INITIAL];
312 312 static vmem_t *vmem_populator[VMEM_INITIAL];
313 313 static uint32_t vmem_id;
314 314 static uint32_t vmem_populators;
315 315 static vmem_seg_t vmem_seg0[VMEM_SEG_INITIAL];
316 316 static vmem_seg_t *vmem_segfree;
317 317 static kmutex_t vmem_list_lock;
318 318 static kmutex_t vmem_segfree_lock;
319 319 static kmutex_t vmem_sleep_lock;
320 320 static kmutex_t vmem_nosleep_lock;
321 321 static kmutex_t vmem_pushpage_lock;
322 322 static kmutex_t vmem_panic_lock;
323 323 static vmem_t *vmem_list;
324 324 static vmem_t *vmem_metadata_arena;
325 325 static vmem_t *vmem_seg_arena;
326 326 static vmem_t *vmem_hash_arena;
327 327 static vmem_t *vmem_vmem_arena;
328 328 static long vmem_update_interval = 15; /* vmem_update() every 15 seconds */
329 329 uint32_t vmem_mtbf; /* mean time between failures [default: off] */
330 330 size_t vmem_seg_size = sizeof (vmem_seg_t);
331 331
332 332 static vmem_kstat_t vmem_kstat_template = {
333 333 { "mem_inuse", KSTAT_DATA_UINT64 },
334 334 { "mem_import", KSTAT_DATA_UINT64 },
335 335 { "mem_total", KSTAT_DATA_UINT64 },
336 336 { "vmem_source", KSTAT_DATA_UINT32 },
337 337 { "alloc", KSTAT_DATA_UINT64 },
338 338 { "free", KSTAT_DATA_UINT64 },
339 339 { "wait", KSTAT_DATA_UINT64 },
340 340 { "fail", KSTAT_DATA_UINT64 },
341 341 { "lookup", KSTAT_DATA_UINT64 },
342 342 { "search", KSTAT_DATA_UINT64 },
343 343 { "populate_wait", KSTAT_DATA_UINT64 },
344 344 { "populate_fail", KSTAT_DATA_UINT64 },
345 345 { "contains", KSTAT_DATA_UINT64 },
346 346 { "contains_search", KSTAT_DATA_UINT64 },
347 347 };
348 348
349 349 /*
350 350 * Insert/delete from arena list (type 'a') or next-of-kin list (type 'k').
351 351 */
352 352 #define VMEM_INSERT(vprev, vsp, type) \
353 353 { \
354 354 vmem_seg_t *vnext = (vprev)->vs_##type##next; \
355 355 (vsp)->vs_##type##next = (vnext); \
356 356 (vsp)->vs_##type##prev = (vprev); \
357 357 (vprev)->vs_##type##next = (vsp); \
358 358 (vnext)->vs_##type##prev = (vsp); \
359 359 }
360 360
361 361 #define VMEM_DELETE(vsp, type) \
362 362 { \
363 363 vmem_seg_t *vprev = (vsp)->vs_##type##prev; \
364 364 vmem_seg_t *vnext = (vsp)->vs_##type##next; \
365 365 (vprev)->vs_##type##next = (vnext); \
366 366 (vnext)->vs_##type##prev = (vprev); \
367 367 }
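For illustration, the token pasting above means VMEM_INSERT(vprev, vsp, a) operates on the arena links (and type 'k' on the next-of-kin links); it expands roughly to:

	vmem_seg_t *vnext = (vprev)->vs_anext;
	(vsp)->vs_anext = (vnext);
	(vsp)->vs_aprev = (vprev);
	(vprev)->vs_anext = (vsp);
	(vnext)->vs_aprev = (vsp);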
368 368
369 369 /*
370 370 * Get a vmem_seg_t from the global segfree list.
371 371 */
372 372 static vmem_seg_t *
373 373 vmem_getseg_global(void)
374 374 {
375 375 vmem_seg_t *vsp;
376 376
377 377 mutex_enter(&vmem_segfree_lock);
378 378 if ((vsp = vmem_segfree) != NULL)
379 379 vmem_segfree = vsp->vs_knext;
380 380 mutex_exit(&vmem_segfree_lock);
381 381
382 382 return (vsp);
383 383 }
384 384
385 385 /*
386 386 * Put a vmem_seg_t on the global segfree list.
387 387 */
388 388 static void
389 389 vmem_putseg_global(vmem_seg_t *vsp)
390 390 {
391 391 mutex_enter(&vmem_segfree_lock);
392 392 vsp->vs_knext = vmem_segfree;
393 393 vmem_segfree = vsp;
394 394 mutex_exit(&vmem_segfree_lock);
395 395 }
396 396
397 397 /*
398 398 * Get a vmem_seg_t from vmp's segfree list.
399 399 */
400 400 static vmem_seg_t *
401 401 vmem_getseg(vmem_t *vmp)
402 402 {
403 403 vmem_seg_t *vsp;
404 404
405 405 ASSERT(vmp->vm_nsegfree > 0);
406 406
407 407 vsp = vmp->vm_segfree;
408 408 vmp->vm_segfree = vsp->vs_knext;
409 409 vmp->vm_nsegfree--;
410 410
411 411 return (vsp);
412 412 }
413 413
414 414 /*
415 415 * Put a vmem_seg_t on vmp's segfree list.
416 416 */
417 417 static void
418 418 vmem_putseg(vmem_t *vmp, vmem_seg_t *vsp)
419 419 {
420 420 vsp->vs_knext = vmp->vm_segfree;
421 421 vmp->vm_segfree = vsp;
422 422 vmp->vm_nsegfree++;
423 423 }
424 424
425 425 /*
426 426 * Add vsp to the appropriate freelist.
427 427 */
428 428 static void
429 429 vmem_freelist_insert(vmem_t *vmp, vmem_seg_t *vsp)
430 430 {
431 431 vmem_seg_t *vprev;
432 432
433 433 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
434 434
435 435 vprev = (vmem_seg_t *)&vmp->vm_freelist[highbit(VS_SIZE(vsp)) - 1];
436 436 vsp->vs_type = VMEM_FREE;
437 437 vmp->vm_freemap |= VS_SIZE(vprev);
438 438 VMEM_INSERT(vprev, vsp, k);
439 439
440 440 cv_broadcast(&vmp->vm_cv);
441 441 }
442 442
443 443 /*
444 444 * Take vsp from the freelist.
445 445 */
446 446 static void
447 447 vmem_freelist_delete(vmem_t *vmp, vmem_seg_t *vsp)
448 448 {
449 449 ASSERT(*VMEM_HASH(vmp, vsp->vs_start) != vsp);
450 450 ASSERT(vsp->vs_type == VMEM_FREE);
451 451
452 452 if (vsp->vs_knext->vs_start == 0 && vsp->vs_kprev->vs_start == 0) {
453 453 /*
454 454 * The segments on both sides of 'vsp' are freelist heads,
455 455 * so taking vsp leaves the freelist at vsp->vs_kprev empty.
456 456 */
457 457 ASSERT(vmp->vm_freemap & VS_SIZE(vsp->vs_kprev));
458 458 vmp->vm_freemap ^= VS_SIZE(vsp->vs_kprev);
459 459 }
460 460 VMEM_DELETE(vsp, k);
461 461 }
462 462
463 463 /*
464 464 * Add vsp to the allocated-segment hash table and update kstats.
465 465 */
466 466 static void
467 467 vmem_hash_insert(vmem_t *vmp, vmem_seg_t *vsp)
468 468 {
469 469 vmem_seg_t **bucket;
470 470
471 471 vsp->vs_type = VMEM_ALLOC;
472 472 bucket = VMEM_HASH(vmp, vsp->vs_start);
473 473 vsp->vs_knext = *bucket;
474 474 *bucket = vsp;
475 475
476 476 if (vmem_seg_size == sizeof (vmem_seg_t)) {
477 477 vsp->vs_depth = (uint8_t)getpcstack(vsp->vs_stack,
478 478 VMEM_STACK_DEPTH);
479 479 vsp->vs_thread = curthread;
480 480 vsp->vs_timestamp = gethrtime();
481 481 } else {
482 482 vsp->vs_depth = 0;
483 483 }
484 484
485 485 vmp->vm_kstat.vk_alloc.value.ui64++;
486 486 vmp->vm_kstat.vk_mem_inuse.value.ui64 += VS_SIZE(vsp);
487 487 }
488 488
489 489 /*
490 490 * Remove vsp from the allocated-segment hash table and update kstats.
491 491 */
492 492 static vmem_seg_t *
493 493 vmem_hash_delete(vmem_t *vmp, uintptr_t addr, size_t size)
494 494 {
495 495 vmem_seg_t *vsp, **prev_vspp;
496 496
497 497 prev_vspp = VMEM_HASH(vmp, addr);
498 498 while ((vsp = *prev_vspp) != NULL) {
499 499 if (vsp->vs_start == addr) {
500 500 *prev_vspp = vsp->vs_knext;
501 501 break;
502 502 }
503 503 vmp->vm_kstat.vk_lookup.value.ui64++;
504 504 prev_vspp = &vsp->vs_knext;
505 505 }
506 506
507 507 if (vsp == NULL)
508 508 panic("vmem_hash_delete(%p, %lx, %lu): bad free",
509 509 (void *)vmp, addr, size);
510 510 if (VS_SIZE(vsp) != size)
511 511 panic("vmem_hash_delete(%p, %lx, %lu): wrong size (expect %lu)",
512 512 (void *)vmp, addr, size, VS_SIZE(vsp));
513 513
514 514 vmp->vm_kstat.vk_free.value.ui64++;
515 515 vmp->vm_kstat.vk_mem_inuse.value.ui64 -= size;
516 516
517 517 return (vsp);
518 518 }
519 519
520 520 /*
521 521 * Create a segment spanning the range [start, end) and add it to the arena.
522 522 */
523 523 static vmem_seg_t *
524 524 vmem_seg_create(vmem_t *vmp, vmem_seg_t *vprev, uintptr_t start, uintptr_t end)
525 525 {
526 526 vmem_seg_t *newseg = vmem_getseg(vmp);
527 527
528 528 newseg->vs_start = start;
529 529 newseg->vs_end = end;
530 530 newseg->vs_type = 0;
531 531 newseg->vs_import = 0;
532 532
533 533 VMEM_INSERT(vprev, newseg, a);
534 534
535 535 return (newseg);
536 536 }
537 537
538 538 /*
539 539 * Remove segment vsp from the arena.
540 540 */
541 541 static void
542 542 vmem_seg_destroy(vmem_t *vmp, vmem_seg_t *vsp)
543 543 {
544 544 ASSERT(vsp->vs_type != VMEM_ROTOR);
545 545 VMEM_DELETE(vsp, a);
546 546
547 547 vmem_putseg(vmp, vsp);
548 548 }
549 549
550 550 /*
551 551 * Add the span [vaddr, vaddr + size) to vmp and update kstats.
552 552 */
553 553 static vmem_seg_t *
554 554 vmem_span_create(vmem_t *vmp, void *vaddr, size_t size, uint8_t import)
555 555 {
556 556 vmem_seg_t *newseg, *span;
557 557 uintptr_t start = (uintptr_t)vaddr;
558 558 uintptr_t end = start + size;
559 559
560 560 ASSERT(MUTEX_HELD(&vmp->vm_lock));
561 561
562 562 if ((start | end) & (vmp->vm_quantum - 1))
563 563 panic("vmem_span_create(%p, %p, %lu): misaligned",
564 564 (void *)vmp, vaddr, size);
565 565
566 566 span = vmem_seg_create(vmp, vmp->vm_seg0.vs_aprev, start, end);
567 567 span->vs_type = VMEM_SPAN;
568 568 span->vs_import = import;
569 569 VMEM_INSERT(vmp->vm_seg0.vs_kprev, span, k);
570 570
571 571 newseg = vmem_seg_create(vmp, span, start, end);
572 572 vmem_freelist_insert(vmp, newseg);
573 573
574 574 if (import)
575 575 vmp->vm_kstat.vk_mem_import.value.ui64 += size;
576 576 vmp->vm_kstat.vk_mem_total.value.ui64 += size;
577 577
578 578 return (newseg);
579 579 }
580 580
581 581 /*
582 582 * Remove span vsp from vmp and update kstats.
583 583 */
584 584 static void
585 585 vmem_span_destroy(vmem_t *vmp, vmem_seg_t *vsp)
586 586 {
587 587 vmem_seg_t *span = vsp->vs_aprev;
588 588 size_t size = VS_SIZE(vsp);
589 589
590 590 ASSERT(MUTEX_HELD(&vmp->vm_lock));
591 591 ASSERT(span->vs_type == VMEM_SPAN);
592 592
593 593 if (span->vs_import)
594 594 vmp->vm_kstat.vk_mem_import.value.ui64 -= size;
595 595 vmp->vm_kstat.vk_mem_total.value.ui64 -= size;
596 596
597 597 VMEM_DELETE(span, k);
598 598
599 599 vmem_seg_destroy(vmp, vsp);
600 600 vmem_seg_destroy(vmp, span);
601 601 }
602 602
603 603 /*
604 604 * Allocate the subrange [addr, addr + size) from segment vsp.
605 605 * If there are leftovers on either side, place them on the freelist.
606 606 * Returns a pointer to the segment representing [addr, addr + size).
607 607 */
608 608 static vmem_seg_t *
609 609 vmem_seg_alloc(vmem_t *vmp, vmem_seg_t *vsp, uintptr_t addr, size_t size)
610 610 {
611 611 uintptr_t vs_start = vsp->vs_start;
612 612 uintptr_t vs_end = vsp->vs_end;
613 613 size_t vs_size = vs_end - vs_start;
614 614 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
615 615 uintptr_t addr_end = addr + realsize;
616 616
617 617 ASSERT(P2PHASE(vs_start, vmp->vm_quantum) == 0);
618 618 ASSERT(P2PHASE(addr, vmp->vm_quantum) == 0);
619 619 ASSERT(vsp->vs_type == VMEM_FREE);
620 620 ASSERT(addr >= vs_start && addr_end - 1 <= vs_end - 1);
621 621 ASSERT(addr - 1 <= addr_end - 1);
622 622
623 623 /*
624 624 * If we're allocating from the start of the segment, and the
625 625 * remainder will be on the same freelist, we can save quite
626 626 * a bit of work.
627 627 */
628 628 if (P2SAMEHIGHBIT(vs_size, vs_size - realsize) && addr == vs_start) {
629 629 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
630 630 vsp->vs_start = addr_end;
631 631 vsp = vmem_seg_create(vmp, vsp->vs_aprev, addr, addr + size);
632 632 vmem_hash_insert(vmp, vsp);
633 633 return (vsp);
634 634 }
635 635
636 636 vmem_freelist_delete(vmp, vsp);
637 637
638 638 if (vs_end != addr_end)
639 639 vmem_freelist_insert(vmp,
640 640 vmem_seg_create(vmp, vsp, addr_end, vs_end));
641 641
642 642 if (vs_start != addr)
643 643 vmem_freelist_insert(vmp,
644 644 vmem_seg_create(vmp, vsp->vs_aprev, vs_start, addr));
645 645
646 646 vsp->vs_start = addr;
647 647 vsp->vs_end = addr + size;
648 648
649 649 vmem_hash_insert(vmp, vsp);
650 650 return (vsp);
651 651 }
652 652
653 653 /*
654 654 * Returns 1 if we are populating, 0 otherwise.
655 655 * Call it if we want to prevent recursion from HAT.
656 656 */
657 657 int
658 658 vmem_is_populator()
659 659 {
660 660 return (mutex_owner(&vmem_sleep_lock) == curthread ||
661 661 mutex_owner(&vmem_nosleep_lock) == curthread ||
662 662 mutex_owner(&vmem_pushpage_lock) == curthread ||
663 663 mutex_owner(&vmem_panic_lock) == curthread);
664 664 }
665 665
666 666 /*
667 667 * Populate vmp's segfree list with VMEM_MINFREE vmem_seg_t structures.
668 668 */
669 669 static int
670 670 vmem_populate(vmem_t *vmp, int vmflag)
671 671 {
672 672 char *p;
673 673 vmem_seg_t *vsp;
674 674 ssize_t nseg;
675 675 size_t size;
676 676 kmutex_t *lp;
677 677 int i;
678 678
679 679 while (vmp->vm_nsegfree < VMEM_MINFREE &&
680 680 (vsp = vmem_getseg_global()) != NULL)
681 681 vmem_putseg(vmp, vsp);
682 682
683 683 if (vmp->vm_nsegfree >= VMEM_MINFREE)
684 684 return (1);
685 685
686 686 /*
687 687 * If we're already populating, tap the reserve.
688 688 */
689 689 if (vmem_is_populator()) {
690 690 ASSERT(vmp->vm_cflags & VMC_POPULATOR);
691 691 return (1);
692 692 }
693 693
694 694 mutex_exit(&vmp->vm_lock);
695 695
696 696 if (panic_thread == curthread)
697 697 lp = &vmem_panic_lock;
698 698 else if (vmflag & VM_NOSLEEP)
699 699 lp = &vmem_nosleep_lock;
700 700 else if (vmflag & VM_PUSHPAGE)
701 701 lp = &vmem_pushpage_lock;
702 702 else
703 703 lp = &vmem_sleep_lock;
704 704
705 705 mutex_enter(lp);
706 706
707 707 nseg = VMEM_MINFREE + vmem_populators * VMEM_POPULATE_RESERVE;
708 708 size = P2ROUNDUP(nseg * vmem_seg_size, vmem_seg_arena->vm_quantum);
709 709 nseg = size / vmem_seg_size;
710 710
711 711 /*
712 712 * The following vmem_alloc() may need to populate vmem_seg_arena
713 713 * and all the things it imports from. When doing so, it will tap
714 714 * each arena's reserve to prevent recursion (see the block comment
715 715 * above the definition of VMEM_POPULATE_RESERVE).
716 716 */
717 717 p = vmem_alloc(vmem_seg_arena, size, vmflag & VM_KMFLAGS);
718 718 if (p == NULL) {
719 719 mutex_exit(lp);
720 720 mutex_enter(&vmp->vm_lock);
721 721 vmp->vm_kstat.vk_populate_fail.value.ui64++;
722 722 return (0);
723 723 }
724 724
725 725 /*
726 726 * Restock the arenas that may have been depleted during population.
727 727 */
728 728 for (i = 0; i < vmem_populators; i++) {
729 729 mutex_enter(&vmem_populator[i]->vm_lock);
730 730 while (vmem_populator[i]->vm_nsegfree < VMEM_POPULATE_RESERVE)
731 731 vmem_putseg(vmem_populator[i],
732 732 (vmem_seg_t *)(p + --nseg * vmem_seg_size));
733 733 mutex_exit(&vmem_populator[i]->vm_lock);
734 734 }
735 735
736 736 mutex_exit(lp);
737 737 mutex_enter(&vmp->vm_lock);
738 738
739 739 /*
740 740 * Now take our own segments.
741 741 */
742 742 ASSERT(nseg >= VMEM_MINFREE);
743 743 while (vmp->vm_nsegfree < VMEM_MINFREE)
744 744 vmem_putseg(vmp, (vmem_seg_t *)(p + --nseg * vmem_seg_size));
745 745
746 746 /*
747 747 * Give the remainder to charity.
748 748 */
749 749 while (nseg > 0)
750 750 vmem_putseg_global((vmem_seg_t *)(p + --nseg * vmem_seg_size));
751 751
752 752 return (1);
753 753 }
754 754
755 755 /*
756 756 * Advance a walker from its previous position to 'afterme'.
757 757 * Note: may drop and reacquire vmp->vm_lock.
758 758 */
759 759 static void
760 760 vmem_advance(vmem_t *vmp, vmem_seg_t *walker, vmem_seg_t *afterme)
761 761 {
762 762 vmem_seg_t *vprev = walker->vs_aprev;
763 763 vmem_seg_t *vnext = walker->vs_anext;
764 764 vmem_seg_t *vsp = NULL;
765 765
766 766 VMEM_DELETE(walker, a);
767 767
768 768 if (afterme != NULL)
769 769 VMEM_INSERT(afterme, walker, a);
770 770
771 771 /*
772 772 * The walker segment's presence may have prevented its neighbors
773 773 * from coalescing. If so, coalesce them now.
774 774 */
775 775 if (vprev->vs_type == VMEM_FREE) {
776 776 if (vnext->vs_type == VMEM_FREE) {
777 777 ASSERT(vprev->vs_end == vnext->vs_start);
778 778 vmem_freelist_delete(vmp, vnext);
779 779 vmem_freelist_delete(vmp, vprev);
780 780 vprev->vs_end = vnext->vs_end;
781 781 vmem_freelist_insert(vmp, vprev);
782 782 vmem_seg_destroy(vmp, vnext);
783 783 }
784 784 vsp = vprev;
785 785 } else if (vnext->vs_type == VMEM_FREE) {
786 786 vsp = vnext;
787 787 }
788 788
789 789 /*
790 790 * vsp could represent a complete imported span,
791 791 * in which case we must return it to the source.
792 792 */
793 793 if (vsp != NULL && vsp->vs_aprev->vs_import &&
794 794 vmp->vm_source_free != NULL &&
795 795 vsp->vs_aprev->vs_type == VMEM_SPAN &&
796 796 vsp->vs_anext->vs_type == VMEM_SPAN) {
797 797 void *vaddr = (void *)vsp->vs_start;
798 798 size_t size = VS_SIZE(vsp);
799 799 ASSERT(size == VS_SIZE(vsp->vs_aprev));
800 800 vmem_freelist_delete(vmp, vsp);
801 801 vmem_span_destroy(vmp, vsp);
802 802 mutex_exit(&vmp->vm_lock);
803 803 vmp->vm_source_free(vmp->vm_source, vaddr, size);
804 804 mutex_enter(&vmp->vm_lock);
805 805 }
806 806 }
807 807
808 808 /*
809 809 * VM_NEXTFIT allocations deliberately cycle through all virtual addresses
810 810 * in an arena, so that we avoid reusing addresses for as long as possible.
811 811 * This helps to catch use-after-free bugs. It's also the perfect policy
812 812 * for allocating things like process IDs, where we want to cycle through
813 813 * all values in order.
814 814 */
815 815 static void *
816 816 vmem_nextfit_alloc(vmem_t *vmp, size_t size, int vmflag)
817 817 {
818 818 vmem_seg_t *vsp, *rotor;
819 819 uintptr_t addr;
820 820 size_t realsize = P2ROUNDUP(size, vmp->vm_quantum);
821 821 size_t vs_size;
822 822
823 823 mutex_enter(&vmp->vm_lock);
824 824
825 825 if (vmp->vm_nsegfree < VMEM_MINFREE && !vmem_populate(vmp, vmflag)) {
826 826 mutex_exit(&vmp->vm_lock);
827 827 return (NULL);
828 828 }
829 829
830 830 /*
831 831 * The common case is that the segment right after the rotor is free,
832 832 * and large enough that extracting 'size' bytes won't change which
833 833 * freelist it's on. In this case we can avoid a *lot* of work.
834 834 * Instead of the normal vmem_seg_alloc(), we just advance the start
835 835 * address of the victim segment. Instead of moving the rotor, we
836 836 * create the new segment structure *behind the rotor*, which has
837 837 * the same effect. And finally, we know we don't have to coalesce
838 838 * the rotor's neighbors because the new segment lies between them.
839 839 */
840 840 rotor = &vmp->vm_rotor;
841 841 vsp = rotor->vs_anext;
842 842 if (vsp->vs_type == VMEM_FREE && (vs_size = VS_SIZE(vsp)) > realsize &&
843 843 P2SAMEHIGHBIT(vs_size, vs_size - realsize)) {
844 844 ASSERT(highbit(vs_size) == highbit(vs_size - realsize));
845 845 addr = vsp->vs_start;
846 846 vsp->vs_start = addr + realsize;
847 847 vmem_hash_insert(vmp,
848 848 vmem_seg_create(vmp, rotor->vs_aprev, addr, addr + size));
849 849 mutex_exit(&vmp->vm_lock);
850 850 return ((void *)addr);
851 851 }
852 852
853 853 /*
854 854 * Starting at the rotor, look for a segment large enough to
855 855 * satisfy the allocation.
856 856 */
857 857 for (;;) {
858 858 vmp->vm_kstat.vk_search.value.ui64++;
859 859 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
860 860 break;
861 861 vsp = vsp->vs_anext;
862 862 if (vsp == rotor) {
863 863 /*
864 864 * We've come full circle. One possibility is that
865 865 * there's actually enough space, but the rotor itself
866 866 * is preventing the allocation from succeeding because
867 867 * it's sitting between two free segments. Therefore,
868 868 * we advance the rotor and see if that liberates a
869 869 * suitable segment.
870 870 */
871 871 vmem_advance(vmp, rotor, rotor->vs_anext);
872 872 vsp = rotor->vs_aprev;
873 873 if (vsp->vs_type == VMEM_FREE && VS_SIZE(vsp) >= size)
874 874 break;
875 875 /*
876 876 * If there's a lower arena we can import from, or it's
877 877 * a VM_NOSLEEP allocation, let vmem_xalloc() handle it.
878 878 * Otherwise, wait until another thread frees something.
879 879 */
880 880 if (vmp->vm_source_alloc != NULL ||
881 881 (vmflag & VM_NOSLEEP)) {
882 882 mutex_exit(&vmp->vm_lock);
883 883 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
884 884 0, 0, NULL, NULL, vmflag & VM_KMFLAGS));
885 885 }
886 886 vmp->vm_kstat.vk_wait.value.ui64++;
887 887 cv_wait(&vmp->vm_cv, &vmp->vm_lock);
888 888 vsp = rotor->vs_anext;
889 889 }
890 890 }
891 891
892 892 /*
893 893 * We found a segment. Extract enough space to satisfy the allocation.
894 894 */
895 895 addr = vsp->vs_start;
896 896 vsp = vmem_seg_alloc(vmp, vsp, addr, size);
897 897 ASSERT(vsp->vs_type == VMEM_ALLOC &&
898 898 vsp->vs_start == addr && vsp->vs_end == addr + size);
899 899
900 900 /*
901 901 * Advance the rotor to right after the newly-allocated segment.
902 902 * That's where the next VM_NEXTFIT allocation will begin searching.
903 903 */
904 904 vmem_advance(vmp, rotor, vsp);
905 905 mutex_exit(&vmp->vm_lock);
906 906 return ((void *)addr);
907 907 }
908 908
909 909 /*
910 910 * Checks if vmp is guaranteed to have a size-byte buffer somewhere on its
911 911 * freelist. If size is not a power-of-2, it can return a false-negative.
912 912 *
913 913 * Used to decide if a newly imported span is superfluous after re-acquiring
914 914 * the arena lock.
915 915 */
916 916 static int
917 917 vmem_canalloc(vmem_t *vmp, size_t size)
918 918 {
919 919 int hb;
920 920 int flist = 0;
921 921 ASSERT(MUTEX_HELD(&vmp->vm_lock));
922 922
923 923 if ((size & (size - 1)) == 0)
924 924 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
925 925 else if ((hb = highbit(size)) < VMEM_FREELISTS)
926 926 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
927 927
928 928 return (flist);
929 929 }
930 930
931 931 /*
932 932 * Allocate size bytes at offset phase from an align boundary such that the
933 933 * resulting segment [addr, addr + size) is a subset of [minaddr, maxaddr)
934 934 * that does not straddle a nocross-aligned boundary.
935 935 */
936 936 void *
937 937 vmem_xalloc(vmem_t *vmp, size_t size, size_t align_arg, size_t phase,
938 938 size_t nocross, void *minaddr, void *maxaddr, int vmflag)
939 939 {
940 940 vmem_seg_t *vsp;
941 941 vmem_seg_t *vbest = NULL;
942 942 uintptr_t addr, taddr, start, end;
943 943 uintptr_t align = (align_arg != 0) ? align_arg : vmp->vm_quantum;
944 944 void *vaddr, *xvaddr = NULL;
945 945 size_t xsize;
946 946 int hb, flist, resv;
947 947 uint32_t mtbf;
948 948
949 949 if ((align | phase | nocross) & (vmp->vm_quantum - 1))
950 950 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
951 951 "parameters not vm_quantum aligned",
952 952 (void *)vmp, size, align_arg, phase, nocross,
953 953 minaddr, maxaddr, vmflag);
954 954
955 955 if (nocross != 0 &&
956 956 (align > nocross || P2ROUNDUP(phase + size, align) > nocross))
957 957 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
958 958 "overconstrained allocation",
959 959 (void *)vmp, size, align_arg, phase, nocross,
960 960 minaddr, maxaddr, vmflag);
961 961
962 962 if (phase >= align || (align & (align - 1)) != 0 ||
963 963 (nocross & (nocross - 1)) != 0)
964 964 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
965 965 "parameters inconsistent or invalid",
966 966 (void *)vmp, size, align_arg, phase, nocross,
967 967 minaddr, maxaddr, vmflag);
968 968
969 969 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
970 970 (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
971 971 return (NULL);
972 972
973 973 mutex_enter(&vmp->vm_lock);
974 974 for (;;) {
975 975 if (vmp->vm_nsegfree < VMEM_MINFREE &&
976 976 !vmem_populate(vmp, vmflag))
977 977 break;
978 978 do_alloc:
979 979 /*
980 980 * highbit() returns the highest bit + 1, which is exactly
981 981 * what we want: we want to search the first freelist whose
982 982 * members are *definitely* large enough to satisfy our
983 983 * allocation. However, there are certain cases in which we
984 984 * want to look at the next-smallest freelist (which *might*
985 985 * be able to satisfy the allocation):
986 986 *
987 987 * (1) The size is exactly a power of 2, in which case
988 988 * the smaller freelist is always big enough;
989 989 *
990 990 * (2) All other freelists are empty;
991 991 *
992 992 * (3) We're in the highest possible freelist, which is
993 993 * always empty (e.g. the 4GB freelist on 32-bit systems);
994 994 *
995 995 * (4) We're doing a best-fit or first-fit allocation.
996 996 */
997 997 if ((size & (size - 1)) == 0) {
998 998 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
999 999 } else {
1000 1000 hb = highbit(size);
1001 1001 if ((vmp->vm_freemap >> hb) == 0 ||
1002 1002 hb == VMEM_FREELISTS ||
1003 1003 (vmflag & (VM_BESTFIT | VM_FIRSTFIT)))
1004 1004 hb--;
1005 1005 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1006 1006 }
1007 1007
1008 1008 for (vbest = NULL, vsp = (flist == 0) ? NULL :
1009 1009 vmp->vm_freelist[flist - 1].vs_knext;
1010 1010 vsp != NULL; vsp = vsp->vs_knext) {
1011 1011 vmp->vm_kstat.vk_search.value.ui64++;
1012 1012 if (vsp->vs_start == 0) {
1013 1013 /*
1014 1014 * We're moving up to a larger freelist,
1015 1015 * so if we've already found a candidate,
1016 1016 * the fit can't possibly get any better.
1017 1017 */
1018 1018 if (vbest != NULL)
1019 1019 break;
1020 1020 /*
1021 1021 * Find the next non-empty freelist.
1022 1022 */
1023 1023 flist = lowbit(P2ALIGN(vmp->vm_freemap,
1024 1024 VS_SIZE(vsp)));
1025 1025 if (flist-- == 0)
1026 1026 break;
1027 1027 vsp = (vmem_seg_t *)&vmp->vm_freelist[flist];
1028 1028 ASSERT(vsp->vs_knext->vs_type == VMEM_FREE);
1029 1029 continue;
1030 1030 }
1031 1031 if (vsp->vs_end - 1 < (uintptr_t)minaddr)
1032 1032 continue;
1033 1033 if (vsp->vs_start > (uintptr_t)maxaddr - 1)
1034 1034 continue;
1035 1035 start = MAX(vsp->vs_start, (uintptr_t)minaddr);
1036 1036 end = MIN(vsp->vs_end - 1, (uintptr_t)maxaddr - 1) + 1;
1037 1037 taddr = P2PHASEUP(start, align, phase);
1038 1038 if (P2BOUNDARY(taddr, size, nocross))
1039 1039 taddr +=
1040 1040 P2ROUNDUP(P2NPHASE(taddr, nocross), align);
1041 1041 if ((taddr - start) + size > end - start ||
1042 1042 (vbest != NULL && VS_SIZE(vsp) >= VS_SIZE(vbest)))
1043 1043 continue;
1044 1044 vbest = vsp;
1045 1045 addr = taddr;
1046 1046 if (!(vmflag & VM_BESTFIT) || VS_SIZE(vbest) == size)
1047 1047 break;
1048 1048 }
1049 1049 if (vbest != NULL)
1050 1050 break;
1051 1051 ASSERT(xvaddr == NULL);
1052 1052 if (size == 0)
1053 1053 panic("vmem_xalloc(): size == 0");
1054 1054 if (vmp->vm_source_alloc != NULL && nocross == 0 &&
1055 1055 minaddr == NULL && maxaddr == NULL) {
1056 1056 size_t aneeded, asize;
1057 1057 size_t aquantum = MAX(vmp->vm_quantum,
1058 1058 vmp->vm_source->vm_quantum);
1059 1059 size_t aphase = phase;
1060 1060 if ((align > aquantum) &&
1061 1061 !(vmp->vm_cflags & VMC_XALIGN)) {
1062 1062 aphase = (P2PHASE(phase, aquantum) != 0) ?
1063 1063 align - vmp->vm_quantum : align - aquantum;
1064 1064 ASSERT(aphase >= phase);
1065 1065 }
1066 1066 aneeded = MAX(size + aphase, vmp->vm_min_import);
1067 1067 asize = P2ROUNDUP(aneeded, aquantum);
1068 1068
1069 1069 if (asize < size) {
1070 1070 /*
1071 1071 * The rounding induced overflow; return NULL
1072 1072 * if we are permitted to fail the allocation
1073 1073 * (and explicitly panic if we aren't).
1074 1074 */
1075 1075 if ((vmflag & VM_NOSLEEP) &&
1076 1076 !(vmflag & VM_PANIC)) {
1077 1077 mutex_exit(&vmp->vm_lock);
1078 1078 return (NULL);
1079 1079 }
1080 1080
1081 1081 panic("vmem_xalloc(): size overflow");
1082 1082 }
1083 1083
1084 1084 /*
1085 1085 * Determine how many segment structures we'll consume.
1086 1086 * The calculation must be precise because if we're
1087 1087 * here on behalf of vmem_populate(), we are taking
1088 1088 * segments from a very limited reserve.
1089 1089 */
1090 1090 if (size == asize && !(vmp->vm_cflags & VMC_XALLOC))
1091 1091 resv = VMEM_SEGS_PER_SPAN_CREATE +
1092 1092 VMEM_SEGS_PER_EXACT_ALLOC;
1093 1093 else if (phase == 0 &&
1094 1094 align <= vmp->vm_source->vm_quantum)
1095 1095 resv = VMEM_SEGS_PER_SPAN_CREATE +
1096 1096 VMEM_SEGS_PER_LEFT_ALLOC;
1097 1097 else
1098 1098 resv = VMEM_SEGS_PER_ALLOC_MAX;
1099 1099
1100 1100 ASSERT(vmp->vm_nsegfree >= resv);
1101 1101 vmp->vm_nsegfree -= resv; /* reserve our segs */
1102 1102 mutex_exit(&vmp->vm_lock);
1103 1103 if (vmp->vm_cflags & VMC_XALLOC) {
1104 1104 size_t oasize = asize;
1105 1105 vaddr = ((vmem_ximport_t *)
1106 1106 vmp->vm_source_alloc)(vmp->vm_source,
1107 1107 &asize, align, vmflag & VM_KMFLAGS);
1108 1108 ASSERT(asize >= oasize);
1109 1109 ASSERT(P2PHASE(asize,
1110 1110 vmp->vm_source->vm_quantum) == 0);
1111 1111 ASSERT(!(vmp->vm_cflags & VMC_XALIGN) ||
1112 1112 IS_P2ALIGNED(vaddr, align));
1113 1113 } else {
1114 1114 vaddr = vmp->vm_source_alloc(vmp->vm_source,
1115 1115 asize, vmflag & VM_KMFLAGS);
1116 1116 }
1117 1117 mutex_enter(&vmp->vm_lock);
1118 1118 vmp->vm_nsegfree += resv; /* claim reservation */
1119 1119 aneeded = size + align - vmp->vm_quantum;
1120 1120 aneeded = P2ROUNDUP(aneeded, vmp->vm_quantum);
1121 1121 if (vaddr != NULL) {
1122 1122 /*
1123 1123 * Since we dropped the vmem lock while
1124 1124 * calling the import function, other
1125 1125 * threads could have imported space
1126 1126 * and made our import unnecessary. In
1127 1127 * order to save space, we return
1128 1128 * excess imports immediately.
1129 1129 */
1130 1130 if (asize > aneeded &&
1131 1131 vmp->vm_source_free != NULL &&
1132 1132 vmem_canalloc(vmp, aneeded)) {
1133 1133 ASSERT(resv >=
1134 1134 VMEM_SEGS_PER_MIDDLE_ALLOC);
1135 1135 xvaddr = vaddr;
1136 1136 xsize = asize;
1137 1137 goto do_alloc;
1138 1138 }
1139 1139 vbest = vmem_span_create(vmp, vaddr, asize, 1);
1140 1140 addr = P2PHASEUP(vbest->vs_start, align, phase);
1141 1141 break;
1142 1142 } else if (vmem_canalloc(vmp, aneeded)) {
1143 1143 /*
1144 1144 * Our import failed, but another thread
1145 1145 * added sufficient free memory to the arena
1146 1146 * to satisfy our request. Go back and
1147 1147 * grab it.
1148 1148 */
1149 1149 ASSERT(resv >= VMEM_SEGS_PER_MIDDLE_ALLOC);
1150 1150 goto do_alloc;
1151 1151 }
1152 1152 }
1153 1153
1154 1154 /*
1155 1155 * If the requestor chooses to fail the allocation attempt
1156 1156 * rather than reap, wait, and retry - get out of the loop.
1157 1157 */
1158 1158 if (vmflag & VM_ABORT)
1159 1159 break;
1160 1160 mutex_exit(&vmp->vm_lock);
1161 1161 if (vmp->vm_cflags & VMC_IDENTIFIER)
1162 1162 kmem_reap_idspace();
1163 1163 else
1164 1164 kmem_reap();
1165 1165 mutex_enter(&vmp->vm_lock);
1166 1166 if (vmflag & VM_NOSLEEP)
1167 1167 break;
1168 1168 vmp->vm_kstat.vk_wait.value.ui64++;
1169 1169 cv_wait(&vmp->vm_cv, &vmp->vm_lock);
1170 1170 }
1171 1171 if (vbest != NULL) {
1172 1172 ASSERT(vbest->vs_type == VMEM_FREE);
1173 1173 ASSERT(vbest->vs_knext != vbest);
1174 1174 /* re-position to end of buffer */
1175 1175 if (vmflag & VM_ENDALLOC) {
1176 1176 addr += ((vbest->vs_end - (addr + size)) / align) *
1177 1177 align;
1178 1178 }
1179 1179 (void) vmem_seg_alloc(vmp, vbest, addr, size);
1180 1180 mutex_exit(&vmp->vm_lock);
1181 1181 if (xvaddr)
1182 1182 vmp->vm_source_free(vmp->vm_source, xvaddr, xsize);
1183 1183 ASSERT(P2PHASE(addr, align) == phase);
1184 1184 ASSERT(!P2BOUNDARY(addr, size, nocross));
1185 1185 ASSERT(addr >= (uintptr_t)minaddr);
1186 1186 ASSERT(addr + size - 1 <= (uintptr_t)maxaddr - 1);
1187 1187 return ((void *)addr);
1188 1188 }
1189 1189 vmp->vm_kstat.vk_fail.value.ui64++;
1190 1190 mutex_exit(&vmp->vm_lock);
1191 1191 if (vmflag & VM_PANIC)
1192 1192 panic("vmem_xalloc(%p, %lu, %lu, %lu, %lu, %p, %p, %x): "
1193 1193 "cannot satisfy mandatory allocation",
1194 1194 (void *)vmp, size, align_arg, phase, nocross,
1195 1195 minaddr, maxaddr, vmflag);
1196 1196 ASSERT(xvaddr == NULL);
1197 1197 return (NULL);
1198 1198 }
1199 1199
1200 1200 /*
1201 1201 * Free the segment [vaddr, vaddr + size), where vaddr was a constrained
1202 1202 * allocation. vmem_xalloc() and vmem_xfree() must always be paired because
1203 1203 * both routines bypass the quantum caches.
1204 1204 */
1205 1205 void
1206 1206 vmem_xfree(vmem_t *vmp, void *vaddr, size_t size)
1207 1207 {
1208 1208 vmem_seg_t *vsp, *vnext, *vprev;
1209 1209
1210 1210 mutex_enter(&vmp->vm_lock);
1211 1211
1212 1212 vsp = vmem_hash_delete(vmp, (uintptr_t)vaddr, size);
1213 1213 vsp->vs_end = P2ROUNDUP(vsp->vs_end, vmp->vm_quantum);
1214 1214
1215 1215 /*
1216 1216 * Attempt to coalesce with the next segment.
1217 1217 */
1218 1218 vnext = vsp->vs_anext;
1219 1219 if (vnext->vs_type == VMEM_FREE) {
1220 1220 ASSERT(vsp->vs_end == vnext->vs_start);
1221 1221 vmem_freelist_delete(vmp, vnext);
1222 1222 vsp->vs_end = vnext->vs_end;
1223 1223 vmem_seg_destroy(vmp, vnext);
1224 1224 }
1225 1225
1226 1226 /*
1227 1227 * Attempt to coalesce with the previous segment.
1228 1228 */
1229 1229 vprev = vsp->vs_aprev;
1230 1230 if (vprev->vs_type == VMEM_FREE) {
1231 1231 ASSERT(vprev->vs_end == vsp->vs_start);
1232 1232 vmem_freelist_delete(vmp, vprev);
1233 1233 vprev->vs_end = vsp->vs_end;
1234 1234 vmem_seg_destroy(vmp, vsp);
1235 1235 vsp = vprev;
1236 1236 }
1237 1237
1238 1238 /*
1239 1239 * If the entire span is free, return it to the source.
1240 1240 */
1241 1241 if (vsp->vs_aprev->vs_import && vmp->vm_source_free != NULL &&
1242 1242 vsp->vs_aprev->vs_type == VMEM_SPAN &&
1243 1243 vsp->vs_anext->vs_type == VMEM_SPAN) {
1244 1244 vaddr = (void *)vsp->vs_start;
1245 1245 size = VS_SIZE(vsp);
1246 1246 ASSERT(size == VS_SIZE(vsp->vs_aprev));
1247 1247 vmem_span_destroy(vmp, vsp);
1248 1248 mutex_exit(&vmp->vm_lock);
1249 1249 vmp->vm_source_free(vmp->vm_source, vaddr, size);
1250 1250 } else {
1251 1251 vmem_freelist_insert(vmp, vsp);
1252 1252 mutex_exit(&vmp->vm_lock);
1253 1253 }
1254 1254 }
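A brief sketch of the pairing rule described above (arena, length, and alignment hypothetical): a buffer obtained from vmem_xalloc() must go back through vmem_xfree(), since vmem_free() could hand it to a quantum cache it never came from.

	void *va = vmem_xalloc(arena, len, align, 0, 0, NULL, NULL, VM_SLEEP);
	/* ... */
	vmem_xfree(arena, va, len);	/* not vmem_free() */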
1255 1255
1256 1256 /*
1257 1257 * Allocate size bytes from arena vmp. Returns the allocated address
1258 1258 * on success, NULL on failure. vmflag specifies VM_SLEEP or VM_NOSLEEP,
1259 1259 * and may also specify best-fit, first-fit, or next-fit allocation policy
1260 1260 * instead of the default instant-fit policy. VM_SLEEP allocations are
1261 1261 * guaranteed to succeed.
1262 1262 */
1263 1263 void *
1264 1264 vmem_alloc(vmem_t *vmp, size_t size, int vmflag)
1265 1265 {
1266 1266 vmem_seg_t *vsp;
1267 1267 uintptr_t addr;
1268 1268 int hb;
1269 1269 int flist = 0;
1270 1270 uint32_t mtbf;
1271 1271
1272 1272 if (size - 1 < vmp->vm_qcache_max)
1273 1273 return (kmem_cache_alloc(vmp->vm_qcache[(size - 1) >>
1274 1274 vmp->vm_qshift], vmflag & VM_KMFLAGS));
1275 1275
1276 1276 if ((mtbf = vmem_mtbf | vmp->vm_mtbf) != 0 && gethrtime() % mtbf == 0 &&
1277 1277 (vmflag & (VM_NOSLEEP | VM_PANIC)) == VM_NOSLEEP)
1278 1278 return (NULL);
1279 1279
1280 1280 if (vmflag & VM_NEXTFIT)
1281 1281 return (vmem_nextfit_alloc(vmp, size, vmflag));
1282 1282
1283 1283 if (vmflag & (VM_BESTFIT | VM_FIRSTFIT))
1284 1284 return (vmem_xalloc(vmp, size, vmp->vm_quantum, 0, 0,
1285 1285 NULL, NULL, vmflag));
1286 1286
1287 1287 /*
1288 1288 * Unconstrained instant-fit allocation from the segment list.
1289 1289 */
1290 1290 mutex_enter(&vmp->vm_lock);
1291 1291
1292 1292 if (vmp->vm_nsegfree >= VMEM_MINFREE || vmem_populate(vmp, vmflag)) {
1293 1293 if ((size & (size - 1)) == 0)
1294 1294 flist = lowbit(P2ALIGN(vmp->vm_freemap, size));
1295 1295 else if ((hb = highbit(size)) < VMEM_FREELISTS)
1296 1296 flist = lowbit(P2ALIGN(vmp->vm_freemap, 1UL << hb));
1297 1297 }
1298 1298
1299 1299 if (flist-- == 0) {
1300 1300 mutex_exit(&vmp->vm_lock);
1301 1301 return (vmem_xalloc(vmp, size, vmp->vm_quantum,
1302 1302 0, 0, NULL, NULL, vmflag));
1303 1303 }
1304 1304
1305 1305 ASSERT(size <= (1UL << flist));
1306 1306 vsp = vmp->vm_freelist[flist].vs_knext;
1307 1307 addr = vsp->vs_start;
1308 1308 if (vmflag & VM_ENDALLOC) {
1309 1309 addr += vsp->vs_end - (addr + size);
1310 1310 }
1311 1311 (void) vmem_seg_alloc(vmp, vsp, addr, size);
1312 1312 mutex_exit(&vmp->vm_lock);
1313 1313 return ((void *)addr);
1314 1314 }
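A short usage sketch (hypothetical arena): VM_SLEEP requests never return NULL, but VM_NOSLEEP requests can fail and must be checked.

	void *p = vmem_alloc(arena, len, VM_NOSLEEP);
	if (p == NULL) {
		/* back off or retry later; nothing was allocated */
	}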
1315 1315
1316 1316 /*
1317 1317 * Free the segment [vaddr, vaddr + size).
1318 1318 */
1319 1319 void
1320 1320 vmem_free(vmem_t *vmp, void *vaddr, size_t size)
1321 1321 {
1322 1322 if (size - 1 < vmp->vm_qcache_max)
1323 1323 kmem_cache_free(vmp->vm_qcache[(size - 1) >> vmp->vm_qshift],
1324 1324 vaddr);
1325 1325 else
1326 1326 vmem_xfree(vmp, vaddr, size);
1327 1327 }
1328 1328
1329 1329 /*
1330 1330 * Determine whether arena vmp contains the segment [vaddr, vaddr + size).
1331 1331 */
1332 1332 int
1333 1333 vmem_contains(vmem_t *vmp, void *vaddr, size_t size)
1334 1334 {
1335 1335 uintptr_t start = (uintptr_t)vaddr;
1336 1336 uintptr_t end = start + size;
1337 1337 vmem_seg_t *vsp;
1338 1338 vmem_seg_t *seg0 = &vmp->vm_seg0;
1339 1339
1340 1340 mutex_enter(&vmp->vm_lock);
1341 1341 vmp->vm_kstat.vk_contains.value.ui64++;
1342 1342 for (vsp = seg0->vs_knext; vsp != seg0; vsp = vsp->vs_knext) {
1343 1343 vmp->vm_kstat.vk_contains_search.value.ui64++;
1344 1344 ASSERT(vsp->vs_type == VMEM_SPAN);
1345 1345 if (start >= vsp->vs_start && end - 1 <= vsp->vs_end - 1)
1346 1346 break;
1347 1347 }
1348 1348 mutex_exit(&vmp->vm_lock);
1349 1349 return (vsp != seg0);
1350 1350 }
1351 1351
1352 1352 /*
1353 1353 * Add the span [vaddr, vaddr + size) to arena vmp.
1354 1354 */
1355 1355 void *
1356 1356 vmem_add(vmem_t *vmp, void *vaddr, size_t size, int vmflag)
1357 1357 {
1358 1358 if (vaddr == NULL || size == 0)
1359 1359 panic("vmem_add(%p, %p, %lu): bad arguments",
1360 1360 (void *)vmp, vaddr, size);
1361 1361
1362 1362 ASSERT(!vmem_contains(vmp, vaddr, size));
1363 1363
1364 1364 mutex_enter(&vmp->vm_lock);
1365 1365 if (vmem_populate(vmp, vmflag))
1366 1366 (void) vmem_span_create(vmp, vaddr, size, 0);
1367 1367 else
1368 1368 vaddr = NULL;
1369 1369 mutex_exit(&vmp->vm_lock);
1370 1370 return (vaddr);
1371 1371 }
1372 1372
1373 1373 /*
1374 1374 * Walk the vmp arena, applying func to each segment matching typemask.
1375 1375 * If VMEM_REENTRANT is specified, the arena lock is dropped across each
1376 1376 * call to func(); otherwise, it is held for the duration of vmem_walk()
1377 1377 * to ensure a consistent snapshot. Note that VMEM_REENTRANT callbacks
1378 1378 * are *not* necessarily consistent, so they may only be used when a hint
1379 1379 * is adequate.
1380 1380 */
1381 1381 void
1382 1382 vmem_walk(vmem_t *vmp, int typemask,
1383 1383 void (*func)(void *, void *, size_t), void *arg)
1384 1384 {
1385 1385 vmem_seg_t *vsp;
1386 1386 vmem_seg_t *seg0 = &vmp->vm_seg0;
1387 1387 vmem_seg_t walker;
1388 1388
1389 1389 if (typemask & VMEM_WALKER)
1390 1390 return;
1391 1391
1392 1392 bzero(&walker, sizeof (walker));
1393 1393 walker.vs_type = VMEM_WALKER;
1394 1394
1395 1395 mutex_enter(&vmp->vm_lock);
1396 1396 VMEM_INSERT(seg0, &walker, a);
1397 1397 for (vsp = seg0->vs_anext; vsp != seg0; vsp = vsp->vs_anext) {
1398 1398 if (vsp->vs_type & typemask) {
1399 1399 void *start = (void *)vsp->vs_start;
1400 1400 size_t size = VS_SIZE(vsp);
1401 1401 if (typemask & VMEM_REENTRANT) {
1402 1402 vmem_advance(vmp, &walker, vsp);
1403 1403 mutex_exit(&vmp->vm_lock);
1404 1404 func(arg, start, size);
1405 1405 mutex_enter(&vmp->vm_lock);
1406 1406 vsp = &walker;
1407 1407 } else {
1408 1408 func(arg, start, size);
1409 1409 }
1410 1410 }
1411 1411 }
1412 1412 vmem_advance(vmp, &walker, NULL);
1413 1413 mutex_exit(&vmp->vm_lock);
1414 1414 }
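A minimal sketch of a vmem_walk() client (the callback and variable names are hypothetical):

	/* Accumulate the total size of free segments in an arena. */
	static void
	add_free_seg(void *arg, void *start, size_t size)
	{
		*(size_t *)arg += size;		/* start is unused here */
	}

	size_t free_bytes = 0;
	vmem_walk(arena, VMEM_FREE, add_free_seg, &free_bytes);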
1415 1415
1416 1416 /*
1417 1417 * Return the total amount of memory whose type matches typemask. Thus:
1418 1418 *
1419 1419 * typemask VMEM_ALLOC yields total memory allocated (in use).
1420 1420 * typemask VMEM_FREE yields total memory free (available).
1421 1421 * typemask (VMEM_ALLOC | VMEM_FREE) yields total arena size.
1422 1422 */
1423 1423 size_t
1424 1424 vmem_size(vmem_t *vmp, int typemask)
1425 1425 {
1426 1426 uint64_t size = 0;
1427 1427
1428 1428 if (typemask & VMEM_ALLOC)
1429 1429 size += vmp->vm_kstat.vk_mem_inuse.value.ui64;
1430 1430 if (typemask & VMEM_FREE)
1431 1431 size += vmp->vm_kstat.vk_mem_total.value.ui64 -
1432 1432 vmp->vm_kstat.vk_mem_inuse.value.ui64;
1433 1433 return ((size_t)size);
1434 1434 }
1435 1435
1436 1436 /*
1437 1437 * Create an arena called name whose initial span is [base, base + size).
1438 1438 * The arena's natural unit of currency is quantum, so vmem_alloc()
1439 1439 * guarantees quantum-aligned results. The arena may import new spans
1440 1440 * by invoking afunc() on source, and may return those spans by invoking
1441 1441 * ffunc() on source. To make small allocations fast and scalable,
1442 1442 * the arena offers high-performance caching for each integer multiple
1443 1443 * of quantum up to qcache_max.
1444 1444 */
1445 1445 static vmem_t *
1446 1446 vmem_create_common(const char *name, void *base, size_t size, size_t quantum,
1447 1447 void *(*afunc)(vmem_t *, size_t, int),
1448 1448 void (*ffunc)(vmem_t *, void *, size_t),
1449 1449 vmem_t *source, size_t qcache_max, int vmflag)
1450 1450 {
1451 1451 int i;
1452 1452 size_t nqcache;
1453 1453 vmem_t *vmp, *cur, **vmpp;
1454 1454 vmem_seg_t *vsp;
1455 1455 vmem_freelist_t *vfp;
1456 - uint32_t id = atomic_add_32_nv(&vmem_id, 1);
1456 + uint32_t id = atomic_inc_32_nv(&vmem_id);
1457 1457
1458 1458 if (vmem_vmem_arena != NULL) {
1459 1459 vmp = vmem_alloc(vmem_vmem_arena, sizeof (vmem_t),
1460 1460 vmflag & VM_KMFLAGS);
1461 1461 } else {
1462 1462 ASSERT(id <= VMEM_INITIAL);
1463 1463 vmp = &vmem0[id - 1];
1464 1464 }
1465 1465
1466 1466 /* An identifier arena must inherit from another identifier arena */
1467 1467 ASSERT(source == NULL || ((source->vm_cflags & VMC_IDENTIFIER) ==
1468 1468 (vmflag & VMC_IDENTIFIER)));
1469 1469
1470 1470 if (vmp == NULL)
1471 1471 return (NULL);
1472 1472 bzero(vmp, sizeof (vmem_t));
1473 1473
1474 1474 (void) snprintf(vmp->vm_name, VMEM_NAMELEN, "%s", name);
1475 1475 mutex_init(&vmp->vm_lock, NULL, MUTEX_DEFAULT, NULL);
1476 1476 cv_init(&vmp->vm_cv, NULL, CV_DEFAULT, NULL);
1477 1477 vmp->vm_cflags = vmflag;
1478 1478 vmflag &= VM_KMFLAGS;
1479 1479
1480 1480 vmp->vm_quantum = quantum;
1481 1481 vmp->vm_qshift = highbit(quantum) - 1;
1482 1482 nqcache = MIN(qcache_max >> vmp->vm_qshift, VMEM_NQCACHE_MAX);
1483 1483
1484 1484 for (i = 0; i <= VMEM_FREELISTS; i++) {
1485 1485 vfp = &vmp->vm_freelist[i];
1486 1486 vfp->vs_end = 1UL << i;
1487 1487 vfp->vs_knext = (vmem_seg_t *)(vfp + 1);
1488 1488 vfp->vs_kprev = (vmem_seg_t *)(vfp - 1);
1489 1489 }
1490 1490
1491 1491 vmp->vm_freelist[0].vs_kprev = NULL;
1492 1492 vmp->vm_freelist[VMEM_FREELISTS].vs_knext = NULL;
1493 1493 vmp->vm_freelist[VMEM_FREELISTS].vs_end = 0;
1494 1494 vmp->vm_hash_table = vmp->vm_hash0;
1495 1495 vmp->vm_hash_mask = VMEM_HASH_INITIAL - 1;
1496 1496 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1497 1497
1498 1498 vsp = &vmp->vm_seg0;
1499 1499 vsp->vs_anext = vsp;
1500 1500 vsp->vs_aprev = vsp;
1501 1501 vsp->vs_knext = vsp;
1502 1502 vsp->vs_kprev = vsp;
1503 1503 vsp->vs_type = VMEM_SPAN;
1504 1504
1505 1505 vsp = &vmp->vm_rotor;
1506 1506 vsp->vs_type = VMEM_ROTOR;
1507 1507 VMEM_INSERT(&vmp->vm_seg0, vsp, a);
1508 1508
1509 1509 bcopy(&vmem_kstat_template, &vmp->vm_kstat, sizeof (vmem_kstat_t));
1510 1510
1511 1511 vmp->vm_id = id;
1512 1512 if (source != NULL)
1513 1513 vmp->vm_kstat.vk_source_id.value.ui32 = source->vm_id;
1514 1514 vmp->vm_source = source;
1515 1515 vmp->vm_source_alloc = afunc;
1516 1516 vmp->vm_source_free = ffunc;
1517 1517
1518 1518 /*
1519 1519 * Some arenas (like vmem_metadata and kmem_metadata) cannot
1520 1520 * use quantum caching to lower fragmentation. Instead, we
1521 1521 * increase their imports, giving a similar effect.
1522 1522 */
1523 1523 if (vmp->vm_cflags & VMC_NO_QCACHE) {
1524 1524 vmp->vm_min_import =
1525 1525 VMEM_QCACHE_SLABSIZE(nqcache << vmp->vm_qshift);
1526 1526 nqcache = 0;
1527 1527 }
1528 1528
1529 1529 if (nqcache != 0) {
1530 1530 ASSERT(!(vmflag & VM_NOSLEEP));
1531 1531 vmp->vm_qcache_max = nqcache << vmp->vm_qshift;
1532 1532 for (i = 0; i < nqcache; i++) {
1533 1533 char buf[VMEM_NAMELEN + 21];
1534 1534 (void) sprintf(buf, "%s_%lu", vmp->vm_name,
1535 1535 (i + 1) * quantum);
1536 1536 vmp->vm_qcache[i] = kmem_cache_create(buf,
1537 1537 (i + 1) * quantum, quantum, NULL, NULL, NULL,
1538 1538 NULL, vmp, KMC_QCACHE | KMC_NOTOUCH);
1539 1539 }
1540 1540 }
1541 1541
1542 1542 if ((vmp->vm_ksp = kstat_create("vmem", vmp->vm_id, vmp->vm_name,
1543 1543 "vmem", KSTAT_TYPE_NAMED, sizeof (vmem_kstat_t) /
1544 1544 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL)) != NULL) {
1545 1545 vmp->vm_ksp->ks_data = &vmp->vm_kstat;
1546 1546 kstat_install(vmp->vm_ksp);
1547 1547 }
1548 1548
1549 1549 mutex_enter(&vmem_list_lock);
1550 1550 vmpp = &vmem_list;
1551 1551 while ((cur = *vmpp) != NULL)
1552 1552 vmpp = &cur->vm_next;
1553 1553 *vmpp = vmp;
1554 1554 mutex_exit(&vmem_list_lock);
1555 1555
1556 1556 if (vmp->vm_cflags & VMC_POPULATOR) {
1557 1557 ASSERT(vmem_populators < VMEM_INITIAL);
1558 - vmem_populator[atomic_add_32_nv(&vmem_populators, 1) - 1] = vmp;
1558 + vmem_populator[atomic_inc_32_nv(&vmem_populators) - 1] = vmp;
1559 1559 mutex_enter(&vmp->vm_lock);
1560 1560 (void) vmem_populate(vmp, vmflag | VM_PANIC);
1561 1561 mutex_exit(&vmp->vm_lock);
1562 1562 }
1563 1563
1564 1564 if ((base || size) && vmem_add(vmp, base, size, vmflag) == NULL) {
1565 1565 vmem_destroy(vmp);
1566 1566 return (NULL);
1567 1567 }
1568 1568
1569 1569 return (vmp);
1570 1570 }
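
Both conversions in this function are behavior-preserving: atomic_inc_32_nv(&x) atomically increments x and returns the new (post-increment) value, exactly as atomic_add_32_nv(&x, 1) does. A minimal equivalence sketch (counter and function names hypothetical):

	#include <sys/atomic.h>

	static volatile uint32_t counter;	/* hypothetical id source */

	static uint32_t
	alloc_id(void)
	{
		/* Old spelling: return (atomic_add_32_nv(&counter, 1)); */
		return (atomic_inc_32_nv(&counter));	/* same new value */
	}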
1571 1571
1572 1572 vmem_t *
1573 1573 vmem_xcreate(const char *name, void *base, size_t size, size_t quantum,
1574 1574 vmem_ximport_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1575 1575 size_t qcache_max, int vmflag)
1576 1576 {
1577 1577 ASSERT(!(vmflag & (VMC_POPULATOR | VMC_XALLOC)));
1578 1578 vmflag &= ~(VMC_POPULATOR | VMC_XALLOC);
1579 1579
1580 1580 return (vmem_create_common(name, base, size, quantum,
1581 1581 (vmem_alloc_t *)afunc, ffunc, source, qcache_max,
1582 1582 vmflag | VMC_XALLOC));
1583 1583 }
1584 1584
1585 1585 vmem_t *
1586 1586 vmem_create(const char *name, void *base, size_t size, size_t quantum,
1587 1587 vmem_alloc_t *afunc, vmem_free_t *ffunc, vmem_t *source,
1588 1588 size_t qcache_max, int vmflag)
1589 1589 {
1590 1590 ASSERT(!(vmflag & (VMC_XALLOC | VMC_XALIGN)));
1591 1591 vmflag &= ~(VMC_XALLOC | VMC_XALIGN);
1592 1592
1593 1593 return (vmem_create_common(name, base, size, quantum,
1594 1594 afunc, ffunc, source, qcache_max, vmflag));
1595 1595 }
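
For context, a typical client of vmem_create() looks like the sketch below; the name, base, and size are illustrative only, and the NULL afunc/ffunc/source make this a standalone (non-importing) arena, like the heap created in vmem_init() further down.

	/*
	 * Hypothetical example: a page-granular address arena with
	 * quantum caching for allocations of up to 8 pages.
	 */
	vmem_t *arena = vmem_create("example_arena",
	    (void *)example_base, example_size, PAGESIZE,
	    NULL, NULL, NULL, 8 * PAGESIZE, VM_SLEEP);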
1596 1596
1597 1597 /*
1598 1598 * Destroy arena vmp.
1599 1599 */
1600 1600 void
1601 1601 vmem_destroy(vmem_t *vmp)
1602 1602 {
1603 1603 vmem_t *cur, **vmpp;
1604 1604 vmem_seg_t *seg0 = &vmp->vm_seg0;
1605 1605 vmem_seg_t *vsp, *anext;
1606 1606 size_t leaked;
1607 1607 int i;
1608 1608
1609 1609 mutex_enter(&vmem_list_lock);
1610 1610 vmpp = &vmem_list;
1611 1611 while ((cur = *vmpp) != vmp)
1612 1612 vmpp = &cur->vm_next;
1613 1613 *vmpp = vmp->vm_next;
1614 1614 mutex_exit(&vmem_list_lock);
1615 1615
1616 1616 for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1617 1617 if (vmp->vm_qcache[i])
1618 1618 kmem_cache_destroy(vmp->vm_qcache[i]);
1619 1619
1620 1620 leaked = vmem_size(vmp, VMEM_ALLOC);
1621 1621 if (leaked != 0)
1622 1622 cmn_err(CE_WARN, "vmem_destroy('%s'): leaked %lu %s",
1623 1623 vmp->vm_name, leaked, (vmp->vm_cflags & VMC_IDENTIFIER) ?
1624 1624 "identifiers" : "bytes");
1625 1625
1626 1626 if (vmp->vm_hash_table != vmp->vm_hash0)
1627 1627 vmem_free(vmem_hash_arena, vmp->vm_hash_table,
1628 1628 (vmp->vm_hash_mask + 1) * sizeof (void *));
1629 1629
1630 1630 /*
1631 1631 * Give back the segment structures for anything that's left in the
1632 1632 * arena, e.g. the primary spans and their free segments.
1633 1633 */
1634 1634 VMEM_DELETE(&vmp->vm_rotor, a);
1635 1635 for (vsp = seg0->vs_anext; vsp != seg0; vsp = anext) {
1636 1636 anext = vsp->vs_anext;
1637 1637 vmem_putseg_global(vsp);
1638 1638 }
1639 1639
1640 1640 while (vmp->vm_nsegfree > 0)
1641 1641 vmem_putseg_global(vmem_getseg(vmp));
1642 1642
1643 1643 kstat_delete(vmp->vm_ksp);
1644 1644
1645 1645 mutex_destroy(&vmp->vm_lock);
1646 1646 cv_destroy(&vmp->vm_cv);
1647 1647 vmem_free(vmem_vmem_arena, vmp, sizeof (vmem_t));
1648 1648 }
1649 1649
1650 1650 /*
1651 1651 * Resize vmp's hash table to keep the average lookup depth near 1.0.
1652 1652 */
1653 1653 static void
1654 1654 vmem_hash_rescale(vmem_t *vmp)
1655 1655 {
1656 1656 vmem_seg_t **old_table, **new_table, *vsp;
1657 1657 size_t old_size, new_size, h, nseg;
1658 1658
1659 1659 nseg = (size_t)(vmp->vm_kstat.vk_alloc.value.ui64 -
1660 1660 vmp->vm_kstat.vk_free.value.ui64);
1661 1661
1662 1662 new_size = MAX(VMEM_HASH_INITIAL, 1 << (highbit(3 * nseg + 4) - 2));
1663 1663 old_size = vmp->vm_hash_mask + 1;
1664 1664
1665 1665 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1))
1666 1666 return;
1667 1667
1668 1668 new_table = vmem_alloc(vmem_hash_arena, new_size * sizeof (void *),
1669 1669 VM_NOSLEEP);
1670 1670 if (new_table == NULL)
1671 1671 return;
1672 1672 bzero(new_table, new_size * sizeof (void *));
1673 1673
1674 1674 mutex_enter(&vmp->vm_lock);
1675 1675
1676 1676 old_size = vmp->vm_hash_mask + 1;
1677 1677 old_table = vmp->vm_hash_table;
1678 1678
1679 1679 vmp->vm_hash_mask = new_size - 1;
1680 1680 vmp->vm_hash_table = new_table;
1681 1681 vmp->vm_hash_shift = highbit(vmp->vm_hash_mask);
1682 1682
1683 1683 for (h = 0; h < old_size; h++) {
1684 1684 vsp = old_table[h];
1685 1685 while (vsp != NULL) {
1686 1686 uintptr_t addr = vsp->vs_start;
1687 1687 vmem_seg_t *next_vsp = vsp->vs_knext;
1688 1688 vmem_seg_t **hash_bucket = VMEM_HASH(vmp, addr);
1689 1689 vsp->vs_knext = *hash_bucket;
1690 1690 *hash_bucket = vsp;
1691 1691 vsp = next_vsp;
1692 1692 }
1693 1693 }
1694 1694
1695 1695 mutex_exit(&vmp->vm_lock);
1696 1696
1697 1697 if (old_table != vmp->vm_hash0)
1698 1698 vmem_free(vmem_hash_arena, old_table,
1699 1699 old_size * sizeof (void *));
1700 1700 }
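
To see the sizing rule at work: with nseg = 1000 live segments, 3 * nseg + 4 = 3004 and highbit(3004) = 12, so new_size = MAX(VMEM_HASH_INITIAL, 1 << 10) = 1024 buckets, for an expected chain depth of about 1000/1024 = 0.98. The early return keeps the current table whenever new_size is within a factor of two of old_size, so rebuilds happen only after the live-segment count changes substantially; and because nseg is sampled and the new table is allocated before vm_lock is taken, old_size is deliberately re-read under the lock before the rehash.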
1701 1701
1702 1702 /*
1703 1703 * Perform periodic maintenance on all vmem arenas.
1704 1704 */
1705 1705 void
1706 1706 vmem_update(void *dummy)
1707 1707 {
1708 1708 vmem_t *vmp;
1709 1709
1710 1710 mutex_enter(&vmem_list_lock);
1711 1711 for (vmp = vmem_list; vmp != NULL; vmp = vmp->vm_next) {
1712 1712 /*
1713 1713 * If threads are waiting for resources, wake them up
1714 1714 * periodically so they can issue another kmem_reap()
1715 1715 * to reclaim resources cached by the slab allocator.
1716 1716 */
1717 1717 cv_broadcast(&vmp->vm_cv);
1718 1718
1719 1719 /*
1720 1720 * Rescale the hash table to keep the hash chains short.
1721 1721 */
1722 1722 vmem_hash_rescale(vmp);
1723 1723 }
1724 1724 mutex_exit(&vmem_list_lock);
1725 1725
1726 1726 (void) timeout(vmem_update, dummy, vmem_update_interval * hz);
1727 1727 }
1728 1728
1729 1729 void
1730 1730 vmem_qcache_reap(vmem_t *vmp)
1731 1731 {
1732 1732 int i;
1733 1733
1734 1734 /*
1735 1735 * Reap any quantum caches that may be part of this vmem.
1736 1736 */
1737 1737 for (i = 0; i < VMEM_NQCACHE_MAX; i++)
1738 1738 if (vmp->vm_qcache[i])
1739 1739 kmem_cache_reap_now(vmp->vm_qcache[i]);
1740 1740 }
1741 1741
1742 1742 /*
1743 1743 * Prepare vmem for use.
1744 1744 */
1745 1745 vmem_t *
1746 1746 vmem_init(const char *heap_name,
1747 1747 void *heap_start, size_t heap_size, size_t heap_quantum,
1748 1748 void *(*heap_alloc)(vmem_t *, size_t, int),
1749 1749 void (*heap_free)(vmem_t *, void *, size_t))
1750 1750 {
1751 1751 uint32_t id;
1752 1752 int nseg = VMEM_SEG_INITIAL;
1753 1753 vmem_t *heap;
1754 1754
1755 1755 while (--nseg >= 0)
1756 1756 vmem_putseg_global(&vmem_seg0[nseg]);
1757 1757
1758 1758 heap = vmem_create(heap_name,
1759 1759 heap_start, heap_size, heap_quantum,
1760 1760 NULL, NULL, NULL, 0,
1761 1761 VM_SLEEP | VMC_POPULATOR);
1762 1762
1763 1763 vmem_metadata_arena = vmem_create("vmem_metadata",
1764 1764 NULL, 0, heap_quantum,
1765 1765 vmem_alloc, vmem_free, heap, 8 * heap_quantum,
1766 1766 VM_SLEEP | VMC_POPULATOR | VMC_NO_QCACHE);
1767 1767
1768 1768 vmem_seg_arena = vmem_create("vmem_seg",
1769 1769 NULL, 0, heap_quantum,
1770 1770 heap_alloc, heap_free, vmem_metadata_arena, 0,
1771 1771 VM_SLEEP | VMC_POPULATOR);
1772 1772
1773 1773 vmem_hash_arena = vmem_create("vmem_hash",
1774 1774 NULL, 0, 8,
1775 1775 heap_alloc, heap_free, vmem_metadata_arena, 0,
1776 1776 VM_SLEEP);
1777 1777
1778 1778 vmem_vmem_arena = vmem_create("vmem_vmem",
1779 1779 vmem0, sizeof (vmem0), 1,
1780 1780 heap_alloc, heap_free, vmem_metadata_arena, 0,
1781 1781 VM_SLEEP);
1782 1782
1783 1783 for (id = 0; id < vmem_id; id++)
1784 1784 (void) vmem_xalloc(vmem_vmem_arena, sizeof (vmem_t),
1785 1785 1, 0, 0, &vmem0[id], &vmem0[id + 1],
1786 1786 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
1787 1787
1788 1788 return (heap);
1789 1789 }
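
The bootstrap above leaves the metadata arenas arranged as in this sketch (each child imports spans from its parent); the final vmem_xalloc() loop then marks the statically allocated vmem0[] slots as in-use within vmem_vmem_arena, so the arenas created before vmem_vmem_arena existed are properly accounted for:

	heap
	  └─ vmem_metadata	(VMC_NO_QCACHE; imports from heap)
	       ├─ vmem_seg	(segment structures)
	       ├─ vmem_hash	(hash tables)
	       └─ vmem_vmem	(the vmem_t structures themselves)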
(221 lines elided)