segop_getpolicy already checks for a NULL op
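For reference, the generic segop_getpolicy() dispatcher introduced by this patch series is expected to bail out when a segment driver leaves the getpolicy op unset, which is what makes the dummy segkmem_getpolicy() stub removable below. A minimal sketch of that wrapper, assuming the seg_ops dispatch style used elsewhere in the series (not the verbatim source):

lgrp_mem_policy_info_t *
segop_getpolicy(struct seg *seg, caddr_t addr)
{
        /* No per-segment policy op registered: report "no policy" (NULL). */
        if (seg->s_ops->getpolicy == NULL)
                return (NULL);

        return (seg->s_ops->getpolicy(seg, addr));
}

With that NULL check in the wrapper, dropping .getpolicy from segkmem_ops yields the same behavior as the old stub that unconditionally returned NULL.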
--- old/usr/src/uts/common/vm/seg_kmem.c
+++ new/usr/src/uts/common/vm/seg_kmem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/types.h>
26 26 #include <sys/t_lock.h>
27 27 #include <sys/param.h>
28 28 #include <sys/sysmacros.h>
29 29 #include <sys/tuneable.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/vm.h>
32 32 #include <sys/kmem.h>
33 33 #include <sys/vmem.h>
34 34 #include <sys/mman.h>
35 35 #include <sys/cmn_err.h>
36 36 #include <sys/debug.h>
37 37 #include <sys/dumphdr.h>
38 38 #include <sys/bootconf.h>
39 39 #include <sys/lgrp.h>
40 40 #include <vm/seg_kmem.h>
41 41 #include <vm/hat.h>
42 42 #include <vm/page.h>
43 43 #include <vm/vm_dep.h>
44 44 #include <vm/faultcode.h>
45 45 #include <sys/promif.h>
46 46 #include <vm/seg_kp.h>
47 47 #include <sys/bitmap.h>
48 48 #include <sys/mem_cage.h>
49 49
50 50 #ifdef __sparc
51 51 #include <sys/ivintr.h>
52 52 #include <sys/panic.h>
53 53 #endif
54 54
55 55 /*
56 56 * seg_kmem is the primary kernel memory segment driver. It
57 57 * maps the kernel heap [kernelheap, ekernelheap), module text,
58 58 * and all memory which was allocated before the VM was initialized
59 59 * into kas.
60 60 *
61 61 * Pages which belong to seg_kmem are hashed into &kvp vnode at
62 62 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
63 63 * They must never be paged out since segkmem_fault() is a no-op to
64 64 * prevent recursive faults.
65 65 *
66 66 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
67 67 * __x86 and are unlocked (p_sharelock == 0) on __sparc. Once __x86
68 68 * supports relocation the #ifdef kludges can be removed.
69 69 *
70 70 * seg_kmem pages may be subject to relocation by page_relocate(),
71 71 * provided that the HAT supports it; if this is so, segkmem_reloc
72 72 * will be set to a nonzero value. All boot time allocated memory as
73 73 * well as static memory is considered off limits to relocation.
74 74 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
75 75 * we request P_NORELOC pages for memory that isn't safe to relocate.
76 76 *
77 77 * The kernel heap is logically divided up into four pieces:
78 78 *
79 79 * heap32_arena is for allocations that require 32-bit absolute
80 80 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
81 81 *
82 82 * heap_core is for allocations that require 2GB *relative*
83 83 * offsets; in other words all memory from heap_core is within
84 84 * 2GB of all other memory from the same arena. This is a requirement
85 85 * of the addressing modes of some processors in supervisor code.
86 86 *
87 87 * heap_arena is the general heap arena.
88 88 *
89 89 * static_arena is the static memory arena. Allocations from it
90 90 * are not subject to relocation so it is safe to use the memory
91 91 * physical address as well as the virtual address (e.g. the VA to
92 92 * PA translations are static). Caches may import from static_arena;
93 93 * all other static memory allocations should use static_alloc_arena.
94 94 *
95 95 * On some platforms which have limited virtual address space, seg_kmem
96 96 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
97 97 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
98 98 * address space which is actually seg_kp mapped.
99 99 */
100 100
101 101 extern ulong_t *segkp_bitmap; /* Is set if segkp is from the kernel heap */
102 102
103 103 char *kernelheap; /* start of primary kernel heap */
104 104 char *ekernelheap; /* end of primary kernel heap */
105 105 struct seg kvseg; /* primary kernel heap segment */
106 106 struct seg kvseg_core; /* "core" kernel heap segment */
107 107 struct seg kzioseg; /* Segment for zio mappings */
108 108 vmem_t *heap_arena; /* primary kernel heap arena */
109 109 vmem_t *heap_core_arena; /* core kernel heap arena */
110 110 char *heap_core_base; /* start of core kernel heap arena */
111 111 char *heap_lp_base; /* start of kernel large page heap arena */
112 112 char *heap_lp_end; /* end of kernel large page heap arena */
113 113 vmem_t *hat_memload_arena; /* HAT translation data */
114 114 struct seg kvseg32; /* 32-bit kernel heap segment */
115 115 vmem_t *heap32_arena; /* 32-bit kernel heap arena */
116 116 vmem_t *heaptext_arena; /* heaptext arena */
117 117 struct as kas; /* kernel address space */
118 118 int segkmem_reloc; /* enable/disable relocatable segkmem pages */
119 119 vmem_t *static_arena; /* arena for caches to import static memory */
120 120 vmem_t *static_alloc_arena; /* arena for allocating static memory */
121 121 vmem_t *zio_arena = NULL; /* arena for allocating zio memory */
122 122 vmem_t *zio_alloc_arena = NULL; /* arena for allocating zio memory */
123 123
124 124 /*
125 125 * seg_kmem driver can map part of the kernel heap with large pages.
126 126 * Currently this functionality is implemented for sparc platforms only.
127 127 *
128 128 * The large page size "segkmem_lpsize" for kernel heap is selected in the
129 129 * platform specific code. It can also be modified via /etc/system file.
130 130 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
131 131 * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
132 132 * match segkmem_lpsize.
133 133 *
134 134 * At boot time we carve from kernel heap arena a range of virtual addresses
135 135 * that will be used for large page mappings. This range [heap_lp_base,
136 136 * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena". We also
137 137 * create "kmem_lp_arena" that caches memory already backed up by large
138 138 * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
139 139 */
140 140
141 141 size_t segkmem_lpsize;
142 142 static uint_t segkmem_lpshift = PAGESHIFT;
143 143 int segkmem_lpszc = 0;
144 144
145 145 size_t segkmem_kmemlp_quantum = 0x400000; /* 4MB */
146 146 size_t segkmem_heaplp_quantum;
147 147 vmem_t *heap_lp_arena;
148 148 static vmem_t *kmem_lp_arena;
149 149 static vmem_t *segkmem_ppa_arena;
150 150 static segkmem_lpcb_t segkmem_lpcb;
151 151
152 152 /*
153 153 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
154 154 * consumed by the large page heap. By default this parameter is set to 1/8 of
155 155 * physmem but can be adjusted through /etc/system either directly or
156 156 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
157 157 * we allow for large page heap.
158 158 */
159 159 size_t segkmem_kmemlp_max;
160 160 static uint_t segkmem_kmemlp_pcnt;
161 161
162 162 /*
163 163 * Getting large pages for kernel heap could be problematic due to
164 164 * physical memory fragmentation. That's why we allow preallocating
165 165 * "segkmem_kmemlp_min" bytes at boot time.
166 166 */
167 167 static size_t segkmem_kmemlp_min;
168 168
169 169 /*
170 170 * Throttling is used to avoid expensive tries to allocate large pages
171 171 * for kernel heap when a lot of successive attempts to do so fail.
172 172 */
173 173 static ulong_t segkmem_lpthrottle_max = 0x400000;
174 174 static ulong_t segkmem_lpthrottle_start = 0x40;
175 175 static ulong_t segkmem_use_lpthrottle = 1;
176 176
177 177 /*
178 178 * Freed pages accumulate on a garbage list until segkmem is ready,
179 179 * at which point we call segkmem_gc() to free it all.
180 180 */
181 181 typedef struct segkmem_gc_list {
182 182 struct segkmem_gc_list *gc_next;
183 183 vmem_t *gc_arena;
184 184 size_t gc_size;
185 185 } segkmem_gc_list_t;
186 186
187 187 static segkmem_gc_list_t *segkmem_gc_list;
188 188
189 189 /*
190 190 * Allocations from the hat_memload arena add VM_MEMLOAD to their
191 191 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
192 192 * to take steps to prevent infinite recursion. HAT allocations also
193 193 * must be non-relocatable to prevent recursive page faults.
194 194 */
195 195 static void *
196 196 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
197 197 {
198 198 flags |= (VM_MEMLOAD | VM_NORELOC);
199 199 return (segkmem_alloc(vmp, size, flags));
200 200 }
201 201
202 202 /*
203 203 * Allocations from static_arena arena (or any other arena that uses
204 204 * segkmem_alloc_permanent()) require non-relocatable (permanently
205 205 * wired) memory pages, since these pages are referenced by physical
206 206 * as well as virtual address.
207 207 */
208 208 void *
209 209 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
210 210 {
211 211 return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
212 212 }
213 213
214 214 /*
215 215 * Initialize kernel heap boundaries.
216 216 */
217 217 void
218 218 kernelheap_init(
219 219 void *heap_start,
220 220 void *heap_end,
221 221 char *first_avail,
222 222 void *core_start,
223 223 void *core_end)
224 224 {
225 225 uintptr_t textbase;
226 226 size_t core_size;
227 227 size_t heap_size;
228 228 vmem_t *heaptext_parent;
229 229 size_t heap_lp_size = 0;
230 230 #ifdef __sparc
231 231 size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
232 232 #endif /* __sparc */
233 233
234 234 kernelheap = heap_start;
235 235 ekernelheap = heap_end;
236 236
237 237 #ifdef __sparc
238 238 heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
239 239 /*
240 240 * Bias heap_lp start address by kmem64_sz to reduce collisions
241 241 * in 4M kernel TSB between kmem64 area and heap_lp
242 242 */
243 243 kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
244 244 if (kmem64_sz <= heap_lp_size / 2)
245 245 heap_lp_size -= kmem64_sz;
246 246 heap_lp_base = ekernelheap - heap_lp_size;
247 247 heap_lp_end = heap_lp_base + heap_lp_size;
248 248 #endif /* __sparc */
249 249
250 250 /*
251 251 * If this platform has a 'core' heap area, then the space for
252 252 * overflow module text should be carved out of the end of that
253 253 * heap. Otherwise, it gets carved out of the general purpose
254 254 * heap.
255 255 */
256 256 core_size = (uintptr_t)core_end - (uintptr_t)core_start;
257 257 if (core_size > 0) {
258 258 ASSERT(core_size >= HEAPTEXT_SIZE);
259 259 textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
260 260 core_size -= HEAPTEXT_SIZE;
261 261 }
262 262 #ifndef __sparc
263 263 else {
264 264 ekernelheap -= HEAPTEXT_SIZE;
265 265 textbase = (uintptr_t)ekernelheap;
266 266 }
267 267 #endif
268 268
269 269 heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
270 270 heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
271 271 segkmem_alloc, segkmem_free);
272 272
273 273 if (core_size > 0) {
274 274 heap_core_arena = vmem_create("heap_core", core_start,
275 275 core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
276 276 heap_core_base = core_start;
277 277 } else {
278 278 heap_core_arena = heap_arena;
279 279 heap_core_base = kernelheap;
280 280 }
281 281
282 282 /*
283 283 * Reserve space for the large page heap. If large pages for the kernel
284 284 * heap are enabled, the large page heap arena will be created later in the
285 285 * boot sequence in segkmem_heap_lp_init(). Otherwise the allocated
286 286 * range will be returned to the heap_arena.
287 287 */
288 288 if (heap_lp_size) {
289 289 (void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
290 290 heap_lp_base, heap_lp_end,
291 291 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
292 292 }
293 293
294 294 /*
295 295 * Remove the already-spoken-for memory range [kernelheap, first_avail).
296 296 */
297 297 (void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
298 298 0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
299 299
300 300 #ifdef __sparc
301 301 heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
302 302 SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
303 303 NULL, NULL, 0, VM_SLEEP);
304 304 /*
305 305 * Prom claims the physical and virtual resources used by panicbuf
306 306 * and intr_vec_table, so reserve space for panicbuf, intr_vec_table, and
307 307 * reserved interrupt vector data structures from the 32-bit heap.
308 308 */
309 309 (void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
310 310 panicbuf, panicbuf + PANICBUFSIZE,
311 311 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
312 312
313 313 (void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
314 314 intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
315 315 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
316 316
317 317 textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
318 318 heaptext_parent = NULL;
319 319 #else /* __sparc */
320 320 heap32_arena = heap_core_arena;
321 321 heaptext_parent = heap_core_arena;
322 322 #endif /* __sparc */
323 323
324 324 heaptext_arena = vmem_create("heaptext", (void *)textbase,
325 325 HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
326 326
327 327 /*
328 328 * Create a set of arenas for memory with static translations
329 329 * (e.g. VA -> PA translations cannot change). Since using
330 330 * kernel pages by physical address implies it isn't safe to
331 331 * walk across page boundaries, the static_arena quantum must
332 332 * be PAGESIZE. Any kmem caches that require static memory
333 333 * should source from static_arena, while direct allocations
334 334 * should only use static_alloc_arena.
335 335 */
336 336 static_arena = vmem_create("static", NULL, 0, PAGESIZE,
337 337 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
338 338 static_alloc_arena = vmem_create("static_alloc", NULL, 0,
339 339 sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
340 340 0, VM_SLEEP);
341 341
342 342 /*
343 343 * Create an arena for translation data (ptes, hmes, or hblks).
344 344 * We need an arena for this because hat_memload() is essential
345 345 * to vmem_populate() (see comments in common/os/vmem.c).
346 346 *
347 347 * Note: any kmem cache that allocates from hat_memload_arena
348 348 * must be created as a KMC_NOHASH cache (i.e. no external slab
349 349 * and bufctl structures to allocate) so that slab creation doesn't
350 350 * require anything more than a single vmem_alloc().
351 351 */
352 352 hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
353 353 hat_memload_alloc, segkmem_free, heap_arena, 0,
354 354 VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
355 355 }
356 356
357 357 void
358 358 boot_mapin(caddr_t addr, size_t size)
359 359 {
360 360 caddr_t eaddr;
361 361 page_t *pp;
362 362 pfn_t pfnum;
363 363
364 364 if (page_resv(btop(size), KM_NOSLEEP) == 0)
365 365 panic("boot_mapin: page_resv failed");
366 366
367 367 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
368 368 pfnum = va_to_pfn(addr);
369 369 if (pfnum == PFN_INVALID)
370 370 continue;
371 371 if ((pp = page_numtopp_nolock(pfnum)) == NULL)
372 372 panic("boot_mapin(): No pp for pfnum = %lx", pfnum);
373 373
374 374 /*
375 375 * We must break up any large pages that may have constituent
376 376 * pages being utilized for BOP_ALLOC()s before calling
377 377 * page_numtopp(). The locking code (i.e. page_reclaim())
378 378 * can't handle them.
379 379 */
380 380 if (pp->p_szc != 0)
381 381 page_boot_demote(pp);
382 382
383 383 pp = page_numtopp(pfnum, SE_EXCL);
384 384 if (pp == NULL || PP_ISFREE(pp))
385 385 panic("boot_alloc: pp is NULL or free");
386 386
387 387 /*
388 388 * If the cage is on but doesn't yet contain this page,
389 389 * mark it as non-relocatable.
390 390 */
391 391 if (kcage_on && !PP_ISNORELOC(pp)) {
392 392 PP_SETNORELOC(pp);
393 393 PLCNT_XFER_NORELOC(pp);
394 394 }
395 395
396 396 (void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
397 397 pp->p_lckcnt = 1;
398 398 #if defined(__x86)
399 399 page_downgrade(pp);
400 400 #else
401 401 page_unlock(pp);
402 402 #endif
403 403 }
404 404 }
405 405
406 406 /*
407 407 * Get pages from boot and hash them into the kernel's vp.
408 408 * Used after page structs have been allocated, but before segkmem is ready.
409 409 */
410 410 void *
411 411 boot_alloc(void *inaddr, size_t size, uint_t align)
412 412 {
413 413 caddr_t addr = inaddr;
414 414
415 415 if (bootops == NULL)
416 416 prom_panic("boot_alloc: attempt to allocate memory after "
417 417 "BOP_GONE");
418 418
419 419 size = ptob(btopr(size));
420 420 #ifdef __sparc
421 421 if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
422 422 panic("boot_alloc: bop_alloc_chunk failed");
423 423 #else
424 424 if (BOP_ALLOC(bootops, addr, size, align) != addr)
425 425 panic("boot_alloc: BOP_ALLOC failed");
426 426 #endif
427 427 boot_mapin((caddr_t)addr, size);
428 428 return (addr);
429 429 }
430 430
431 431 /*ARGSUSED*/
432 432 static faultcode_t
433 433 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
434 434 enum fault_type type, enum seg_rw rw)
435 435 {
436 436 pgcnt_t npages;
437 437 spgcnt_t pg;
438 438 page_t *pp;
439 439 struct vnode *vp = seg->s_data;
440 440
441 441 ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
442 442
443 443 if (seg->s_as != &kas || size > seg->s_size ||
444 444 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
445 445 panic("segkmem_fault: bad args");
446 446
447 447 /*
448 448 * If it is one of segkp pages, call segkp_fault.
449 449 */
450 450 if (segkp_bitmap && seg == &kvseg &&
451 451 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
452 452 return (segop_fault(hat, segkp, addr, size, type, rw));
453 453
454 454 if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
455 455 return (FC_NOSUPPORT);
456 456
457 457 npages = btopr(size);
458 458
459 459 switch (type) {
460 460 case F_SOFTLOCK: /* lock down already-loaded translations */
461 461 for (pg = 0; pg < npages; pg++) {
462 462 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
463 463 SE_SHARED);
464 464 if (pp == NULL) {
465 465 /*
466 466 * Hmm, no page. Does a kernel mapping
467 467 * exist for it?
468 468 */
469 469 if (!hat_probe(kas.a_hat, addr)) {
470 470 addr -= PAGESIZE;
471 471 while (--pg >= 0) {
472 472 pp = page_find(vp, (u_offset_t)
473 473 (uintptr_t)addr);
474 474 if (pp)
475 475 page_unlock(pp);
476 476 addr -= PAGESIZE;
477 477 }
478 478 return (FC_NOMAP);
479 479 }
480 480 }
481 481 addr += PAGESIZE;
482 482 }
483 483 if (rw == S_OTHER)
484 484 hat_reserve(seg->s_as, addr, size);
485 485 return (0);
486 486 case F_SOFTUNLOCK:
487 487 while (npages--) {
488 488 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
489 489 if (pp)
490 490 page_unlock(pp);
491 491 addr += PAGESIZE;
492 492 }
493 493 return (0);
494 494 default:
495 495 return (FC_NOSUPPORT);
496 496 }
497 497 /*NOTREACHED*/
498 498 }
499 499
500 500 static int
501 501 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
502 502 {
503 503 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
504 504
505 505 if (seg->s_as != &kas || size > seg->s_size ||
506 506 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
507 507 panic("segkmem_setprot: bad args");
508 508
509 509 /*
510 510 * If it is one of segkp pages, call segkp.
511 511 */
512 512 if (segkp_bitmap && seg == &kvseg &&
513 513 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
514 514 return (segop_setprot(segkp, addr, size, prot));
515 515
516 516 if (prot == 0)
517 517 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
518 518 else
519 519 hat_chgprot(kas.a_hat, addr, size, prot);
520 520 return (0);
521 521 }
522 522
523 523 /*
524 524 * This is a dummy segkmem function overloaded to call segkp
525 525 * when segkp is under the heap.
526 526 */
527 527 /* ARGSUSED */
528 528 static int
529 529 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
530 530 {
531 531 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
532 532
533 533 if (seg->s_as != &kas)
534 534 panic("segkmem badop");
535 535
536 536 /*
537 537 * If it is one of segkp pages, call into segkp.
538 538 */
539 539 if (segkp_bitmap && seg == &kvseg &&
540 540 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
541 541 return (segop_checkprot(segkp, addr, size, prot));
542 542
543 543 panic("segkmem badop");
544 544 return (0);
545 545 }
546 546
547 547 /*
548 548 * This is a dummy segkmem function overloaded to call segkp
549 549 * when segkp is under the heap.
550 550 */
551 551 /* ARGSUSED */
552 552 static int
553 553 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
554 554 {
555 555 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
556 556
557 557 if (seg->s_as != &kas)
558 558 panic("segkmem badop");
559 559
560 560 /*
561 561 * If it is one of segkp pages, call into segkp.
562 562 */
563 563 if (segkp_bitmap && seg == &kvseg &&
564 564 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
565 565 return (segop_kluster(segkp, addr, delta));
566 566
567 567 panic("segkmem badop");
568 568 return (0);
569 569 }
570 570
571 571 static void
572 572 segkmem_xdump_range(void *arg, void *start, size_t size)
573 573 {
574 574 struct as *as = arg;
575 575 caddr_t addr = start;
576 576 caddr_t addr_end = addr + size;
577 577
578 578 while (addr < addr_end) {
579 579 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
580 580 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
581 581 dump_addpage(as, addr, pfn);
582 582 addr += PAGESIZE;
583 583 dump_timeleft = dump_timeout;
584 584 }
585 585 }
586 586
587 587 static void
588 588 segkmem_dump_range(void *arg, void *start, size_t size)
589 589 {
590 590 caddr_t addr = start;
591 591 caddr_t addr_end = addr + size;
592 592
593 593 /*
594 594 * If we are about to start dumping the range of addresses we
595 595 * carved out of the kernel heap for the large page heap, walk
596 596 * heap_lp_arena to find which segments are actually populated.
597 597 */
598 598 if (SEGKMEM_USE_LARGEPAGES &&
599 599 addr == heap_lp_base && addr_end == heap_lp_end &&
600 600 vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
601 601 vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
602 602 segkmem_xdump_range, arg);
603 603 } else {
604 604 segkmem_xdump_range(arg, start, size);
605 605 }
606 606 }
607 607
608 608 static void
609 609 segkmem_dump(struct seg *seg)
610 610 {
611 611 /*
612 612 * The kernel's heap_arena (represented by kvseg) is a very large
613 613 * VA space, most of which is typically unused. To speed up dumping
614 614 * we use vmem_walk() to quickly find the pieces of heap_arena that
615 615 * are actually in use. We do the same for heap32_arena and
616 616 * heap_core.
617 617 *
618 618 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
619 619 * may ultimately need to allocate memory. Reentrant walks are
620 620 * necessarily imperfect snapshots. The kernel heap continues
621 621 * to change during a live crash dump, for example. For a normal
622 622 * crash dump, however, we know that there won't be any other threads
623 623 * messing with the heap. Therefore, at worst, we may fail to dump
624 624 * the pages that get allocated by the act of dumping; but we will
625 625 * always dump every page that was allocated when the walk began.
626 626 *
627 627 * The other segkmem segments are dense (fully populated), so there's
628 628 * no need to use this technique when dumping them.
629 629 *
630 630 * Note: when adding special dump handling for any new sparsely-
631 631 * populated segments, be sure to add similar handling to the ::kgrep
632 632 * code in mdb.
633 633 */
634 634 if (seg == &kvseg) {
635 635 vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
636 636 segkmem_dump_range, seg->s_as);
637 637 #ifndef __sparc
638 638 vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
639 639 segkmem_dump_range, seg->s_as);
640 640 #endif
641 641 } else if (seg == &kvseg_core) {
642 642 vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
643 643 segkmem_dump_range, seg->s_as);
644 644 } else if (seg == &kvseg32) {
645 645 vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
646 646 segkmem_dump_range, seg->s_as);
647 647 vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
648 648 segkmem_dump_range, seg->s_as);
649 649 } else if (seg == &kzioseg) {
650 650 /*
651 651 * We don't want to dump pages attached to kzioseg since they
652 652 * contain file data from ZFS. If this page's segment is
653 653 * kzioseg return instead of writing it to the dump device.
654 654 */
655 655 return;
656 656 } else {
657 657 segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
658 658 }
659 659 }
660 660
661 661 /*
662 662 * lock/unlock kmem pages over a given range [addr, addr+len).
663 663 * Returns a shadow list of pages in ppp. If there are holes
664 664 * in the range (e.g. some of the kernel mappings do not have
665 665 * underlying page_ts) returns ENOTSUP so that as_pagelock()
666 666 * will handle the range via as_fault(F_SOFTLOCK).
667 667 */
668 668 /*ARGSUSED*/
669 669 static int
670 670 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
671 671 page_t ***ppp, enum lock_type type, enum seg_rw rw)
672 672 {
673 673 page_t **pplist, *pp;
674 674 pgcnt_t npages;
675 675 spgcnt_t pg;
676 676 size_t nb;
677 677 struct vnode *vp = seg->s_data;
678 678
679 679 ASSERT(ppp != NULL);
680 680
681 681 /*
682 682 * If it is one of segkp pages, call into segkp.
683 683 */
684 684 if (segkp_bitmap && seg == &kvseg &&
685 685 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
686 686 return (segop_pagelock(segkp, addr, len, ppp, type, rw));
687 687
688 688 npages = btopr(len);
689 689 nb = sizeof (page_t *) * npages;
690 690
691 691 if (type == L_PAGEUNLOCK) {
692 692 pplist = *ppp;
693 693 ASSERT(pplist != NULL);
694 694
695 695 for (pg = 0; pg < npages; pg++) {
696 696 pp = pplist[pg];
697 697 page_unlock(pp);
698 698 }
699 699 kmem_free(pplist, nb);
700 700 return (0);
701 701 }
702 702
703 703 ASSERT(type == L_PAGELOCK);
704 704
705 705 pplist = kmem_alloc(nb, KM_NOSLEEP);
706 706 if (pplist == NULL) {
707 707 *ppp = NULL;
708 708 return (ENOTSUP); /* take the slow path */
709 709 }
710 710
711 711 for (pg = 0; pg < npages; pg++) {
712 712 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
713 713 if (pp == NULL) {
714 714 while (--pg >= 0)
715 715 page_unlock(pplist[pg]);
716 716 kmem_free(pplist, nb);
717 717 *ppp = NULL;
718 718 return (ENOTSUP);
719 719 }
720 720 pplist[pg] = pp;
721 721 addr += PAGESIZE;
722 722 }
723 723
724 724 *ppp = pplist;
725 725 return (0);
726 726 }
727 727
728 728 /*
729 729 * This is a dummy segkmem function overloaded to call segkp
730 730 * when segkp is under the heap.
731 731 */
732 732 /* ARGSUSED */
733 733 static int
734 734 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
735 735 {
736 736 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
737 737
738 738 if (seg->s_as != &kas)
739 739 panic("segkmem badop");
740 740
741 741 /*
742 742 * If it is one of segkp pages, call into segkp.
743 743 */
744 744 if (segkp_bitmap && seg == &kvseg &&
745 745 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
746 746 return (segop_getmemid(segkp, addr, memidp));
747 747
748 748 panic("segkmem badop");
749 749 return (0);
750 750 }
751 751
752 752 /*ARGSUSED*/
753 -static lgrp_mem_policy_info_t *
754 -segkmem_getpolicy(struct seg *seg, caddr_t addr)
755 -{
756 - return (NULL);
757 -}
758 -
759 -/*ARGSUSED*/
760 753 static int
761 754 segkmem_capable(struct seg *seg, segcapability_t capability)
762 755 {
763 756 if (capability == S_CAPABILITY_NOMINFLT)
764 757 return (1);
765 758 return (0);
766 759 }
767 760
768 761 static struct seg_ops segkmem_ops = {
769 762 .fault = segkmem_fault,
770 763 .setprot = segkmem_setprot,
771 764 .checkprot = segkmem_checkprot,
772 765 .kluster = segkmem_kluster,
773 766 .dump = segkmem_dump,
774 767 .pagelock = segkmem_pagelock,
775 768 .getmemid = segkmem_getmemid,
776 - .getpolicy = segkmem_getpolicy,
777 769 .capable = segkmem_capable,
778 770 };
779 771
780 772 int
781 773 segkmem_zio_create(struct seg *seg)
782 774 {
783 775 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
784 776 seg->s_ops = &segkmem_ops;
785 777 seg->s_data = &zvp;
786 778 kas.a_size += seg->s_size;
787 779 return (0);
788 780 }
789 781
790 782 int
791 783 segkmem_create(struct seg *seg)
792 784 {
793 785 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
794 786 seg->s_ops = &segkmem_ops;
795 787 seg->s_data = &kvp;
796 788 kas.a_size += seg->s_size;
797 789 return (0);
798 790 }
799 791
800 792 /*ARGSUSED*/
801 793 page_t *
802 794 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
803 795 {
804 796 struct seg kseg;
805 797 int pgflags;
806 798 struct vnode *vp = arg;
807 799
808 800 if (vp == NULL)
809 801 vp = &kvp;
810 802
811 803 kseg.s_as = &kas;
812 804 pgflags = PG_EXCL;
813 805
814 806 if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
815 807 pgflags |= PG_NORELOC;
816 808 if ((vmflag & VM_NOSLEEP) == 0)
817 809 pgflags |= PG_WAIT;
818 810 if (vmflag & VM_PANIC)
819 811 pgflags |= PG_PANIC;
820 812 if (vmflag & VM_PUSHPAGE)
821 813 pgflags |= PG_PUSHPAGE;
822 814 if (vmflag & VM_NORMALPRI) {
823 815 ASSERT(vmflag & VM_NOSLEEP);
824 816 pgflags |= PG_NORMALPRI;
825 817 }
826 818
827 819 return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
828 820 pgflags, &kseg, addr));
829 821 }
830 822
831 823 /*
832 824 * Allocate pages to back the virtual address range [addr, addr + size).
833 825 * If addr is NULL, allocate the virtual address space as well.
834 826 */
835 827 void *
836 828 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
837 829 page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
838 830 {
839 831 page_t *ppl;
840 832 caddr_t addr = inaddr;
841 833 pgcnt_t npages = btopr(size);
842 834 int allocflag;
843 835
844 836 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
845 837 return (NULL);
846 838
847 839 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
848 840
849 841 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
850 842 if (inaddr == NULL)
851 843 vmem_free(vmp, addr, size);
852 844 return (NULL);
853 845 }
854 846
855 847 ppl = page_create_func(addr, size, vmflag, pcarg);
856 848 if (ppl == NULL) {
857 849 if (inaddr == NULL)
858 850 vmem_free(vmp, addr, size);
859 851 page_unresv(npages);
860 852 return (NULL);
861 853 }
862 854
863 855 /*
864 856 * Under certain conditions, we need to let the HAT layer know
865 857 * that it cannot safely allocate memory. Allocations from
866 858 * the hat_memload vmem arena always need this, to prevent
867 859 * infinite recursion.
868 860 *
869 861 * In addition, the x86 hat cannot safely do memory
870 862 * allocations while in vmem_populate(), because there
871 863 * is no simple bound on its usage.
872 864 */
873 865 if (vmflag & VM_MEMLOAD)
874 866 allocflag = HAT_NO_KALLOC;
875 867 #if defined(__x86)
876 868 else if (vmem_is_populator())
877 869 allocflag = HAT_NO_KALLOC;
878 870 #endif
879 871 else
880 872 allocflag = 0;
881 873
882 874 while (ppl != NULL) {
883 875 page_t *pp = ppl;
884 876 page_sub(&ppl, pp);
885 877 ASSERT(page_iolock_assert(pp));
886 878 ASSERT(PAGE_EXCL(pp));
887 879 page_io_unlock(pp);
888 880 hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
889 881 (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
890 882 HAT_LOAD_LOCK | allocflag);
891 883 pp->p_lckcnt = 1;
892 884 #if defined(__x86)
893 885 page_downgrade(pp);
894 886 #else
895 887 if (vmflag & SEGKMEM_SHARELOCKED)
896 888 page_downgrade(pp);
897 889 else
898 890 page_unlock(pp);
899 891 #endif
900 892 }
901 893
902 894 return (addr);
903 895 }
904 896
905 897 static void *
906 898 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
907 899 {
908 900 void *addr;
909 901 segkmem_gc_list_t *gcp, **prev_gcpp;
910 902
911 903 ASSERT(vp != NULL);
912 904
913 905 if (kvseg.s_base == NULL) {
914 906 #ifndef __sparc
915 907 if (bootops->bsys_alloc == NULL)
916 908 halt("Memory allocation between bop_alloc() and "
917 909 "kmem_alloc().\n");
918 910 #endif
919 911
920 912 /*
921 913 * There's not a lot of memory to go around during boot,
922 914 * so recycle it if we can.
923 915 */
924 916 for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
925 917 prev_gcpp = &gcp->gc_next) {
926 918 if (gcp->gc_arena == vmp && gcp->gc_size == size) {
927 919 *prev_gcpp = gcp->gc_next;
928 920 return (gcp);
929 921 }
930 922 }
931 923
932 924 addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
933 925 if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
934 926 panic("segkmem_alloc: boot_alloc failed");
935 927 return (addr);
936 928 }
937 929 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
938 930 segkmem_page_create, vp));
939 931 }
940 932
941 933 void *
942 934 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
943 935 {
944 936 return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
945 937 }
946 938
947 939 void *
948 940 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
949 941 {
950 942 return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
951 943 }
952 944
953 945 /*
954 946 * Any changes to this routine must also be carried over to
955 947 * devmap_free_pages() in the seg_dev driver. This is because
956 948 * we currently don't have a special kernel segment for non-paged
957 949 * kernel memory that is exported by drivers to user space.
958 950 */
959 951 static void
960 952 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
961 953 void (*func)(page_t *))
962 954 {
963 955 page_t *pp;
964 956 caddr_t addr = inaddr;
965 957 caddr_t eaddr;
966 958 pgcnt_t npages = btopr(size);
967 959
968 960 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
969 961 ASSERT(vp != NULL);
970 962
971 963 if (kvseg.s_base == NULL) {
972 964 segkmem_gc_list_t *gc = inaddr;
973 965 gc->gc_arena = vmp;
974 966 gc->gc_size = size;
975 967 gc->gc_next = segkmem_gc_list;
976 968 segkmem_gc_list = gc;
977 969 return;
978 970 }
979 971
980 972 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
981 973
982 974 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
983 975 #if defined(__x86)
984 976 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
985 977 if (pp == NULL)
986 978 panic("segkmem_free: page not found");
987 979 if (!page_tryupgrade(pp)) {
988 980 /*
989 981 * Some other thread has a sharelock. Wait for
990 982 * it to drop the lock so we can free this page.
991 983 */
992 984 page_unlock(pp);
993 985 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
994 986 SE_EXCL);
995 987 }
996 988 #else
997 989 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
998 990 #endif
999 991 if (pp == NULL)
1000 992 panic("segkmem_free: page not found");
1001 993 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
1002 994 pp->p_lckcnt = 0;
1003 995 if (func)
1004 996 func(pp);
1005 997 else
1006 998 page_destroy(pp, 0);
1007 999 }
1008 1000 if (func == NULL)
1009 1001 page_unresv(npages);
1010 1002
1011 1003 if (vmp != NULL)
1012 1004 vmem_free(vmp, inaddr, size);
1013 1005
1014 1006 }
1015 1007
1016 1008 void
1017 1009 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1018 1010 {
1019 1011 segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1020 1012 }
1021 1013
1022 1014 void
1023 1015 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1024 1016 {
1025 1017 segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1026 1018 }
1027 1019
1028 1020 void
1029 1021 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1030 1022 {
1031 1023 segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
1032 1024 }
1033 1025
1034 1026 void
1035 1027 segkmem_gc(void)
1036 1028 {
1037 1029 ASSERT(kvseg.s_base != NULL);
1038 1030 while (segkmem_gc_list != NULL) {
1039 1031 segkmem_gc_list_t *gc = segkmem_gc_list;
1040 1032 segkmem_gc_list = gc->gc_next;
1041 1033 segkmem_free(gc->gc_arena, gc, gc->gc_size);
1042 1034 }
1043 1035 }
1044 1036
1045 1037 /*
1046 1038 * Legacy entry points from here to end of file.
1047 1039 */
1048 1040 void
1049 1041 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1050 1042 pfn_t pfn, uint_t flags)
1051 1043 {
1052 1044 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1053 1045 hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1054 1046 flags | HAT_LOAD_LOCK);
1055 1047 }
1056 1048
1057 1049 void
1058 1050 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1059 1051 {
1060 1052 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1061 1053 }
1062 1054
1063 1055 void *
1064 1056 kmem_getpages(pgcnt_t npages, int kmflag)
1065 1057 {
1066 1058 return (kmem_alloc(ptob(npages), kmflag));
1067 1059 }
1068 1060
1069 1061 void
1070 1062 kmem_freepages(void *addr, pgcnt_t npages)
1071 1063 {
1072 1064 kmem_free(addr, ptob(npages));
1073 1065 }
1074 1066
1075 1067 /*
1076 1068 * segkmem_page_create_large() allocates a large page to be used for the kmem
1077 1069 * caches. If kpr is enabled we ask for a relocatable page unless requested
1078 1070 * otherwise. If kpr is disabled we have to ask for a non-reloc page
1079 1071 * otherwise. If kpr is disabled we have to ask for a non-reloc page.
1080 1072 static page_t *
1081 1073 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1082 1074 {
1083 1075 int pgflags;
1084 1076
1085 1077 pgflags = PG_EXCL;
1086 1078
1087 1079 if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1088 1080 pgflags |= PG_NORELOC;
1089 1081 if (!(vmflag & VM_NOSLEEP))
1090 1082 pgflags |= PG_WAIT;
1091 1083 if (vmflag & VM_PUSHPAGE)
1092 1084 pgflags |= PG_PUSHPAGE;
1093 1085 if (vmflag & VM_NORMALPRI)
1094 1086 pgflags |= PG_NORMALPRI;
1095 1087
1096 1088 return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1097 1089 pgflags, &kvseg, addr, arg));
1098 1090 }
1099 1091
1100 1092 /*
1101 1093 * Allocate a large page to back the virtual address range
1102 1094 * [addr, addr + size). If addr is NULL, allocate the virtual address
1103 1095 * space as well.
1104 1096 */
1105 1097 static void *
1106 1098 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1107 1099 uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1108 1100 void *pcarg)
1109 1101 {
1110 1102 caddr_t addr = inaddr, pa;
1111 1103 size_t lpsize = segkmem_lpsize;
1112 1104 pgcnt_t npages = btopr(size);
1113 1105 pgcnt_t nbpages = btop(lpsize);
1114 1106 pgcnt_t nlpages = size >> segkmem_lpshift;
1115 1107 size_t ppasize = nbpages * sizeof (page_t *);
1116 1108 page_t *pp, *rootpp, **ppa, *pplist = NULL;
1117 1109 int i;
1118 1110
1119 1111 vmflag |= VM_NOSLEEP;
1120 1112
1121 1113 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1122 1114 return (NULL);
1123 1115 }
1124 1116
1125 1117 /*
1126 1118 * allocate an array we need for hat_memload_array.
1127 1119 * we use a separate arena to avoid recursion.
1128 1120 * we will not need this array when hat_memload_array learns pp++
1129 1121 */
1130 1122 if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1131 1123 goto fail_array_alloc;
1132 1124 }
1133 1125
1134 1126 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1135 1127 goto fail_vmem_alloc;
1136 1128
1137 1129 ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1138 1130
1139 1131 /* create all the pages */
1140 1132 for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1141 1133 if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1142 1134 goto fail_page_create;
1143 1135 page_list_concat(&pplist, &pp);
1144 1136 }
1145 1137
1146 1138 /* at this point we have all the resource to complete the request */
1147 1139 while ((rootpp = pplist) != NULL) {
1148 1140 for (i = 0; i < nbpages; i++) {
1149 1141 ASSERT(pplist != NULL);
1150 1142 pp = pplist;
1151 1143 page_sub(&pplist, pp);
1152 1144 ASSERT(page_iolock_assert(pp));
1153 1145 page_io_unlock(pp);
1154 1146 ppa[i] = pp;
1155 1147 }
1156 1148 /*
1157 1149 * Load the locked entry. It's OK to preload the entry into the
1158 1150 * TSB since we now support large mappings in the kernel TSB.
1159 1151 */
1160 1152 hat_memload_array(kas.a_hat,
1161 1153 (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1162 1154 ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1163 1155 HAT_LOAD_LOCK);
1164 1156
1165 1157 for (--i; i >= 0; --i) {
1166 1158 ppa[i]->p_lckcnt = 1;
1167 1159 page_unlock(ppa[i]);
1168 1160 }
1169 1161 }
1170 1162
1171 1163 vmem_free(segkmem_ppa_arena, ppa, ppasize);
1172 1164 return (addr);
1173 1165
1174 1166 fail_page_create:
1175 1167 while ((rootpp = pplist) != NULL) {
1176 1168 for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1177 1169 ASSERT(pp != NULL);
1178 1170 page_sub(&pplist, pp);
1179 1171 ASSERT(page_iolock_assert(pp));
1180 1172 page_io_unlock(pp);
1181 1173 }
1182 1174 page_destroy_pages(rootpp);
1183 1175 }
1184 1176
1185 1177 if (inaddr == NULL)
1186 1178 vmem_free(vmp, addr, size);
1187 1179
1188 1180 fail_vmem_alloc:
1189 1181 vmem_free(segkmem_ppa_arena, ppa, ppasize);
1190 1182
1191 1183 fail_array_alloc:
1192 1184 page_unresv(npages);
1193 1185
1194 1186 return (NULL);
1195 1187 }
1196 1188
1197 1189 static void
1198 1190 segkmem_free_one_lp(caddr_t addr, size_t size)
1199 1191 {
1200 1192 page_t *pp, *rootpp = NULL;
1201 1193 pgcnt_t pgs_left = btopr(size);
1202 1194
1203 1195 ASSERT(size == segkmem_lpsize);
1204 1196
1205 1197 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1206 1198
1207 1199 for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1208 1200 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1209 1201 if (pp == NULL)
1210 1202 panic("segkmem_free_one_lp: page not found");
1211 1203 ASSERT(PAGE_EXCL(pp));
1212 1204 pp->p_lckcnt = 0;
1213 1205 if (rootpp == NULL)
1214 1206 rootpp = pp;
1215 1207 }
1216 1208 ASSERT(rootpp != NULL);
1217 1209 page_destroy_pages(rootpp);
1218 1210
1219 1211 /* page_unresv() is done by the caller */
1220 1212 }
1221 1213
1222 1214 /*
1223 1215 * This function is called to import new spans into the vmem arenas like
1224 1216 * kmem_default_arena and kmem_oversize_arena. It first tries to import
1225 1217 * spans from large page arena - kmem_lp_arena. In order to do this it might
1226 1218 * have to "upgrade the requested size" to kmem_lp_arena quantum. If
1227 1219 * it was not able to satisfy the upgraded request it then calls regular
1228 1220 * segkmem_alloc() that satisfies the request by importing from "*vmp" arena
1229 1221 */
1230 1222 /*ARGSUSED*/
1231 1223 void *
1232 1224 segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
1233 1225 {
1234 1226 size_t size;
1235 1227 kthread_t *t = curthread;
1236 1228 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1237 1229
1238 1230 ASSERT(sizep != NULL);
1239 1231
1240 1232 size = *sizep;
1241 1233
1242 1234 if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1243 1235 !(vmflag & SEGKMEM_SHARELOCKED)) {
1244 1236
1245 1237 size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1246 1238 size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1247 1239 void *addr = NULL;
1248 1240 ulong_t *lpthrtp = &lpcb->lp_throttle;
1249 1241 ulong_t lpthrt = *lpthrtp;
1250 1242 int dowakeup = 0;
1251 1243 int doalloc = 1;
1252 1244
1253 1245 ASSERT(kmem_lp_arena != NULL);
1254 1246 ASSERT(asize >= size);
1255 1247
1256 1248 if (lpthrt != 0) {
1257 1249 /* try to update the throttle value */
1258 1250 lpthrt = atomic_inc_ulong_nv(lpthrtp);
1259 1251 if (lpthrt >= segkmem_lpthrottle_max) {
1260 1252 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1261 1253 segkmem_lpthrottle_max / 4);
1262 1254 }
1263 1255
1264 1256 /*
1265 1257 * when we get above throttle start do an exponential
1266 1258 * backoff at trying large pages and reaping
1267 1259 */
1268 1260 if (lpthrt > segkmem_lpthrottle_start &&
1269 1261 !ISP2(lpthrt)) {
1270 1262 lpcb->allocs_throttled++;
1271 1263 lpthrt--;
1272 1264 if (ISP2(lpthrt))
1273 1265 kmem_reap();
1274 1266 return (segkmem_alloc(vmp, size, vmflag));
1275 1267 }
1276 1268 }
1277 1269
1278 1270 if (!(vmflag & VM_NOSLEEP) &&
1279 1271 segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1280 1272 vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1281 1273 asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1282 1274
1283 1275 /*
1284 1276 * We are low on free memory in kmem_lp_arena,
1285 1277 * so let only one thread allocate a heap_lp
1286 1278 * quantum-size chunk that everybody is going to
1287 1279 * share.
1288 1280 */
1289 1281 mutex_enter(&lpcb->lp_lock);
1290 1282
1291 1283 if (lpcb->lp_wait) {
1292 1284
1293 1285 /* we are not the first one - wait */
1294 1286 cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1295 1287 if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1296 1288 kmemlp_qnt) {
1297 1289 doalloc = 0;
1298 1290 }
1299 1291 } else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1300 1292 kmemlp_qnt) {
1301 1293
1302 1294 /*
1303 1295 * we are the first one, make sure we import
1304 1296 * a large page
1305 1297 */
1306 1298 if (asize == kmemlp_qnt)
1307 1299 asize += kmemlp_qnt;
1308 1300 dowakeup = 1;
1309 1301 lpcb->lp_wait = 1;
1310 1302 }
1311 1303
1312 1304 mutex_exit(&lpcb->lp_lock);
1313 1305 }
1314 1306
1315 1307 /*
1316 1308 * VM_ABORT flag prevents sleeps in vmem_xalloc when
1317 1309 * large pages are not available. In that case this allocation
1318 1310 * attempt will fail and we will retry allocation with small
1319 1311 * pages. We also do not want to panic if this allocation fails
1320 1312 * because we are going to retry.
1321 1313 */
1322 1314 if (doalloc) {
1323 1315 addr = vmem_alloc(kmem_lp_arena, asize,
1324 1316 (vmflag | VM_ABORT) & ~VM_PANIC);
1325 1317
1326 1318 if (dowakeup) {
1327 1319 mutex_enter(&lpcb->lp_lock);
1328 1320 ASSERT(lpcb->lp_wait != 0);
1329 1321 lpcb->lp_wait = 0;
1330 1322 cv_broadcast(&lpcb->lp_cv);
1331 1323 mutex_exit(&lpcb->lp_lock);
1332 1324 }
1333 1325 }
1334 1326
1335 1327 if (addr != NULL) {
1336 1328 *sizep = asize;
1337 1329 *lpthrtp = 0;
1338 1330 return (addr);
1339 1331 }
1340 1332
1341 1333 if (vmflag & VM_NOSLEEP)
1342 1334 lpcb->nosleep_allocs_failed++;
1343 1335 else
1344 1336 lpcb->sleep_allocs_failed++;
1345 1337 lpcb->alloc_bytes_failed += size;
1346 1338
1347 1339 /* if large page throttling is not started yet do it */
1348 1340 if (segkmem_use_lpthrottle && lpthrt == 0) {
1349 1341 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1350 1342 }
1351 1343 }
1352 1344 return (segkmem_alloc(vmp, size, vmflag));
1353 1345 }
1354 1346
1355 1347 void
1356 1348 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1357 1349 {
1358 1350 if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1359 1351 segkmem_free(vmp, inaddr, size);
1360 1352 } else {
1361 1353 vmem_free(kmem_lp_arena, inaddr, size);
1362 1354 }
1363 1355 }
1364 1356
1365 1357 /*
1366 1358 * segkmem_alloc_lpi() imports virtual memory from the large page heap arena
1367 1359 * into the kmem_lp arena. In the process it maps the imported segment with
1368 1360 * large pages.
1369 1361 */
1370 1362 static void *
1371 1363 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1372 1364 {
1373 1365 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1374 1366 void *addr;
1375 1367
1376 1368 ASSERT(size != 0);
1377 1369 ASSERT(vmp == heap_lp_arena);
1378 1370
1379 1371 /* do not allow the large page heap to grow beyond its limits */
1380 1372 if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1381 1373 lpcb->allocs_limited++;
1382 1374 return (NULL);
1383 1375 }
1384 1376
1385 1377 addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1386 1378 segkmem_page_create_large, NULL);
1387 1379 return (addr);
1388 1380 }
1389 1381
1390 1382 /*
1391 1383 * segkmem_free_lpi() returns virtual memory from the kmem_lp arena back into
1392 1384 * the large page heap arena. Before doing this it unmaps the segment and
1393 1385 * frees the large pages used to map it.
1394 1386 */
1395 1387 static void
1396 1388 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1397 1389 {
1398 1390 pgcnt_t nlpages = size >> segkmem_lpshift;
1399 1391 size_t lpsize = segkmem_lpsize;
1400 1392 caddr_t addr = inaddr;
1401 1393 pgcnt_t npages = btopr(size);
1402 1394 int i;
1403 1395
1404 1396 ASSERT(vmp == heap_lp_arena);
1405 1397 ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1406 1398 ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1407 1399
1408 1400 for (i = 0; i < nlpages; i++) {
1409 1401 segkmem_free_one_lp(addr, lpsize);
1410 1402 addr += lpsize;
1411 1403 }
1412 1404
1413 1405 page_unresv(npages);
1414 1406
1415 1407 vmem_free(vmp, inaddr, size);
1416 1408 }
1417 1409
1418 1410 /*
1419 1411 * This function is called at system boot time by kmem_init right after
1420 1412 * the /etc/system file has been read. It checks, based on hardware
1421 1413 * configuration and /etc/system settings, whether the system is going to use
1422 1414 * large pages. The initialization necessary to actually start using large pages
1423 1415 * happens later in the process after segkmem_heap_lp_init() is called.
1424 1416 */
1425 1417 int
1426 1418 segkmem_lpsetup()
1427 1419 {
1428 1420 int use_large_pages = 0;
1429 1421
1430 1422 #ifdef __sparc
1431 1423
1432 1424 size_t memtotal = physmem * PAGESIZE;
1433 1425
1434 1426 if (heap_lp_base == NULL) {
1435 1427 segkmem_lpsize = PAGESIZE;
1436 1428 return (0);
1437 1429 }
1438 1430
1439 1431 /* get a platform dependent value of large page size for kernel heap */
1440 1432 segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1441 1433
1442 1434 if (segkmem_lpsize <= PAGESIZE) {
1443 1435 /*
1444 1436 * put virtual space reserved for the large page kernel
1445 1437 * back to the regular heap
1446 1438 */
1447 1439 vmem_xfree(heap_arena, heap_lp_base,
1448 1440 heap_lp_end - heap_lp_base);
1449 1441 heap_lp_base = NULL;
1450 1442 heap_lp_end = NULL;
1451 1443 segkmem_lpsize = PAGESIZE;
1452 1444 return (0);
1453 1445 }
1454 1446
1455 1447 /* set heap_lp quantum if necessary */
1456 1448 if (segkmem_heaplp_quantum == 0 || !ISP2(segkmem_heaplp_quantum) ||
1457 1449 P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1458 1450 segkmem_heaplp_quantum = segkmem_lpsize;
1459 1451 }
1460 1452
1461 1453 /* set kmem_lp quantum if necessary */
1462 1454 if (segkmem_kmemlp_quantum == 0 || !ISP2(segkmem_kmemlp_quantum) ||
1463 1455 segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1464 1456 segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1465 1457 }
1466 1458
1467 1459 /* set total amount of memory allowed for large page kernel heap */
1468 1460 if (segkmem_kmemlp_max == 0) {
1469 1461 if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1470 1462 segkmem_kmemlp_pcnt = 12;
1471 1463 segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1472 1464 }
1473 1465 segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1474 1466 segkmem_heaplp_quantum);
1475 1467
1476 1468 /* fix lp kmem preallocation request if necessary */
1477 1469 if (segkmem_kmemlp_min) {
1478 1470 segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1479 1471 segkmem_heaplp_quantum);
1480 1472 if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1481 1473 segkmem_kmemlp_min = segkmem_kmemlp_max;
1482 1474 }
1483 1475
1484 1476 use_large_pages = 1;
1485 1477 segkmem_lpszc = page_szc(segkmem_lpsize);
1486 1478 segkmem_lpshift = page_get_shift(segkmem_lpszc);
1487 1479
1488 1480 #endif
1489 1481 return (use_large_pages);
1490 1482 }
1491 1483
1492 1484 void
1493 1485 segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
1494 1486 {
1495 1487 ASSERT(zio_mem_base != NULL);
1496 1488 ASSERT(zio_mem_size != 0);
1497 1489
1498 1490 /*
1499 1491 * To reduce VA space fragmentation, we set up quantum caches for the
1500 1492 * smaller sizes; we chose 32k because that translates to 128k VA
1501 1493 * slabs, which matches nicely with the common 128k zio_data bufs.
1502 1494 */
1503 1495 zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
1504 1496 PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);
1505 1497
1506 1498 zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
1507 1499 segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);
1508 1500
1509 1501 ASSERT(zio_arena != NULL);
1510 1502 ASSERT(zio_alloc_arena != NULL);
1511 1503 }
1512 1504
1513 1505 #ifdef __sparc
1514 1506
1515 1507
1516 1508 static void *
1517 1509 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1518 1510 {
1519 1511 size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1520 1512 void *addr;
1521 1513
1522 1514 if (ppaquantum <= PAGESIZE)
1523 1515 return (segkmem_alloc(vmp, size, vmflag));
1524 1516
1525 1517 ASSERT((size & (ppaquantum - 1)) == 0);
1526 1518
1527 1519 addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1528 1520 if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1529 1521 segkmem_page_create, NULL) == NULL) {
1530 1522 vmem_xfree(vmp, addr, size);
1531 1523 addr = NULL;
1532 1524 }
1533 1525
1534 1526 return (addr);
1535 1527 }
1536 1528
1537 1529 static void
1538 1530 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1539 1531 {
1540 1532 size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1541 1533
1542 1534 ASSERT(addr != NULL);
1543 1535
1544 1536 if (ppaquantum <= PAGESIZE) {
1545 1537 segkmem_free(vmp, addr, size);
1546 1538 } else {
1547 1539 segkmem_free(NULL, addr, size);
1548 1540 vmem_xfree(vmp, addr, size);
1549 1541 }
1550 1542 }
1551 1543
1552 1544 void
1553 1545 segkmem_heap_lp_init()
1554 1546 {
1555 1547 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1556 1548 size_t heap_lp_size = heap_lp_end - heap_lp_base;
1557 1549 size_t lpsize = segkmem_lpsize;
1558 1550 size_t ppaquantum;
1559 1551 void *addr;
1560 1552
1561 1553 if (segkmem_lpsize <= PAGESIZE) {
1562 1554 ASSERT(heap_lp_base == NULL);
1563 1555 ASSERT(heap_lp_end == NULL);
1564 1556 return;
1565 1557 }
1566 1558
1567 1559 ASSERT(segkmem_heaplp_quantum >= lpsize);
1568 1560 ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1569 1561 ASSERT(lpcb->lp_uselp == 0);
1570 1562 ASSERT(heap_lp_base != NULL);
1571 1563 ASSERT(heap_lp_end != NULL);
1572 1564 ASSERT(heap_lp_base < heap_lp_end);
1573 1565 ASSERT(heap_lp_arena == NULL);
1574 1566 ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1575 1567 ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1576 1568
1577 1569 /* create large page heap arena */
1578 1570 heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1579 1571 segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1580 1572
1581 1573 ASSERT(heap_lp_arena != NULL);
1582 1574
1583 1575 /* This arena caches memory already mapped by large pages */
1584 1576 kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1585 1577 segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1586 1578
1587 1579 ASSERT(kmem_lp_arena != NULL);
1588 1580
1589 1581 mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1590 1582 cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1591 1583
1592 1584 /*
1593 1585 * This arena is used for the array of page_t pointers necessary
1594 1586 * to call hat_memload_array().
1595 1587 */
1596 1588 ppaquantum = btopr(lpsize) * sizeof (page_t *);
1597 1589 segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1598 1590 segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1599 1591 VM_SLEEP);
1600 1592
1601 1593 ASSERT(segkmem_ppa_arena != NULL);
1602 1594
1603 1595 /* preallocate some memory for the lp kernel heap */
1604 1596 if (segkmem_kmemlp_min) {
1605 1597
1606 1598 ASSERT(P2PHASE(segkmem_kmemlp_min,
1607 1599 segkmem_heaplp_quantum) == 0);
1608 1600
1609 1601 if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1610 1602 segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1611 1603
1612 1604 addr = vmem_add(kmem_lp_arena, addr,
1613 1605 segkmem_kmemlp_min, VM_SLEEP);
1614 1606 ASSERT(addr != NULL);
1615 1607 }
1616 1608 }
1617 1609
1618 1610 lpcb->lp_uselp = 1;
1619 1611 }
1620 1612
1621 1613 #endif