seg_inherit_notsup is redundant since segop_inherit() checks for a NULL .inherit op properly
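
For context, a minimal sketch of the NULL check that makes the explicit .inherit entry unnecessary. This illustrates the segop_inherit() wrapper in common/vm/seg.c and is not the verbatim code; any seg_ops that leaves .inherit unset gets the same ENOTSUP result that seg_inherit_notsup() used to return:

	int
	segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
	{
		/* No inherit op registered: same effect as seg_inherit_notsup(). */
		if (seg->s_ops->inherit == NULL)
			return (ENOTSUP);

		return (seg->s_ops->inherit(seg, addr, len, op));
	}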
--- old/usr/src/uts/common/vm/seg_kmem.c
+++ new/usr/src/uts/common/vm/seg_kmem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/types.h>
26 26 #include <sys/t_lock.h>
27 27 #include <sys/param.h>
28 28 #include <sys/sysmacros.h>
29 29 #include <sys/tuneable.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/vm.h>
32 32 #include <sys/kmem.h>
33 33 #include <sys/vmem.h>
34 34 #include <sys/mman.h>
35 35 #include <sys/cmn_err.h>
36 36 #include <sys/debug.h>
37 37 #include <sys/dumphdr.h>
38 38 #include <sys/bootconf.h>
39 39 #include <sys/lgrp.h>
40 40 #include <vm/seg_kmem.h>
41 41 #include <vm/hat.h>
42 42 #include <vm/page.h>
43 43 #include <vm/vm_dep.h>
44 44 #include <vm/faultcode.h>
45 45 #include <sys/promif.h>
46 46 #include <vm/seg_kp.h>
47 47 #include <sys/bitmap.h>
48 48 #include <sys/mem_cage.h>
49 49
50 50 #ifdef __sparc
51 51 #include <sys/ivintr.h>
52 52 #include <sys/panic.h>
53 53 #endif
54 54
55 55 /*
56 56 * seg_kmem is the primary kernel memory segment driver. It
57 57 * maps the kernel heap [kernelheap, ekernelheap), module text,
58 58 * and all memory which was allocated before the VM was initialized
59 59 * into kas.
60 60 *
61 61 * Pages which belong to seg_kmem are hashed into &kvp vnode at
62 62 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
63 63 * They must never be paged out since segkmem_fault() is a no-op to
64 64 * prevent recursive faults.
65 65 *
66 66 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
67 67 * __x86 and are unlocked (p_sharelock == 0) on __sparc. Once __x86
68 68 * supports relocation the #ifdef kludges can be removed.
69 69 *
70 70 * seg_kmem pages may be subject to relocation by page_relocate(),
71 71 * provided that the HAT supports it; if this is so, segkmem_reloc
72 72 * will be set to a nonzero value. All boot time allocated memory as
73 73 * well as static memory is considered off limits to relocation.
74 74 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
75 75 * we request P_NORELOC pages for memory that isn't safe to relocate.
76 76 *
77 77 * The kernel heap is logically divided up into four pieces:
78 78 *
79 79 * heap32_arena is for allocations that require 32-bit absolute
80 80 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
81 81 *
82 82 * heap_core is for allocations that require 2GB *relative*
83 83 * offsets; in other words all memory from heap_core is within
84 84 * 2GB of all other memory from the same arena. This is a requirement
85 85 * of the addressing modes of some processors in supervisor code.
86 86 *
87 87 * heap_arena is the general heap arena.
88 88 *
89 89 * static_arena is the static memory arena. Allocations from it
90 90 * are not subject to relocation so it is safe to use the memory
91 91 * physical address as well as the virtual address (e.g. the VA to
92 92 * PA translations are static). Caches may import from static_arena;
93 93 * all other static memory allocations should use static_alloc_arena.
94 94 *
95 95 * On some platforms which have limited virtual address space, seg_kmem
96 96 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
97 97 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
98 98 * address space which is actually seg_kp mapped.
99 99 */
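/*
 * Illustrative example (not part of this change): a caller that needs a
 * stable VA -> PA mapping would source its memory from static_alloc_arena,
 * while ordinary wired kernel heap memory comes from heap_arena through
 * segkmem_alloc()/segkmem_free(). A hypothetical consumer might do:
 *
 *	void *buf = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);
 *	...
 *	vmem_free(static_alloc_arena, buf, PAGESIZE);
 */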
100 100
101 101 extern ulong_t *segkp_bitmap; /* Is set if segkp is from the kernel heap */
102 102
103 103 char *kernelheap; /* start of primary kernel heap */
104 104 char *ekernelheap; /* end of primary kernel heap */
105 105 struct seg kvseg; /* primary kernel heap segment */
106 106 struct seg kvseg_core; /* "core" kernel heap segment */
107 107 struct seg kzioseg; /* Segment for zio mappings */
108 108 vmem_t *heap_arena; /* primary kernel heap arena */
109 109 vmem_t *heap_core_arena; /* core kernel heap arena */
110 110 char *heap_core_base; /* start of core kernel heap arena */
111 111 char *heap_lp_base; /* start of kernel large page heap arena */
112 112 char *heap_lp_end; /* end of kernel large page heap arena */
113 113 vmem_t *hat_memload_arena; /* HAT translation data */
114 114 struct seg kvseg32; /* 32-bit kernel heap segment */
115 115 vmem_t *heap32_arena; /* 32-bit kernel heap arena */
116 116 vmem_t *heaptext_arena; /* heaptext arena */
117 117 struct as kas; /* kernel address space */
118 118 int segkmem_reloc; /* enable/disable relocatable segkmem pages */
119 119 vmem_t *static_arena; /* arena for caches to import static memory */
120 120 vmem_t *static_alloc_arena; /* arena for allocating static memory */
121 121 vmem_t *zio_arena = NULL; /* arena for allocating zio memory */
122 122 vmem_t *zio_alloc_arena = NULL; /* arena for allocating zio memory */
123 123
124 124 /*
125 125 * seg_kmem driver can map part of the kernel heap with large pages.
126 126 * Currently this functionality is implemented for sparc platforms only.
127 127 *
128 128 * The large page size "segkmem_lpsize" for kernel heap is selected in the
129 129 * platform specific code. It can also be modified via /etc/system file.
130 130 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
131 131 * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
132 132 * match segkmem_lpsize.
133 133 *
134 134 * At boot time we carve from kernel heap arena a range of virtual addresses
135 135 * that will be used for large page mappings. This range [heap_lp_base,
136 136 * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena". We also
137 137 * create "kmem_lp_arena" that caches memory already backed up by large
138 138 * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
139 139 */
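/*
 * Illustrative sketch of the arena stacking described above (assumed to
 * mirror how kmem_init() wires things up; names and the final call are
 * examples only, not part of this change):
 *
 *	consumer arena (e.g. kmem_default_arena)
 *	    imports via segkmem_alloc_lp()/segkmem_free_lp()
 *		-> kmem_lp_arena (VA already mapped with large pages)
 *		    imports via segkmem_alloc_lpi()/segkmem_free_lpi()
 *			-> heap_lp_arena, spanning [heap_lp_base, heap_lp_end)
 *
 *	arena = vmem_xcreate("example_lp_consumer", NULL, 0, PAGESIZE,
 *	    segkmem_alloc_lp, segkmem_free_lp, heap_arena, 0, VM_SLEEP);
 */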
140 140
141 141 size_t segkmem_lpsize;
142 142 static uint_t segkmem_lpshift = PAGESHIFT;
143 143 int segkmem_lpszc = 0;
144 144
145 145 size_t segkmem_kmemlp_quantum = 0x400000; /* 4MB */
146 146 size_t segkmem_heaplp_quantum;
147 147 vmem_t *heap_lp_arena;
148 148 static vmem_t *kmem_lp_arena;
149 149 static vmem_t *segkmem_ppa_arena;
150 150 static segkmem_lpcb_t segkmem_lpcb;
151 151
152 152 /*
153 153 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
154 154 * consumed by the large page heap. By default this parameter is set to 1/8 of
155 155 * physmem but can be adjusted through /etc/system either directly or
156 156 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
157 157 * we allow for large page heap.
158 158 */
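/*
 * Example /etc/system settings for these tunables and for segkmem_lpsize
 * (values are purely illustrative, not recommendations):
 *
 *	set segkmem_lpsize = 0x400000		use 4M pages for the kernel heap
 *	set segkmem_kmemlp_pcnt = 25		cap the LP heap at 25% of physmem
 *	set segkmem_kmemlp_max = 0x40000000	or cap it at 1GB directly
 */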
159 159 size_t segkmem_kmemlp_max;
160 160 static uint_t segkmem_kmemlp_pcnt;
161 161
162 162 /*
163 163 * Getting large pages for kernel heap could be problematic due to
164 164 * physical memory fragmentation. That's why we allow preallocating
165 165 * "segkmem_kmemlp_min" bytes at boot time.
166 166 */
167 167 static size_t segkmem_kmemlp_min;
168 168
169 169 /*
170 170 * Throttling is used to avoid expensive attempts to allocate large pages
171 171 * for the kernel heap when a lot of successive attempts to do so fail.
172 172 */
173 173 static ulong_t segkmem_lpthrottle_max = 0x400000;
174 174 static ulong_t segkmem_lpthrottle_start = 0x40;
175 175 static ulong_t segkmem_use_lpthrottle = 1;
176 176
177 177 /*
178 178 * Freed pages accumulate on a garbage list until segkmem is ready,
179 179 * at which point we call segkmem_gc() to free it all.
180 180 */
181 181 typedef struct segkmem_gc_list {
182 182 struct segkmem_gc_list *gc_next;
183 183 vmem_t *gc_arena;
184 184 size_t gc_size;
185 185 } segkmem_gc_list_t;
186 186
187 187 static segkmem_gc_list_t *segkmem_gc_list;
188 188
189 189 /*
190 190 * Allocations from the hat_memload arena add VM_MEMLOAD to their
191 191 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
192 192 * to take steps to prevent infinite recursion. HAT allocations also
193 193 * must be non-relocatable to prevent recursive page faults.
194 194 */
195 195 static void *
196 196 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
197 197 {
198 198 flags |= (VM_MEMLOAD | VM_NORELOC);
199 199 return (segkmem_alloc(vmp, size, flags));
200 200 }
201 201
202 202 /*
203 203 * Allocations from static_arena arena (or any other arena that uses
204 204 * segkmem_alloc_permanent()) require non-relocatable (permanently
205 205 * wired) memory pages, since these pages are referenced by physical
206 206 * as well as virtual address.
207 207 */
208 208 void *
209 209 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
210 210 {
211 211 return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
212 212 }
213 213
214 214 /*
215 215 * Initialize kernel heap boundaries.
216 216 */
217 217 void
218 218 kernelheap_init(
219 219 void *heap_start,
220 220 void *heap_end,
221 221 char *first_avail,
222 222 void *core_start,
223 223 void *core_end)
224 224 {
225 225 uintptr_t textbase;
226 226 size_t core_size;
227 227 size_t heap_size;
228 228 vmem_t *heaptext_parent;
229 229 size_t heap_lp_size = 0;
230 230 #ifdef __sparc
231 231 size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
232 232 #endif /* __sparc */
233 233
234 234 kernelheap = heap_start;
235 235 ekernelheap = heap_end;
236 236
237 237 #ifdef __sparc
238 238 heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
239 239 /*
240 240 * Bias heap_lp start address by kmem64_sz to reduce collisions
241 241 * in 4M kernel TSB between kmem64 area and heap_lp
242 242 */
243 243 kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
244 244 if (kmem64_sz <= heap_lp_size / 2)
245 245 heap_lp_size -= kmem64_sz;
246 246 heap_lp_base = ekernelheap - heap_lp_size;
247 247 heap_lp_end = heap_lp_base + heap_lp_size;
248 248 #endif /* __sparc */
249 249
250 250 /*
251 251 * If this platform has a 'core' heap area, then the space for
252 252 * overflow module text should be carved out of the end of that
253 253 * heap. Otherwise, it gets carved out of the general purpose
254 254 * heap.
255 255 */
256 256 core_size = (uintptr_t)core_end - (uintptr_t)core_start;
257 257 if (core_size > 0) {
258 258 ASSERT(core_size >= HEAPTEXT_SIZE);
259 259 textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
260 260 core_size -= HEAPTEXT_SIZE;
261 261 }
262 262 #ifndef __sparc
263 263 else {
264 264 ekernelheap -= HEAPTEXT_SIZE;
265 265 textbase = (uintptr_t)ekernelheap;
266 266 }
267 267 #endif
268 268
269 269 heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
270 270 heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
271 271 segkmem_alloc, segkmem_free);
272 272
273 273 if (core_size > 0) {
274 274 heap_core_arena = vmem_create("heap_core", core_start,
275 275 core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
276 276 heap_core_base = core_start;
277 277 } else {
278 278 heap_core_arena = heap_arena;
279 279 heap_core_base = kernelheap;
280 280 }
281 281
282 282 /*
283 283 * Reserve space for the large page heap. If large pages for the kernel
284 284 * heap are enabled, the large page heap arena will be created later in
285 285 * the boot sequence in segkmem_heap_lp_init(). Otherwise the allocated
286 286 * range will be returned back to the heap_arena.
287 287 */
288 288 if (heap_lp_size) {
289 289 (void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
290 290 heap_lp_base, heap_lp_end,
291 291 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
292 292 }
293 293
294 294 /*
295 295 * Remove the already-spoken-for memory range [kernelheap, first_avail).
296 296 */
297 297 (void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
298 298 0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
299 299
300 300 #ifdef __sparc
301 301 heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
302 302 SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
303 303 NULL, NULL, 0, VM_SLEEP);
304 304 /*
305 305 * Prom claims the physical and virtual resources used by panicbuf
306 306 * and intr_vec_table. So reserve space for panicbuf, intr_vec_table, and
307 307 * the reserved interrupt vector data structures from the 32-bit heap.
308 308 */
309 309 (void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
310 310 panicbuf, panicbuf + PANICBUFSIZE,
311 311 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
312 312
313 313 (void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
314 314 intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
315 315 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
316 316
317 317 textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
318 318 heaptext_parent = NULL;
319 319 #else /* __sparc */
320 320 heap32_arena = heap_core_arena;
321 321 heaptext_parent = heap_core_arena;
322 322 #endif /* __sparc */
323 323
324 324 heaptext_arena = vmem_create("heaptext", (void *)textbase,
325 325 HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
326 326
327 327 /*
328 328 * Create a set of arenas for memory with static translations
329 329 * (e.g. VA -> PA translations cannot change). Since using
330 330 * kernel pages by physical address implies it isn't safe to
331 331 * walk across page boundaries, the static_arena quantum must
332 332 * be PAGESIZE. Any kmem caches that require static memory
333 333 * should source from static_arena, while direct allocations
334 334 * should only use static_alloc_arena.
335 335 */
336 336 static_arena = vmem_create("static", NULL, 0, PAGESIZE,
337 337 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
338 338 static_alloc_arena = vmem_create("static_alloc", NULL, 0,
339 339 sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
340 340 0, VM_SLEEP);
341 341
342 342 /*
343 343 * Create an arena for translation data (ptes, hmes, or hblks).
344 344 * We need an arena for this because hat_memload() is essential
345 345 * to vmem_populate() (see comments in common/os/vmem.c).
346 346 *
347 347 * Note: any kmem cache that allocates from hat_memload_arena
348 348 * must be created as a KMC_NOHASH cache (i.e. no external slab
349 349 * and bufctl structures to allocate) so that slab creation doesn't
350 350 * require anything more than a single vmem_alloc().
351 351 */
352 352 hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
353 353 hat_memload_alloc, segkmem_free, heap_arena, 0,
354 354 VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
355 355 }
356 356
357 357 void
358 358 boot_mapin(caddr_t addr, size_t size)
359 359 {
360 360 caddr_t eaddr;
361 361 page_t *pp;
362 362 pfn_t pfnum;
363 363
364 364 if (page_resv(btop(size), KM_NOSLEEP) == 0)
365 365 panic("boot_mapin: page_resv failed");
366 366
367 367 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
368 368 pfnum = va_to_pfn(addr);
369 369 if (pfnum == PFN_INVALID)
370 370 continue;
371 371 if ((pp = page_numtopp_nolock(pfnum)) == NULL)
372 372 panic("boot_mapin(): No pp for pfnum = %lx", pfnum);
373 373
374 374 /*
375 375 * Must break up any large pages that may have constituent
376 376 * pages being utilized for BOP_ALLOC()'s before calling
377 377 * page_numtopp(). The locking code (i.e. page_reclaim())
378 378 * can't handle them.
379 379 */
380 380 if (pp->p_szc != 0)
381 381 page_boot_demote(pp);
382 382
383 383 pp = page_numtopp(pfnum, SE_EXCL);
384 384 if (pp == NULL || PP_ISFREE(pp))
385 385 panic("boot_alloc: pp is NULL or free");
386 386
387 387 /*
388 388 * If the cage is on but doesn't yet contain this page,
389 389 * mark it as non-relocatable.
390 390 */
391 391 if (kcage_on && !PP_ISNORELOC(pp)) {
392 392 PP_SETNORELOC(pp);
393 393 PLCNT_XFER_NORELOC(pp);
394 394 }
395 395
396 396 (void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
397 397 pp->p_lckcnt = 1;
398 398 #if defined(__x86)
399 399 page_downgrade(pp);
400 400 #else
401 401 page_unlock(pp);
402 402 #endif
403 403 }
404 404 }
405 405
406 406 /*
407 407 * Get pages from boot and hash them into the kernel's vp.
408 408 * Used after page structs have been allocated, but before segkmem is ready.
409 409 */
410 410 void *
411 411 boot_alloc(void *inaddr, size_t size, uint_t align)
412 412 {
413 413 caddr_t addr = inaddr;
414 414
415 415 if (bootops == NULL)
416 416 prom_panic("boot_alloc: attempt to allocate memory after "
417 417 "BOP_GONE");
418 418
419 419 size = ptob(btopr(size));
420 420 #ifdef __sparc
421 421 if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
422 422 panic("boot_alloc: bop_alloc_chunk failed");
423 423 #else
424 424 if (BOP_ALLOC(bootops, addr, size, align) != addr)
425 425 panic("boot_alloc: BOP_ALLOC failed");
426 426 #endif
427 427 boot_mapin((caddr_t)addr, size);
428 428 return (addr);
429 429 }
430 430
431 431 /*ARGSUSED*/
432 432 static faultcode_t
433 433 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
434 434 enum fault_type type, enum seg_rw rw)
435 435 {
436 436 pgcnt_t npages;
437 437 spgcnt_t pg;
438 438 page_t *pp;
439 439 struct vnode *vp = seg->s_data;
440 440
441 441 ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
442 442
443 443 if (seg->s_as != &kas || size > seg->s_size ||
444 444 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
445 445 panic("segkmem_fault: bad args");
446 446
447 447 /*
448 448 * If it is one of segkp pages, call segkp_fault.
449 449 */
450 450 if (segkp_bitmap && seg == &kvseg &&
451 451 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
452 452 return (segop_fault(hat, segkp, addr, size, type, rw));
453 453
454 454 if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
455 455 return (FC_NOSUPPORT);
456 456
457 457 npages = btopr(size);
458 458
459 459 switch (type) {
460 460 case F_SOFTLOCK: /* lock down already-loaded translations */
461 461 for (pg = 0; pg < npages; pg++) {
462 462 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
463 463 SE_SHARED);
464 464 if (pp == NULL) {
465 465 /*
466 466 * Hmm, no page. Does a kernel mapping
467 467 * exist for it?
468 468 */
469 469 if (!hat_probe(kas.a_hat, addr)) {
470 470 addr -= PAGESIZE;
471 471 while (--pg >= 0) {
472 472 pp = page_find(vp, (u_offset_t)
473 473 (uintptr_t)addr);
474 474 if (pp)
475 475 page_unlock(pp);
476 476 addr -= PAGESIZE;
477 477 }
478 478 return (FC_NOMAP);
479 479 }
480 480 }
481 481 addr += PAGESIZE;
482 482 }
483 483 if (rw == S_OTHER)
484 484 hat_reserve(seg->s_as, addr, size);
485 485 return (0);
486 486 case F_SOFTUNLOCK:
487 487 while (npages--) {
488 488 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
489 489 if (pp)
490 490 page_unlock(pp);
491 491 addr += PAGESIZE;
492 492 }
493 493 return (0);
494 494 default:
495 495 return (FC_NOSUPPORT);
496 496 }
497 497 /*NOTREACHED*/
498 498 }
499 499
500 500 static int
501 501 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
502 502 {
503 503 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
504 504
505 505 if (seg->s_as != &kas || size > seg->s_size ||
506 506 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
507 507 panic("segkmem_setprot: bad args");
508 508
509 509 /*
510 510 * If it is one of segkp pages, call segkp.
511 511 */
512 512 if (segkp_bitmap && seg == &kvseg &&
513 513 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
514 514 return (segop_setprot(segkp, addr, size, prot));
515 515
516 516 if (prot == 0)
517 517 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
518 518 else
519 519 hat_chgprot(kas.a_hat, addr, size, prot);
520 520 return (0);
521 521 }
522 522
523 523 /*
524 524 * This is a dummy segkmem function overloaded to call segkp
525 525 * when segkp is under the heap.
526 526 */
527 527 /* ARGSUSED */
528 528 static int
529 529 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
530 530 {
531 531 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
532 532
533 533 if (seg->s_as != &kas)
534 534 panic("segkmem badop");
535 535
536 536 /*
537 537 * If it is one of segkp pages, call into segkp.
538 538 */
539 539 if (segkp_bitmap && seg == &kvseg &&
540 540 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
541 541 return (segop_checkprot(segkp, addr, size, prot));
542 542
543 543 panic("segkmem badop");
544 544 return (0);
545 545 }
546 546
547 547 /*
548 548 * This is a dummy segkmem function overloaded to call segkp
549 549 * when segkp is under the heap.
550 550 */
551 551 /* ARGSUSED */
552 552 static int
553 553 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
554 554 {
555 555 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
556 556
557 557 if (seg->s_as != &kas)
558 558 panic("segkmem badop");
559 559
560 560 /*
561 561 * If it is one of segkp pages, call into segkp.
562 562 */
563 563 if (segkp_bitmap && seg == &kvseg &&
564 564 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
565 565 return (segop_kluster(segkp, addr, delta));
566 566
567 567 panic("segkmem badop");
568 568 return (0);
569 569 }
570 570
571 571 static void
572 572 segkmem_xdump_range(void *arg, void *start, size_t size)
573 573 {
574 574 struct as *as = arg;
575 575 caddr_t addr = start;
576 576 caddr_t addr_end = addr + size;
577 577
578 578 while (addr < addr_end) {
579 579 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
580 580 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
581 581 dump_addpage(as, addr, pfn);
582 582 addr += PAGESIZE;
583 583 dump_timeleft = dump_timeout;
584 584 }
585 585 }
586 586
587 587 static void
588 588 segkmem_dump_range(void *arg, void *start, size_t size)
589 589 {
590 590 caddr_t addr = start;
591 591 caddr_t addr_end = addr + size;
592 592
593 593 /*
594 594 * If we are about to start dumping the range of addresses we
595 595 * carved out of the kernel heap for the large page heap, walk
596 596 * heap_lp_arena to find what segments are actually populated.
597 597 */
598 598 if (SEGKMEM_USE_LARGEPAGES &&
599 599 addr == heap_lp_base && addr_end == heap_lp_end &&
600 600 vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
601 601 vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
602 602 segkmem_xdump_range, arg);
603 603 } else {
604 604 segkmem_xdump_range(arg, start, size);
605 605 }
606 606 }
607 607
608 608 static void
609 609 segkmem_dump(struct seg *seg)
610 610 {
611 611 /*
612 612 * The kernel's heap_arena (represented by kvseg) is a very large
613 613 * VA space, most of which is typically unused. To speed up dumping
614 614 * we use vmem_walk() to quickly find the pieces of heap_arena that
615 615 * are actually in use. We do the same for heap32_arena and
616 616 * heap_core.
617 617 *
618 618 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
619 619 * may ultimately need to allocate memory. Reentrant walks are
620 620 * necessarily imperfect snapshots. The kernel heap continues
621 621 * to change during a live crash dump, for example. For a normal
622 622 * crash dump, however, we know that there won't be any other threads
623 623 * messing with the heap. Therefore, at worst, we may fail to dump
624 624 * the pages that get allocated by the act of dumping; but we will
625 625 * always dump every page that was allocated when the walk began.
626 626 *
627 627 * The other segkmem segments are dense (fully populated), so there's
628 628 * no need to use this technique when dumping them.
629 629 *
630 630 * Note: when adding special dump handling for any new sparsely-
631 631 * populated segments, be sure to add similar handling to the ::kgrep
632 632 * code in mdb.
633 633 */
634 634 if (seg == &kvseg) {
635 635 vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
636 636 segkmem_dump_range, seg->s_as);
637 637 #ifndef __sparc
638 638 vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
639 639 segkmem_dump_range, seg->s_as);
640 640 #endif
641 641 } else if (seg == &kvseg_core) {
642 642 vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
643 643 segkmem_dump_range, seg->s_as);
644 644 } else if (seg == &kvseg32) {
645 645 vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
646 646 segkmem_dump_range, seg->s_as);
647 647 vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
648 648 segkmem_dump_range, seg->s_as);
649 649 } else if (seg == &kzioseg) {
650 650 /*
651 651 * We don't want to dump pages attached to kzioseg since they
652 652 * contain file data from ZFS. If this page's segment is
653 653 * kzioseg return instead of writing it to the dump device.
654 654 */
655 655 return;
656 656 } else {
657 657 segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
658 658 }
659 659 }
660 660
661 661 /*
662 662 * lock/unlock kmem pages over a given range [addr, addr+len).
663 663 * Returns a shadow list of pages in ppp. If there are holes
664 664 * in the range (e.g. some of the kernel mappings do not have
665 665 * underlying page_ts) returns ENOTSUP so that as_pagelock()
666 666 * will handle the range via as_fault(F_SOFTLOCK).
667 667 */
668 668 /*ARGSUSED*/
669 669 static int
670 670 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
671 671 page_t ***ppp, enum lock_type type, enum seg_rw rw)
672 672 {
673 673 page_t **pplist, *pp;
674 674 pgcnt_t npages;
675 675 spgcnt_t pg;
676 676 size_t nb;
677 677 struct vnode *vp = seg->s_data;
678 678
679 679 ASSERT(ppp != NULL);
680 680
681 681 /*
682 682 * If it is one of segkp pages, call into segkp.
683 683 */
684 684 if (segkp_bitmap && seg == &kvseg &&
685 685 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
686 686 return (segop_pagelock(segkp, addr, len, ppp, type, rw));
687 687
688 688 npages = btopr(len);
689 689 nb = sizeof (page_t *) * npages;
690 690
691 691 if (type == L_PAGEUNLOCK) {
692 692 pplist = *ppp;
693 693 ASSERT(pplist != NULL);
694 694
695 695 for (pg = 0; pg < npages; pg++) {
696 696 pp = pplist[pg];
697 697 page_unlock(pp);
698 698 }
699 699 kmem_free(pplist, nb);
700 700 return (0);
701 701 }
702 702
703 703 ASSERT(type == L_PAGELOCK);
704 704
705 705 pplist = kmem_alloc(nb, KM_NOSLEEP);
706 706 if (pplist == NULL) {
707 707 *ppp = NULL;
708 708 return (ENOTSUP); /* take the slow path */
709 709 }
710 710
711 711 for (pg = 0; pg < npages; pg++) {
712 712 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
713 713 if (pp == NULL) {
714 714 while (--pg >= 0)
715 715 page_unlock(pplist[pg]);
716 716 kmem_free(pplist, nb);
717 717 *ppp = NULL;
718 718 return (ENOTSUP);
719 719 }
720 720 pplist[pg] = pp;
721 721 addr += PAGESIZE;
722 722 }
723 723
724 724 *ppp = pplist;
725 725 return (0);
726 726 }
727 727
728 728 /*
729 729 * This is a dummy segkmem function overloaded to call segkp
730 730 * when segkp is under the heap.
731 731 */
732 732 /* ARGSUSED */
733 733 static int
734 734 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
735 735 {
736 736 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
737 737
738 738 if (seg->s_as != &kas)
739 739 panic("segkmem badop");
740 740
741 741 /*
742 742 * If it is one of segkp pages, call into segkp.
743 743 */
744 744 if (segkp_bitmap && seg == &kvseg &&
745 745 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
746 746 return (segop_getmemid(segkp, addr, memidp));
747 747
748 748 panic("segkmem badop");
749 749 return (0);
750 750 }
751 751
752 752 /*ARGSUSED*/
753 753 static lgrp_mem_policy_info_t *
754 754 segkmem_getpolicy(struct seg *seg, caddr_t addr)
755 755 {
756 756 return (NULL);
757 757 }
758 758
759 759 /*ARGSUSED*/
760 760 static int
761 761 segkmem_capable(struct seg *seg, segcapability_t capability)
762 762 {
763 763 if (capability == S_CAPABILITY_NOMINFLT)
764 764 return (1);
765 765 return (0);
766 766 }
767 767
768 768 static struct seg_ops segkmem_ops = {
769 769 .fault = segkmem_fault,
770 770 .setprot = segkmem_setprot,
771 771 .checkprot = segkmem_checkprot,
772 772 .kluster = segkmem_kluster,
773 773 .dump = segkmem_dump,
774 774 .pagelock = segkmem_pagelock,
775 775 .getmemid = segkmem_getmemid,
776 776 .getpolicy = segkmem_getpolicy,
777 777 .capable = segkmem_capable,
778 - .inherit = seg_inherit_notsup,
779 778 };
780 779
781 780 int
782 781 segkmem_zio_create(struct seg *seg)
783 782 {
784 783 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
785 784 seg->s_ops = &segkmem_ops;
786 785 seg->s_data = &zvp;
787 786 kas.a_size += seg->s_size;
788 787 return (0);
789 788 }
790 789
791 790 int
792 791 segkmem_create(struct seg *seg)
793 792 {
794 793 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
795 794 seg->s_ops = &segkmem_ops;
796 795 seg->s_data = &kvp;
797 796 kas.a_size += seg->s_size;
798 797 return (0);
799 798 }
800 799
801 800 /*ARGSUSED*/
802 801 page_t *
803 802 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
804 803 {
805 804 struct seg kseg;
806 805 int pgflags;
807 806 struct vnode *vp = arg;
808 807
809 808 if (vp == NULL)
810 809 vp = &kvp;
811 810
812 811 kseg.s_as = &kas;
813 812 pgflags = PG_EXCL;
814 813
815 814 if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
816 815 pgflags |= PG_NORELOC;
817 816 if ((vmflag & VM_NOSLEEP) == 0)
818 817 pgflags |= PG_WAIT;
819 818 if (vmflag & VM_PANIC)
820 819 pgflags |= PG_PANIC;
821 820 if (vmflag & VM_PUSHPAGE)
822 821 pgflags |= PG_PUSHPAGE;
823 822 if (vmflag & VM_NORMALPRI) {
824 823 ASSERT(vmflag & VM_NOSLEEP);
825 824 pgflags |= PG_NORMALPRI;
826 825 }
827 826
828 827 return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
829 828 pgflags, &kseg, addr));
830 829 }
831 830
832 831 /*
833 832 * Allocate pages to back the virtual address range [addr, addr + size).
834 833 * If addr is NULL, allocate the virtual address space as well.
835 834 */
836 835 void *
837 836 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
838 837 page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
839 838 {
840 839 page_t *ppl;
841 840 caddr_t addr = inaddr;
842 841 pgcnt_t npages = btopr(size);
843 842 int allocflag;
844 843
845 844 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
846 845 return (NULL);
847 846
848 847 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
849 848
850 849 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
851 850 if (inaddr == NULL)
852 851 vmem_free(vmp, addr, size);
853 852 return (NULL);
854 853 }
855 854
856 855 ppl = page_create_func(addr, size, vmflag, pcarg);
857 856 if (ppl == NULL) {
858 857 if (inaddr == NULL)
859 858 vmem_free(vmp, addr, size);
860 859 page_unresv(npages);
861 860 return (NULL);
862 861 }
863 862
864 863 /*
865 864 * Under certain conditions, we need to let the HAT layer know
866 865 * that it cannot safely allocate memory. Allocations from
867 866 * the hat_memload vmem arena always need this, to prevent
868 867 * infinite recursion.
869 868 *
870 869 * In addition, the x86 hat cannot safely do memory
871 870 * allocations while in vmem_populate(), because there
872 871 * is no simple bound on its usage.
873 872 */
874 873 if (vmflag & VM_MEMLOAD)
875 874 allocflag = HAT_NO_KALLOC;
876 875 #if defined(__x86)
877 876 else if (vmem_is_populator())
878 877 allocflag = HAT_NO_KALLOC;
879 878 #endif
880 879 else
881 880 allocflag = 0;
882 881
883 882 while (ppl != NULL) {
884 883 page_t *pp = ppl;
885 884 page_sub(&ppl, pp);
886 885 ASSERT(page_iolock_assert(pp));
887 886 ASSERT(PAGE_EXCL(pp));
888 887 page_io_unlock(pp);
889 888 hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
890 889 (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
891 890 HAT_LOAD_LOCK | allocflag);
892 891 pp->p_lckcnt = 1;
893 892 #if defined(__x86)
894 893 page_downgrade(pp);
895 894 #else
896 895 if (vmflag & SEGKMEM_SHARELOCKED)
897 896 page_downgrade(pp);
898 897 else
899 898 page_unlock(pp);
900 899 #endif
901 900 }
902 901
903 902 return (addr);
904 903 }
905 904
906 905 static void *
907 906 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
908 907 {
909 908 void *addr;
910 909 segkmem_gc_list_t *gcp, **prev_gcpp;
911 910
912 911 ASSERT(vp != NULL);
913 912
914 913 if (kvseg.s_base == NULL) {
915 914 #ifndef __sparc
916 915 if (bootops->bsys_alloc == NULL)
917 916 halt("Memory allocation between bop_alloc() and "
918 917 "kmem_alloc().\n");
919 918 #endif
920 919
921 920 /*
922 921 * There's not a lot of memory to go around during boot,
923 922 * so recycle it if we can.
924 923 */
925 924 for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
926 925 prev_gcpp = &gcp->gc_next) {
927 926 if (gcp->gc_arena == vmp && gcp->gc_size == size) {
928 927 *prev_gcpp = gcp->gc_next;
929 928 return (gcp);
930 929 }
931 930 }
932 931
933 932 addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
934 933 if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
935 934 panic("segkmem_alloc: boot_alloc failed");
936 935 return (addr);
937 936 }
938 937 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
939 938 segkmem_page_create, vp));
940 939 }
941 940
942 941 void *
943 942 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
944 943 {
945 944 return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
946 945 }
947 946
948 947 void *
949 948 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
950 949 {
951 950 return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
952 951 }
953 952
954 953 /*
955 954 * Any changes to this routine must also be carried over to
956 955 * devmap_free_pages() in the seg_dev driver. This is because
957 956 * we currently don't have a special kernel segment for non-paged
958 957 * kernel memory that is exported by drivers to user space.
959 958 */
960 959 static void
961 960 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
962 961 void (*func)(page_t *))
963 962 {
964 963 page_t *pp;
965 964 caddr_t addr = inaddr;
966 965 caddr_t eaddr;
967 966 pgcnt_t npages = btopr(size);
968 967
969 968 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
970 969 ASSERT(vp != NULL);
971 970
972 971 if (kvseg.s_base == NULL) {
973 972 segkmem_gc_list_t *gc = inaddr;
974 973 gc->gc_arena = vmp;
975 974 gc->gc_size = size;
976 975 gc->gc_next = segkmem_gc_list;
977 976 segkmem_gc_list = gc;
978 977 return;
979 978 }
980 979
981 980 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
982 981
983 982 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
984 983 #if defined(__x86)
985 984 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
986 985 if (pp == NULL)
987 986 panic("segkmem_free: page not found");
988 987 if (!page_tryupgrade(pp)) {
989 988 /*
990 989 * Some other thread has a sharelock. Wait for
991 990 * it to drop the lock so we can free this page.
992 991 */
993 992 page_unlock(pp);
994 993 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
995 994 SE_EXCL);
996 995 }
997 996 #else
998 997 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
999 998 #endif
1000 999 if (pp == NULL)
1001 1000 panic("segkmem_free: page not found");
1002 1001 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
1003 1002 pp->p_lckcnt = 0;
1004 1003 if (func)
1005 1004 func(pp);
1006 1005 else
1007 1006 page_destroy(pp, 0);
1008 1007 }
1009 1008 if (func == NULL)
1010 1009 page_unresv(npages);
1011 1010
1012 1011 if (vmp != NULL)
1013 1012 vmem_free(vmp, inaddr, size);
1014 1013
1015 1014 }
1016 1015
1017 1016 void
1018 1017 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1019 1018 {
1020 1019 segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1021 1020 }
1022 1021
1023 1022 void
1024 1023 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1025 1024 {
1026 1025 segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1027 1026 }
1028 1027
1029 1028 void
1030 1029 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1031 1030 {
1032 1031 segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
1033 1032 }
1034 1033
1035 1034 void
1036 1035 segkmem_gc(void)
1037 1036 {
1038 1037 ASSERT(kvseg.s_base != NULL);
1039 1038 while (segkmem_gc_list != NULL) {
1040 1039 segkmem_gc_list_t *gc = segkmem_gc_list;
1041 1040 segkmem_gc_list = gc->gc_next;
1042 1041 segkmem_free(gc->gc_arena, gc, gc->gc_size);
1043 1042 }
1044 1043 }
1045 1044
1046 1045 /*
1047 1046 * Legacy entry points from here to end of file.
1048 1047 */
1049 1048 void
1050 1049 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1051 1050 pfn_t pfn, uint_t flags)
1052 1051 {
1053 1052 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1054 1053 hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1055 1054 flags | HAT_LOAD_LOCK);
1056 1055 }
1057 1056
1058 1057 void
1059 1058 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1060 1059 {
1061 1060 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1062 1061 }
1063 1062
1064 1063 void *
1065 1064 kmem_getpages(pgcnt_t npages, int kmflag)
1066 1065 {
1067 1066 return (kmem_alloc(ptob(npages), kmflag));
1068 1067 }
1069 1068
1070 1069 void
1071 1070 kmem_freepages(void *addr, pgcnt_t npages)
1072 1071 {
1073 1072 kmem_free(addr, ptob(npages));
1074 1073 }
1075 1074
1076 1075 /*
1077 1076 * segkmem_page_create_large() allocates a large page to be used for the kmem
1078 1077 * caches. If kpr is enabled we ask for a relocatable page unless requested
1079 1078 * otherwise. If kpr is disabled we have to ask for a non-reloc page.
1080 1079 */
1081 1080 static page_t *
1082 1081 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1083 1082 {
1084 1083 int pgflags;
1085 1084
1086 1085 pgflags = PG_EXCL;
1087 1086
1088 1087 if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1089 1088 pgflags |= PG_NORELOC;
1090 1089 if (!(vmflag & VM_NOSLEEP))
1091 1090 pgflags |= PG_WAIT;
1092 1091 if (vmflag & VM_PUSHPAGE)
1093 1092 pgflags |= PG_PUSHPAGE;
1094 1093 if (vmflag & VM_NORMALPRI)
1095 1094 pgflags |= PG_NORMALPRI;
1096 1095
1097 1096 return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1098 1097 pgflags, &kvseg, addr, arg));
1099 1098 }
1100 1099
1101 1100 /*
1102 1101 * Allocate a large page to back the virtual address range
1103 1102 * [addr, addr + size). If addr is NULL, allocate the virtual address
1104 1103 * space as well.
1105 1104 */
1106 1105 static void *
1107 1106 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1108 1107 uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1109 1108 void *pcarg)
1110 1109 {
1111 1110 caddr_t addr = inaddr, pa;
1112 1111 size_t lpsize = segkmem_lpsize;
1113 1112 pgcnt_t npages = btopr(size);
1114 1113 pgcnt_t nbpages = btop(lpsize);
1115 1114 pgcnt_t nlpages = size >> segkmem_lpshift;
1116 1115 size_t ppasize = nbpages * sizeof (page_t *);
1117 1116 page_t *pp, *rootpp, **ppa, *pplist = NULL;
1118 1117 int i;
1119 1118
1120 1119 vmflag |= VM_NOSLEEP;
1121 1120
1122 1121 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1123 1122 return (NULL);
1124 1123 }
1125 1124
1126 1125 /*
1127 1126 * allocate an array we need for hat_memload_array.
1128 1127 * we use a separate arena to avoid recursion.
1129 1128 * we will not need this array when hat_memload_array learns pp++
1130 1129 */
1131 1130 if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1132 1131 goto fail_array_alloc;
1133 1132 }
1134 1133
1135 1134 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1136 1135 goto fail_vmem_alloc;
1137 1136
1138 1137 ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1139 1138
1140 1139 /* create all the pages */
1141 1140 for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1142 1141 if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1143 1142 goto fail_page_create;
1144 1143 page_list_concat(&pplist, &pp);
1145 1144 }
1146 1145
1147 1146 /* at this point we have all the resources to complete the request */
1148 1147 while ((rootpp = pplist) != NULL) {
1149 1148 for (i = 0; i < nbpages; i++) {
1150 1149 ASSERT(pplist != NULL);
1151 1150 pp = pplist;
1152 1151 page_sub(&pplist, pp);
1153 1152 ASSERT(page_iolock_assert(pp));
1154 1153 page_io_unlock(pp);
1155 1154 ppa[i] = pp;
1156 1155 }
1157 1156 /*
1158 1157 * Load the locked entry. It's OK to preload the entry into the
1159 1158 * TSB since we now support large mappings in the kernel TSB.
1160 1159 */
1161 1160 hat_memload_array(kas.a_hat,
1162 1161 (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1163 1162 ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1164 1163 HAT_LOAD_LOCK);
1165 1164
1166 1165 for (--i; i >= 0; --i) {
1167 1166 ppa[i]->p_lckcnt = 1;
1168 1167 page_unlock(ppa[i]);
1169 1168 }
1170 1169 }
1171 1170
1172 1171 vmem_free(segkmem_ppa_arena, ppa, ppasize);
1173 1172 return (addr);
1174 1173
1175 1174 fail_page_create:
1176 1175 while ((rootpp = pplist) != NULL) {
1177 1176 for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1178 1177 ASSERT(pp != NULL);
1179 1178 page_sub(&pplist, pp);
1180 1179 ASSERT(page_iolock_assert(pp));
1181 1180 page_io_unlock(pp);
1182 1181 }
1183 1182 page_destroy_pages(rootpp);
1184 1183 }
1185 1184
1186 1185 if (inaddr == NULL)
1187 1186 vmem_free(vmp, addr, size);
1188 1187
1189 1188 fail_vmem_alloc:
1190 1189 vmem_free(segkmem_ppa_arena, ppa, ppasize);
1191 1190
1192 1191 fail_array_alloc:
1193 1192 page_unresv(npages);
1194 1193
1195 1194 return (NULL);
1196 1195 }
1197 1196
1198 1197 static void
1199 1198 segkmem_free_one_lp(caddr_t addr, size_t size)
1200 1199 {
1201 1200 page_t *pp, *rootpp = NULL;
1202 1201 pgcnt_t pgs_left = btopr(size);
1203 1202
1204 1203 ASSERT(size == segkmem_lpsize);
1205 1204
1206 1205 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1207 1206
1208 1207 for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1209 1208 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1210 1209 if (pp == NULL)
1211 1210 panic("segkmem_free_one_lp: page not found");
1212 1211 ASSERT(PAGE_EXCL(pp));
1213 1212 pp->p_lckcnt = 0;
1214 1213 if (rootpp == NULL)
1215 1214 rootpp = pp;
1216 1215 }
1217 1216 ASSERT(rootpp != NULL);
1218 1217 page_destroy_pages(rootpp);
1219 1218
1220 1219 /* page_unresv() is done by the caller */
1221 1220 }
1222 1221
1223 1222 /*
1224 1223 * This function is called to import new spans into the vmem arenas like
1225 1224 * kmem_default_arena and kmem_oversize_arena. It first tries to import
1226 1225 * spans from the large page arena, kmem_lp_arena. In order to do this it might
1227 1226 * have to "upgrade" the requested size to the kmem_lp_arena quantum. If
1228 1227 * it is not able to satisfy the upgraded request, it then calls the regular
1229 1228 * segkmem_alloc(), which satisfies the request by importing from the "*vmp" arena.
1230 1229 */
1231 1230 /*ARGSUSED*/
1232 1231 void *
1233 1232 segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
1234 1233 {
1235 1234 size_t size;
1236 1235 kthread_t *t = curthread;
1237 1236 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1238 1237
1239 1238 ASSERT(sizep != NULL);
1240 1239
1241 1240 size = *sizep;
1242 1241
1243 1242 if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1244 1243 !(vmflag & SEGKMEM_SHARELOCKED)) {
1245 1244
1246 1245 size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1247 1246 size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1248 1247 void *addr = NULL;
1249 1248 ulong_t *lpthrtp = &lpcb->lp_throttle;
1250 1249 ulong_t lpthrt = *lpthrtp;
1251 1250 int dowakeup = 0;
1252 1251 int doalloc = 1;
1253 1252
1254 1253 ASSERT(kmem_lp_arena != NULL);
1255 1254 ASSERT(asize >= size);
1256 1255
1257 1256 if (lpthrt != 0) {
1258 1257 /* try to update the throttle value */
1259 1258 lpthrt = atomic_inc_ulong_nv(lpthrtp);
1260 1259 if (lpthrt >= segkmem_lpthrottle_max) {
1261 1260 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1262 1261 segkmem_lpthrottle_max / 4);
1263 1262 }
1264 1263
1265 1264 /*
1266 1265 * when we get above throttle start do an exponential
1267 1266 * When we get above the throttle start value, do an exponential
1268 1267 * backoff on trying large pages and reaping.
1269 1268 if (lpthrt > segkmem_lpthrottle_start &&
1270 1269 !ISP2(lpthrt)) {
1271 1270 lpcb->allocs_throttled++;
1272 1271 lpthrt--;
1273 1272 if (ISP2(lpthrt))
1274 1273 kmem_reap();
1275 1274 return (segkmem_alloc(vmp, size, vmflag));
1276 1275 }
1277 1276 }
1278 1277
1279 1278 if (!(vmflag & VM_NOSLEEP) &&
1280 1279 segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1281 1280 vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1282 1281 asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1283 1282
1284 1283 /*
1285 1284 * We are low on free memory in kmem_lp_arena, so we
1286 1285 * let only one thread allocate a heap_lp
1287 1286 * quantum-size chunk that everybody is going to
1288 1287 * share.
1289 1288 */
1290 1289 mutex_enter(&lpcb->lp_lock);
1291 1290
1292 1291 if (lpcb->lp_wait) {
1293 1292
1294 1293 /* we are not the first one - wait */
1295 1294 cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1296 1295 if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1297 1296 kmemlp_qnt) {
1298 1297 doalloc = 0;
1299 1298 }
1300 1299 } else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1301 1300 kmemlp_qnt) {
1302 1301
1303 1302 /*
1304 1303 * we are the first one, make sure we import
1305 1304 * a large page
1306 1305 */
1307 1306 if (asize == kmemlp_qnt)
1308 1307 asize += kmemlp_qnt;
1309 1308 dowakeup = 1;
1310 1309 lpcb->lp_wait = 1;
1311 1310 }
1312 1311
1313 1312 mutex_exit(&lpcb->lp_lock);
1314 1313 }
1315 1314
1316 1315 /*
1317 1316 * VM_ABORT flag prevents sleeps in vmem_xalloc when
1318 1317 * large pages are not available. In that case this allocation
1319 1318 * attempt will fail and we will retry allocation with small
1320 1319 * pages. We also do not want to panic if this allocation fails
1321 1320 * because we are going to retry.
1322 1321 */
1323 1322 if (doalloc) {
1324 1323 addr = vmem_alloc(kmem_lp_arena, asize,
1325 1324 (vmflag | VM_ABORT) & ~VM_PANIC);
1326 1325
1327 1326 if (dowakeup) {
1328 1327 mutex_enter(&lpcb->lp_lock);
1329 1328 ASSERT(lpcb->lp_wait != 0);
1330 1329 lpcb->lp_wait = 0;
1331 1330 cv_broadcast(&lpcb->lp_cv);
1332 1331 mutex_exit(&lpcb->lp_lock);
1333 1332 }
1334 1333 }
1335 1334
1336 1335 if (addr != NULL) {
1337 1336 *sizep = asize;
1338 1337 *lpthrtp = 0;
1339 1338 return (addr);
1340 1339 }
1341 1340
1342 1341 if (vmflag & VM_NOSLEEP)
1343 1342 lpcb->nosleep_allocs_failed++;
1344 1343 else
1345 1344 lpcb->sleep_allocs_failed++;
1346 1345 lpcb->alloc_bytes_failed += size;
1347 1346
1348 1347 /* if large page throttling is not started yet do it */
1349 1348 if (segkmem_use_lpthrottle && lpthrt == 0) {
1350 1349 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1351 1350 }
1352 1351 }
1353 1352 return (segkmem_alloc(vmp, size, vmflag));
1354 1353 }
1355 1354
1356 1355 void
1357 1356 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1358 1357 {
1359 1358 if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1360 1359 segkmem_free(vmp, inaddr, size);
1361 1360 } else {
1362 1361 vmem_free(kmem_lp_arena, inaddr, size);
1363 1362 }
1364 1363 }
1365 1364
1366 1365 /*
1367 1366 * segkmem_alloc_lpi() imports virtual memory from large page heap arena
1368 1367 * into kmem_lp arena. In the process it maps the imported segment with
1369 1368 * large pages
1370 1369 */
1371 1370 static void *
1372 1371 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1373 1372 {
1374 1373 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1375 1374 void *addr;
1376 1375
1377 1376 ASSERT(size != 0);
1378 1377 ASSERT(vmp == heap_lp_arena);
1379 1378
1380 1379 /* do not allow the large page heap to grow beyond its limits */
1381 1380 if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1382 1381 lpcb->allocs_limited++;
1383 1382 return (NULL);
1384 1383 }
1385 1384
1386 1385 addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1387 1386 segkmem_page_create_large, NULL);
1388 1387 return (addr);
1389 1388 }
1390 1389
1391 1390 /*
1392 1391 * segkmem_free_lpi() returns virtual memory back into large page heap arena
1393 1392 * from kmem_lp arena. Before doing this it unmaps the segment and frees the
1394 1393 * large pages used to map it.
1395 1394 */
1396 1395 static void
1397 1396 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1398 1397 {
1399 1398 pgcnt_t nlpages = size >> segkmem_lpshift;
1400 1399 size_t lpsize = segkmem_lpsize;
1401 1400 caddr_t addr = inaddr;
1402 1401 pgcnt_t npages = btopr(size);
1403 1402 int i;
1404 1403
1405 1404 ASSERT(vmp == heap_lp_arena);
1406 1405 ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1407 1406 ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1408 1407
1409 1408 for (i = 0; i < nlpages; i++) {
1410 1409 segkmem_free_one_lp(addr, lpsize);
1411 1410 addr += lpsize;
1412 1411 }
1413 1412
1414 1413 page_unresv(npages);
1415 1414
1416 1415 vmem_free(vmp, inaddr, size);
1417 1416 }
1418 1417
1419 1418 /*
1420 1419 * This function is called at system boot time by kmem_init right after
1421 1420 * /etc/system file has been read. It checks based on hardware configuration
1422 1421 * and /etc/system settings if system is going to use large pages. The
1423 1422 * and /etc/system settings whether the system is going to use large pages.
1424 1423 * The initialization necessary to actually start using large pages
1425 1424 */
1426 1425 int
1427 1426 segkmem_lpsetup()
1428 1427 {
1429 1428 int use_large_pages = 0;
1430 1429
1431 1430 #ifdef __sparc
1432 1431
1433 1432 size_t memtotal = physmem * PAGESIZE;
1434 1433
1435 1434 if (heap_lp_base == NULL) {
1436 1435 segkmem_lpsize = PAGESIZE;
1437 1436 return (0);
1438 1437 }
1439 1438
1440 1439 /* get a platform dependent value of large page size for kernel heap */
1441 1440 segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1442 1441
1443 1442 if (segkmem_lpsize <= PAGESIZE) {
1444 1443 /*
1445 1444 * put the virtual space reserved for the large page kernel
1446 1445 * heap back into the regular heap
1447 1446 */
1448 1447 vmem_xfree(heap_arena, heap_lp_base,
1449 1448 heap_lp_end - heap_lp_base);
1450 1449 heap_lp_base = NULL;
1451 1450 heap_lp_end = NULL;
1452 1451 segkmem_lpsize = PAGESIZE;
1453 1452 return (0);
1454 1453 }
1455 1454
1456 1455 /* set heap_lp quantum if necessary */
1457 1456 if (segkmem_heaplp_quantum == 0 || !ISP2(segkmem_heaplp_quantum) ||
1458 1457 P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1459 1458 segkmem_heaplp_quantum = segkmem_lpsize;
1460 1459 }
1461 1460
1462 1461 /* set kmem_lp quantum if necessary */
1463 1462 if (segkmem_kmemlp_quantum == 0 || !ISP2(segkmem_kmemlp_quantum) ||
1464 1463 segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1465 1464 segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1466 1465 }
1467 1466
1468 1467 /* set total amount of memory allowed for large page kernel heap */
1469 1468 if (segkmem_kmemlp_max == 0) {
1470 1469 if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1471 1470 segkmem_kmemlp_pcnt = 12;
1472 1471 segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1473 1472 }
1474 1473 segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1475 1474 segkmem_heaplp_quantum);
1476 1475
1477 1476 /* fix lp kmem preallocation request if necessary */
1478 1477 if (segkmem_kmemlp_min) {
1479 1478 segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1480 1479 segkmem_heaplp_quantum);
1481 1480 if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1482 1481 segkmem_kmemlp_min = segkmem_kmemlp_max;
1483 1482 }
1484 1483
1485 1484 use_large_pages = 1;
1486 1485 segkmem_lpszc = page_szc(segkmem_lpsize);
1487 1486 segkmem_lpshift = page_get_shift(segkmem_lpszc);
1488 1487
1489 1488 #endif
1490 1489 return (use_large_pages);
1491 1490 }
1492 1491
1493 1492 void
1494 1493 segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
1495 1494 {
1496 1495 ASSERT(zio_mem_base != NULL);
1497 1496 ASSERT(zio_mem_size != 0);
1498 1497
1499 1498 /*
1500 1499 * To reduce VA space fragmentation, we set up quantum caches for the
1501 1500 * smaller sizes; we chose 32k because that translates to 128k VA
1502 1501 * slabs, which matches nicely with the common 128k zio_data bufs.
1503 1502 */
1504 1503 zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
1505 1504 PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);
1506 1505
1507 1506 zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
1508 1507 segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);
1509 1508
1510 1509 ASSERT(zio_arena != NULL);
1511 1510 ASSERT(zio_alloc_arena != NULL);
1512 1511 }
1513 1512
1514 1513 #ifdef __sparc
1515 1514
1516 1515
1517 1516 static void *
1518 1517 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1519 1518 {
1520 1519 size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1521 1520 void *addr;
1522 1521
1523 1522 if (ppaquantum <= PAGESIZE)
1524 1523 return (segkmem_alloc(vmp, size, vmflag));
1525 1524
1526 1525 ASSERT((size & (ppaquantum - 1)) == 0);
1527 1526
1528 1527 addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1529 1528 if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1530 1529 segkmem_page_create, NULL) == NULL) {
1531 1530 vmem_xfree(vmp, addr, size);
1532 1531 addr = NULL;
1533 1532 }
1534 1533
1535 1534 return (addr);
1536 1535 }
1537 1536
1538 1537 static void
1539 1538 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1540 1539 {
1541 1540 size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1542 1541
1543 1542 ASSERT(addr != NULL);
1544 1543
1545 1544 if (ppaquantum <= PAGESIZE) {
1546 1545 segkmem_free(vmp, addr, size);
1547 1546 } else {
1548 1547 segkmem_free(NULL, addr, size);
1549 1548 vmem_xfree(vmp, addr, size);
1550 1549 }
1551 1550 }
1552 1551
1553 1552 void
1554 1553 segkmem_heap_lp_init()
1555 1554 {
1556 1555 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1557 1556 size_t heap_lp_size = heap_lp_end - heap_lp_base;
1558 1557 size_t lpsize = segkmem_lpsize;
1559 1558 size_t ppaquantum;
1560 1559 void *addr;
1561 1560
1562 1561 if (segkmem_lpsize <= PAGESIZE) {
1563 1562 ASSERT(heap_lp_base == NULL);
1564 1563 ASSERT(heap_lp_end == NULL);
1565 1564 return;
1566 1565 }
1567 1566
1568 1567 ASSERT(segkmem_heaplp_quantum >= lpsize);
1569 1568 ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1570 1569 ASSERT(lpcb->lp_uselp == 0);
1571 1570 ASSERT(heap_lp_base != NULL);
1572 1571 ASSERT(heap_lp_end != NULL);
1573 1572 ASSERT(heap_lp_base < heap_lp_end);
1574 1573 ASSERT(heap_lp_arena == NULL);
1575 1574 ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1576 1575 ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1577 1576
1578 1577 /* create large page heap arena */
1579 1578 heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1580 1579 segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1581 1580
1582 1581 ASSERT(heap_lp_arena != NULL);
1583 1582
1584 1583 /* This arena caches memory already mapped by large pages */
1585 1584 kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1586 1585 segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1587 1586
1588 1587 ASSERT(kmem_lp_arena != NULL);
1589 1588
1590 1589 mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1591 1590 cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1592 1591
1593 1592 /*
1594 1593 * this arena is used for the array of page_t pointers necessary
1595 1594 * to call hat_memload_array()
1596 1595 */
1597 1596 ppaquantum = btopr(lpsize) * sizeof (page_t *);
1598 1597 segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1599 1598 segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1600 1599 VM_SLEEP);
1601 1600
1602 1601 ASSERT(segkmem_ppa_arena != NULL);
1603 1602
1604 1603 /* preallocate some memory for the lp kernel heap */
1605 1604 if (segkmem_kmemlp_min) {
1606 1605
1607 1606 ASSERT(P2PHASE(segkmem_kmemlp_min,
1608 1607 segkmem_heaplp_quantum) == 0);
1609 1608
1610 1609 if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1611 1610 segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1612 1611
1613 1612 addr = vmem_add(kmem_lp_arena, addr,
1614 1613 segkmem_kmemlp_min, VM_SLEEP);
1615 1614 ASSERT(addr != NULL);
1616 1615 }
1617 1616 }
1618 1617
1619 1618 lpcb->lp_uselp = 1;
1620 1619 }
1621 1620
1622 1621 #endif