no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic. Keeping the
function pointer NULL will accomplish the same thing in most cases; in the remaining
cases, keeping the function pointer NULL will result in the proper error code being
returned.
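For illustration only (this sketch is not part of the webrev): the seg layer's
segop_*() dispatch wrappers are what determine how a NULL entry in a driver's
seg_ops behaves. A minimal sketch, assuming hypothetical wrapper bodies and error
choices rather than the tree's actual ones, might look like this:

    #include <sys/errno.h>
    #include <sys/cmn_err.h>
    #include <vm/seg.h>

    /*
     * Hypothetical dispatch wrappers: a NULL op either panics (matching the
     * old segkmem_badop() behavior) or returns an error code where one is
     * defined.  The real wrappers in the seg layer may differ.
     */
    int
    segop_dup(struct seg *seg, struct seg *newseg)
    {
    	if (seg->s_ops->dup == NULL)
    		panic("segop_dup: seg %p has no dup op", (void *)seg);
    	return (seg->s_ops->dup(seg, newseg));
    }

    int
    segop_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
    {
    	if (seg->s_ops->sync == NULL)
    		return (ENOTSUP);
    	return (seg->s_ops->sync(seg, addr, len, attr, flags));
    }

Because segkmem_ops below uses C99 designated initializers, any member not listed
(e.g. .dup, .sync, .setpagesize) is implicitly initialized to NULL, which is why the
SEGKMEM_BADOP entries can simply be dropped.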
--- old/usr/src/uts/common/vm/seg_kmem.c
+++ new/usr/src/uts/common/vm/seg_kmem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/types.h>
26 26 #include <sys/t_lock.h>
27 27 #include <sys/param.h>
28 28 #include <sys/sysmacros.h>
29 29 #include <sys/tuneable.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/vm.h>
32 32 #include <sys/kmem.h>
33 33 #include <sys/vmem.h>
34 34 #include <sys/mman.h>
35 35 #include <sys/cmn_err.h>
36 36 #include <sys/debug.h>
37 37 #include <sys/dumphdr.h>
38 38 #include <sys/bootconf.h>
39 39 #include <sys/lgrp.h>
40 40 #include <vm/seg_kmem.h>
41 41 #include <vm/hat.h>
42 42 #include <vm/page.h>
43 43 #include <vm/vm_dep.h>
44 44 #include <vm/faultcode.h>
45 45 #include <sys/promif.h>
46 46 #include <vm/seg_kp.h>
47 47 #include <sys/bitmap.h>
48 48 #include <sys/mem_cage.h>
49 49
50 50 #ifdef __sparc
51 51 #include <sys/ivintr.h>
52 52 #include <sys/panic.h>
53 53 #endif
54 54
55 55 /*
56 56 * seg_kmem is the primary kernel memory segment driver. It
57 57 * maps the kernel heap [kernelheap, ekernelheap), module text,
58 58 * and all memory which was allocated before the VM was initialized
59 59 * into kas.
60 60 *
61 61 * Pages which belong to seg_kmem are hashed into &kvp vnode at
62 62 * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
63 63 * They must never be paged out since segkmem_fault() is a no-op to
64 64 * prevent recursive faults.
65 65 *
66 66 * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
67 67 * __x86 and are unlocked (p_sharelock == 0) on __sparc. Once __x86
68 68 * supports relocation the #ifdef kludges can be removed.
69 69 *
70 70 * seg_kmem pages may be subject to relocation by page_relocate(),
71 71 * provided that the HAT supports it; if this is so, segkmem_reloc
72 72 * will be set to a nonzero value. All boot time allocated memory as
73 73 * well as static memory is considered off limits to relocation.
74 74 * Pages are "relocatable" if p_state does not have P_NORELOC set, so
75 75 * we request P_NORELOC pages for memory that isn't safe to relocate.
76 76 *
77 77 * The kernel heap is logically divided up into four pieces:
78 78 *
79 79 * heap32_arena is for allocations that require 32-bit absolute
80 80 * virtual addresses (e.g. code that uses 32-bit pointers/offsets).
81 81 *
82 82 * heap_core is for allocations that require 2GB *relative*
83 83 * offsets; in other words all memory from heap_core is within
84 84 * 2GB of all other memory from the same arena. This is a requirement
85 85 * of the addressing modes of some processors in supervisor code.
86 86 *
87 87 * heap_arena is the general heap arena.
88 88 *
89 89 * static_arena is the static memory arena. Allocations from it
90 90 * are not subject to relocation so it is safe to use the memory
91 91 * physical address as well as the virtual address (e.g. the VA to
92 92 * PA translations are static). Caches may import from static_arena;
93 93 * all other static memory allocations should use static_alloc_arena.
94 94 *
95 95 * On some platforms which have limited virtual address space, seg_kmem
96 96 * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
97 97 * segkp_bitmap is non-NULL, and each bit represents a page of virtual
98 98 * address space which is actually seg_kp mapped.
99 99 */
100 100
101 101 extern ulong_t *segkp_bitmap; /* Is set if segkp is from the kernel heap */
102 102
103 103 char *kernelheap; /* start of primary kernel heap */
104 104 char *ekernelheap; /* end of primary kernel heap */
105 105 struct seg kvseg; /* primary kernel heap segment */
106 106 struct seg kvseg_core; /* "core" kernel heap segment */
107 107 struct seg kzioseg; /* Segment for zio mappings */
108 108 vmem_t *heap_arena; /* primary kernel heap arena */
109 109 vmem_t *heap_core_arena; /* core kernel heap arena */
110 110 char *heap_core_base; /* start of core kernel heap arena */
111 111 char *heap_lp_base; /* start of kernel large page heap arena */
112 112 char *heap_lp_end; /* end of kernel large page heap arena */
113 113 vmem_t *hat_memload_arena; /* HAT translation data */
114 114 struct seg kvseg32; /* 32-bit kernel heap segment */
115 115 vmem_t *heap32_arena; /* 32-bit kernel heap arena */
116 116 vmem_t *heaptext_arena; /* heaptext arena */
117 117 struct as kas; /* kernel address space */
118 118 int segkmem_reloc; /* enable/disable relocatable segkmem pages */
119 119 vmem_t *static_arena; /* arena for caches to import static memory */
120 120 vmem_t *static_alloc_arena; /* arena for allocating static memory */
121 121 vmem_t *zio_arena = NULL; /* arena for allocating zio memory */
122 122 vmem_t *zio_alloc_arena = NULL; /* arena for allocating zio memory */
123 123
124 124 /*
125 125 * seg_kmem driver can map part of the kernel heap with large pages.
126 126 * Currently this functionality is implemented for sparc platforms only.
127 127 *
128 128 * The large page size "segkmem_lpsize" for kernel heap is selected in the
129 129 * platform specific code. It can also be modified via /etc/system file.
130 130 * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
131 131 * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
132 132 * match segkmem_lpsize.
133 133 *
134 134 * At boot time we carve from kernel heap arena a range of virtual addresses
135 135 * that will be used for large page mappings. This range [heap_lp_base,
136 136 * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena". We also
137 137 * create "kmem_lp_arena" that caches memory already backed up by large
138 138 * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
139 139 */
140 140
141 141 size_t segkmem_lpsize;
142 142 static uint_t segkmem_lpshift = PAGESHIFT;
143 143 int segkmem_lpszc = 0;
144 144
145 145 size_t segkmem_kmemlp_quantum = 0x400000; /* 4MB */
146 146 size_t segkmem_heaplp_quantum;
147 147 vmem_t *heap_lp_arena;
148 148 static vmem_t *kmem_lp_arena;
149 149 static vmem_t *segkmem_ppa_arena;
150 150 static segkmem_lpcb_t segkmem_lpcb;
151 151
152 152 /*
153 153 * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
154 154 * consumed by the large page heap. By default this parameter is set to 1/8 of
155 155 * physmem but can be adjusted through /etc/system either directly or
156 156 * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
157 157 * we allow for large page heap.
158 158 */
159 159 size_t segkmem_kmemlp_max;
160 160 static uint_t segkmem_kmemlp_pcnt;
161 161
162 162 /*
163 163 * Getting large pages for kernel heap could be problematic due to
164 164 * physical memory fragmentation. That's why we allow to preallocate
165 165 * "segkmem_kmemlp_min" bytes at boot time.
166 166 */
167 167 static size_t segkmem_kmemlp_min;
168 168
169 169 /*
170 170 * Throttling is used to avoid expensive tries to allocate large pages
171 171 * for kernel heap when a lot of succesive attempts to do so fail.
172 172 */
173 173 static ulong_t segkmem_lpthrottle_max = 0x400000;
174 174 static ulong_t segkmem_lpthrottle_start = 0x40;
175 175 static ulong_t segkmem_use_lpthrottle = 1;
176 176
177 177 /*
178 178 * Freed pages accumulate on a garbage list until segkmem is ready,
179 179 * at which point we call segkmem_gc() to free it all.
180 180 */
181 181 typedef struct segkmem_gc_list {
182 182 struct segkmem_gc_list *gc_next;
183 183 vmem_t *gc_arena;
184 184 size_t gc_size;
185 185 } segkmem_gc_list_t;
186 186
187 187 static segkmem_gc_list_t *segkmem_gc_list;
188 188
189 189 /*
190 190 * Allocations from the hat_memload arena add VM_MEMLOAD to their
191 191 * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
192 192 * to take steps to prevent infinite recursion. HAT allocations also
193 193 * must be non-relocatable to prevent recursive page faults.
194 194 */
195 195 static void *
196 196 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
197 197 {
198 198 flags |= (VM_MEMLOAD | VM_NORELOC);
199 199 return (segkmem_alloc(vmp, size, flags));
200 200 }
201 201
202 202 /*
203 203 * Allocations from static_arena arena (or any other arena that uses
204 204 * segkmem_alloc_permanent()) require non-relocatable (permanently
205 205 * wired) memory pages, since these pages are referenced by physical
206 206 * as well as virtual address.
207 207 */
208 208 void *
209 209 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
210 210 {
211 211 return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
212 212 }
213 213
214 214 /*
215 215 * Initialize kernel heap boundaries.
216 216 */
217 217 void
218 218 kernelheap_init(
219 219 void *heap_start,
220 220 void *heap_end,
221 221 char *first_avail,
222 222 void *core_start,
223 223 void *core_end)
224 224 {
225 225 uintptr_t textbase;
226 226 size_t core_size;
227 227 size_t heap_size;
228 228 vmem_t *heaptext_parent;
229 229 size_t heap_lp_size = 0;
230 230 #ifdef __sparc
231 231 size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
232 232 #endif /* __sparc */
233 233
234 234 kernelheap = heap_start;
235 235 ekernelheap = heap_end;
236 236
237 237 #ifdef __sparc
238 238 heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
239 239 /*
240 240 * Bias heap_lp start address by kmem64_sz to reduce collisions
241 241 * in 4M kernel TSB between kmem64 area and heap_lp
242 242 */
243 243 kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
244 244 if (kmem64_sz <= heap_lp_size / 2)
245 245 heap_lp_size -= kmem64_sz;
246 246 heap_lp_base = ekernelheap - heap_lp_size;
247 247 heap_lp_end = heap_lp_base + heap_lp_size;
248 248 #endif /* __sparc */
249 249
250 250 /*
251 251 * If this platform has a 'core' heap area, then the space for
252 252 * overflow module text should be carved out of the end of that
253 253 * heap. Otherwise, it gets carved out of the general purpose
254 254 * heap.
255 255 */
256 256 core_size = (uintptr_t)core_end - (uintptr_t)core_start;
257 257 if (core_size > 0) {
258 258 ASSERT(core_size >= HEAPTEXT_SIZE);
259 259 textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
260 260 core_size -= HEAPTEXT_SIZE;
261 261 }
262 262 #ifndef __sparc
263 263 else {
264 264 ekernelheap -= HEAPTEXT_SIZE;
265 265 textbase = (uintptr_t)ekernelheap;
266 266 }
267 267 #endif
268 268
269 269 heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
270 270 heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
271 271 segkmem_alloc, segkmem_free);
272 272
273 273 if (core_size > 0) {
274 274 heap_core_arena = vmem_create("heap_core", core_start,
275 275 core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
276 276 heap_core_base = core_start;
277 277 } else {
278 278 heap_core_arena = heap_arena;
279 279 heap_core_base = kernelheap;
280 280 }
281 281
282 282 /*
283 283 * reserve space for the large page heap. If large pages for kernel
284 284 * heap is enabled large page heap arean will be created later in the
285 285 * boot sequence in segkmem_heap_lp_init(). Otherwise the allocated
286 286 * range will be returned back to the heap_arena.
287 287 */
288 288 if (heap_lp_size) {
289 289 (void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
290 290 heap_lp_base, heap_lp_end,
291 291 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
292 292 }
293 293
294 294 /*
295 295 * Remove the already-spoken-for memory range [kernelheap, first_avail).
296 296 */
297 297 (void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
298 298 0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
299 299
300 300 #ifdef __sparc
301 301 heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
302 302 SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
303 303 NULL, NULL, 0, VM_SLEEP);
304 304 /*
305 305 * Prom claims the physical and virtual resources used by panicbuf
306 306 * and inter_vec_table. So reserve space for panicbuf, intr_vec_table,
307 307 * reserved interrupt vector data structures from 32-bit heap.
308 308 */
309 309 (void) vmem_xalloc(heap32_arena, PANICBUFSIZE, PAGESIZE, 0, 0,
310 310 panicbuf, panicbuf + PANICBUFSIZE,
311 311 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
312 312
313 313 (void) vmem_xalloc(heap32_arena, IVSIZE, PAGESIZE, 0, 0,
314 314 intr_vec_table, (caddr_t)intr_vec_table + IVSIZE,
315 315 VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
316 316
317 317 textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
318 318 heaptext_parent = NULL;
319 319 #else /* __sparc */
320 320 heap32_arena = heap_core_arena;
321 321 heaptext_parent = heap_core_arena;
322 322 #endif /* __sparc */
323 323
324 324 heaptext_arena = vmem_create("heaptext", (void *)textbase,
325 325 HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
326 326
327 327 /*
328 328 * Create a set of arenas for memory with static translations
329 329 * (e.g. VA -> PA translations cannot change). Since using
330 330 * kernel pages by physical address implies it isn't safe to
331 331 * walk across page boundaries, the static_arena quantum must
332 332 * be PAGESIZE. Any kmem caches that require static memory
333 333 * should source from static_arena, while direct allocations
334 334 * should only use static_alloc_arena.
335 335 */
336 336 static_arena = vmem_create("static", NULL, 0, PAGESIZE,
337 337 segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
338 338 static_alloc_arena = vmem_create("static_alloc", NULL, 0,
339 339 sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
340 340 0, VM_SLEEP);
341 341
342 342 /*
343 343 * Create an arena for translation data (ptes, hmes, or hblks).
344 344 * We need an arena for this because hat_memload() is essential
345 345 * to vmem_populate() (see comments in common/os/vmem.c).
346 346 *
347 347 * Note: any kmem cache that allocates from hat_memload_arena
348 348 * must be created as a KMC_NOHASH cache (i.e. no external slab
349 349 * and bufctl structures to allocate) so that slab creation doesn't
350 350 * require anything more than a single vmem_alloc().
351 351 */
352 352 hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
353 353 hat_memload_alloc, segkmem_free, heap_arena, 0,
354 354 VM_SLEEP | VMC_POPULATOR | VMC_DUMPSAFE);
355 355 }
356 356
357 357 void
358 358 boot_mapin(caddr_t addr, size_t size)
359 359 {
360 360 caddr_t eaddr;
361 361 page_t *pp;
362 362 pfn_t pfnum;
363 363
364 364 if (page_resv(btop(size), KM_NOSLEEP) == 0)
365 365 panic("boot_mapin: page_resv failed");
366 366
367 367 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
368 368 pfnum = va_to_pfn(addr);
369 369 if (pfnum == PFN_INVALID)
370 370 continue;
371 371 if ((pp = page_numtopp_nolock(pfnum)) == NULL)
372 372 panic("boot_mapin(): No pp for pfnum = %lx", pfnum);
373 373
374 374 /*
375 375 * must break up any large pages that may have constituent
376 376 * pages being utilized for BOP_ALLOC()'s before calling
377 377 * page_numtopp().The locking code (ie. page_reclaim())
378 378 * can't handle them
379 379 */
380 380 if (pp->p_szc != 0)
381 381 page_boot_demote(pp);
382 382
383 383 pp = page_numtopp(pfnum, SE_EXCL);
384 384 if (pp == NULL || PP_ISFREE(pp))
385 385 panic("boot_alloc: pp is NULL or free");
386 386
387 387 /*
388 388 * If the cage is on but doesn't yet contain this page,
389 389 * mark it as non-relocatable.
390 390 */
391 391 if (kcage_on && !PP_ISNORELOC(pp)) {
392 392 PP_SETNORELOC(pp);
393 393 PLCNT_XFER_NORELOC(pp);
394 394 }
395 395
396 396 (void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
397 397 pp->p_lckcnt = 1;
398 398 #if defined(__x86)
399 399 page_downgrade(pp);
400 400 #else
401 401 page_unlock(pp);
402 402 #endif
403 403 }
404 404 }
405 405
406 406 /*
407 407 * Get pages from boot and hash them into the kernel's vp.
408 408 * Used after page structs have been allocated, but before segkmem is ready.
409 409 */
410 410 void *
411 411 boot_alloc(void *inaddr, size_t size, uint_t align)
412 412 {
413 413 caddr_t addr = inaddr;
414 414
415 415 if (bootops == NULL)
416 416 prom_panic("boot_alloc: attempt to allocate memory after "
417 417 "BOP_GONE");
418 418
419 419 size = ptob(btopr(size));
420 420 #ifdef __sparc
421 421 if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
422 422 panic("boot_alloc: bop_alloc_chunk failed");
423 423 #else
424 424 if (BOP_ALLOC(bootops, addr, size, align) != addr)
425 425 panic("boot_alloc: BOP_ALLOC failed");
426 426 #endif
427 427 boot_mapin((caddr_t)addr, size);
428 428 return (addr);
429 429 }
430 430
431 -static void
432 -segkmem_badop()
433 -{
434 - panic("segkmem_badop");
435 -}
436 -
437 -#define SEGKMEM_BADOP(t) (t(*)())segkmem_badop
438 -
439 431 /*ARGSUSED*/
440 432 static faultcode_t
441 433 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
442 434 enum fault_type type, enum seg_rw rw)
443 435 {
444 436 pgcnt_t npages;
445 437 spgcnt_t pg;
446 438 page_t *pp;
447 439 struct vnode *vp = seg->s_data;
448 440
449 441 ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
450 442
451 443 if (seg->s_as != &kas || size > seg->s_size ||
452 444 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
453 445 panic("segkmem_fault: bad args");
454 446
455 447 /*
456 448 * If it is one of segkp pages, call segkp_fault.
457 449 */
458 450 if (segkp_bitmap && seg == &kvseg &&
459 451 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
460 452 return (segop_fault(hat, segkp, addr, size, type, rw));
461 453
462 454 if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
463 455 return (FC_NOSUPPORT);
464 456
465 457 npages = btopr(size);
466 458
467 459 switch (type) {
468 460 case F_SOFTLOCK: /* lock down already-loaded translations */
469 461 for (pg = 0; pg < npages; pg++) {
470 462 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
471 463 SE_SHARED);
472 464 if (pp == NULL) {
473 465 /*
474 466 * Hmm, no page. Does a kernel mapping
475 467 * exist for it?
476 468 */
477 469 if (!hat_probe(kas.a_hat, addr)) {
478 470 addr -= PAGESIZE;
479 471 while (--pg >= 0) {
480 472 pp = page_find(vp, (u_offset_t)
481 473 (uintptr_t)addr);
482 474 if (pp)
483 475 page_unlock(pp);
484 476 addr -= PAGESIZE;
485 477 }
486 478 return (FC_NOMAP);
487 479 }
488 480 }
489 481 addr += PAGESIZE;
490 482 }
491 483 if (rw == S_OTHER)
492 484 hat_reserve(seg->s_as, addr, size);
493 485 return (0);
494 486 case F_SOFTUNLOCK:
495 487 while (npages--) {
496 488 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
497 489 if (pp)
498 490 page_unlock(pp);
499 491 addr += PAGESIZE;
500 492 }
501 493 return (0);
502 494 default:
503 495 return (FC_NOSUPPORT);
504 496 }
505 497 /*NOTREACHED*/
506 498 }
507 499
508 500 static int
509 501 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
510 502 {
511 503 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
512 504
513 505 if (seg->s_as != &kas || size > seg->s_size ||
514 506 addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
515 507 panic("segkmem_setprot: bad args");
516 508
517 509 /*
518 510 * If it is one of segkp pages, call segkp.
519 511 */
520 512 if (segkp_bitmap && seg == &kvseg &&
521 513 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
522 514 return (segop_setprot(segkp, addr, size, prot));
523 515
524 516 if (prot == 0)
525 517 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
526 518 else
527 519 hat_chgprot(kas.a_hat, addr, size, prot);
528 520 return (0);
529 521 }
530 522
531 523 /*
532 524 * This is a dummy segkmem function overloaded to call segkp
533 525 * when segkp is under the heap.
534 526 */
535 527 /* ARGSUSED */
536 528 static int
537 529 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
538 530 {
539 531 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
540 532
541 533 if (seg->s_as != &kas)
542 - segkmem_badop();
534 + panic("segkmem badop");
543 535
544 536 /*
545 537 * If it is one of segkp pages, call into segkp.
546 538 */
547 539 if (segkp_bitmap && seg == &kvseg &&
548 540 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
549 541 return (segop_checkprot(segkp, addr, size, prot));
550 542
551 - segkmem_badop();
543 + panic("segkmem badop");
552 544 return (0);
553 545 }
554 546
555 547 /*
556 548 * This is a dummy segkmem function overloaded to call segkp
557 549 * when segkp is under the heap.
558 550 */
559 551 /* ARGSUSED */
560 552 static int
561 553 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
562 554 {
563 555 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
564 556
565 557 if (seg->s_as != &kas)
566 - segkmem_badop();
558 + panic("segkmem badop");
567 559
568 560 /*
569 561 * If it is one of segkp pages, call into segkp.
570 562 */
571 563 if (segkp_bitmap && seg == &kvseg &&
572 564 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
573 565 return (segop_kluster(segkp, addr, delta));
574 566
575 - segkmem_badop();
567 + panic("segkmem badop");
576 568 return (0);
577 569 }
578 570
579 571 static void
580 572 segkmem_xdump_range(void *arg, void *start, size_t size)
581 573 {
582 574 struct as *as = arg;
583 575 caddr_t addr = start;
584 576 caddr_t addr_end = addr + size;
585 577
586 578 while (addr < addr_end) {
587 579 pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
588 580 if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
589 581 dump_addpage(as, addr, pfn);
590 582 addr += PAGESIZE;
591 583 dump_timeleft = dump_timeout;
592 584 }
593 585 }
594 586
595 587 static void
596 588 segkmem_dump_range(void *arg, void *start, size_t size)
597 589 {
598 590 caddr_t addr = start;
599 591 caddr_t addr_end = addr + size;
600 592
601 593 /*
602 594 * If we are about to start dumping the range of addresses we
603 595 * carved out of the kernel heap for the large page heap walk
604 596 * heap_lp_arena to find what segments are actually populated
605 597 */
606 598 if (SEGKMEM_USE_LARGEPAGES &&
607 599 addr == heap_lp_base && addr_end == heap_lp_end &&
608 600 vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
609 601 vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
610 602 segkmem_xdump_range, arg);
611 603 } else {
612 604 segkmem_xdump_range(arg, start, size);
613 605 }
614 606 }
615 607
616 608 static void
617 609 segkmem_dump(struct seg *seg)
618 610 {
619 611 /*
620 612 * The kernel's heap_arena (represented by kvseg) is a very large
621 613 * VA space, most of which is typically unused. To speed up dumping
622 614 * we use vmem_walk() to quickly find the pieces of heap_arena that
623 615 * are actually in use. We do the same for heap32_arena and
624 616 * heap_core.
625 617 *
626 618 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
627 619 * may ultimately need to allocate memory. Reentrant walks are
628 620 * necessarily imperfect snapshots. The kernel heap continues
629 621 * to change during a live crash dump, for example. For a normal
630 622 * crash dump, however, we know that there won't be any other threads
631 623 * messing with the heap. Therefore, at worst, we may fail to dump
632 624 * the pages that get allocated by the act of dumping; but we will
633 625 * always dump every page that was allocated when the walk began.
634 626 *
635 627 * The other segkmem segments are dense (fully populated), so there's
636 628 * no need to use this technique when dumping them.
637 629 *
638 630 * Note: when adding special dump handling for any new sparsely-
639 631 * populated segments, be sure to add similar handling to the ::kgrep
640 632 * code in mdb.
641 633 */
642 634 if (seg == &kvseg) {
643 635 vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
644 636 segkmem_dump_range, seg->s_as);
645 637 #ifndef __sparc
646 638 vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
647 639 segkmem_dump_range, seg->s_as);
648 640 #endif
649 641 } else if (seg == &kvseg_core) {
650 642 vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
651 643 segkmem_dump_range, seg->s_as);
652 644 } else if (seg == &kvseg32) {
653 645 vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
654 646 segkmem_dump_range, seg->s_as);
655 647 vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
656 648 segkmem_dump_range, seg->s_as);
657 649 } else if (seg == &kzioseg) {
658 650 /*
659 651 * We don't want to dump pages attached to kzioseg since they
660 652 * contain file data from ZFS. If this page's segment is
661 653 * kzioseg return instead of writing it to the dump device.
662 654 */
663 655 return;
664 656 } else {
665 657 segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
666 658 }
667 659 }
668 660
669 661 /*
670 662 * lock/unlock kmem pages over a given range [addr, addr+len).
671 663 * Returns a shadow list of pages in ppp. If there are holes
672 664 * in the range (e.g. some of the kernel mappings do not have
673 665 * underlying page_ts) returns ENOTSUP so that as_pagelock()
674 666 * will handle the range via as_fault(F_SOFTLOCK).
675 667 */
676 668 /*ARGSUSED*/
677 669 static int
678 670 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
679 671 page_t ***ppp, enum lock_type type, enum seg_rw rw)
680 672 {
681 673 page_t **pplist, *pp;
682 674 pgcnt_t npages;
683 675 spgcnt_t pg;
684 676 size_t nb;
685 677 struct vnode *vp = seg->s_data;
686 678
687 679 ASSERT(ppp != NULL);
688 680
689 681 /*
690 682 * If it is one of segkp pages, call into segkp.
691 683 */
692 684 if (segkp_bitmap && seg == &kvseg &&
693 685 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
694 686 return (segop_pagelock(segkp, addr, len, ppp, type, rw));
695 687
696 688 npages = btopr(len);
697 689 nb = sizeof (page_t *) * npages;
698 690
699 691 if (type == L_PAGEUNLOCK) {
700 692 pplist = *ppp;
701 693 ASSERT(pplist != NULL);
702 694
703 695 for (pg = 0; pg < npages; pg++) {
704 696 pp = pplist[pg];
705 697 page_unlock(pp);
706 698 }
707 699 kmem_free(pplist, nb);
708 700 return (0);
709 701 }
710 702
711 703 ASSERT(type == L_PAGELOCK);
712 704
713 705 pplist = kmem_alloc(nb, KM_NOSLEEP);
714 706 if (pplist == NULL) {
715 707 *ppp = NULL;
716 708 return (ENOTSUP); /* take the slow path */
717 709 }
718 710
719 711 for (pg = 0; pg < npages; pg++) {
720 712 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
721 713 if (pp == NULL) {
722 714 while (--pg >= 0)
723 715 page_unlock(pplist[pg]);
724 716 kmem_free(pplist, nb);
725 717 *ppp = NULL;
726 718 return (ENOTSUP);
727 719 }
728 720 pplist[pg] = pp;
729 721 addr += PAGESIZE;
730 722 }
731 723
732 724 *ppp = pplist;
733 725 return (0);
734 726 }
735 727
736 728 /*
737 729 * This is a dummy segkmem function overloaded to call segkp
738 730 * when segkp is under the heap.
739 731 */
740 732 /* ARGSUSED */
741 733 static int
742 734 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
743 735 {
744 736 ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
745 737
746 738 if (seg->s_as != &kas)
747 - segkmem_badop();
739 + panic("segkmem badop");
748 740
749 741 /*
750 742 * If it is one of segkp pages, call into segkp.
751 743 */
752 744 if (segkp_bitmap && seg == &kvseg &&
753 745 BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
754 746 return (segop_getmemid(segkp, addr, memidp));
755 747
756 - segkmem_badop();
748 + panic("segkmem badop");
757 749 return (0);
758 750 }
759 751
760 752 /*ARGSUSED*/
761 753 static lgrp_mem_policy_info_t *
762 754 segkmem_getpolicy(struct seg *seg, caddr_t addr)
763 755 {
764 756 return (NULL);
765 757 }
766 758
767 759 /*ARGSUSED*/
768 760 static int
769 761 segkmem_capable(struct seg *seg, segcapability_t capability)
770 762 {
771 763 if (capability == S_CAPABILITY_NOMINFLT)
772 764 return (1);
773 765 return (0);
774 766 }
775 767
776 768 static struct seg_ops segkmem_ops = {
777 - .dup = SEGKMEM_BADOP(int),
778 - .unmap = SEGKMEM_BADOP(int),
779 - .free = SEGKMEM_BADOP(void),
780 769 .fault = segkmem_fault,
781 - .faulta = SEGKMEM_BADOP(faultcode_t),
782 770 .setprot = segkmem_setprot,
783 771 .checkprot = segkmem_checkprot,
784 772 .kluster = segkmem_kluster,
785 - .sync = SEGKMEM_BADOP(int),
786 - .incore = SEGKMEM_BADOP(size_t),
787 - .lockop = SEGKMEM_BADOP(int),
788 - .getprot = SEGKMEM_BADOP(int),
789 - .getoffset = SEGKMEM_BADOP(u_offset_t),
790 - .gettype = SEGKMEM_BADOP(int),
791 - .getvp = SEGKMEM_BADOP(int),
792 - .advise = SEGKMEM_BADOP(int),
793 773 .dump = segkmem_dump,
794 774 .pagelock = segkmem_pagelock,
795 - .setpagesize = SEGKMEM_BADOP(int),
796 775 .getmemid = segkmem_getmemid,
797 776 .getpolicy = segkmem_getpolicy,
798 777 .capable = segkmem_capable,
799 778 .inherit = seg_inherit_notsup,
800 779 };
801 780
802 781 int
803 782 segkmem_zio_create(struct seg *seg)
804 783 {
805 784 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
806 785 seg->s_ops = &segkmem_ops;
807 786 seg->s_data = &zvp;
808 787 kas.a_size += seg->s_size;
809 788 return (0);
810 789 }
811 790
812 791 int
813 792 segkmem_create(struct seg *seg)
814 793 {
815 794 ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
816 795 seg->s_ops = &segkmem_ops;
817 796 seg->s_data = &kvp;
818 797 kas.a_size += seg->s_size;
819 798 return (0);
820 799 }
821 800
822 801 /*ARGSUSED*/
823 802 page_t *
824 803 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
825 804 {
826 805 struct seg kseg;
827 806 int pgflags;
828 807 struct vnode *vp = arg;
829 808
830 809 if (vp == NULL)
831 810 vp = &kvp;
832 811
833 812 kseg.s_as = &kas;
834 813 pgflags = PG_EXCL;
835 814
836 815 if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
837 816 pgflags |= PG_NORELOC;
838 817 if ((vmflag & VM_NOSLEEP) == 0)
839 818 pgflags |= PG_WAIT;
840 819 if (vmflag & VM_PANIC)
841 820 pgflags |= PG_PANIC;
842 821 if (vmflag & VM_PUSHPAGE)
843 822 pgflags |= PG_PUSHPAGE;
844 823 if (vmflag & VM_NORMALPRI) {
845 824 ASSERT(vmflag & VM_NOSLEEP);
846 825 pgflags |= PG_NORMALPRI;
847 826 }
848 827
849 828 return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
850 829 pgflags, &kseg, addr));
851 830 }
852 831
853 832 /*
854 833 * Allocate pages to back the virtual address range [addr, addr + size).
855 834 * If addr is NULL, allocate the virtual address space as well.
856 835 */
857 836 void *
858 837 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
859 838 page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
860 839 {
861 840 page_t *ppl;
862 841 caddr_t addr = inaddr;
863 842 pgcnt_t npages = btopr(size);
864 843 int allocflag;
865 844
866 845 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
867 846 return (NULL);
868 847
869 848 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
870 849
871 850 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
872 851 if (inaddr == NULL)
873 852 vmem_free(vmp, addr, size);
874 853 return (NULL);
875 854 }
876 855
877 856 ppl = page_create_func(addr, size, vmflag, pcarg);
878 857 if (ppl == NULL) {
879 858 if (inaddr == NULL)
880 859 vmem_free(vmp, addr, size);
881 860 page_unresv(npages);
882 861 return (NULL);
883 862 }
884 863
885 864 /*
886 865 * Under certain conditions, we need to let the HAT layer know
887 866 * that it cannot safely allocate memory. Allocations from
888 867 * the hat_memload vmem arena always need this, to prevent
889 868 * infinite recursion.
890 869 *
891 870 * In addition, the x86 hat cannot safely do memory
892 871 * allocations while in vmem_populate(), because there
893 872 * is no simple bound on its usage.
894 873 */
895 874 if (vmflag & VM_MEMLOAD)
896 875 allocflag = HAT_NO_KALLOC;
897 876 #if defined(__x86)
898 877 else if (vmem_is_populator())
899 878 allocflag = HAT_NO_KALLOC;
900 879 #endif
901 880 else
902 881 allocflag = 0;
903 882
904 883 while (ppl != NULL) {
905 884 page_t *pp = ppl;
906 885 page_sub(&ppl, pp);
907 886 ASSERT(page_iolock_assert(pp));
908 887 ASSERT(PAGE_EXCL(pp));
909 888 page_io_unlock(pp);
910 889 hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
911 890 (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
912 891 HAT_LOAD_LOCK | allocflag);
913 892 pp->p_lckcnt = 1;
914 893 #if defined(__x86)
915 894 page_downgrade(pp);
916 895 #else
917 896 if (vmflag & SEGKMEM_SHARELOCKED)
918 897 page_downgrade(pp);
919 898 else
920 899 page_unlock(pp);
921 900 #endif
922 901 }
923 902
924 903 return (addr);
925 904 }
926 905
927 906 static void *
928 907 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
929 908 {
930 909 void *addr;
931 910 segkmem_gc_list_t *gcp, **prev_gcpp;
932 911
933 912 ASSERT(vp != NULL);
934 913
935 914 if (kvseg.s_base == NULL) {
936 915 #ifndef __sparc
937 916 if (bootops->bsys_alloc == NULL)
938 917 halt("Memory allocation between bop_alloc() and "
939 918 "kmem_alloc().\n");
940 919 #endif
941 920
942 921 /*
943 922 * There's not a lot of memory to go around during boot,
944 923 * so recycle it if we can.
945 924 */
946 925 for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
947 926 prev_gcpp = &gcp->gc_next) {
948 927 if (gcp->gc_arena == vmp && gcp->gc_size == size) {
949 928 *prev_gcpp = gcp->gc_next;
950 929 return (gcp);
951 930 }
952 931 }
953 932
954 933 addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
955 934 if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
956 935 panic("segkmem_alloc: boot_alloc failed");
957 936 return (addr);
958 937 }
959 938 return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
960 939 segkmem_page_create, vp));
961 940 }
962 941
963 942 void *
964 943 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
965 944 {
966 945 return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
967 946 }
968 947
969 948 void *
970 949 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
971 950 {
972 951 return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
973 952 }
974 953
975 954 /*
976 955 * Any changes to this routine must also be carried over to
977 956 * devmap_free_pages() in the seg_dev driver. This is because
978 957 * we currently don't have a special kernel segment for non-paged
979 958 * kernel memory that is exported by drivers to user space.
980 959 */
981 960 static void
982 961 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
983 962 void (*func)(page_t *))
984 963 {
985 964 page_t *pp;
986 965 caddr_t addr = inaddr;
987 966 caddr_t eaddr;
988 967 pgcnt_t npages = btopr(size);
989 968
990 969 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
991 970 ASSERT(vp != NULL);
992 971
993 972 if (kvseg.s_base == NULL) {
994 973 segkmem_gc_list_t *gc = inaddr;
995 974 gc->gc_arena = vmp;
996 975 gc->gc_size = size;
997 976 gc->gc_next = segkmem_gc_list;
998 977 segkmem_gc_list = gc;
999 978 return;
1000 979 }
1001 980
1002 981 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1003 982
1004 983 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
1005 984 #if defined(__x86)
1006 985 pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
1007 986 if (pp == NULL)
1008 987 panic("segkmem_free: page not found");
1009 988 if (!page_tryupgrade(pp)) {
1010 989 /*
1011 990 * Some other thread has a sharelock. Wait for
1012 991 * it to drop the lock so we can free this page.
1013 992 */
1014 993 page_unlock(pp);
1015 994 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
1016 995 SE_EXCL);
1017 996 }
1018 997 #else
1019 998 pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1020 999 #endif
1021 1000 if (pp == NULL)
1022 1001 panic("segkmem_free: page not found");
1023 1002 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
1024 1003 pp->p_lckcnt = 0;
1025 1004 if (func)
1026 1005 func(pp);
1027 1006 else
1028 1007 page_destroy(pp, 0);
1029 1008 }
1030 1009 if (func == NULL)
1031 1010 page_unresv(npages);
1032 1011
1033 1012 if (vmp != NULL)
1034 1013 vmem_free(vmp, inaddr, size);
1035 1014
1036 1015 }
1037 1016
1038 1017 void
1039 1018 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1040 1019 {
1041 1020 segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1042 1021 }
1043 1022
1044 1023 void
1045 1024 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1046 1025 {
1047 1026 segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1048 1027 }
1049 1028
1050 1029 void
1051 1030 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1052 1031 {
1053 1032 segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
1054 1033 }
1055 1034
1056 1035 void
1057 1036 segkmem_gc(void)
1058 1037 {
1059 1038 ASSERT(kvseg.s_base != NULL);
1060 1039 while (segkmem_gc_list != NULL) {
1061 1040 segkmem_gc_list_t *gc = segkmem_gc_list;
1062 1041 segkmem_gc_list = gc->gc_next;
1063 1042 segkmem_free(gc->gc_arena, gc, gc->gc_size);
1064 1043 }
1065 1044 }
1066 1045
1067 1046 /*
1068 1047 * Legacy entry points from here to end of file.
1069 1048 */
1070 1049 void
1071 1050 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1072 1051 pfn_t pfn, uint_t flags)
1073 1052 {
1074 1053 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1075 1054 hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1076 1055 flags | HAT_LOAD_LOCK);
1077 1056 }
1078 1057
1079 1058 void
1080 1059 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1081 1060 {
1082 1061 hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1083 1062 }
1084 1063
1085 1064 void *
1086 1065 kmem_getpages(pgcnt_t npages, int kmflag)
1087 1066 {
1088 1067 return (kmem_alloc(ptob(npages), kmflag));
1089 1068 }
1090 1069
1091 1070 void
1092 1071 kmem_freepages(void *addr, pgcnt_t npages)
1093 1072 {
1094 1073 kmem_free(addr, ptob(npages));
1095 1074 }
1096 1075
1097 1076 /*
1098 1077 * segkmem_page_create_large() allocates a large page to be used for the kmem
1099 1078 * caches. If kpr is enabled we ask for a relocatable page unless requested
1100 1079 * otherwise. If kpr is disabled we have to ask for a non-reloc page
1101 1080 */
1102 1081 static page_t *
1103 1082 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1104 1083 {
1105 1084 int pgflags;
1106 1085
1107 1086 pgflags = PG_EXCL;
1108 1087
1109 1088 if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1110 1089 pgflags |= PG_NORELOC;
1111 1090 if (!(vmflag & VM_NOSLEEP))
1112 1091 pgflags |= PG_WAIT;
1113 1092 if (vmflag & VM_PUSHPAGE)
1114 1093 pgflags |= PG_PUSHPAGE;
1115 1094 if (vmflag & VM_NORMALPRI)
1116 1095 pgflags |= PG_NORMALPRI;
1117 1096
1118 1097 return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1119 1098 pgflags, &kvseg, addr, arg));
1120 1099 }
1121 1100
1122 1101 /*
1123 1102 * Allocate a large page to back the virtual address range
1124 1103 * [addr, addr + size). If addr is NULL, allocate the virtual address
1125 1104 * space as well.
1126 1105 */
1127 1106 static void *
1128 1107 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1129 1108 uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1130 1109 void *pcarg)
1131 1110 {
1132 1111 caddr_t addr = inaddr, pa;
1133 1112 size_t lpsize = segkmem_lpsize;
1134 1113 pgcnt_t npages = btopr(size);
1135 1114 pgcnt_t nbpages = btop(lpsize);
1136 1115 pgcnt_t nlpages = size >> segkmem_lpshift;
1137 1116 size_t ppasize = nbpages * sizeof (page_t *);
1138 1117 page_t *pp, *rootpp, **ppa, *pplist = NULL;
1139 1118 int i;
1140 1119
1141 1120 vmflag |= VM_NOSLEEP;
1142 1121
1143 1122 if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1144 1123 return (NULL);
1145 1124 }
1146 1125
1147 1126 /*
1148 1127 * allocate an array we need for hat_memload_array.
1149 1128 * we use a separate arena to avoid recursion.
1150 1129 * we will not need this array when hat_memload_array learns pp++
1151 1130 */
1152 1131 if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1153 1132 goto fail_array_alloc;
1154 1133 }
1155 1134
1156 1135 if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1157 1136 goto fail_vmem_alloc;
1158 1137
1159 1138 ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1160 1139
1161 1140 /* create all the pages */
1162 1141 for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1163 1142 if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1164 1143 goto fail_page_create;
1165 1144 page_list_concat(&pplist, &pp);
1166 1145 }
1167 1146
1168 1147 /* at this point we have all the resource to complete the request */
1169 1148 while ((rootpp = pplist) != NULL) {
1170 1149 for (i = 0; i < nbpages; i++) {
1171 1150 ASSERT(pplist != NULL);
1172 1151 pp = pplist;
1173 1152 page_sub(&pplist, pp);
1174 1153 ASSERT(page_iolock_assert(pp));
1175 1154 page_io_unlock(pp);
1176 1155 ppa[i] = pp;
1177 1156 }
1178 1157 /*
1179 1158 * Load the locked entry. It's OK to preload the entry into the
1180 1159 * TSB since we now support large mappings in the kernel TSB.
1181 1160 */
1182 1161 hat_memload_array(kas.a_hat,
1183 1162 (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1184 1163 ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1185 1164 HAT_LOAD_LOCK);
1186 1165
1187 1166 for (--i; i >= 0; --i) {
1188 1167 ppa[i]->p_lckcnt = 1;
1189 1168 page_unlock(ppa[i]);
1190 1169 }
1191 1170 }
1192 1171
1193 1172 vmem_free(segkmem_ppa_arena, ppa, ppasize);
1194 1173 return (addr);
1195 1174
1196 1175 fail_page_create:
1197 1176 while ((rootpp = pplist) != NULL) {
1198 1177 for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1199 1178 ASSERT(pp != NULL);
1200 1179 page_sub(&pplist, pp);
1201 1180 ASSERT(page_iolock_assert(pp));
1202 1181 page_io_unlock(pp);
1203 1182 }
1204 1183 page_destroy_pages(rootpp);
1205 1184 }
1206 1185
1207 1186 if (inaddr == NULL)
1208 1187 vmem_free(vmp, addr, size);
1209 1188
1210 1189 fail_vmem_alloc:
1211 1190 vmem_free(segkmem_ppa_arena, ppa, ppasize);
1212 1191
1213 1192 fail_array_alloc:
1214 1193 page_unresv(npages);
1215 1194
1216 1195 return (NULL);
1217 1196 }
1218 1197
1219 1198 static void
1220 1199 segkmem_free_one_lp(caddr_t addr, size_t size)
1221 1200 {
1222 1201 page_t *pp, *rootpp = NULL;
1223 1202 pgcnt_t pgs_left = btopr(size);
1224 1203
1225 1204 ASSERT(size == segkmem_lpsize);
1226 1205
1227 1206 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1228 1207
1229 1208 for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1230 1209 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1231 1210 if (pp == NULL)
1232 1211 panic("segkmem_free_one_lp: page not found");
1233 1212 ASSERT(PAGE_EXCL(pp));
1234 1213 pp->p_lckcnt = 0;
1235 1214 if (rootpp == NULL)
1236 1215 rootpp = pp;
1237 1216 }
1238 1217 ASSERT(rootpp != NULL);
1239 1218 page_destroy_pages(rootpp);
1240 1219
1241 1220 /* page_unresv() is done by the caller */
1242 1221 }
1243 1222
1244 1223 /*
1245 1224 * This function is called to import new spans into the vmem arenas like
1246 1225 * kmem_default_arena and kmem_oversize_arena. It first tries to import
1247 1226 * spans from large page arena - kmem_lp_arena. In order to do this it might
1248 1227 * have to "upgrade the requested size" to kmem_lp_arena quantum. If
1249 1228 * it was not able to satisfy the upgraded request it then calls regular
1250 1229 * segkmem_alloc() that satisfies the request by importing from "*vmp" arena
1251 1230 */
1252 1231 /*ARGSUSED*/
1253 1232 void *
1254 1233 segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
1255 1234 {
1256 1235 size_t size;
1257 1236 kthread_t *t = curthread;
1258 1237 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1259 1238
1260 1239 ASSERT(sizep != NULL);
1261 1240
1262 1241 size = *sizep;
1263 1242
1264 1243 if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1265 1244 !(vmflag & SEGKMEM_SHARELOCKED)) {
1266 1245
1267 1246 size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1268 1247 size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1269 1248 void *addr = NULL;
1270 1249 ulong_t *lpthrtp = &lpcb->lp_throttle;
1271 1250 ulong_t lpthrt = *lpthrtp;
1272 1251 int dowakeup = 0;
1273 1252 int doalloc = 1;
1274 1253
1275 1254 ASSERT(kmem_lp_arena != NULL);
1276 1255 ASSERT(asize >= size);
1277 1256
1278 1257 if (lpthrt != 0) {
1279 1258 /* try to update the throttle value */
1280 1259 lpthrt = atomic_inc_ulong_nv(lpthrtp);
1281 1260 if (lpthrt >= segkmem_lpthrottle_max) {
1282 1261 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1283 1262 segkmem_lpthrottle_max / 4);
1284 1263 }
1285 1264
1286 1265 /*
1287 1266 * when we get above throttle start do an exponential
1288 1267 * backoff at trying large pages and reaping
1289 1268 */
1290 1269 if (lpthrt > segkmem_lpthrottle_start &&
1291 1270 !ISP2(lpthrt)) {
1292 1271 lpcb->allocs_throttled++;
1293 1272 lpthrt--;
1294 1273 if (ISP2(lpthrt))
1295 1274 kmem_reap();
1296 1275 return (segkmem_alloc(vmp, size, vmflag));
1297 1276 }
1298 1277 }
1299 1278
1300 1279 if (!(vmflag & VM_NOSLEEP) &&
1301 1280 segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1302 1281 vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1303 1282 asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1304 1283
1305 1284 /*
1306 1285 * we are low on free memory in kmem_lp_arena
1307 1286 * we let only one guy to allocate heap_lp
1308 1287 * quantum size chunk that everybody is going to
1309 1288 * share
1310 1289 */
1311 1290 mutex_enter(&lpcb->lp_lock);
1312 1291
1313 1292 if (lpcb->lp_wait) {
1314 1293
1315 1294 /* we are not the first one - wait */
1316 1295 cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1317 1296 if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1318 1297 kmemlp_qnt) {
1319 1298 doalloc = 0;
1320 1299 }
1321 1300 } else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1322 1301 kmemlp_qnt) {
1323 1302
1324 1303 /*
1325 1304 * we are the first one, make sure we import
1326 1305 * a large page
1327 1306 */
1328 1307 if (asize == kmemlp_qnt)
1329 1308 asize += kmemlp_qnt;
1330 1309 dowakeup = 1;
1331 1310 lpcb->lp_wait = 1;
1332 1311 }
1333 1312
1334 1313 mutex_exit(&lpcb->lp_lock);
1335 1314 }
1336 1315
1337 1316 /*
1338 1317 * VM_ABORT flag prevents sleeps in vmem_xalloc when
1339 1318 * large pages are not available. In that case this allocation
1340 1319 * attempt will fail and we will retry allocation with small
1341 1320 * pages. We also do not want to panic if this allocation fails
1342 1321 * because we are going to retry.
1343 1322 */
1344 1323 if (doalloc) {
1345 1324 addr = vmem_alloc(kmem_lp_arena, asize,
1346 1325 (vmflag | VM_ABORT) & ~VM_PANIC);
1347 1326
1348 1327 if (dowakeup) {
1349 1328 mutex_enter(&lpcb->lp_lock);
1350 1329 ASSERT(lpcb->lp_wait != 0);
1351 1330 lpcb->lp_wait = 0;
1352 1331 cv_broadcast(&lpcb->lp_cv);
1353 1332 mutex_exit(&lpcb->lp_lock);
1354 1333 }
1355 1334 }
1356 1335
1357 1336 if (addr != NULL) {
1358 1337 *sizep = asize;
1359 1338 *lpthrtp = 0;
1360 1339 return (addr);
1361 1340 }
1362 1341
1363 1342 if (vmflag & VM_NOSLEEP)
1364 1343 lpcb->nosleep_allocs_failed++;
1365 1344 else
1366 1345 lpcb->sleep_allocs_failed++;
1367 1346 lpcb->alloc_bytes_failed += size;
1368 1347
1369 1348 /* if large page throttling is not started yet do it */
1370 1349 if (segkmem_use_lpthrottle && lpthrt == 0) {
1371 1350 lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1372 1351 }
1373 1352 }
1374 1353 return (segkmem_alloc(vmp, size, vmflag));
1375 1354 }
1376 1355
1377 1356 void
1378 1357 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1379 1358 {
1380 1359 if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1381 1360 segkmem_free(vmp, inaddr, size);
1382 1361 } else {
1383 1362 vmem_free(kmem_lp_arena, inaddr, size);
1384 1363 }
1385 1364 }
1386 1365
1387 1366 /*
1388 1367 * segkmem_alloc_lpi() imports virtual memory from large page heap arena
1389 1368 * into kmem_lp arena. In the process it maps the imported segment with
1390 1369 * large pages
1391 1370 */
1392 1371 static void *
1393 1372 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1394 1373 {
1395 1374 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1396 1375 void *addr;
1397 1376
1398 1377 ASSERT(size != 0);
1399 1378 ASSERT(vmp == heap_lp_arena);
1400 1379
1401 1380 /* do not allow large page heap grow beyound limits */
1402 1381 if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1403 1382 lpcb->allocs_limited++;
1404 1383 return (NULL);
1405 1384 }
1406 1385
1407 1386 addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1408 1387 segkmem_page_create_large, NULL);
1409 1388 return (addr);
1410 1389 }
1411 1390
1412 1391 /*
1413 1392 * segkmem_free_lpi() returns virtual memory back into large page heap arena
1414 1393 * from kmem_lp arena. Beore doing this it unmaps the segment and frees
1415 1394 * large pages used to map it.
1416 1395 */
1417 1396 static void
1418 1397 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1419 1398 {
1420 1399 pgcnt_t nlpages = size >> segkmem_lpshift;
1421 1400 size_t lpsize = segkmem_lpsize;
1422 1401 caddr_t addr = inaddr;
1423 1402 pgcnt_t npages = btopr(size);
1424 1403 int i;
1425 1404
1426 1405 ASSERT(vmp == heap_lp_arena);
1427 1406 ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1428 1407 ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1429 1408
1430 1409 for (i = 0; i < nlpages; i++) {
1431 1410 segkmem_free_one_lp(addr, lpsize);
1432 1411 addr += lpsize;
1433 1412 }
1434 1413
1435 1414 page_unresv(npages);
1436 1415
1437 1416 vmem_free(vmp, inaddr, size);
1438 1417 }
1439 1418
1440 1419 /*
1441 1420 * This function is called at system boot time by kmem_init right after
1442 1421 * /etc/system file has been read. It checks based on hardware configuration
1443 1422 * and /etc/system settings if system is going to use large pages. The
1444 1423 * initialiazation necessary to actually start using large pages
1445 1424 * happens later in the process after segkmem_heap_lp_init() is called.
1446 1425 */
1447 1426 int
1448 1427 segkmem_lpsetup()
1449 1428 {
1450 1429 int use_large_pages = 0;
1451 1430
1452 1431 #ifdef __sparc
1453 1432
1454 1433 size_t memtotal = physmem * PAGESIZE;
1455 1434
1456 1435 if (heap_lp_base == NULL) {
1457 1436 segkmem_lpsize = PAGESIZE;
1458 1437 return (0);
1459 1438 }
1460 1439
1461 1440 /* get a platform dependent value of large page size for kernel heap */
1462 1441 segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1463 1442
1464 1443 if (segkmem_lpsize <= PAGESIZE) {
1465 1444 /*
1466 1445 * put virtual space reserved for the large page kernel
1467 1446 * back to the regular heap
1468 1447 */
1469 1448 vmem_xfree(heap_arena, heap_lp_base,
1470 1449 heap_lp_end - heap_lp_base);
1471 1450 heap_lp_base = NULL;
1472 1451 heap_lp_end = NULL;
1473 1452 segkmem_lpsize = PAGESIZE;
1474 1453 return (0);
1475 1454 }
1476 1455
1477 1456 /* set heap_lp quantum if necessary */
1478 1457 if (segkmem_heaplp_quantum == 0 || !ISP2(segkmem_heaplp_quantum) ||
1479 1458 P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1480 1459 segkmem_heaplp_quantum = segkmem_lpsize;
1481 1460 }
1482 1461
1483 1462 /* set kmem_lp quantum if necessary */
1484 1463 if (segkmem_kmemlp_quantum == 0 || !ISP2(segkmem_kmemlp_quantum) ||
1485 1464 segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1486 1465 segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1487 1466 }
1488 1467
1489 1468 /* set total amount of memory allowed for large page kernel heap */
1490 1469 if (segkmem_kmemlp_max == 0) {
1491 1470 if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1492 1471 segkmem_kmemlp_pcnt = 12;
1493 1472 segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1494 1473 }
1495 1474 segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1496 1475 segkmem_heaplp_quantum);
1497 1476
1498 1477 /* fix lp kmem preallocation request if necesssary */
1499 1478 if (segkmem_kmemlp_min) {
1500 1479 segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1501 1480 segkmem_heaplp_quantum);
1502 1481 if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1503 1482 segkmem_kmemlp_min = segkmem_kmemlp_max;
1504 1483 }
1505 1484
1506 1485 use_large_pages = 1;
1507 1486 segkmem_lpszc = page_szc(segkmem_lpsize);
1508 1487 segkmem_lpshift = page_get_shift(segkmem_lpszc);
1509 1488
1510 1489 #endif
1511 1490 return (use_large_pages);
1512 1491 }
1513 1492
1514 1493 void
1515 1494 segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
1516 1495 {
1517 1496 ASSERT(zio_mem_base != NULL);
1518 1497 ASSERT(zio_mem_size != 0);
1519 1498
1520 1499 /*
1521 1500 * To reduce VA space fragmentation, we set up quantum caches for the
1522 1501 * smaller sizes; we chose 32k because that translates to 128k VA
1523 1502 * slabs, which matches nicely with the common 128k zio_data bufs.
1524 1503 */
1525 1504 zio_arena = vmem_create("zfs_file_data", zio_mem_base, zio_mem_size,
1526 1505 PAGESIZE, NULL, NULL, NULL, 32 * 1024, VM_SLEEP);
1527 1506
1528 1507 zio_alloc_arena = vmem_create("zfs_file_data_buf", NULL, 0, PAGESIZE,
1529 1508 segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);
1530 1509
1531 1510 ASSERT(zio_arena != NULL);
1532 1511 ASSERT(zio_alloc_arena != NULL);
1533 1512 }
1534 1513
1535 1514 #ifdef __sparc
1536 1515
1537 1516
1538 1517 static void *
1539 1518 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1540 1519 {
1541 1520 size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1542 1521 void *addr;
1543 1522
1544 1523 if (ppaquantum <= PAGESIZE)
1545 1524 return (segkmem_alloc(vmp, size, vmflag));
1546 1525
1547 1526 ASSERT((size & (ppaquantum - 1)) == 0);
1548 1527
1549 1528 addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1550 1529 if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1551 1530 segkmem_page_create, NULL) == NULL) {
1552 1531 vmem_xfree(vmp, addr, size);
1553 1532 addr = NULL;
1554 1533 }
1555 1534
1556 1535 return (addr);
1557 1536 }
1558 1537
1559 1538 static void
1560 1539 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1561 1540 {
1562 1541 size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1563 1542
1564 1543 ASSERT(addr != NULL);
1565 1544
1566 1545 if (ppaquantum <= PAGESIZE) {
1567 1546 segkmem_free(vmp, addr, size);
1568 1547 } else {
1569 1548 segkmem_free(NULL, addr, size);
1570 1549 vmem_xfree(vmp, addr, size);
1571 1550 }
1572 1551 }
1573 1552
1574 1553 void
1575 1554 segkmem_heap_lp_init()
1576 1555 {
1577 1556 segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1578 1557 size_t heap_lp_size = heap_lp_end - heap_lp_base;
1579 1558 size_t lpsize = segkmem_lpsize;
1580 1559 size_t ppaquantum;
1581 1560 void *addr;
1582 1561
1583 1562 if (segkmem_lpsize <= PAGESIZE) {
1584 1563 ASSERT(heap_lp_base == NULL);
1585 1564 ASSERT(heap_lp_end == NULL);
1586 1565 return;
1587 1566 }
1588 1567
1589 1568 ASSERT(segkmem_heaplp_quantum >= lpsize);
1590 1569 ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1591 1570 ASSERT(lpcb->lp_uselp == 0);
1592 1571 ASSERT(heap_lp_base != NULL);
1593 1572 ASSERT(heap_lp_end != NULL);
1594 1573 ASSERT(heap_lp_base < heap_lp_end);
1595 1574 ASSERT(heap_lp_arena == NULL);
1596 1575 ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1597 1576 ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1598 1577
1599 1578 /* create large page heap arena */
1600 1579 heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1601 1580 segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1602 1581
1603 1582 ASSERT(heap_lp_arena != NULL);
1604 1583
1605 1584 /* This arena caches memory already mapped by large pages */
1606 1585 kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1607 1586 segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1608 1587
1609 1588 ASSERT(kmem_lp_arena != NULL);
1610 1589
1611 1590 mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1612 1591 cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1613 1592
1614 1593 /*
1615 1594 * this arena is used for the array of page_t pointers necessary
1616 1595 * to call hat_mem_load_array
1617 1596 */
1618 1597 ppaquantum = btopr(lpsize) * sizeof (page_t *);
1619 1598 segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1620 1599 segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1621 1600 VM_SLEEP);
1622 1601
1623 1602 ASSERT(segkmem_ppa_arena != NULL);
1624 1603
1625 1604 /* prealloacate some memory for the lp kernel heap */
1626 1605 if (segkmem_kmemlp_min) {
1627 1606
1628 1607 ASSERT(P2PHASE(segkmem_kmemlp_min,
1629 1608 segkmem_heaplp_quantum) == 0);
1630 1609
1631 1610 if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1632 1611 segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1633 1612
1634 1613 addr = vmem_add(kmem_lp_arena, addr,
1635 1614 segkmem_kmemlp_min, VM_SLEEP);
1636 1615 ASSERT(addr != NULL);
1637 1616 }
1638 1617 }
1639 1618
1640 1619 lpcb->lp_uselp = 1;
1641 1620 }
1642 1621
1643 1622 #endif