/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */
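
/*
 * Illustrative usage (a minimal sketch, not an interface defined in this
 * file beyond what <vm/seg_kp.h> declares; "sz" below is a hypothetical
 * size): a client allocates a page-aligned, pageable resource from the
 * global segkp segment, optionally zeroed, redzone-protected and locked,
 * and releases it when finished.
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, P2ROUNDUP(sz, PAGESIZE),
 *	    KPD_ZERO | KPD_HASREDZONE | KPD_LOCKED);
 *	if (va == NULL)
 *		return (ENOMEM);	(no pages or swap available)
 *	...
 *	segkp_release(segkp, va);
 */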

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious;  if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.getmemid	= segkp_getmemid,
};


static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}


/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}
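
/*
 * Illustrative sketch of the cache interface (the caller, "NCACHED" and
 * "rsize" below are hypothetical): a subsystem that repeatedly allocates
 * identically-sized resources can set up a freelist once and then draw
 * from it, falling back to a fresh allocation when the freelist is empty.
 * segkp_cache_init() returns (void *)-1 if no cache slot is available.
 *
 *	void	*cookie;
 *	caddr_t	va;
 *
 *	cookie = segkp_cache_init(segkp, NCACHED, rsize,
 *	    KPD_HASREDZONE | KPD_LOCKED);
 *	...
 *	va = segkp_cache_get(cookie);
 *	...
 *	segkp_release(segkp, va);	(returned to the freelist if not full)
 */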

/*
 * There are two entry points into segkp_get_internal. The first includes a
 * cookie used to access a pool of cached segkp resources. The second does
 * not use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}
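
/*
 * Worked example of the redzone accounting done by segkp_get_internal()
 * below (a sketch assuming the stom()/SEGKP_MAPLEN()/KPD_REDZONE()
 * definitions in <vm/seg_kp.h> and 4K pages): a KPD_HASREDZONE request
 * for 16K is grown to kp_len = 20K, of which one page is deliberately
 * left unmapped.  With downward stack growth the redzone is the lowest
 * page, so the client-visible range is
 *
 *	stom(kp_base, flags)		== kp_base + PAGESIZE
 *	SEGKP_MAPLEN(kp_len, flags)	== kp_len - PAGESIZE == 16K
 *
 * and any reference into [kp_base, kp_base + PAGESIZE) faults, since no
 * MMU mapping is ever established for the redzone page.
 */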

/*
 * This does the real work of segkp allocation.
 * Return to client base addr. len must be page-aligned. A null value is
 * returned if there are no more vm resources (e.g. pages, swap). The len
 * and base recorded in the private data structure include the redzone
 * and the redzone length (if applicable). If the user requests a redzone
 * either the first or last page is left unmapped depending on whether
 * stacks grow toward low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has less than the maximum allowable. If inserted in the cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource.
 * The length is the size of the mapped portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp,
				    (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when it is safe to sleep on page_create_va().
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else	/* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical page for the red zone.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif	/* _LP64 */
}
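
/*
 * Illustrative call pattern (the caller shown is hypothetical): the caller
 * is responsible for remembering whether the redzone was mapped and for
 * tearing it down again once the deep stack excursion is over.
 *
 *	int mapped_red = segkp_map_red();
 *	...
 *	if (mapped_red)
 *		segkp_unmap_red();
 */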

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int			err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion. Note that a reference
		 * to the redzone is handled since vaddr would not equal base.
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
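
/*
 * Illustrative sketch of the F_SOFTLOCK contract checked above (the
 * caller shown is hypothetical): the faulting range must coincide
 * exactly with the mapped portion of the resource, which is what
 * swapsize() reports for a mapped address, e.g.
 *
 *	(void) segkp_fault(segkp->s_as->a_hat, segkp, vaddr,
 *	    swapsize(vaddr), F_SOFTLOCK, S_WRITE);
 *
 * where vaddr is the address originally handed out by segkp_get().
 */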

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least unload the mmu-translations and unlock the range if
 * locked. Can be called with the flag value KPD_WRITEDIRTY, which specifies
 * that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range. It is assumed
	 * segkp_unlock is called with a page-aligned base.
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}

/*
 * returns size of swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}