/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*  Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T  */
/*    All Rights Reserved  */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable, variable-size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or be locked in memory.
 */
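
/*
 * Editorial usage sketch (not part of the original source): a typical
 * client allocates a pageable chunk from the global segkp segment and
 * frees it with segkp_release().  The KPD_* flags are defined in
 * <vm/seg_kp.h>; the 32K length is an arbitrary example value.
 *
 *    caddr_t va;
 *
 *    va = segkp_get(segkp, 32 * 1024,
 *        KPD_HASREDZONE | KPD_ZERO | KPD_LOCKED);
 *    if (va == NULL)
 *        return (ENOMEM);
 *    // ... use the 32K at va; the redzone page stays unmapped ...
 *    segkp_release(segkp, va);
 */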

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void segkp_dump(struct seg *seg);
static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
    uint_t prot);
static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***page, enum lock_type type, enum seg_rw rw);
static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
    struct segkp_data **tkpd, struct anon_map *amp);
static void segkp_release_internal(struct seg *seg,
    struct segkp_data *kpd, size_t len);
static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
    size_t len, struct segkp_data *kpd, uint_t flags);
static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
    size_t len, struct segkp_data *kpd, uint_t flags);
static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg, caddr_t addr);
static int segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long red_minavail = 5000;

/*
 * Will be set to 1 for 32-bit x86 systems only, in startup.c.
 */
int segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define RED_DEEP_THRESHOLD 2000

hrtime_t red_deep_hires;
kthread_t *red_deep_thread;

uint32_t red_nmapped;
uint32_t red_closest = UINT_MAX;
uint32_t red_ndoubles;

pgcnt_t anon_segkp_pages_locked;    /* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;      /* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
    .fault      = segkp_fault,
    .checkprot  = segkp_checkprot,
    .kluster    = segkp_kluster,
    .dump       = segkp_dump,
    .pagelock   = segkp_pagelock,
    .getmemid   = segkp_getmemid,
    .getpolicy  = segkp_getpolicy,
    .capable    = segkp_capable,
};


static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment-specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
    struct segkp_segdata *kpsd;
    size_t np;

    ASSERT(seg != NULL && seg->s_as == &kas);
    ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

    if (seg->s_size & PAGEOFFSET) {
        panic("Bad segkp size");
        /*NOTREACHED*/
    }

    kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

    /*
     * Allocate the virtual memory for segkp and initialize it
     */
    if (segkp_fromheap) {
        np = btop(kvseg.s_size);
        segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
        kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
            vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
    } else {
        segkp_bitmap = NULL;
        np = btop(seg->s_size);
        kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
            seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
            VM_SLEEP);
    }

    kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

    kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
        KM_SLEEP);
    seg->s_data = (void *)kpsd;
    seg->s_ops = &segkp_ops;
    segkpinit_mem_config(seg);
    return (0);
}
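
/*
 * Editorial note (an assumption, hedged): segkp_create() is not called
 * directly by clients; platform startup code attaches a segment to the
 * kernel address space and then invokes it, roughly as sketched below.
 * The ASSERTs above imply the required environment: the segment must
 * belong to &kas and a_lock must be held as writer.  segkp_base and
 * segkpsize are illustrative names only; the real per-platform startup
 * code differs in detail.
 *
 *    rw_enter(&kas.a_lock, RW_WRITER);
 *    if (seg_attach(&kas, segkp_base, segkpsize, segkp) < 0)
 *        panic("startup: cannot attach segkp");
 *    (void) segkp_create(segkp);
 *    rw_exit(&kas.a_lock);
 */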

/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
    int i;

    if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
        return ((void *)-1);

    mutex_enter(&segkp_lock);
    for (i = 0; i < SEGKP_MAX_CACHE; i++) {
        if (segkp_cache[i].kpf_inuse)
            continue;
        segkp_cache[i].kpf_inuse = 1;
        segkp_cache[i].kpf_max = maxsize;
        segkp_cache[i].kpf_flags = flags;
        segkp_cache[i].kpf_seg = seg;
        segkp_cache[i].kpf_len = len;
        mutex_exit(&segkp_lock);
        return ((void *)(uintptr_t)i);
    }
    mutex_exit(&segkp_lock);
    return ((void *)-1);
}
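
/*
 * Editorial usage sketch (hypothetical names): a subsystem that
 * repeatedly allocates same-sized, same-flags resources, such as kernel
 * thread stacks, can set up a freelist once and then draw from it with
 * segkp_cache_get() below.  Here at most 24 released resources are kept
 * cached; STKSIZE and stk_cookie are made up for the example.
 *
 *    void *stk_cookie;
 *
 *    stk_cookie = segkp_cache_init(segkp, 24, STKSIZE,
 *        KPD_HASREDZONE | KPD_LOCKED);
 *    ...
 *    caddr_t stk = segkp_cache_get(stk_cookie);
 */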

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
    struct segkp_data *kpd;
    struct seg *seg;
    int i;

    mutex_enter(&segkp_lock);
    for (i = 0; i < SEGKP_MAX_CACHE; i++) {
        if (!segkp_cache[i].kpf_inuse)
            continue;
        /*
         * Disconnect the freelist and process each element
         */
        kpd = segkp_cache[i].kpf_list;
        seg = segkp_cache[i].kpf_seg;
        segkp_cache[i].kpf_list = NULL;
        segkp_cache[i].kpf_count = 0;
        mutex_exit(&segkp_lock);

        while (kpd != NULL) {
            struct segkp_data *next;

            next = kpd->kp_next;
            segkp_release_internal(seg, kpd, kpd->kp_len);
            kpd = next;
        }
        mutex_enter(&segkp_lock);
    }
    mutex_exit(&segkp_lock);
}

/*
 * There are two entry points into segkp_get_internal: segkp_cache_get(),
 * which uses a cookie to access a pool of cached segkp resources, and
 * segkp_get(), which bypasses the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
    struct segkp_data *kpd = NULL;

    if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
        kpd->kp_cookie = -1;
        return (stom(kpd->kp_base, flags));
    }
    return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
    struct segkp_cache *freelist = NULL;
    struct segkp_data *kpd = NULL;
    int index = (int)(uintptr_t)cookie;
    struct seg *seg;
    size_t len;
    uint_t flags;

    if (index < 0 || index >= SEGKP_MAX_CACHE)
        return (NULL);
    freelist = &segkp_cache[index];

    mutex_enter(&segkp_lock);
    seg = freelist->kpf_seg;
    flags = freelist->kpf_flags;
    if (freelist->kpf_list != NULL) {
        kpd = freelist->kpf_list;
        freelist->kpf_list = kpd->kp_next;
        freelist->kpf_count--;
        mutex_exit(&segkp_lock);
        kpd->kp_next = NULL;
        segkp_insert(seg, kpd);
        return (stom(kpd->kp_base, flags));
    }
    len = freelist->kpf_len;
    mutex_exit(&segkp_lock);
    if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
        kpd->kp_cookie = index;
        return (stom(kpd->kp_base, flags));
    }
    return (NULL);
}

caddr_t
segkp_get_withanonmap(
    struct seg *seg,
    size_t len,
    uint_t flags,
    struct anon_map *amp)
{
    struct segkp_data *kpd = NULL;

    ASSERT(amp != NULL);
    flags |= KPD_HASAMP;
    if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
        kpd->kp_cookie = -1;
        return (stom(kpd->kp_base, flags));
    }
    return (NULL);
}
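
/*
 * Editorial note on the cookie lifecycle: kp_cookie records how a
 * resource was allocated so that segkp_release() can route it
 * correctly.  Resources from segkp_get() and segkp_get_withanonmap()
 * carry kp_cookie == -1 and are always destroyed on release; resources
 * from segkp_cache_get() carry their cache index and may simply be
 * refiled on that freelist:
 *
 *    caddr_t va = segkp_cache_get(stk_cookie);  // hypothetical cookie
 *    ...
 *    segkp_release(segkp, va);  // refiled if the freelist has room
 */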

/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client.  len must be page-aligned.  A
 * null value is returned if there are no more vm resources (e.g. pages,
 * swap).  The len and base recorded in the private data structure
 * include the redzone and the redzone length (if applicable).  If the
 * user requests a redzone, either the first or last page is left
 * unmapped, depending on whether stacks grow to low or high memory.
 *
 * The client may also specify a no-wait flag.  If that is set, then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
    struct seg *seg,
    size_t len,
    uint_t flags,
    struct segkp_data **tkpd,
    struct anon_map *amp)
{
    struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
    struct segkp_data *kpd;
    caddr_t vbase = NULL;   /* always first virtual, may not be mapped */
    pgcnt_t np = 0;         /* number of pages in the resource */
    pgcnt_t segkpindex;
    long i;
    caddr_t va;
    pgcnt_t pages = 0;
    ulong_t anon_idx = 0;
    int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
    caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

    if (len & PAGEOFFSET) {
        panic("segkp_get: len is not page-aligned");
        /*NOTREACHED*/
    }

    ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

    /* Only allow KPD_NO_ANON if we are going to lock it down */
    if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
        return (NULL);

    if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
        return (NULL);
    /*
     * Fix up the len to reflect the REDZONE if applicable
     */
    if (flags & KPD_HASREDZONE)
        len += PAGESIZE;
    np = btop(len);

    vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
    if (vbase == NULL) {
        kmem_free(kpd, sizeof (struct segkp_data));
        return (NULL);
    }

    /* If locking, reserve physical memory */
    if (flags & KPD_LOCKED) {
        pages = btop(SEGKP_MAPLEN(len, flags));
        if (page_resv(pages, kmflag) == 0) {
            vmem_free(SEGKP_VMEM(seg), vbase, len);
            kmem_free(kpd, sizeof (struct segkp_data));
            return (NULL);
        }
        if ((flags & KPD_NO_ANON) == 0)
            atomic_add_long(&anon_segkp_pages_locked, pages);
    }

    /*
     * Reserve sufficient swap space for this vm resource.  We'll
     * actually allocate it in the loop below, but reserving it
     * here allows us to back out more gracefully than if we
     * had an allocation failure in the body of the loop.
     *
     * Note that we don't need swap space for the red zone page.
     */
    if (amp != NULL) {
        /*
         * The swap reservation has been done, if required, and the
         * anon_hdr is separate.
         */
        anon_idx = 0;
        kpd->kp_anon_idx = anon_idx;
        kpd->kp_anon = amp->ahp;

        TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
            kpd, vbase, len, flags, 1);

    } else if ((flags & KPD_NO_ANON) == 0) {
        if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
            if (flags & KPD_LOCKED) {
                atomic_add_long(&anon_segkp_pages_locked,
                    -pages);
                page_unresv(pages);
            }
            vmem_free(SEGKP_VMEM(seg), vbase, len);
            kmem_free(kpd, sizeof (struct segkp_data));
            return (NULL);
        }
        atomic_add_long(&anon_segkp_pages_resv,
            btop(SEGKP_MAPLEN(len, flags)));
        anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
        kpd->kp_anon_idx = anon_idx;
        kpd->kp_anon = kpsd->kpsd_anon;

        TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
            kpd, vbase, len, flags, 1);
    } else {
        kpd->kp_anon = NULL;
        kpd->kp_anon_idx = 0;
    }

    /*
     * Allocate page and anon resources for the virtual address range
     * except the redzone
     */
    if (segkp_fromheap)
        segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
    for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
        page_t *pl[2];
        struct vnode *vp;
        anoff_t off;
        int err;
        page_t *pp = NULL;

        /*
         * Mark this page to be a segkp page in the bitmap.
         */
        if (segkp_fromheap) {
            BT_ATOMIC_SET(segkp_bitmap, segkpindex);
            segkpindex++;
        }

        /*
         * If this page is the red zone page, we don't need swap
         * space for it.  Note that we skip over the code that
         * establishes MMU mappings, so that the page remains
         * invalid.
         */
        if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
            continue;

        if (kpd->kp_anon != NULL) {
            struct anon *ap;

            ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
                == NULL);
            /*
             * Determine the "vp" and "off" of the anon slot.
             */
            ap = anon_alloc(NULL, 0);
            if (amp != NULL)
                ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
            (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
                ap, ANON_SLEEP);
            if (amp != NULL)
                ANON_LOCK_EXIT(&amp->a_rwlock);
            swap_xlate(ap, &vp, &off);

            /*
             * Create a page with the specified identity.  The
             * page is returned with the "shared" lock held.
             */
            err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
                NULL, pl, PAGESIZE, seg, va, S_CREATE,
                kcred, NULL);
            if (err) {
                /*
                 * XXX - This should not fail.
                 */
                panic("segkp_get: no pages");
                /*NOTREACHED*/
            }
            pp = pl[0];
        } else {
            ASSERT(page_exists(&kvp,
                (u_offset_t)(uintptr_t)va) == NULL);

            if ((pp = page_create_va(&kvp,
                (u_offset_t)(uintptr_t)va, PAGESIZE,
                (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
                PG_NORELOC, seg, va)) == NULL) {
                /*
                 * Legitimize resource; then destroy it.
                 * Easier than trying to unwind here.
                 */
                kpd->kp_flags = flags;
                kpd->kp_base = vbase;
                kpd->kp_len = len;
                segkp_release_internal(seg, kpd, va - vbase);
                return (NULL);
            }
            page_io_unlock(pp);
        }

        if (flags & KPD_ZERO)
            pagezero(pp, 0, PAGESIZE);

        /*
         * Load and lock an MMU translation for the page.
         */
        hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
            ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

        /*
         * Now, release lock on the page.
         */
        if (flags & KPD_LOCKED) {
            /*
             * Indicate to page_retire framework that this
             * page can only be retired when it is freed.
             */
            PP_SETRAF(pp);
            page_downgrade(pp);
        } else
            page_unlock(pp);
    }

    kpd->kp_flags = flags;
    kpd->kp_base = vbase;
    kpd->kp_len = len;
    segkp_insert(seg, kpd);
    *tkpd = kpd;
    return (stom(kpd->kp_base, flags));
}
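
/*
 * Editorial worked example (assumes 4K pages and downward stack
 * growth): segkp_get(segkp, 16 * 1024, KPD_HASREDZONE | ...) results in
 *
 *    kp_len = 16K + PAGESIZE = 20K, np = 5 pages;
 *    the page at index KPD_REDZONE(kpd) is skipped by the loop above
 *        and so is never mapped;
 *    the caller receives stom(kp_base, flags), the first mapped
 *        address, and SEGKP_MAPLEN(kp_len, flags) = 16K.
 *
 * A reference into the unmapped page faults, and segkp_fault() panics
 * with "accessing redzone" instead of silently corrupting the
 * neighboring resource.
 */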

/*
 * Release the resource to cache if the pool (designated by the cookie)
 * has less than the maximum allowable.  If inserted in cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
    struct segkp_cache *freelist;
    struct segkp_data *kpd = NULL;

    if ((kpd = segkp_find(seg, vaddr)) == NULL) {
        panic("segkp_release: null kpd");
        /*NOTREACHED*/
    }

    if (kpd->kp_cookie != -1) {
        freelist = &segkp_cache[kpd->kp_cookie];
        mutex_enter(&segkp_lock);
        if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
            segkp_delete(seg, kpd);
            kpd->kp_next = freelist->kpf_list;
            freelist->kpf_list = kpd;
            freelist->kpf_count++;
            mutex_exit(&segkp_lock);
            return;
        } else {
            mutex_exit(&segkp_lock);
            kpd->kp_cookie = -1;
        }
    }
    segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource.  segkp_unlock gets called with the start of
 * the mapped portion of the resource; the length is the size of the
 * mapped portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
    caddr_t va;
    long i;
    long redzone;
    size_t np;
    page_t *pp;
    struct vnode *vp;
    anoff_t off;
    struct anon *ap;
    pgcnt_t segkpindex;

    ASSERT(kpd != NULL);
    ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
    np = btop(len);

    /* Remove from active hash list */
    if (kpd->kp_cookie == -1) {
        mutex_enter(&segkp_lock);
        segkp_delete(seg, kpd);
        mutex_exit(&segkp_lock);
    }

    /*
     * Precompute redzone page index.
     */
    redzone = -1;
    if (kpd->kp_flags & KPD_HASREDZONE)
        redzone = KPD_REDZONE(kpd);


    va = kpd->kp_base;

    hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
        ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
    /*
     * Free up those anon resources that are quiescent.
     */
    if (segkp_fromheap)
        segkpindex = btop((uintptr_t)(va - kvseg.s_base));
    for (i = 0; i < np; i++, va += PAGESIZE) {

        /*
         * Clear the bit for this page from the bitmap.
         */
        if (segkp_fromheap) {
            BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
            segkpindex++;
        }

        if (i == redzone)
            continue;
        if (kpd->kp_anon) {
            /*
             * Free up anon resources and destroy the
             * associated pages.
             *
             * Release the lock if there is one.  Have to get the
             * page to do this, unfortunately.
             */
            if (kpd->kp_flags & KPD_LOCKED) {
                ap = anon_get_ptr(kpd->kp_anon,
                    kpd->kp_anon_idx + i);
                swap_xlate(ap, &vp, &off);
                /* Find the shared-locked page. */
                pp = page_find(vp, (u_offset_t)off);
                if (pp == NULL) {
                    panic("segkp_release: "
                        "kp_anon: no page to unlock ");
                    /*NOTREACHED*/
                }
                if (PP_ISRAF(pp))
                    PP_CLRRAF(pp);

                page_unlock(pp);
            }
            if ((kpd->kp_flags & KPD_HASAMP) == 0) {
                anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
                    PAGESIZE);
                anon_unresv_zone(PAGESIZE, NULL);
                atomic_dec_ulong(&anon_segkp_pages_resv);
            }
            TRACE_5(TR_FAC_VM,
                TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
                kpd, va, PAGESIZE, 0, 0);
        } else {
            if (kpd->kp_flags & KPD_LOCKED) {
                pp = page_find(&kvp,
                    (u_offset_t)(uintptr_t)va);
                if (pp == NULL) {
                    panic("segkp_release: "
                        "no page to unlock");
                    /*NOTREACHED*/
                }
                if (PP_ISRAF(pp))
                    PP_CLRRAF(pp);
                /*
                 * We should just upgrade the lock here
                 * but there is no upgrade that waits.
                 */
                page_unlock(pp);
            }
            pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
                SE_EXCL);
            if (pp != NULL)
                page_destroy(pp, 0);
        }
    }

    /* If locked, release physical memory reservation */
    if (kpd->kp_flags & KPD_LOCKED) {
        pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
        if ((kpd->kp_flags & KPD_NO_ANON) == 0)
            atomic_add_long(&anon_segkp_pages_locked, -pages);
        page_unresv(pages);
    }

    vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
    kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when it is safe to sleep on page_create_va().
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
    uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
    caddr_t stkbase;
#endif

    /*
     * Optimize for the common case where we simply return.
     */
    if ((curthread->t_red_pp == NULL) &&
        (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
        return (0);

#if defined(_LP64)
    /*
     * XXX  We probably need something better than this.
     */
    panic("kernel stack overflow");
    /*NOTREACHED*/
#else /* _LP64 */
    if (curthread->t_red_pp == NULL) {
        page_t *red_pp;
        struct seg kseg;

        caddr_t red_va = (caddr_t)
            (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
            PAGESIZE);

        ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
            NULL);

        /*
         * Allocate the physical page for the redzone.
         */
        /*
         * No PG_NORELOC here to avoid waits.  Unlikely to get
         * a relocate happening in the short time the page exists
         * and it will be OK anyway.
         */

        kseg.s_as = &kas;
        red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
            PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
        ASSERT(red_pp != NULL);

        /*
         * So we now have a page to jam into the redzone...
         */
        page_io_unlock(red_pp);

        hat_memload(kas.a_hat, red_va, red_pp,
            (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
        page_downgrade(red_pp);

        /*
         * The page is left SE_SHARED locked so we can hold on to
         * the page_t pointer.
         */
        curthread->t_red_pp = red_pp;

        atomic_inc_32(&red_nmapped);
        while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
            (void) atomic_cas_32(&red_closest, red_closest,
                (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
        }
        return (1);
    }

    stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
        (uintptr_t)PAGEMASK) - PAGESIZE);

    atomic_inc_32(&red_ndoubles);

    if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
        /*
         * Oh boy.  We're already deep within the mapped-in
         * redzone page, and the caller is trying to prepare
         * for a deep stack run.  We're running without a
         * redzone right now: if the caller plows off the
         * end of the stack, it'll plow another thread or
         * LWP structure.  That situation could result in
         * a very hard-to-debug panic, so, in the spirit of
         * recording the name of one's killer in one's own
         * blood, we're going to record hrestime and the calling
         * thread.
         */
        red_deep_hires = hrestime.tv_nsec;
        red_deep_thread = curthread;
    }

    /*
     * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
     */
    ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
    return (0);
#endif /* _LP64 */
}

void
segkp_unmap_red(void)
{
    page_t *pp;
    caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
        (uintptr_t)PAGEMASK) - PAGESIZE);

    ASSERT(curthread->t_red_pp != NULL);

    /*
     * Because we locked the mapping down, we can't simply rely
     * on page_destroy() to clean everything up; we need to call
     * hat_unload() to explicitly unlock the mapping resources.
     */
    hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

    pp = curthread->t_red_pp;

    ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

    /*
     * Need to upgrade the SE_SHARED lock to SE_EXCL.
     */
    if (!page_tryupgrade(pp)) {
        /*
         * As there is no wait for upgrade, release the
         * SE_SHARED lock and wait for SE_EXCL.
         */
        page_unlock(pp);
        pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
        /* pp may be NULL here, hence the test below */
    }

    /*
     * Destroy the page, with dontfree set to zero (i.e. free it).
     */
    if (pp != NULL)
        page_destroy(pp, 0);
    curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif
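
/*
 * Editorial usage sketch: per the block comment above segkp_map_red(),
 * the trap handler is the expected caller.  The assumed shape of the
 * pairing (not the actual pagefault() code) is:
 *
 *    int mapped_red = segkp_map_red();
 *    // ... handle the fault, possibly consuming lots of stack ...
 *    if (mapped_red)
 *        segkp_unmap_red();
 *
 * With red_minavail = 5000 and an assumed 4K page size, the redzone is
 * mapped in once less than roughly a page and a quarter of stack
 * remains; RED_DEEP_THRESHOLD (2000 bytes) then marks the depth at
 * which a repeat call is recorded as dangerously deep.
 */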

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
    struct hat *hat,
    struct seg *seg,
    caddr_t vaddr,
    size_t len,
    enum fault_type type,
    enum seg_rw rw)
{
    struct segkp_data *kpd = NULL;
    int err;

    ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

    /*
     * Sanity checks.
     */
    if (type == F_PROT) {
        panic("segkp_fault: unexpected F_PROT fault");
        /*NOTREACHED*/
    }

    if ((kpd = segkp_find(seg, vaddr)) == NULL)
        return (FC_NOMAP);

    mutex_enter(&kpd->kp_lock);

    if (type == F_SOFTLOCK) {
        ASSERT(!(kpd->kp_flags & KPD_LOCKED));
        /*
         * The F_SOFTLOCK case has more stringent
         * range requirements: the given range must exactly coincide
         * with the resource's mapped portion.  Note that references
         * to the redzone are handled, since vaddr would not equal
         * the base in that case.
         */
        if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
            len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
            mutex_exit(&kpd->kp_lock);
            return (FC_MAKE_ERR(EFAULT));
        }

        if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
            mutex_exit(&kpd->kp_lock);
            return (FC_MAKE_ERR(err));
        }
        kpd->kp_flags |= KPD_LOCKED;
        mutex_exit(&kpd->kp_lock);
        return (0);
    }

    if (type == F_INVAL) {
        ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

        /*
         * Check if we touched the redzone.  Somewhat optimistic
         * here if we are touching the redzone of our own stack
         * since we wouldn't have a stack to get this far...
         */
        if ((kpd->kp_flags & KPD_HASREDZONE) &&
            btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
            panic("segkp_fault: accessing redzone");

        /*
         * This fault may occur while the page is being F_SOFTLOCK'ed.
         * Return since a 2nd segkp_load is unnecessary and also would
         * result in the page being locked twice and eventually
         * hang the thread_reaper thread.
         */
        if (kpd->kp_flags & KPD_LOCKED) {
            mutex_exit(&kpd->kp_lock);
            return (0);
        }

        err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
        mutex_exit(&kpd->kp_lock);
        return (err ? FC_MAKE_ERR(err) : 0);
    }

    if (type == F_SOFTUNLOCK) {
        uint_t flags;

        /*
         * Make sure the addr is LOCKED and it has anon backing
         * before unlocking
         */
        if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
            panic("segkp_fault: bad unlock");
            /*NOTREACHED*/
        }

        if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
            len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
            panic("segkp_fault: bad range");
            /*NOTREACHED*/
        }

        if (rw == S_WRITE)
            flags = kpd->kp_flags | KPD_WRITEDIRTY;
        else
            flags = kpd->kp_flags;
        err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
        kpd->kp_flags &= ~KPD_LOCKED;
        mutex_exit(&kpd->kp_lock);
        return (err ? FC_MAKE_ERR(err) : 0);
    }
    mutex_exit(&kpd->kp_lock);
    panic("segkp_fault: bogus fault type: %d\n", type);
    /*NOTREACHED*/
}
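
/*
 * Editorial usage sketch: the F_SOFTLOCK/F_SOFTUNLOCK checks above
 * define a strict contract; a caller pinning a resource's pages must
 * name the mapped portion exactly (kpd fields shown for illustration):
 *
 *    caddr_t mbase = stom(kpd->kp_base, kpd->kp_flags);
 *    size_t mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
 *
 *    (void) segkp_fault(kas.a_hat, segkp, mbase, mlen,
 *        F_SOFTLOCK, S_WRITE);
 *    ...
 *    (void) segkp_fault(kas.a_hat, segkp, mbase, mlen,
 *        F_SOFTUNLOCK, S_WRITE);
 *
 * Any other range returns FC_MAKE_ERR(EFAULT) for F_SOFTLOCK and
 * panics for F_SOFTUNLOCK.
 */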

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
    struct segkp_data *kpd = NULL;
    caddr_t mbase;
    size_t mlen;

    if ((kpd = segkp_find(seg, vaddr)) == NULL)
        return (EACCES);

    mutex_enter(&kpd->kp_lock);
    mbase = stom(kpd->kp_base, kpd->kp_flags);
    mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
    if (len > mlen || vaddr < mbase ||
        ((vaddr + len) > (mbase + mlen))) {
        mutex_exit(&kpd->kp_lock);
        return (EACCES);
    }
    mutex_exit(&kpd->kp_lock);
    return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
    return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
    struct hat *hat,
    struct seg *seg,
    caddr_t vaddr,
    size_t len,
    struct segkp_data *kpd,
    uint_t flags)
{
    caddr_t va;
    caddr_t vlim;
    ulong_t i;
    uint_t lock;

    ASSERT(MUTEX_HELD(&kpd->kp_lock));

    len = P2ROUNDUP(len, PAGESIZE);

    /* If locking, reserve physical memory */
    if (flags & KPD_LOCKED) {
        pgcnt_t pages = btop(len);
        if ((kpd->kp_flags & KPD_NO_ANON) == 0)
            atomic_add_long(&anon_segkp_pages_locked, pages);
        (void) page_resv(pages, KM_SLEEP);
    }

    /*
     * Loop through the pages in the given range.
     */
    va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
    vaddr = va;
    vlim = va + len;
    lock = flags & KPD_LOCKED;
    i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
    for (; va < vlim; va += PAGESIZE, i++) {
        page_t *pl[2];  /* second element NULL terminator */
        struct vnode *vp;
        anoff_t off;
        int err;
        struct anon *ap;

        /*
         * Summon the page.  If it's not resident, arrange
         * for synchronous i/o to pull it in.
         */
        ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
        swap_xlate(ap, &vp, &off);

        /*
         * The returned page list will have exactly one entry,
         * which is returned to us already kept.
         */
        err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
            pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

        if (err) {
            /*
             * Back out of what we've done so far.
             */
            (void) segkp_unlock(hat, seg, vaddr,
                (va - vaddr), kpd, flags);
            return (err);
        }

        /*
         * Load an MMU translation for the page.
         */
        hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
            lock ? HAT_LOAD_LOCK : HAT_LOAD);

        if (!lock) {
            /*
             * Now, release "shared" lock on the page.
             */
            page_unlock(pl[0]);
        }
    }
    return (0);
}

/*
 * At the very least, unload the mmu translations and unlock the range
 * if locked.  Can be called with the KPD_WRITEDIRTY flag, which
 * specifies that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
    struct hat *hat,
    struct seg *seg,
    caddr_t vaddr,
    size_t len,
    struct segkp_data *kpd,
    uint_t flags)
{
    caddr_t va;
    caddr_t vlim;
    ulong_t i;
    struct page *pp;
    struct vnode *vp;
    anoff_t off;
    struct anon *ap;

#ifdef lint
    seg = seg;
#endif /* lint */

    ASSERT(MUTEX_HELD(&kpd->kp_lock));

    /*
     * Loop through the pages in the given range.  It is assumed
     * segkp_unlock is called with a page-aligned base.
     */
    va = vaddr;
    vlim = va + len;
    i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
    hat_unload(hat, va, len,
        ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
    for (; va < vlim; va += PAGESIZE, i++) {
        /*
         * Find the page associated with this part of the
         * slot, tracking it down through its associated swap
         * space.
         */
        ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
        swap_xlate(ap, &vp, &off);

        if (flags & KPD_LOCKED) {
            if ((pp = page_find(vp, off)) == NULL) {
                if (flags & KPD_LOCKED) {
                    panic("segkp_softunlock: missing page");
                    /*NOTREACHED*/
                }
            }
        } else {
            /*
             * Nothing to do if the slot is not locked and the
             * page doesn't exist.
             */
            if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
                continue;
        }

        /*
         * If the page doesn't have any translations, is
         * dirty and not being shared, then push it out
         * asynchronously and avoid waiting for the
         * pageout daemon to do it for us.
         *
         * XXX - Do we really need to get the "exclusive"
         * lock via an upgrade?
         */
        if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
            hat_ismod(pp) && page_tryupgrade(pp)) {
            /*
             * Hold the vnode before releasing the page lock to
             * prevent it from being freed and re-used by some
             * other thread.
             */
            VN_HOLD(vp);
            page_unlock(pp);

            /*
             * Want most powerful credentials we can get so
             * use kcred.
             */
            (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
                B_ASYNC | B_FREE, kcred, NULL);
            VN_RELE(vp);
        } else {
            page_unlock(pp);
        }
    }

    /* If unlocking, release physical memory */
    if (flags & KPD_LOCKED) {
        pgcnt_t pages = btopr(len);
        if ((kpd->kp_flags & KPD_NO_ANON) == 0)
            atomic_add_long(&anon_segkp_pages_locked, -pages);
        page_unresv(pages);
    }
    return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
    struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
    int index;

    /*
     * Insert the kpd based on the address that will be returned
     * via segkp_release.
     */
    index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
    mutex_enter(&segkp_lock);
    kpd->kp_next = kpsd->kpsd_hash[index];
    kpsd->kpsd_hash[index] = kpd;
    mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
    struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
    struct segkp_data **kpp;
    int index;

    ASSERT(MUTEX_HELD(&segkp_lock));

    index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
    for (kpp = &kpsd->kpsd_hash[index];
        *kpp != NULL; kpp = &((*kpp)->kp_next)) {
        if (*kpp == kpd) {
            *kpp = kpd->kp_next;
            return;
        }
    }
    panic("segkp_delete: unable to find element to delete");
    /*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true, in which case we have to (potentially) scan
 * the whole table looking for it.  This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
    struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
    struct segkp_data *kpd;
    int i;
    int stop;

    i = stop = SEGKP_HASH(vaddr);
    mutex_enter(&segkp_lock);
    do {
        for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
            kpd = kpd->kp_next) {
            if (vaddr >= kpd->kp_base &&
                vaddr < kpd->kp_base + kpd->kp_len) {
                mutex_exit(&segkp_lock);
                return (kpd);
            }
        }
        if (--i < 0)
            i = SEGKP_HASHSZ - 1;   /* Wrap */
    } while (i != stop);
    mutex_exit(&segkp_lock);
    return (NULL);  /* Not found */
}
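
/*
 * Editorial note with a worked example: the do/while above scans
 * buckets downward with wraparound, so a lookup starting at bucket h
 * visits h, h-1, ..., 0, SEGKP_HASHSZ-1, ..., h+1 before giving up.
 * The common case, where the caller passes the same address that
 * segkp_insert() hashed (stom(kp_base, kp_flags)), is satisfied by the
 * first bucket; the full scan happens only for interior addresses,
 * e.g. a fault in the middle of a resource.
 */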

/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
    struct segkp_data *kpd;

    if ((kpd = segkp_find(segkp, v)) != NULL)
        return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
    else
        return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
    int i;
    struct segkp_data *kpd;
    struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

    for (i = 0; i < SEGKP_HASHSZ; i++) {
        for (kpd = kpsd->kpsd_hash[i];
            kpd != NULL; kpd = kpd->kp_next) {
            pfn_t pfn;
            caddr_t addr;
            caddr_t eaddr;

            addr = kpd->kp_base;
            eaddr = addr + kpd->kp_len;
            while (addr < eaddr) {
                ASSERT(seg->s_as == &kas);
                pfn = hat_getpfnum(seg->s_as->a_hat, addr);
                if (pfn != PFN_INVALID)
                    dump_addpage(seg->s_as, addr, pfn);
                addr += PAGESIZE;
                dump_timeleft = dump_timeout;
            }
        }
    }
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
    return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
    return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
    return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
    return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
    atomic_inc_32(&segkp_indel);
    segkp_cache_free();
    return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
    atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
    KPHYSM_SETUP_VECTOR_VERSION,
    segkp_mem_config_post_add,
    segkp_mem_config_pre_del,
    segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
    int ret;

    ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
    ASSERT(ret == 0);
}