/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or locked in memory.
 */
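
/*
 * Illustrative use of the interface (a sketch, not part of this file): a
 * client allocates a pageable, redzone-protected, locked-down resource from
 * the global segkp segment and later releases it.  The flag names are from
 * <vm/seg_kp.h>; the size is arbitrary.
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, ptob(4),
 *	    KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED);
 *	if (va == NULL)
 *		return (ENOMEM);
 *	... use the ptob(4) bytes mapped at va ...
 *	segkp_release(segkp, va);
 *
 * The returned address refers to the mapped portion only; the redzone page,
 * if requested, is accounted for internally.
 */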

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct	segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segkp_getpolicy(struct seg *seg,
			caddr_t addr);
static int	segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct	seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.getmemid	= segkp_getmemid,
	.getpolicy	= segkp_getpolicy,
	.capable	= segkp_capable,
	.inherit	= seg_inherit_notsup,
};


static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}


/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}

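/*
 * Sketch of how a caching client uses the two routines above (illustrative
 * only; the cache depth, length and cookie name are hypothetical):
 *
 *	void *stk_cookie;
 *
 *	stk_cookie = segkp_cache_init(segkp, 128, DEFAULTSTKSZ,
 *	    KPD_HASREDZONE | KPD_NO_ANON | KPD_LOCKED);
 *	...
 *	stk = segkp_cache_get(stk_cookie);	(falls back to a fresh
 *						 allocation when the
 *						 freelist is empty)
 *	...
 *	segkp_release(segkp, stk);		(returns it to the cache)
 */
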
/*
 * There are 2 entries into segkp_get_internal. The first includes a cookie
 * used to access a pool of cached segkp resources. The second does not
 * use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

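/*
 * Note on the stom()/SEGKP_MAPLEN() macros used above and below.  They are
 * defined in <vm/seg_kp.h>; this summary is descriptive, see the header for
 * the exact definitions.  stom() ("start to mapped") converts the base of a
 * resource to the start of its mapped portion, and SEGKP_MAPLEN() gives the
 * length of that portion.  With KPD_HASREDZONE set and downward stack
 * growth, the redzone is the lowest page of the resource, so the mapped
 * portion starts one page above kp_base and is one page shorter than kp_len.
 */
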
/*
 * This does the real work of segkp allocation.
 * Return to client base addr. len must be page-aligned. A null value is
 * returned if there are no more vm resources (e.g. pages, swap). The len
 * and base recorded in the private data structure include the redzone
 * and the redzone length (if applicable). If the user requests a redzone,
 * either the first or last page is left unmapped depending on whether
 * stacks grow to low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has less than the maximum allowable.  If inserted in the cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource.  The length is the size of the mapped
 * portion
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when it is safe to sleep on page_create_va().
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else /* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now: if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif /* _LP64 */
}

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up; we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif

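/*
 * Illustrative caller pattern for the two routines above (a sketch of the
 * sort of thing pagefault() does; the local variable name is hypothetical):
 *
 *	int mapped_red;
 *
 *	mapped_red = segkp_map_red();
 *	... handle the fault, possibly running deep on the stack ...
 *	if (mapped_red)
 *		segkp_unmap_red();
 */
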
/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat	*hat,
	struct seg	*seg,
	caddr_t		vaddr,
	size_t		len,
	enum fault_type	type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int			err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion.  Note reference to
		 * redzone is handled since vaddr would not equal base
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone. Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2]; /* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least, unload the mmu-translations and unlock the range if
 * locked.  Can be called with the flag KPD_WRITEDIRTY, which specifies that
 * any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range. It is assumed
	 * segkp_unlock is called with page aligned base
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}

/*
 * returns size of swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (NULL);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t	*
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}