/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable variable size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or be locked in memory.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void	segkp_dump(struct seg *seg);
static int	segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int	segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static void	segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void	segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t	segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
			struct segkp_data **tkpd, struct anon_map *amp);
static void	segkp_release_internal(struct seg *seg,
			struct segkp_data *kpd, size_t len);
static int	segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static int	segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
			size_t len, struct segkp_data *kpd, uint_t flags);
static struct	segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int	segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static int	segkp_capable(struct seg *seg, segcapability_t capability);
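/*
 * Illustrative sketch (not part of this file's interface): a typical
 * client allocates a pageable, zeroed resource with a redzone from the
 * global segkp segment and later releases it through the mapped address
 * that segkp_get() returned.  The size and flag combination here are
 * hypothetical.
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, 4 * PAGESIZE, KPD_HASREDZONE | KPD_ZERO);
 *	if (va == NULL)
 *		return (ENOMEM);
 *	...use the mapped range starting at va...
 *	segkp_release(segkp, va);
 */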

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t	segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long		red_minavail = 5000;

/*
 * will be set to 1 for 32 bit x86 systems only, in startup.c
 */
int	segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious;  if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;

uint32_t	red_nmapped;
uint32_t	red_closest = UINT_MAX;
uint32_t	red_ndoubles;

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

static struct seg_ops segkp_ops = {
	.fault		= segkp_fault,
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.getmemid	= segkp_getmemid,
	.capable	= segkp_capable,
};


static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;
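/*
 * Illustrative sketch (hypothetical, simplified from the startup path):
 * segkp_create() below expects a segment attached to the kernel address
 * space with the address-space lock write-held, roughly:
 *
 *	rw_enter(&kas.a_lock, RW_WRITER);
 *	if (seg_attach(&kas, base, size, segkp) < 0)
 *		cmn_err(CE_PANIC, "cannot attach segkp");
 *	if (segkp_create(segkp) != 0)
 *		cmn_err(CE_PANIC, "segkp_create returned error");
 *	rw_exit(&kas.a_lock);
 */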
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}


/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
	int i;

	if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
		return ((void *)-1);

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (segkp_cache[i].kpf_inuse)
			continue;
		segkp_cache[i].kpf_inuse = 1;
		segkp_cache[i].kpf_max = maxsize;
		segkp_cache[i].kpf_flags = flags;
		segkp_cache[i].kpf_seg = seg;
		segkp_cache[i].kpf_len = len;
		mutex_exit(&segkp_lock);
		return ((void *)(uintptr_t)i);
	}
	mutex_exit(&segkp_lock);
	return ((void *)-1);
}

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
	struct segkp_data *kpd;
	struct seg *seg;
	int i;

	mutex_enter(&segkp_lock);
	for (i = 0; i < SEGKP_MAX_CACHE; i++) {
		if (!segkp_cache[i].kpf_inuse)
			continue;
		/*
		 * Disconnect the freelist and process each element
		 */
		kpd = segkp_cache[i].kpf_list;
		seg = segkp_cache[i].kpf_seg;
		segkp_cache[i].kpf_list = NULL;
		segkp_cache[i].kpf_count = 0;
		mutex_exit(&segkp_lock);

		while (kpd != NULL) {
			struct segkp_data *next;

			next = kpd->kp_next;
			segkp_release_internal(seg, kpd, kpd->kp_len);
			kpd = next;
		}
		mutex_enter(&segkp_lock);
	}
	mutex_exit(&segkp_lock);
}
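/*
 * Illustrative sketch (hypothetical sizes and flags): a subsystem that
 * allocates many identical locked resources can set up a cache once and
 * then cycle resources through it.  segkp_release() notices the cookie
 * recorded in the resource and returns it to the freelist rather than
 * tearing it down, so the release call below is cheap.
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 16, 2 * PAGESIZE, KPD_LOCKED);
 *	va = segkp_cache_get(cookie);
 *	...use the resource...
 *	segkp_release(segkp, va);
 */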
/*
 * There are 2 entries into segkp_get_internal. The first includes a cookie
 * used to access a pool of cached segkp resources. The second does not
 * use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
	struct segkp_data *kpd = NULL;

	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
	struct segkp_cache *freelist = NULL;
	struct segkp_data *kpd = NULL;
	int index = (int)(uintptr_t)cookie;
	struct seg *seg;
	size_t len;
	uint_t flags;

	if (index < 0 || index >= SEGKP_MAX_CACHE)
		return (NULL);
	freelist = &segkp_cache[index];

	mutex_enter(&segkp_lock);
	seg = freelist->kpf_seg;
	flags = freelist->kpf_flags;
	if (freelist->kpf_list != NULL) {
		kpd = freelist->kpf_list;
		freelist->kpf_list = kpd->kp_next;
		freelist->kpf_count--;
		mutex_exit(&segkp_lock);
		kpd->kp_next = NULL;
		segkp_insert(seg, kpd);
		return (stom(kpd->kp_base, flags));
	}
	len = freelist->kpf_len;
	mutex_exit(&segkp_lock);
	if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
		kpd->kp_cookie = index;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

caddr_t
segkp_get_withanonmap(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct anon_map *amp)
{
	struct segkp_data *kpd = NULL;

	ASSERT(amp != NULL);
	flags |= KPD_HASAMP;
	if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
		kpd->kp_cookie = -1;
		return (stom(kpd->kp_base, flags));
	}
	return (NULL);
}

/*
 * This does the real work of segkp allocation.
 * Returns the base address to the client. len must be page-aligned. A null
 * value is returned if there are no more vm resources (e.g. pages, swap).
 * The len and base recorded in the private data structure include the
 * redzone and the redzone length (if applicable). If the user requests a
 * redzone, either the first or last page is left unmapped, depending on
 * whether stacks grow toward low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
	struct seg *seg,
	size_t len,
	uint_t flags,
	struct segkp_data **tkpd,
	struct anon_map *amp)
{
	struct segkp_segdata	*kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data	*kpd;
	caddr_t vbase = NULL;	/* always first virtual, may not be mapped */
	pgcnt_t np = 0;		/* number of pages in the resource */
	pgcnt_t segkpindex;
	long i;
	caddr_t va;
	pgcnt_t pages = 0;
	ulong_t anon_idx = 0;
	int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
	caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
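	/*
	 * Layout note (illustrative): with 4K pages, a KPD_HASREDZONE
	 * request for 8K grows to len = 12K below, np = 3 pages, and
	 * KPD_REDZONE(kpd) selects the page that is left unmapped.
	 * stom() and SEGKP_MAPLEN() convert between the full resource
	 * [base, base + len) and its mapped portion, which excludes
	 * the redzone page.
	 */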

	if (len & PAGEOFFSET) {
		panic("segkp_get: len is not page-aligned");
		/*NOTREACHED*/
	}

	ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

	/* Only allow KPD_NO_ANON if we are going to lock it down */
	if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
		return (NULL);

	if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
		return (NULL);
	/*
	 * Fix up the len to reflect the REDZONE if applicable
	 */
	if (flags & KPD_HASREDZONE)
		len += PAGESIZE;
	np = btop(len);

	vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
	if (vbase == NULL) {
		kmem_free(kpd, sizeof (struct segkp_data));
		return (NULL);
	}

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pages = btop(SEGKP_MAPLEN(len, flags));
		if (page_resv(pages, kmflag) == 0) {
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		if ((flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
	}

	/*
	 * Reserve sufficient swap space for this vm resource.  We'll
	 * actually allocate it in the loop below, but reserving it
	 * here allows us to back out more gracefully than if we
	 * had an allocation failure in the body of the loop.
	 *
	 * Note that we don't need swap space for the red zone page.
	 */
	if (amp != NULL) {
		/*
		 * The swap reservation has been done, if required, and the
		 * anon_hdr is separate.
		 */
		anon_idx = 0;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = amp->ahp;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);

	} else if ((flags & KPD_NO_ANON) == 0) {
		if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
			if (flags & KPD_LOCKED) {
				atomic_add_long(&anon_segkp_pages_locked,
				    -pages);
				page_unresv(pages);
			}
			vmem_free(SEGKP_VMEM(seg), vbase, len);
			kmem_free(kpd, sizeof (struct segkp_data));
			return (NULL);
		}
		atomic_add_long(&anon_segkp_pages_resv,
		    btop(SEGKP_MAPLEN(len, flags)));
		anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
		kpd->kp_anon_idx = anon_idx;
		kpd->kp_anon = kpsd->kpsd_anon;

		TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
		    kpd, vbase, len, flags, 1);
	} else {
		kpd->kp_anon = NULL;
		kpd->kp_anon_idx = 0;
	}

	/*
	 * Allocate page and anon resources for the virtual address range
	 * except the redzone
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
	for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
		page_t		*pl[2];
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		page_t		*pp = NULL;

		/*
		 * Mark this page to be a segkp page in the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_SET(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		/*
		 * If this page is the red zone page, we don't need swap
		 * space for it.  Note that we skip over the code that
		 * establishes MMU mappings, so that the page remains
		 * invalid.
		 */
		if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
			continue;

		if (kpd->kp_anon != NULL) {
			struct anon *ap;

			ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
			    == NULL);
			/*
			 * Determine the "vp" and "off" of the anon slot.
			 */
			ap = anon_alloc(NULL, 0);
			if (amp != NULL)
				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
			(void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
			    ap, ANON_SLEEP);
			if (amp != NULL)
				ANON_LOCK_EXIT(&amp->a_rwlock);
			swap_xlate(ap, &vp, &off);

			/*
			 * Create a page with the specified identity.  The
			 * page is returned with the "shared" lock held.
			 */
			err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
			    NULL, pl, PAGESIZE, seg, va, S_CREATE,
			    kcred, NULL);
			if (err) {
				/*
				 * XXX - This should not fail.
				 */
				panic("segkp_get: no pages");
				/*NOTREACHED*/
			}
			pp = pl[0];
		} else {
			ASSERT(page_exists(&kvp,
			    (u_offset_t)(uintptr_t)va) == NULL);

			if ((pp = page_create_va(&kvp,
			    (u_offset_t)(uintptr_t)va, PAGESIZE,
			    (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
			    PG_NORELOC, seg, va)) == NULL) {
				/*
				 * Legitimize resource; then destroy it.
				 * Easier than trying to unwind here.
				 */
				kpd->kp_flags = flags;
				kpd->kp_base = vbase;
				kpd->kp_len = len;
				segkp_release_internal(seg, kpd, va - vbase);
				return (NULL);
			}
			page_io_unlock(pp);
		}

		if (flags & KPD_ZERO)
			pagezero(pp, 0, PAGESIZE);

		/*
		 * Load and lock an MMU translation for the page.
		 */
		hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
		    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

		/*
		 * Now, release lock on the page.
		 */
		if (flags & KPD_LOCKED) {
			/*
			 * Indicate to page_retire framework that this
			 * page can only be retired when it is freed.
			 */
			PP_SETRAF(pp);
			page_downgrade(pp);
		} else
			page_unlock(pp);
	}

	kpd->kp_flags = flags;
	kpd->kp_base = vbase;
	kpd->kp_len = len;
	segkp_insert(seg, kpd);
	*tkpd = kpd;
	return (stom(kpd->kp_base, flags));
}

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has less than the maximum allowable.  If inserted in the cache,
 * segkp_delete ensures the element is taken off of the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
	struct segkp_cache *freelist;
	struct segkp_data *kpd = NULL;

	if ((kpd = segkp_find(seg, vaddr)) == NULL) {
		panic("segkp_release: null kpd");
		/*NOTREACHED*/
	}

	if (kpd->kp_cookie != -1) {
		freelist = &segkp_cache[kpd->kp_cookie];
		mutex_enter(&segkp_lock);
		if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
			segkp_delete(seg, kpd);
			kpd->kp_next = freelist->kpf_list;
			freelist->kpf_list = kpd;
			freelist->kpf_count++;
			mutex_exit(&segkp_lock);
			return;
		} else {
			mutex_exit(&segkp_lock);
			kpd->kp_cookie = -1;
		}
	}
	segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource. segkp_unlock gets called with the start of
 * the mapped portion of the resource. The length is the size of the
 * mapped portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
	caddr_t		va;
	long		i;
	long		redzone;
	size_t		np;
	page_t		*pp;
	struct vnode	*vp;
	anoff_t		off;
	struct anon	*ap;
	pgcnt_t		segkpindex;

	ASSERT(kpd != NULL);
	ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
	np = btop(len);

	/* Remove from active hash list */
	if (kpd->kp_cookie == -1) {
		mutex_enter(&segkp_lock);
		segkp_delete(seg, kpd);
		mutex_exit(&segkp_lock);
	}

	/*
	 * Precompute redzone page index.
	 */
	redzone = -1;
	if (kpd->kp_flags & KPD_HASREDZONE)
		redzone = KPD_REDZONE(kpd);


	va = kpd->kp_base;

	hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
	    ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	/*
	 * Free up those anon resources that are quiescent.
	 */
	if (segkp_fromheap)
		segkpindex = btop((uintptr_t)(va - kvseg.s_base));
	for (i = 0; i < np; i++, va += PAGESIZE) {

		/*
		 * Clear the bit for this page from the bitmap.
		 */
		if (segkp_fromheap) {
			BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
			segkpindex++;
		}

		if (i == redzone)
			continue;
		if (kpd->kp_anon) {
			/*
			 * Free up anon resources and destroy the
			 * associated pages.
			 *
			 * Release the lock if there is one. Have to get the
			 * page to do this, unfortunately.
			 */
			if (kpd->kp_flags & KPD_LOCKED) {
				ap = anon_get_ptr(kpd->kp_anon,
				    kpd->kp_anon_idx + i);
				swap_xlate(ap, &vp, &off);
				/* Find the shared-locked page. */
				pp = page_find(vp, (u_offset_t)off);
				if (pp == NULL) {
					panic("segkp_release: "
					    "kp_anon: no page to unlock ");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);

				page_unlock(pp);
			}
			if ((kpd->kp_flags & KPD_HASAMP) == 0) {
				anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
				    PAGESIZE);
				anon_unresv_zone(PAGESIZE, NULL);
				atomic_dec_ulong(&anon_segkp_pages_resv);
			}
			TRACE_5(TR_FAC_VM,
			    TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
			    kpd, va, PAGESIZE, 0, 0);
		} else {
			if (kpd->kp_flags & KPD_LOCKED) {
				pp = page_find(&kvp,
				    (u_offset_t)(uintptr_t)va);
				if (pp == NULL) {
					panic("segkp_release: "
					    "no page to unlock");
					/*NOTREACHED*/
				}
				if (PP_ISRAF(pp))
					PP_CLRRAF(pp);
				/*
				 * We should just upgrade the lock here
				 * but there is no upgrade that waits.
				 */
				page_unlock(pp);
			}
			pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
			    SE_EXCL);
			if (pp != NULL)
				page_destroy(pp, 0);
		}
	}

	/* If locked, release physical memory reservation */
	if (kpd->kp_flags & KPD_LOCKED) {
		pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}

	vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
	kmem_free(kpd, sizeof (struct segkp_data));
}

/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when it is safe to sleep on page_create_va().
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
	uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
	caddr_t stkbase;
#endif

	/*
	 * Optimize for the common case where we simply return.
	 */
	if ((curthread->t_red_pp == NULL) &&
	    (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
		return (0);

#if defined(_LP64)
	/*
	 * XXX	We probably need something better than this.
	 */
	panic("kernel stack overflow");
	/*NOTREACHED*/
#else	/* _LP64 */
	if (curthread->t_red_pp == NULL) {
		page_t *red_pp;
		struct seg kseg;

		caddr_t red_va = (caddr_t)
		    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
		    PAGESIZE);

		ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
		    NULL);

		/*
		 * Allocate the physical for the red page.
		 */
		/*
		 * No PG_NORELOC here to avoid waits. Unlikely to get
		 * a relocate happening in the short time the page exists
		 * and it will be OK anyway.
		 */

		kseg.s_as = &kas;
		red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
		    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
		ASSERT(red_pp != NULL);

		/*
		 * So we now have a page to jam into the redzone...
		 */
		page_io_unlock(red_pp);

		hat_memload(kas.a_hat, red_va, red_pp,
		    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
		page_downgrade(red_pp);

		/*
		 * The page is left SE_SHARED locked so we can hold on to
		 * the page_t pointer.
		 */
		curthread->t_red_pp = red_pp;

		atomic_inc_32(&red_nmapped);
		while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
			(void) atomic_cas_32(&red_closest, red_closest,
			    (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
		}
		return (1);
	}

	stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	atomic_inc_32(&red_ndoubles);

	if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
		/*
		 * Oh boy.  We're already deep within the mapped-in
		 * redzone page, and the caller is trying to prepare
		 * for a deep stack run.  We're running without a
		 * redzone right now:  if the caller plows off the
		 * end of the stack, it'll plow another thread or
		 * LWP structure.  That situation could result in
		 * a very hard-to-debug panic, so, in the spirit of
		 * recording the name of one's killer in one's own
		 * blood, we're going to record hrestime and the calling
		 * thread.
		 */
		red_deep_hires = hrestime.tv_nsec;
		red_deep_thread = curthread;
	}

	/*
	 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
	 */
	ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
	return (0);
#endif	/* _LP64 */
}
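/*
 * Illustrative sketch of the caller contract described above
 * (hypothetical, modeled on a trap-handler caller): the caller must
 * remember whether the redzone was mapped and undo it when done.
 *
 *	int red = 0;
 *
 *	if (safe_to_sleep)
 *		red = segkp_map_red();
 *	...handle the fault, possibly running deep on the stack...
 *	if (red)
 *		segkp_unmap_red();
 */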

void
segkp_unmap_red(void)
{
	page_t *pp;
	caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
	    (uintptr_t)PAGEMASK) - PAGESIZE);

	ASSERT(curthread->t_red_pp != NULL);

	/*
	 * Because we locked the mapping down, we can't simply rely
	 * on page_destroy() to clean everything up;  we need to call
	 * hat_unload() to explicitly unlock the mapping resources.
	 */
	hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

	pp = curthread->t_red_pp;

	ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

	/*
	 * Need to upgrade the SE_SHARED lock to SE_EXCL.
	 */
	if (!page_tryupgrade(pp)) {
		/*
		 * As there is no wait for upgrade, release the
		 * SE_SHARED lock and wait for SE_EXCL.
		 */
		page_unlock(pp);
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
		/* pp may be NULL here, hence the test below */
	}

	/*
	 * Destroy the page, with dontfree set to zero (i.e. free it).
	 */
	if (pp != NULL)
		page_destroy(pp, 0);
	curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	enum fault_type type,
	enum seg_rw rw)
{
	struct segkp_data	*kpd = NULL;
	int	err;

	ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

	/*
	 * Sanity checks.
	 */
	if (type == F_PROT) {
		panic("segkp_fault: unexpected F_PROT fault");
		/*NOTREACHED*/
	}

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (FC_NOMAP);

	mutex_enter(&kpd->kp_lock);

	if (type == F_SOFTLOCK) {
		ASSERT(!(kpd->kp_flags & KPD_LOCKED));
		/*
		 * The F_SOFTLOCK case has more stringent
		 * range requirements: the given range must exactly coincide
		 * with the resource's mapped portion.  Note that a reference
		 * to the redzone is handled since vaddr would not equal base.
		 */
		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(EFAULT));
		}

		if ((err = segkp_load(hat, seg, vaddr, len, kpd,
		    KPD_LOCKED))) {
			mutex_exit(&kpd->kp_lock);
			return (FC_MAKE_ERR(err));
		}
		kpd->kp_flags |= KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (0);
	}

	if (type == F_INVAL) {
		ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

		/*
		 * Check if we touched the redzone.  Somewhat optimistic
		 * here if we are touching the redzone of our own stack
		 * since we wouldn't have a stack to get this far...
		 */
		if ((kpd->kp_flags & KPD_HASREDZONE) &&
		    btop((uintptr_t)(vaddr - kpd->kp_base)) ==
		    KPD_REDZONE(kpd))
			panic("segkp_fault: accessing redzone");

		/*
		 * This fault may occur while the page is being F_SOFTLOCK'ed.
		 * Return since a 2nd segkp_load is unnecessary and also would
		 * result in the page being locked twice and eventually
		 * hang the thread_reaper thread.
		 */
		if (kpd->kp_flags & KPD_LOCKED) {
			mutex_exit(&kpd->kp_lock);
			return (0);
		}

		err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}

	if (type == F_SOFTUNLOCK) {
		uint_t	flags;

		/*
		 * Make sure the addr is LOCKED and it has anon backing
		 * before unlocking.
		 */
		if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) !=
		    KPD_LOCKED) {
			panic("segkp_fault: bad unlock");
			/*NOTREACHED*/
		}

		if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
		    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
			panic("segkp_fault: bad range");
			/*NOTREACHED*/
		}

		if (rw == S_WRITE)
			flags = kpd->kp_flags | KPD_WRITEDIRTY;
		else
			flags = kpd->kp_flags;
		err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
		kpd->kp_flags &= ~KPD_LOCKED;
		mutex_exit(&kpd->kp_lock);
		return (err ? FC_MAKE_ERR(err) : 0);
	}
	mutex_exit(&kpd->kp_lock);
	panic("segkp_fault: bogus fault type: %d\n", type);
	/*NOTREACHED*/
}
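/*
 * Illustrative sketch (hypothetical caller): a client that must keep a
 * resource's pages resident during I/O soft-locks the mapped range and
 * soft-unlocks it afterwards, passing S_WRITE so dirty pages can be
 * pushed out via KPD_WRITEDIRTY.
 *
 *	caddr_t mva = stom(kpd->kp_base, kpd->kp_flags);
 *	size_t mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
 *
 *	(void) as_fault(kas.a_hat, &kas, mva, mlen, F_SOFTLOCK, S_READ);
 *	...perform the I/O...
 *	(void) as_fault(kas.a_hat, &kas, mva, mlen, F_SOFTUNLOCK, S_WRITE);
 */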

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
	struct segkp_data *kpd = NULL;
	caddr_t mbase;
	size_t mlen;

	if ((kpd = segkp_find(seg, vaddr)) == NULL)
		return (EACCES);

	mutex_enter(&kpd->kp_lock);
	mbase = stom(kpd->kp_base, kpd->kp_flags);
	mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
	if (len > mlen || vaddr < mbase ||
	    ((vaddr + len) > (mbase + mlen))) {
		mutex_exit(&kpd->kp_lock);
		return (EACCES);
	}
	mutex_exit(&kpd->kp_lock);
	return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For seg_u we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	uint_t lock;

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	len = P2ROUNDUP(len, PAGESIZE);

	/* If locking, reserve physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btop(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, pages);
		(void) page_resv(pages, KM_SLEEP);
	}

	/*
	 * Loop through the pages in the given range.
	 */
	va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
	vaddr = va;
	vlim = va + len;
	lock = flags & KPD_LOCKED;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	for (; va < vlim; va += PAGESIZE, i++) {
		page_t		*pl[2];	/* second element NULL terminator */
		struct vnode	*vp;
		anoff_t		off;
		int		err;
		struct anon	*ap;

		/*
		 * Summon the page.  If it's not resident, arrange
		 * for synchronous i/o to pull it in.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		/*
		 * The returned page list will have exactly one entry,
		 * which is returned to us already kept.
		 */
		err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
		    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

		if (err) {
			/*
			 * Back out of what we've done so far.
			 */
			(void) segkp_unlock(hat, seg, vaddr,
			    (va - vaddr), kpd, flags);
			return (err);
		}

		/*
		 * Load an MMU translation for the page.
		 */
		hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
		    lock ? HAT_LOAD_LOCK : HAT_LOAD);

		if (!lock) {
			/*
			 * Now, release "shared" lock on the page.
			 */
			page_unlock(pl[0]);
		}
	}
	return (0);
}

/*
 * At the very least, unload the mmu-translations and unlock the range
 * if locked.  Can be called with the KPD_WRITEDIRTY flag, which specifies
 * that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
	struct hat *hat,
	struct seg *seg,
	caddr_t vaddr,
	size_t len,
	struct segkp_data *kpd,
	uint_t flags)
{
	caddr_t va;
	caddr_t vlim;
	ulong_t i;
	struct page *pp;
	struct vnode *vp;
	anoff_t off;
	struct anon *ap;

#ifdef lint
	seg = seg;
#endif /* lint */

	ASSERT(MUTEX_HELD(&kpd->kp_lock));

	/*
	 * Loop through the pages in the given range.  It is assumed
	 * segkp_unlock is called with a page-aligned base.
	 */
	va = vaddr;
	vlim = va + len;
	i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
	hat_unload(hat, va, len,
	    ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
	for (; va < vlim; va += PAGESIZE, i++) {
		/*
		 * Find the page associated with this part of the
		 * slot, tracking it down through its associated swap
		 * space.
		 */
		ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
		swap_xlate(ap, &vp, &off);

		if (flags & KPD_LOCKED) {
			if ((pp = page_find(vp, off)) == NULL) {
				if (flags & KPD_LOCKED) {
					panic("segkp_softunlock: "
					    "missing page");
					/*NOTREACHED*/
				}
			}
		} else {
			/*
			 * Nothing to do if the slot is not locked and the
			 * page doesn't exist.
			 */
			if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
				continue;
		}

		/*
		 * If the page doesn't have any translations, is
		 * dirty and not being shared, then push it out
		 * asynchronously and avoid waiting for the
		 * pageout daemon to do it for us.
		 *
		 * XXX - Do we really need to get the "exclusive"
		 * lock via an upgrade?
		 */
		if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
		    hat_ismod(pp) && page_tryupgrade(pp)) {
			/*
			 * Hold the vnode before releasing the page lock to
			 * prevent it from being freed and re-used by some
			 * other thread.
			 */
			VN_HOLD(vp);
			page_unlock(pp);

			/*
			 * Want most powerful credentials we can get so
			 * use kcred.
			 */
			(void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
			    B_ASYNC | B_FREE, kcred, NULL);
			VN_RELE(vp);
		} else {
			page_unlock(pp);
		}
	}

	/* If unlocking, release physical memory */
	if (flags & KPD_LOCKED) {
		pgcnt_t pages = btopr(len);
		if ((kpd->kp_flags & KPD_NO_ANON) == 0)
			atomic_add_long(&anon_segkp_pages_locked, -pages);
		page_unresv(pages);
	}
	return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	int index;

	/*
	 * Insert the kpd based on the address that will be returned
	 * via segkp_release.
	 */
	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	mutex_enter(&segkp_lock);
	kpd->kp_next = kpsd->kpsd_hash[index];
	kpsd->kpsd_hash[index] = kpd;
	mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data **kpp;
	int index;

	ASSERT(MUTEX_HELD(&segkp_lock));

	index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
	for (kpp = &kpsd->kpsd_hash[index];
	    *kpp != NULL; kpp = &((*kpp)->kp_next)) {
		if (*kpp == kpd) {
			*kpp = kpd->kp_next;
			return;
		}
	}
	panic("segkp_delete: unable to find element to delete");
	/*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it.  This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
	struct segkp_data *kpd;
	int	i;
	int	stop;

	i = stop = SEGKP_HASH(vaddr);
	mutex_enter(&segkp_lock);
	do {
		for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
		    kpd = kpd->kp_next) {
			if (vaddr >= kpd->kp_base &&
			    vaddr < kpd->kp_base + kpd->kp_len) {
				mutex_exit(&segkp_lock);
				return (kpd);
			}
		}
		if (--i < 0)
			i = SEGKP_HASHSZ - 1;	/* Wrap */
	} while (i != stop);
	mutex_exit(&segkp_lock);
	return (NULL);		/* Not found */
}
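/*
 * Note that segkp_insert() hashes on the mapped address,
 * stom(kp_base, kp_flags), so a lookup on that same address hits the
 * right bucket on the first probe; only lookups on an interior address
 * of a resource fall back to the backwards scan above.
 */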

/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
	struct segkp_data *kpd;

	if ((kpd = segkp_find(segkp, v)) != NULL)
		return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
	else
		return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
	int i;
	struct segkp_data *kpd;
	struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

	for (i = 0; i < SEGKP_HASHSZ; i++) {
		for (kpd = kpsd->kpsd_hash[i];
		    kpd != NULL; kpd = kpd->kp_next) {
			pfn_t pfn;
			caddr_t addr;
			caddr_t eaddr;

			addr = kpd->kp_base;
			eaddr = addr + kpd->kp_len;
			while (addr < eaddr) {
				ASSERT(seg->s_as == &kas);
				pfn = hat_getpfnum(seg->s_as->a_hat, addr);
				if (pfn != PFN_INVALID)
					dump_addpage(seg->s_as, addr, pfn);
				addr += PAGESIZE;
				dump_timeleft = dump_timeout;
			}
		}
	}
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
	atomic_inc_32(&segkp_indel);
	segkp_cache_free();
	return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
	atomic_dec_32(&segkp_indel);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	segkp_mem_config_post_add,
	segkp_mem_config_pre_del,
	segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
	int ret;

	ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
	ASSERT(ret == 0);
}