/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*      All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

/*
 * segkp is a segment driver that administers the allocation and deallocation
 * of pageable, variable-size chunks of kernel virtual address space. Each
 * allocated resource is page-aligned.
 *
 * The user may specify whether the resource should be initialized to 0,
 * include a redzone, or be locked in memory.
 */
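
/*
 * A minimal usage sketch (hypothetical caller; the size and flags below
 * are illustrative only, 'segkp' is the global segment set up at boot,
 * and the KPD_* flags are defined in vm/seg_kp.h):
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, 3 * PAGESIZE, KPD_ZERO | KPD_HASREDZONE);
 *	if (va == NULL)
 *		return;			(no more VM resources)
 *	. . . use the pageable chunk at va . . .
 *	segkp_release(segkp, va);
 */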

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/thread.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/mman.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/swap.h>
#include <sys/tuneable.h>
#include <sys/kmem.h>
#include <sys/vmem.h>
#include <sys/cred.h>
#include <sys/dumphdr.h>
#include <sys/debug.h>
#include <sys/vtrace.h>
#include <sys/stack.h>
#include <sys/atomic.h>
#include <sys/archsystm.h>
#include <sys/lgrp.h>

#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_kp.h>
#include <vm/seg_kmem.h>
#include <vm/anon.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <sys/bitmap.h>

/*
 * Private seg op routines
 */
static void     segkp_badop(void);
static void     segkp_dump(struct seg *seg);
static int      segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
                        uint_t prot);
static int      segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
static int      segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
                        struct page ***page, enum lock_type type,
                        enum seg_rw rw);
static void     segkp_insert(struct seg *seg, struct segkp_data *kpd);
static void     segkp_delete(struct seg *seg, struct segkp_data *kpd);
static caddr_t  segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
                        struct segkp_data **tkpd, struct anon_map *amp);
static void     segkp_release_internal(struct seg *seg,
                        struct segkp_data *kpd, size_t len);
static int      segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
                        size_t len, struct segkp_data *kpd, uint_t flags);
static int      segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
                        size_t len, struct segkp_data *kpd, uint_t flags);
static struct   segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
static int      segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t   *segkp_getpolicy(struct seg *seg,
    caddr_t addr);
static int      segkp_capable(struct seg *seg, segcapability_t capability);

/*
 * Lock used to protect the hash table(s) and caches.
 */
static kmutex_t segkp_lock;

/*
 * The segkp caches
 */
static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];

#define SEGKP_BADOP(t)  (t(*)())segkp_badop

/*
 * When there are fewer than red_minavail bytes left on the stack,
 * segkp_map_red() will map in the redzone (if called).  5000 seems
 * to work reasonably well...
 */
long            red_minavail = 5000;

/*
 * Will be set to 1 for 32-bit x86 systems only, in startup.c.
 */
int     segkp_fromheap = 0;
ulong_t *segkp_bitmap;

/*
 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious;  if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define RED_DEEP_THRESHOLD      2000

hrtime_t        red_deep_hires;
kthread_t       *red_deep_thread;

uint32_t        red_nmapped;
uint32_t        red_closest = UINT_MAX;
uint32_t        red_ndoubles;

pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */

static struct   seg_ops segkp_ops = {
        SEGKP_BADOP(int),               /* dup */
        SEGKP_BADOP(int),               /* unmap */
        SEGKP_BADOP(void),              /* free */
        segkp_fault,
        SEGKP_BADOP(faultcode_t),       /* faulta */
        SEGKP_BADOP(int),               /* setprot */
        segkp_checkprot,
        segkp_kluster,
        SEGKP_BADOP(size_t),            /* swapout */
        SEGKP_BADOP(int),               /* sync */
        SEGKP_BADOP(size_t),            /* incore */
        SEGKP_BADOP(int),               /* lockop */
        SEGKP_BADOP(int),               /* getprot */
        SEGKP_BADOP(u_offset_t),        /* getoffset */
        SEGKP_BADOP(int),               /* gettype */
        SEGKP_BADOP(int),               /* getvp */
        SEGKP_BADOP(int),               /* advise */
        segkp_dump,                     /* dump */
        segkp_pagelock,                 /* pagelock */
        SEGKP_BADOP(int),               /* setpgsz */
        segkp_getmemid,                 /* getmemid */
        segkp_getpolicy,                /* getpolicy */
        segkp_capable,                  /* capable */
};


static void
segkp_badop(void)
{
        panic("segkp_badop");
        /*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;

/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
        struct segkp_segdata *kpsd;
        size_t  np;

        ASSERT(seg != NULL && seg->s_as == &kas);
        ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

        if (seg->s_size & PAGEOFFSET) {
                panic("Bad segkp size");
                /*NOTREACHED*/
        }

        kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

        /*
         * Allocate the virtual memory for segkp and initialize it
         */
        if (segkp_fromheap) {
                np = btop(kvseg.s_size);
                segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
                kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
                    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
        } else {
                segkp_bitmap = NULL;
                np = btop(seg->s_size);
                kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
                    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
                    VM_SLEEP);
        }

        kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

        kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
            KM_SLEEP);
        seg->s_data = (void *)kpsd;
        seg->s_ops = &segkp_ops;
        segkpinit_mem_config(seg);
        return (0);
}
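
/*
 * A condensed sketch of how this driver gets wired up at boot
 * (hypothetical fragment; the real sequence lives in startup.c, 'base'
 * and 'size' stand in for the chosen segkp range, and 'segkp' points at
 * a preallocated struct seg):
 *
 *	rw_enter(&kas.a_lock, RW_WRITER);
 *	if (seg_attach(&kas, base, size, segkp) < 0)
 *		panic("cannot attach segkp");
 *	if (segkp_create(segkp) != 0)
 *		panic("segkp_create returned error");
 *	rw_exit(&kas.a_lock);
 */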

/*
 * Find a free 'freelist' and initialize it with the appropriate attributes
 */
void *
segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
{
        int i;

        if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
                return ((void *)-1);

        mutex_enter(&segkp_lock);
        for (i = 0; i < SEGKP_MAX_CACHE; i++) {
                if (segkp_cache[i].kpf_inuse)
                        continue;
                segkp_cache[i].kpf_inuse = 1;
                segkp_cache[i].kpf_max = maxsize;
                segkp_cache[i].kpf_flags = flags;
                segkp_cache[i].kpf_seg = seg;
                segkp_cache[i].kpf_len = len;
                mutex_exit(&segkp_lock);
                return ((void *)(uintptr_t)i);
        }
        mutex_exit(&segkp_lock);
        return ((void *)-1);
}
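
/*
 * A sketch of the cache path (hypothetical caller; the cache depth,
 * length, and flag combination below are illustrative only):
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 128, DEFAULTSTKSZ,
 *	    KPD_HASREDZONE | KPD_LOCKED);
 *	if (cookie == (void *)-1)
 *		. . . no free cache slot . . .
 *	va = segkp_cache_get(cookie);	(fast path: pops the freelist)
 *	. . .
 *	segkp_release(segkp, va);	(may return the chunk to the cache)
 */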

/*
 * Free all the cache resources.
 */
void
segkp_cache_free(void)
{
        struct segkp_data *kpd;
        struct seg *seg;
        int i;

        mutex_enter(&segkp_lock);
        for (i = 0; i < SEGKP_MAX_CACHE; i++) {
                if (!segkp_cache[i].kpf_inuse)
                        continue;
                /*
                 * Disconnect the freelist and process each element
                 */
                kpd = segkp_cache[i].kpf_list;
                seg = segkp_cache[i].kpf_seg;
                segkp_cache[i].kpf_list = NULL;
                segkp_cache[i].kpf_count = 0;
                mutex_exit(&segkp_lock);

                while (kpd != NULL) {
                        struct segkp_data *next;

                        next = kpd->kp_next;
                        segkp_release_internal(seg, kpd, kpd->kp_len);
                        kpd = next;
                }
                mutex_enter(&segkp_lock);
        }
        mutex_exit(&segkp_lock);
}

/*
 * There are two entry points into segkp_get_internal. The first includes
 * a cookie used to access a pool of cached segkp resources. The second
 * does not use the cache.
 */
caddr_t
segkp_get(struct seg *seg, size_t len, uint_t flags)
{
        struct segkp_data *kpd = NULL;

        if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
                kpd->kp_cookie = -1;
                return (stom(kpd->kp_base, flags));
        }
        return (NULL);
}

/*
 * Return a 'cached' segkp address
 */
caddr_t
segkp_cache_get(void *cookie)
{
        struct segkp_cache *freelist = NULL;
        struct segkp_data *kpd = NULL;
        int index = (int)(uintptr_t)cookie;
        struct seg *seg;
        size_t len;
        uint_t flags;

        if (index < 0 || index >= SEGKP_MAX_CACHE)
                return (NULL);
        freelist = &segkp_cache[index];

        mutex_enter(&segkp_lock);
        seg = freelist->kpf_seg;
        flags = freelist->kpf_flags;
        if (freelist->kpf_list != NULL) {
                kpd = freelist->kpf_list;
                freelist->kpf_list = kpd->kp_next;
                freelist->kpf_count--;
                mutex_exit(&segkp_lock);
                kpd->kp_next = NULL;
                segkp_insert(seg, kpd);
                return (stom(kpd->kp_base, flags));
        }
        len = freelist->kpf_len;
        mutex_exit(&segkp_lock);
        if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
                kpd->kp_cookie = index;
                return (stom(kpd->kp_base, flags));
        }
        return (NULL);
}

caddr_t
segkp_get_withanonmap(
        struct seg *seg,
        size_t len,
        uint_t flags,
        struct anon_map *amp)
{
        struct segkp_data *kpd = NULL;

        ASSERT(amp != NULL);
        flags |= KPD_HASAMP;
        if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
                kpd->kp_cookie = -1;
                return (stom(kpd->kp_base, flags));
        }
        return (NULL);
}

/*
 * This does the real work of segkp allocation.
 * Returns the base addr to the client. len must be page-aligned. A null
 * value is returned if there are no more vm resources (e.g. pages, swap).
 * The len and base recorded in the private data structure include the
 * redzone and its length (if applicable). If the user requests a redzone,
 * either the first or the last page is left unmapped depending on whether
 * stacks grow toward low or high memory.
 *
 * The client may also specify a no-wait flag. If that is set then the
 * request will choose a non-blocking path when requesting resources.
 * The default is to make the client wait.
 */
static caddr_t
segkp_get_internal(
        struct seg *seg,
        size_t len,
        uint_t flags,
        struct segkp_data **tkpd,
        struct anon_map *amp)
{
        struct segkp_segdata    *kpsd = (struct segkp_segdata *)seg->s_data;
        struct segkp_data       *kpd;
        caddr_t vbase = NULL;   /* always first virtual, may not be mapped */
        pgcnt_t np = 0;         /* number of pages in the resource */
        pgcnt_t segkpindex;
        long i;
        caddr_t va;
        pgcnt_t pages = 0;
        ulong_t anon_idx = 0;
        int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
        caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;

        if (len & PAGEOFFSET) {
                panic("segkp_get: len is not page-aligned");
                /*NOTREACHED*/
        }

        ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));

        /* Only allow KPD_NO_ANON if we are going to lock it down */
        if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
                return (NULL);

        if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
                return (NULL);
        /*
         * Fix up the len to reflect the REDZONE if applicable
         */
        if (flags & KPD_HASREDZONE)
                len += PAGESIZE;
        np = btop(len);

        vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
        if (vbase == NULL) {
                kmem_free(kpd, sizeof (struct segkp_data));
                return (NULL);
        }

        /* If locking, reserve physical memory */
        if (flags & KPD_LOCKED) {
                pages = btop(SEGKP_MAPLEN(len, flags));
                if (page_resv(pages, kmflag) == 0) {
                        vmem_free(SEGKP_VMEM(seg), vbase, len);
                        kmem_free(kpd, sizeof (struct segkp_data));
                        return (NULL);
                }
                if ((flags & KPD_NO_ANON) == 0)
                        atomic_add_long(&anon_segkp_pages_locked, pages);
        }

        /*
         * Reserve sufficient swap space for this vm resource.  We'll
         * actually allocate it in the loop below, but reserving it
         * here allows us to back out more gracefully than if we
         * had an allocation failure in the body of the loop.
         *
         * Note that we don't need swap space for the red zone page.
         */
        if (amp != NULL) {
                /*
                 * The swap reservation has been done, if required, and the
                 * anon_hdr is separate.
                 */
                anon_idx = 0;
                kpd->kp_anon_idx = anon_idx;
                kpd->kp_anon = amp->ahp;

                TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
                    kpd, vbase, len, flags, 1);

        } else if ((flags & KPD_NO_ANON) == 0) {
                if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
                        if (flags & KPD_LOCKED) {
                                atomic_add_long(&anon_segkp_pages_locked,
                                    -pages);
                                page_unresv(pages);
                        }
                        vmem_free(SEGKP_VMEM(seg), vbase, len);
                        kmem_free(kpd, sizeof (struct segkp_data));
                        return (NULL);
                }
                atomic_add_long(&anon_segkp_pages_resv,
                    btop(SEGKP_MAPLEN(len, flags)));
                anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
                kpd->kp_anon_idx = anon_idx;
                kpd->kp_anon = kpsd->kpsd_anon;

                TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
                    kpd, vbase, len, flags, 1);
        } else {
                kpd->kp_anon = NULL;
                kpd->kp_anon_idx = 0;
        }

        /*
         * Allocate page and anon resources for the virtual address range
         * except the redzone
         */
        if (segkp_fromheap)
                segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
        for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
                page_t          *pl[2];
                struct vnode    *vp;
                anoff_t         off;
                int             err;
                page_t          *pp = NULL;

                /*
                 * Mark this page to be a segkp page in the bitmap.
                 */
                if (segkp_fromheap) {
                        BT_ATOMIC_SET(segkp_bitmap, segkpindex);
                        segkpindex++;
                }

                /*
                 * If this page is the red zone page, we don't need swap
                 * space for it.  Note that we skip over the code that
                 * establishes MMU mappings, so that the page remains
                 * invalid.
                 */
                if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
                        continue;

                if (kpd->kp_anon != NULL) {
                        struct anon *ap;

                        ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
                            == NULL);
                        /*
                         * Determine the "vp" and "off" of the anon slot.
                         */
                        ap = anon_alloc(NULL, 0);
                        if (amp != NULL)
                                ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
                        (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
                            ap, ANON_SLEEP);
                        if (amp != NULL)
                                ANON_LOCK_EXIT(&amp->a_rwlock);
                        swap_xlate(ap, &vp, &off);

                        /*
                         * Create a page with the specified identity.  The
                         * page is returned with the "shared" lock held.
                         */
                        err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
                            NULL, pl, PAGESIZE, seg, va, S_CREATE,
                            kcred, NULL);
                        if (err) {
                                /*
                                 * XXX - This should not fail.
                                 */
                                panic("segkp_get: no pages");
                                /*NOTREACHED*/
                        }
                        pp = pl[0];
                } else {
                        ASSERT(page_exists(&kvp,
                            (u_offset_t)(uintptr_t)va) == NULL);

                        if ((pp = page_create_va(&kvp,
                            (u_offset_t)(uintptr_t)va, PAGESIZE,
                            (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
                            PG_NORELOC, seg, va)) == NULL) {
                                /*
                                 * Legitimize resource; then destroy it.
                                 * Easier than trying to unwind here.
                                 */
                                kpd->kp_flags = flags;
                                kpd->kp_base = vbase;
                                kpd->kp_len = len;
                                segkp_release_internal(seg, kpd, va - vbase);
                                return (NULL);
                        }
                        page_io_unlock(pp);
                }

                if (flags & KPD_ZERO)
                        pagezero(pp, 0, PAGESIZE);

                /*
                 * Load and lock an MMU translation for the page.
                 */
                hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
                    ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));

                /*
                 * Now, release lock on the page.
                 */
                if (flags & KPD_LOCKED) {
                        /*
                         * Indicate to page_retire framework that this
                         * page can only be retired when it is freed.
                         */
                        PP_SETRAF(pp);
                        page_downgrade(pp);
                } else
                        page_unlock(pp);
        }

        kpd->kp_flags = flags;
        kpd->kp_base = vbase;
        kpd->kp_len = len;
        segkp_insert(seg, kpd);
        *tkpd = kpd;
        return (stom(kpd->kp_base, flags));
}

/*
 * Release the resource to the cache if the pool (designated by the cookie)
 * has fewer entries than the maximum allowable. If it is inserted in the
 * cache, segkp_delete ensures the element is taken off the active list.
 */
void
segkp_release(struct seg *seg, caddr_t vaddr)
{
        struct segkp_cache *freelist;
        struct segkp_data *kpd = NULL;

        if ((kpd = segkp_find(seg, vaddr)) == NULL) {
                panic("segkp_release: null kpd");
                /*NOTREACHED*/
        }

        if (kpd->kp_cookie != -1) {
                freelist = &segkp_cache[kpd->kp_cookie];
                mutex_enter(&segkp_lock);
                if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
                        segkp_delete(seg, kpd);
                        kpd->kp_next = freelist->kpf_list;
                        freelist->kpf_list = kpd;
                        freelist->kpf_count++;
                        mutex_exit(&segkp_lock);
                        return;
                } else {
                        mutex_exit(&segkp_lock);
                        kpd->kp_cookie = -1;
                }
        }
        segkp_release_internal(seg, kpd, kpd->kp_len);
}

/*
 * Free the entire resource. segkp_unlock gets called with the start of the
 * mapped portion of the resource. The length is the size of the mapped
 * portion.
 */
static void
segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
{
        caddr_t         va;
        long            i;
        long            redzone;
        size_t          np;
        page_t          *pp;
        struct vnode    *vp;
        anoff_t         off;
        struct anon     *ap;
        pgcnt_t         segkpindex;

        ASSERT(kpd != NULL);
        ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
        np = btop(len);

        /* Remove from active hash list */
        if (kpd->kp_cookie == -1) {
                mutex_enter(&segkp_lock);
                segkp_delete(seg, kpd);
                mutex_exit(&segkp_lock);
        }

        /*
         * Precompute redzone page index.
         */
        redzone = -1;
        if (kpd->kp_flags & KPD_HASREDZONE)
                redzone = KPD_REDZONE(kpd);


        va = kpd->kp_base;

        hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
            ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
        /*
         * Free up those anon resources that are quiescent.
         */
        if (segkp_fromheap)
                segkpindex = btop((uintptr_t)(va - kvseg.s_base));
        for (i = 0; i < np; i++, va += PAGESIZE) {

                /*
                 * Clear the bit for this page from the bitmap.
                 */
                if (segkp_fromheap) {
                        BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
                        segkpindex++;
                }

                if (i == redzone)
                        continue;
                if (kpd->kp_anon) {
                        /*
                         * Free up anon resources and destroy the
                         * associated pages.
                         *
                         * Release the lock if there is one. Have to get the
                         * page to do this, unfortunately.
                         */
                        if (kpd->kp_flags & KPD_LOCKED) {
                                ap = anon_get_ptr(kpd->kp_anon,
                                    kpd->kp_anon_idx + i);
                                swap_xlate(ap, &vp, &off);
                                /* Find the shared-locked page. */
                                pp = page_find(vp, (u_offset_t)off);
                                if (pp == NULL) {
                                        panic("segkp_release: "
                                            "kp_anon: no page to unlock ");
                                        /*NOTREACHED*/
                                }
                                if (PP_ISRAF(pp))
                                        PP_CLRRAF(pp);

                                page_unlock(pp);
                        }
                        if ((kpd->kp_flags & KPD_HASAMP) == 0) {
                                anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
                                    PAGESIZE);
                                anon_unresv_zone(PAGESIZE, NULL);
                                atomic_add_long(&anon_segkp_pages_resv,
                                    -1);
                        }
                        TRACE_5(TR_FAC_VM,
                            TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
                            kpd, va, PAGESIZE, 0, 0);
                } else {
                        if (kpd->kp_flags & KPD_LOCKED) {
                                pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
                                if (pp == NULL) {
                                        panic("segkp_release: "
                                            "no page to unlock");
                                        /*NOTREACHED*/
                                }
                                if (PP_ISRAF(pp))
                                        PP_CLRRAF(pp);
                                /*
                                 * We should just upgrade the lock here
                                 * but there is no upgrade that waits.
                                 */
                                page_unlock(pp);
                        }
                        pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
                            SE_EXCL);
                        if (pp != NULL)
                                page_destroy(pp, 0);
                }
        }

        /* If locked, release physical memory reservation */
        if (kpd->kp_flags & KPD_LOCKED) {
                pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
                if ((kpd->kp_flags & KPD_NO_ANON) == 0)
                        atomic_add_long(&anon_segkp_pages_locked, -pages);
                page_unresv(pages);
        }

        vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
        kmem_free(kpd, sizeof (struct segkp_data));
}


/*
 * segkp_map_red() will check the current frame pointer against the
 * stack base.  If the amount of stack remaining is questionable
 * (less than red_minavail), then segkp_map_red() will map in the redzone
 * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 * _only_ be called when it is safe to sleep on page_create_va().
 *
 * It is up to the caller to remember whether segkp_map_red() successfully
 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 * time.
 *
 * Currently, this routine is only called from pagefault() (which necessarily
 * satisfies the above conditions).
 */
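/*
 * The expected caller pattern, as a sketch (hypothetical fragment; the
 * real caller is pagefault() in the trap path):
 *
 *	int mapped_red = segkp_map_red();
 *	. . . handle the fault, possibly running deep into the stack . . .
 *	if (mapped_red)
 *		segkp_unmap_red();
 */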
#if defined(STACK_GROWTH_DOWN)
int
segkp_map_red(void)
{
        uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
#ifndef _LP64
        caddr_t stkbase;
#endif

        /*
         * Optimize for the common case where we simply return.
         */
        if ((curthread->t_red_pp == NULL) &&
            (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
                return (0);

#if defined(_LP64)
        /*
         * XXX  We probably need something better than this.
         */
        panic("kernel stack overflow");
        /*NOTREACHED*/
#else /* _LP64 */
        if (curthread->t_red_pp == NULL) {
                page_t *red_pp;
                struct seg kseg;

                caddr_t red_va = (caddr_t)
                    (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
                    PAGESIZE);

                ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
                    NULL);

                /*
                 * Allocate the physical page for the redzone.
                 *
                 * No PG_NORELOC here to avoid waits. Unlikely to get
                 * a relocate happening in the short time the page exists
                 * and it will be OK anyway.
                 */

                kseg.s_as = &kas;
                red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
                    PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
                ASSERT(red_pp != NULL);

                /*
                 * So we now have a page to jam into the redzone...
                 */
                page_io_unlock(red_pp);

                hat_memload(kas.a_hat, red_va, red_pp,
                    (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
                page_downgrade(red_pp);

                /*
                 * The page is left SE_SHARED locked so we can hold on to
                 * the page_t pointer.
                 */
                curthread->t_red_pp = red_pp;

                atomic_add_32(&red_nmapped, 1);
                while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
                        (void) cas32(&red_closest, red_closest,
                            (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
                }
                return (1);
        }

        stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
            (uintptr_t)PAGEMASK) - PAGESIZE);

        atomic_add_32(&red_ndoubles, 1);

        if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
                /*
                 * Oh boy.  We're already deep within the mapped-in
                 * redzone page, and the caller is trying to prepare
                 * for a deep stack run.  We're running without a
                 * redzone right now:  if the caller plows off the
                 * end of the stack, it'll plow another thread or
                 * LWP structure.  That situation could result in
                 * a very hard-to-debug panic, so, in the spirit of
                 * recording the name of one's killer in one's own
                 * blood, we're going to record hrestime and the calling
                 * thread.
                 */
                red_deep_hires = hrestime.tv_nsec;
                red_deep_thread = curthread;
        }

        /*
         * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
         */
        ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
        return (0);
#endif /* _LP64 */
}

void
segkp_unmap_red(void)
{
        page_t *pp;
        caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
            (uintptr_t)PAGEMASK) - PAGESIZE);

        ASSERT(curthread->t_red_pp != NULL);

        /*
         * Because we locked the mapping down, we can't simply rely
         * on page_destroy() to clean everything up;  we need to call
         * hat_unload() to explicitly unlock the mapping resources.
         */
        hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);

        pp = curthread->t_red_pp;

        ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));

        /*
         * Need to upgrade the SE_SHARED lock to SE_EXCL.
         */
        if (!page_tryupgrade(pp)) {
                /*
                 * As there is no wait for upgrade, release the
                 * SE_SHARED lock and wait for SE_EXCL.
                 */
                page_unlock(pp);
                pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
                /* pp may be NULL here, hence the test below */
        }

        /*
         * Destroy the page, with dontfree set to zero (i.e. free it).
         */
        if (pp != NULL)
                page_destroy(pp, 0);
        curthread->t_red_pp = NULL;
}
#else
#error Red stacks only supported with downwards stack growth.
#endif

/*
 * Handle a fault on an address corresponding to one of the
 * resources in the segkp segment.
 */
faultcode_t
segkp_fault(
        struct hat      *hat,
        struct seg      *seg,
        caddr_t         vaddr,
        size_t          len,
        enum fault_type type,
        enum seg_rw rw)
{
        struct segkp_data       *kpd = NULL;
        int                     err;

        ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));

        /*
         * Sanity checks.
         */
        if (type == F_PROT) {
                panic("segkp_fault: unexpected F_PROT fault");
                /*NOTREACHED*/
        }

        if ((kpd = segkp_find(seg, vaddr)) == NULL)
                return (FC_NOMAP);

        mutex_enter(&kpd->kp_lock);

        if (type == F_SOFTLOCK) {
                ASSERT(!(kpd->kp_flags & KPD_LOCKED));
                /*
                 * The F_SOFTLOCK case has more stringent range
                 * requirements: the given range must exactly coincide
                 * with the resource's mapped portion. Note that a reference
                 * to the redzone is handled, since vaddr would not equal
                 * the base.
                 */
                if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
                    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
                        mutex_exit(&kpd->kp_lock);
                        return (FC_MAKE_ERR(EFAULT));
                }

                if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
                        mutex_exit(&kpd->kp_lock);
                        return (FC_MAKE_ERR(err));
                }
                kpd->kp_flags |= KPD_LOCKED;
                mutex_exit(&kpd->kp_lock);
                return (0);
        }

        if (type == F_INVAL) {
                ASSERT(!(kpd->kp_flags & KPD_NO_ANON));

                /*
                 * Check if we touched the redzone. Somewhat optimistic
                 * here if we are touching the redzone of our own stack
                 * since we wouldn't have a stack to get this far...
                 */
                if ((kpd->kp_flags & KPD_HASREDZONE) &&
                    btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
                        panic("segkp_fault: accessing redzone");

                /*
                 * This fault may occur while the page is being F_SOFTLOCK'ed.
                 * Return since a 2nd segkp_load is unnecessary and also would
                 * result in the page being locked twice and eventually
                 * hang the thread_reaper thread.
                 */
                if (kpd->kp_flags & KPD_LOCKED) {
                        mutex_exit(&kpd->kp_lock);
                        return (0);
                }

                err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
                mutex_exit(&kpd->kp_lock);
                return (err ? FC_MAKE_ERR(err) : 0);
        }

        if (type == F_SOFTUNLOCK) {
                uint_t  flags;

                /*
                 * Make sure the addr is LOCKED and it has anon backing
                 * before unlocking
                 */
                if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
                        panic("segkp_fault: bad unlock");
                        /*NOTREACHED*/
                }

                if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
                    len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
                        panic("segkp_fault: bad range");
                        /*NOTREACHED*/
                }

                if (rw == S_WRITE)
                        flags = kpd->kp_flags | KPD_WRITEDIRTY;
                else
                        flags = kpd->kp_flags;
                err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
                kpd->kp_flags &= ~KPD_LOCKED;
                mutex_exit(&kpd->kp_lock);
                return (err ? FC_MAKE_ERR(err) : 0);
        }
        mutex_exit(&kpd->kp_lock);
        panic("segkp_fault: bogus fault type: %d\n", type);
        /*NOTREACHED*/
}

/*
 * Check that the given protections suffice over the range specified by
 * vaddr and len.  For this segment type, the only issue is whether or
 * not the range lies completely within the mapped part of an allocated
 * resource.
 */
/* ARGSUSED */
static int
segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
{
        struct segkp_data *kpd = NULL;
        caddr_t mbase;
        size_t mlen;

        if ((kpd = segkp_find(seg, vaddr)) == NULL)
                return (EACCES);

        mutex_enter(&kpd->kp_lock);
        mbase = stom(kpd->kp_base, kpd->kp_flags);
        mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
        if (len > mlen || vaddr < mbase ||
            ((vaddr + len) > (mbase + mlen))) {
                mutex_exit(&kpd->kp_lock);
                return (EACCES);
        }
        mutex_exit(&kpd->kp_lock);
        return (0);
}


/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segkp we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
        return (0);
}

/*
 * Load and possibly lock intra-slot resources in the range given by
 * vaddr and len.
 */
static int
segkp_load(
        struct hat *hat,
        struct seg *seg,
        caddr_t vaddr,
        size_t len,
        struct segkp_data *kpd,
        uint_t flags)
{
        caddr_t va;
        caddr_t vlim;
        ulong_t i;
        uint_t lock;

        ASSERT(MUTEX_HELD(&kpd->kp_lock));

        len = P2ROUNDUP(len, PAGESIZE);

        /* If locking, reserve physical memory */
        if (flags & KPD_LOCKED) {
                pgcnt_t pages = btop(len);
                if ((kpd->kp_flags & KPD_NO_ANON) == 0)
                        atomic_add_long(&anon_segkp_pages_locked, pages);
                (void) page_resv(pages, KM_SLEEP);
        }

        /*
         * Loop through the pages in the given range.
         */
        va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
        vaddr = va;
        vlim = va + len;
        lock = flags & KPD_LOCKED;
        i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
        for (; va < vlim; va += PAGESIZE, i++) {
                page_t          *pl[2]; /* second element NULL terminator */
                struct vnode    *vp;
                anoff_t         off;
                int             err;
                struct anon     *ap;

                /*
                 * Summon the page.  If it's not resident, arrange
                 * for synchronous i/o to pull it in.
                 */
                ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
                swap_xlate(ap, &vp, &off);

                /*
                 * The returned page list will have exactly one entry,
                 * which is returned to us already kept.
                 */
                err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
                    pl, PAGESIZE, seg, va, S_READ, kcred, NULL);

                if (err) {
                        /*
                         * Back out of what we've done so far.
                         */
                        (void) segkp_unlock(hat, seg, vaddr,
                            (va - vaddr), kpd, flags);
                        return (err);
                }

                /*
                 * Load an MMU translation for the page.
                 */
                hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
                    lock ? HAT_LOAD_LOCK : HAT_LOAD);

                if (!lock) {
                        /*
                         * Now, release "shared" lock on the page.
                         */
                        page_unlock(pl[0]);
                }
        }
        return (0);
}

/*
 * At the very least, unload the MMU translations and unlock the range if
 * locked. Can be called with the flag value KPD_WRITEDIRTY, which specifies
 * that any dirty pages should be written to disk.
 */
static int
segkp_unlock(
        struct hat *hat,
        struct seg *seg,
        caddr_t vaddr,
        size_t len,
        struct segkp_data *kpd,
        uint_t flags)
{
        caddr_t va;
        caddr_t vlim;
        ulong_t i;
        struct page *pp;
        struct vnode *vp;
        anoff_t off;
        struct anon *ap;

#ifdef lint
        seg = seg;
#endif /* lint */

        ASSERT(MUTEX_HELD(&kpd->kp_lock));

        /*
         * Loop through the pages in the given range. It is assumed
         * segkp_unlock is called with a page-aligned base.
         */
        va = vaddr;
        vlim = va + len;
        i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
        hat_unload(hat, va, len,
            ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
        for (; va < vlim; va += PAGESIZE, i++) {
                /*
                 * Find the page associated with this part of the
                 * slot, tracking it down through its associated swap
                 * space.
                 */
                ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
                swap_xlate(ap, &vp, &off);
                if (flags & KPD_LOCKED) {
                        if ((pp = page_find(vp, off)) == NULL) {
                                panic("segkp_softunlock: missing page");
                                /*NOTREACHED*/
                        }
                } else {
                        /*
                         * Nothing to do if the slot is not locked and the
                         * page doesn't exist.
                         */
                        if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
                                continue;
                }

                /*
                 * If the page doesn't have any translations, is
                 * dirty and not being shared, then push it out
                 * asynchronously and avoid waiting for the
                 * pageout daemon to do it for us.
                 *
                 * XXX - Do we really need to get the "exclusive"
                 * lock via an upgrade?
                 */
                if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
                    hat_ismod(pp) && page_tryupgrade(pp)) {
                        /*
                         * Hold the vnode before releasing the page lock to
                         * prevent it from being freed and re-used by some
                         * other thread.
                         */
                        VN_HOLD(vp);
                        page_unlock(pp);

                        /*
                         * Want most powerful credentials we can get so
                         * use kcred.
                         */
                        (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
                            B_ASYNC | B_FREE, kcred, NULL);
                        VN_RELE(vp);
                } else {
                        page_unlock(pp);
                }
        }

        /* If unlocking, release physical memory */
        if (flags & KPD_LOCKED) {
                pgcnt_t pages = btopr(len);
                if ((kpd->kp_flags & KPD_NO_ANON) == 0)
                        atomic_add_long(&anon_segkp_pages_locked, -pages);
                page_unresv(pages);
        }
        return (0);
}

/*
 * Insert the kpd in the hash table.
 */
static void
segkp_insert(struct seg *seg, struct segkp_data *kpd)
{
        struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
        int index;

        /*
         * Insert the kpd based on the address that will be returned
         * via segkp_release.
         */
        index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
        mutex_enter(&segkp_lock);
        kpd->kp_next = kpsd->kpsd_hash[index];
        kpsd->kpsd_hash[index] = kpd;
        mutex_exit(&segkp_lock);
}

/*
 * Remove kpd from the hash table.
 */
static void
segkp_delete(struct seg *seg, struct segkp_data *kpd)
{
        struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
        struct segkp_data **kpp;
        int index;

        ASSERT(MUTEX_HELD(&segkp_lock));

        index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
        for (kpp = &kpsd->kpsd_hash[index];
            *kpp != NULL; kpp = &((*kpp)->kp_next)) {
                if (*kpp == kpd) {
                        *kpp = kpd->kp_next;
                        return;
                }
        }
        panic("segkp_delete: unable to find element to delete");
        /*NOTREACHED*/
}

/*
 * Find the kpd associated with a vaddr.
 *
 * Most of the callers of segkp_find will pass the vaddr that
 * hashes to the desired index, but there are cases where
 * this is not true in which case we have to (potentially) scan
 * the whole table looking for it. This should be very rare
 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
 * middle of the segkp_data region).
 */
static struct segkp_data *
segkp_find(struct seg *seg, caddr_t vaddr)
{
        struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
        struct segkp_data *kpd;
        int     i;
        int     stop;

        i = stop = SEGKP_HASH(vaddr);
        mutex_enter(&segkp_lock);
        do {
                for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
                    kpd = kpd->kp_next) {
                        if (vaddr >= kpd->kp_base &&
                            vaddr < kpd->kp_base + kpd->kp_len) {
                                mutex_exit(&segkp_lock);
                                return (kpd);
                        }
                }
                if (--i < 0)
                        i = SEGKP_HASHSZ - 1;   /* Wrap */
        } while (i != stop);
        mutex_exit(&segkp_lock);
        return (NULL);          /* Not found */
}

/*
 * Returns the size of the swappable area.
 */
size_t
swapsize(caddr_t v)
{
        struct segkp_data *kpd;

        if ((kpd = segkp_find(segkp, v)) != NULL)
                return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
        else
                return (0);
}

/*
 * Dump out all the active segkp pages
 */
static void
segkp_dump(struct seg *seg)
{
        int i;
        struct segkp_data *kpd;
        struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;

        for (i = 0; i < SEGKP_HASHSZ; i++) {
                for (kpd = kpsd->kpsd_hash[i];
                    kpd != NULL; kpd = kpd->kp_next) {
                        pfn_t pfn;
                        caddr_t addr;
                        caddr_t eaddr;

                        addr = kpd->kp_base;
                        eaddr = addr + kpd->kp_len;
                        while (addr < eaddr) {
                                ASSERT(seg->s_as == &kas);
                                pfn = hat_getpfnum(seg->s_as->a_hat, addr);
                                if (pfn != PFN_INVALID)
                                        dump_addpage(seg->s_as, addr, pfn);
                                addr += PAGESIZE;
                                dump_timeleft = dump_timeout;
                        }
                }
        }
}

/*ARGSUSED*/
static int
segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
        return (ENOTSUP);
}

/*ARGSUSED*/
static int
segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
        return (ENODEV);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t   *
segkp_getpolicy(struct seg *seg, caddr_t addr)
{
        return (NULL);
}

/*ARGSUSED*/
static int
segkp_capable(struct seg *seg, segcapability_t capability)
{
        return (0);
}

#include <sys/mem_config.h>

/*ARGSUSED*/
static void
segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
{}

/*
 * During memory delete, turn off caches so that pages are not held.
 * A better solution may be to unlock the pages while they are
 * in the cache so that they may be collected naturally.
 */

/*ARGSUSED*/
static int
segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
{
        atomic_add_32(&segkp_indel, 1);
        segkp_cache_free();
        return (0);
}

/*ARGSUSED*/
static void
segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
{
        atomic_add_32(&segkp_indel, -1);
}

static kphysm_setup_vector_t segkp_mem_config_vec = {
        KPHYSM_SETUP_VECTOR_VERSION,
        segkp_mem_config_post_add,
        segkp_mem_config_pre_del,
        segkp_mem_config_post_del,
};

static void
segkpinit_mem_config(struct seg *seg)
{
        int ret;

        ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
        ASSERT(ret == 0);
}