1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
  53 #define SEGSPTADDR      (caddr_t)0x0
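/*
 * Note: SEGSPTADDR is the fixed base address at which the spt segment is
 * mapped into the dummy address space allocated in sptcreate() below; since
 * that as maps nothing else, address zero is a safe choice.
 */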
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
  60 /*
  61  * segspt_minfree is the memory left for the system after ISM
  62  * has locked its pages; it is set to 5% of availrmem in
  63  * sptcreate() when ISM is created.  ISM should not use more
  64  * than ~90% of availrmem; if it does, the performance of the
  65  * system may decrease.  Machines with large memories may be
  66  * able to use more memory for ISM, so we set the default
  67  * segspt_minfree to 5% (which gives ISM at most 95% of availrmem).
  68  * If somebody wants even more memory for ISM (risking hanging
  69  * the system) they can patch segspt_minfree to a smaller number.
  70  */
  71 pgcnt_t segspt_minfree = 0;
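/*
 * For example (default sizing only): with availrmem at 1,000,000 pages,
 * sptcreate() sets segspt_minfree to availrmem / 20 = 50,000 pages,
 * leaving ISM free to lock up to the remaining 950,000 pages (95%).
 */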
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
  79 struct seg_ops segspt_ops = {
  80         .unmap          = segspt_unmap,
  81         .free           = segspt_free,
  82         .getpolicy      = segspt_getpolicy,
  83         .inherit        = seg_inherit_notsup,
  84 };
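/*
 * segspt_ops above is the ops vector for the spt segment itself, which
 * lives in the dummy address space created by sptcreate(); only the
 * unmap, free and getpolicy entry points (plus a not-supported inherit)
 * are provided for it.  The segspt_shmops vector declared below backs the
 * per-process segments through which each attaching process maps the
 * shared pages.
 */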
  85 
  86 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
  87 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
  88 static void segspt_shmfree(struct seg *seg);
  89 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
  90                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
  91 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
  92 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
  93                         register size_t len, register uint_t prot);
  94 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
  95                         uint_t prot);
  96 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
  97 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
  98                         register char *vec);
  99 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
 100                         int attr, uint_t flags);
 101 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 102                         int attr, int op, ulong_t *lockmap, size_t pos);
 103 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 104                         uint_t *protv);
 105 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 106 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 107 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 108 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 109                         uint_t behav);
 110 static void segspt_shmdump(struct seg *seg);
 111 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 112                         struct page ***, enum lock_type, enum seg_rw);
 113 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
 114 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 115 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 116 static int segspt_shmcapable(struct seg *, segcapability_t);
 117 
 118 struct seg_ops segspt_shmops = {
 119         .dup            = segspt_shmdup,
 120         .unmap          = segspt_shmunmap,
 121         .free           = segspt_shmfree,
 122         .fault          = segspt_shmfault,
 123         .faulta         = segspt_shmfaulta,
 124         .setprot        = segspt_shmsetprot,
 125         .checkprot      = segspt_shmcheckprot,
 126         .kluster        = segspt_shmkluster,
 127         .sync           = segspt_shmsync,
 128         .incore         = segspt_shmincore,
 129         .lockop         = segspt_shmlockop,
 130         .getprot        = segspt_shmgetprot,
 131         .getoffset      = segspt_shmgetoffset,
 132         .gettype        = segspt_shmgettype,
 133         .getvp          = segspt_shmgetvp,
 134         .advise         = segspt_shmadvise,
 135         .dump           = segspt_shmdump,
 136         .pagelock       = segspt_shmpagelock,
 137         .setpagesize    = segspt_shmsetpgsz,
 138         .getmemid       = segspt_shmgetmemid,
 139         .getpolicy      = segspt_shmgetpolicy,
 140         .capable        = segspt_shmcapable,
 141         .inherit        = seg_inherit_notsup,
 142 };
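/*
 * Most segspt_shmops entry points operate on a process-private shm
 * segment but reach the shared state through s_data: the shm_data points
 * at the underlying spt segment, whose spt_data holds the anon_map,
 * cached page list and flags shared by all attached processes.
 */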
 143 
 144 static void segspt_purge(struct seg *seg);
 145 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 146                 enum seg_rw, int);
 147 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 148                 page_t **ppa);
 149 
 150 
 151 
 152 /*ARGSUSED*/
 153 int
 154 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 155         uint_t prot, uint_t flags, uint_t share_szc)
 156 {
 157         int     err;
 158         struct  as      *newas;
 159         struct  segspt_crargs sptcargs;
 160 
 161 #ifdef DEBUG
 162         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
 163                         tnf_ulong, size, size );
 164 #endif
 165         if (segspt_minfree == 0)        /* leave min 5% of availrmem */
 166                 segspt_minfree = availrmem/20;  /* for the system */
 167 
 168         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 169                 return (EINVAL);
 170 
 171         /*
 172          * get a new as for this shared memory segment
 173          */
 174         newas = as_alloc();
 175         newas->a_proc = NULL;
 176         sptcargs.amp = amp;
 177         sptcargs.prot = prot;
 178         sptcargs.flags = flags;
 179         sptcargs.szc = share_szc;
 180         /*
 181          * create a shared page table (spt) segment
 182          */
 183 
 184         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 185                 as_free(newas);
 186                 return (err);
 187         }
 188         *sptseg = sptcargs.seg_spt;
 189         return (0);
 190 }
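/*
 * sptcreate() and sptdestroy() are the interface used by the System V
 * shared memory code (shm.c) to set up and tear down the shared page
 * table segment when a shared memory identifier is attached as ISM
 * (SHM_SHARE_MMU) or DISM (SHM_PAGEABLE).
 */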
 191 
 192 void
 193 sptdestroy(struct as *as, struct anon_map *amp)
 194 {
 195 
 196 #ifdef DEBUG
 197         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 198 #endif
 199         (void) as_unmap(as, SEGSPTADDR, amp->size);
 200         as_free(as);
 201 }
 202 
 203 /*
 204  * called from seg_free().
 205  * free (i.e., unlock, unmap, return to free list)
 206  *  all the pages in the given seg.
 207  */
 208 void
 209 segspt_free(struct seg  *seg)
 210 {
 211         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 212 
 213         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 214 
 215         if (sptd != NULL) {
 216                 if (sptd->spt_realsize)
 217                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 218 
 219                 if (sptd->spt_ppa_lckcnt)
 220                         kmem_free(sptd->spt_ppa_lckcnt,
 221                             sizeof (*sptd->spt_ppa_lckcnt)
 222                             * btopr(sptd->spt_amp->size));
 223                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 224                 cv_destroy(&sptd->spt_cv);
 225                 mutex_destroy(&sptd->spt_lock);
 226                 kmem_free(sptd, sizeof (*sptd));
 227         }
 228 }
 229 
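/*
 * sync is a no-op for SysV shared memory: the pages are anonymous (swap
 * backed), so there is no file to which modified pages could be flushed.
 */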
 230 /*ARGSUSED*/
 231 static int
 232 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 233         uint_t flags)
 234 {
 235         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 236 
 237         return (0);
 238 }
 239 
 240 /*ARGSUSED*/
 241 static size_t
 242 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 243 {
 244         caddr_t eo_seg;
 245         pgcnt_t npages;
 246         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 247         struct seg      *sptseg;
 248         struct spt_data *sptd;
 249 
 250         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 251 #ifdef lint
 252         seg = seg;
 253 #endif
 254         sptseg = shmd->shm_sptseg;
 255         sptd = sptseg->s_data;
 256 
 257         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 258                 eo_seg = addr + len;
 259                 while (addr < eo_seg) {
 260                         /* page exists, and it's locked. */
 261                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 262                             SEG_PAGE_ANON;
 263                         addr += PAGESIZE;
 264                 }
 265                 return (len);
 266         } else {
 267                 struct  anon_map *amp = shmd->shm_amp;
 268                 struct  anon    *ap;
 269                 page_t          *pp;
 270                 pgcnt_t         anon_index;
 271                 struct vnode    *vp;
 272                 u_offset_t      off;
 273                 ulong_t         i;
 274                 int             ret;
 275                 anon_sync_obj_t cookie;
 276 
 277                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 278                 anon_index = seg_page(seg, addr);
 279                 npages = btopr(len);
 280                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 281                         return (EINVAL);
 282                 }
 283                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 284                 for (i = 0; i < npages; i++, anon_index++) {
 285                         ret = 0;
 286                         anon_array_enter(amp, anon_index, &cookie);
 287                         ap = anon_get_ptr(amp->ahp, anon_index);
 288                         if (ap != NULL) {
 289                                 swap_xlate(ap, &vp, &off);
 290                                 anon_array_exit(&cookie);
 291                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 292                                 if (pp != NULL) {
 293                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 294                                         page_unlock(pp);
 295                                 }
 296                         } else {
 297                                 anon_array_exit(&cookie);
 298                         }
 299                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 300                                 ret |= SEG_PAGE_LOCKED;
 301                         }
 302                         *vec++ = (char)ret;
 303                 }
 304                 ANON_LOCK_EXIT(&amp->a_rwlock);
 305                 return (len);
 306         }
 307 }
 308 
 309 static int
 310 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 311 {
 312         size_t share_size;
 313 
 314         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 315 
 316         /*
 317          * seg.s_size may have been rounded up to the largest page size
 318          * in shmat().
 319          * XXX This should be cleaned up.  sptdestroy should take a length
 320          * argument, the same as sptcreate.  Then this rounding would not
 321          * be needed (or could be done in shm.c), and only the check for
 322          * the full segment would be needed.
 323          *
 324          * XXX -- shouldn't raddr == 0 always? These tests don't seem
 325          * to be useful at all.
 326          */
 327         share_size = page_get_pagesize(seg->s_szc);
 328         ssize = P2ROUNDUP(ssize, share_size);
 329 
 330         if (raddr == seg->s_base && ssize == seg->s_size) {
 331                 seg_free(seg);
 332                 return (0);
 333         } else
 334                 return (EINVAL);
 335 }
 336 
 337 int
 338 segspt_create(struct seg *seg, caddr_t argsp)
 339 {
 340         int             err;
 341         caddr_t         addr = seg->s_base;
 342         struct spt_data *sptd;
 343         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 344         struct anon_map *amp = sptcargs->amp;
 345         struct kshmid   *sp = amp->a_sp;
 346         struct  cred    *cred = CRED();
 347         ulong_t         i, j, anon_index = 0;
 348         pgcnt_t         npages = btopr(amp->size);
 349         struct vnode    *vp;
 350         page_t          **ppa;
 351         uint_t          hat_flags;
 352         size_t          pgsz;
 353         pgcnt_t         pgcnt;
 354         caddr_t         a;
 355         pgcnt_t         pidx;
 356         size_t          sz;
 357         proc_t          *procp = curproc;
 358         rctl_qty_t      lockedbytes = 0;
 359         kproject_t      *proj;
 360 
 361         /*
 362          * We are holding the a_lock on the underlying dummy as,
 363          * so we can make calls to the HAT layer.
 364          */
 365         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 366         ASSERT(sp != NULL);
 367 
 368 #ifdef DEBUG
 369         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 370             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 371 #endif
 372         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 373                 if (err = anon_swap_adjust(npages))
 374                         return (err);
 375         }
 376         err = ENOMEM;
 377 
 378         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 379                 goto out1;
 380 
 381         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 382                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 383                     KM_NOSLEEP)) == NULL)
 384                         goto out2;
 385         }
 386 
 387         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 388 
 389         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 390                 goto out3;
 391 
 392         seg->s_ops = &segspt_ops;
 393         sptd->spt_vp = vp;
 394         sptd->spt_amp = amp;
 395         sptd->spt_prot = sptcargs->prot;
 396         sptd->spt_flags = sptcargs->flags;
 397         seg->s_data = (caddr_t)sptd;
 398         sptd->spt_ppa = NULL;
 399         sptd->spt_ppa_lckcnt = NULL;
 400         seg->s_szc = sptcargs->szc;
 401         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 402         sptd->spt_gen = 0;
 403 
 404         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 405         if (seg->s_szc > amp->a_szc) {
 406                 amp->a_szc = seg->s_szc;
 407         }
 408         ANON_LOCK_EXIT(&amp->a_rwlock);
 409 
 410         /*
 411          * Set policy to affect initial allocation of pages in
 412          * anon_map_createpages()
 413          */
 414         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 415             NULL, 0, ptob(npages));
 416 
 417         if (sptcargs->flags & SHM_PAGEABLE) {
 418                 size_t  share_sz;
 419                 pgcnt_t new_npgs, more_pgs;
 420                 struct anon_hdr *nahp;
 421                 zone_t *zone;
 422 
 423                 share_sz = page_get_pagesize(seg->s_szc);
 424                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
 425                         /*
 426                          * We round up the size of the anon array to a
 427                          * 4 M boundary because we always create 4 M worth
 428                          * of pages when locking and faulting pages, and
 429                          * this way we don't have to check all the corner
 430                          * cases, e.g. whether there is enough space to
 431                          * allocate a 4 M page.
 432                          */
 433                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 434                         more_pgs = new_npgs - npages;
 435 
 436                         /*
 437                          * The zone will never be NULL, as a fully created
 438                          * shm always has an owning zone.
 439                          */
 440                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 441                         ASSERT(zone != NULL);
 442                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 443                                 err = ENOMEM;
 444                                 goto out4;
 445                         }
 446 
 447                         nahp = anon_create(new_npgs, ANON_SLEEP);
 448                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 449                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 450                             ANON_SLEEP);
 451                         anon_release(amp->ahp, npages);
 452                         amp->ahp = nahp;
 453                         ASSERT(amp->swresv == ptob(npages));
 454                         amp->swresv = amp->size = ptob(new_npgs);
 455                         ANON_LOCK_EXIT(&amp->a_rwlock);
 456                         npages = new_npgs;
 457                 }
 458 
 459                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 460                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 461                 sptd->spt_pcachecnt = 0;
 462                 sptd->spt_realsize = ptob(npages);
 463                 sptcargs->seg_spt = seg;
 464                 return (0);
 465         }
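        /*
         * Note: the DISM (SHM_PAGEABLE) case has already returned above
         * without creating or locking any pages; DISM pages are created
         * and locked lazily, via the fault and lock/pagelock paths.  The
         * code below handles ISM only, where every page is created and
         * permanently locked up front.
         */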
 466 
 467         /*
 468          * get array of pages for each anon slot in amp
 469          */
 470         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 471             seg, addr, S_CREATE, cred)) != 0)
 472                 goto out4;
 473 
 474         mutex_enter(&sp->shm_mlock);
 475 
 476         /* May be partially locked, so, count bytes to charge for locking */
 477         for (i = 0; i < npages; i++)
 478                 if (ppa[i]->p_lckcnt == 0)
 479                         lockedbytes += PAGESIZE;
 480 
 481         proj = sp->shm_perm.ipc_proj;
 482 
 483         if (lockedbytes > 0) {
 484                 mutex_enter(&procp->p_lock);
 485                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 486                         mutex_exit(&procp->p_lock);
 487                         mutex_exit(&sp->shm_mlock);
 488                         for (i = 0; i < npages; i++)
 489                                 page_unlock(ppa[i]);
 490                         err = ENOMEM;
 491                         goto out4;
 492                 }
 493                 mutex_exit(&procp->p_lock);
 494         }
 495 
 496         /*
 497          * addr is the initial address of the first page in the ppa list
 498          */
 499         for (i = 0; i < npages; i++) {
 500                 /* attempt to lock all pages */
 501                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 502                         /*
 503                          * if unable to lock any page, unlock all
 504                          * of them and return error
 505                          */
 506                         for (j = 0; j < i; j++)
 507                                 page_pp_unlock(ppa[j], 0, 1);
 508                         for (i = 0; i < npages; i++)
 509                                 page_unlock(ppa[i]);
 510                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 511                         mutex_exit(&sp->shm_mlock);
 512                         err = ENOMEM;
 513                         goto out4;
 514                 }
 515         }
 516         mutex_exit(&sp->shm_mlock);
 517 
 518         /*
 519          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
 520          * for the entire life of the segment, for example platforms
 521          * that do not support Dynamic Reconfiguration.
 522          */
 523         hat_flags = HAT_LOAD_SHARE;
 524         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 525                 hat_flags |= HAT_LOAD_LOCK;
 526 
 527         /*
 528          * Load translations one large page at a time
 529          * to make sure we don't create mappings bigger than the
 530          * segment's size code, in case the underlying pages
 531          * are shared with a segvn segment that uses a bigger
 532          * size code than we do.
 533          */
 534         pgsz = page_get_pagesize(seg->s_szc);
 535         pgcnt = page_get_pagecnt(seg->s_szc);
 536         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 537                 sz = MIN(pgsz, ptob(npages - pidx));
 538                 hat_memload_array(seg->s_as->a_hat, a, sz,
 539                     &ppa[pidx], sptd->spt_prot, hat_flags);
 540         }
 541 
 542         /*
 543          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 544          * we will leave the pages locked SE_SHARED for the life
 545          * of the ISM segment. This will prevent any calls to
 546          * hat_pageunload() on this ISM segment for those platforms.
 547          */
 548         if (!(hat_flags & HAT_LOAD_LOCK)) {
 549                 /*
 550                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 551                  * we no longer need to hold the SE_SHARED lock on the pages,
 552                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 553                  * SE_SHARED lock on the pages as necessary.
 554                  */
 555                 for (i = 0; i < npages; i++)
 556                         page_unlock(ppa[i]);
 557         }
 558         sptd->spt_pcachecnt = 0;
 559         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 560         sptd->spt_realsize = ptob(npages);
 561         atomic_add_long(&spt_used, npages);
 562         sptcargs->seg_spt = seg;
 563         return (0);
 564 
 565 out4:
 566         seg->s_data = NULL;
 567         kmem_free(vp, sizeof (*vp));
 568         cv_destroy(&sptd->spt_cv);
 569 out3:
 570         mutex_destroy(&sptd->spt_lock);
 571         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 572                 kmem_free(ppa, (sizeof (*ppa) * npages));
 573 out2:
 574         kmem_free(sptd, sizeof (*sptd));
 575 out1:
 576         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 577                 anon_swap_restore(npages);
 578         return (err);
 579 }
 580 
 581 /*ARGSUSED*/
 582 void
 583 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 584 {
 585         struct page     *pp;
 586         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 587         pgcnt_t         npages;
 588         ulong_t         anon_idx;
 589         struct anon_map *amp;
 590         struct anon     *ap;
 591         struct vnode    *vp;
 592         u_offset_t      off;
 593         uint_t          hat_flags;
 594         int             root = 0;
 595         pgcnt_t         pgs, curnpgs = 0;
 596         page_t          *rootpp;
 597         rctl_qty_t      unlocked_bytes = 0;
 598         kproject_t      *proj;
 599         kshmid_t        *sp;
 600 
 601         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 602 
 603         len = P2ROUNDUP(len, PAGESIZE);
 604 
 605         npages = btop(len);
 606 
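        /*
         * If the platform supports dynamic ISM unmap, or this is a DISM
         * segment, the translations were never loaded with HAT_LOAD_LOCK
         * (see segspt_create()), so a plain HAT_UNLOAD_UNMAP is enough;
         * otherwise the locked translations must also be unlocked as they
         * are unloaded.
         */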
 607         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 608         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 609             (sptd->spt_flags & SHM_PAGEABLE)) {
 610                 hat_flags = HAT_UNLOAD_UNMAP;
 611         }
 612 
 613         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 614 
 615         amp = sptd->spt_amp;
 616         if (sptd->spt_flags & SHM_PAGEABLE)
 617                 npages = btop(amp->size);
 618 
 619         ASSERT(amp != NULL);
 620 
 621         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 622                 sp = amp->a_sp;
 623                 proj = sp->shm_perm.ipc_proj;
 624                 mutex_enter(&sp->shm_mlock);
 625         }
 626         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 627                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 628                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 629                                 panic("segspt_free_pages: null app");
 630                                 /*NOTREACHED*/
 631                         }
 632                 } else {
 633                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 634                             == NULL)
 635                                 continue;
 636                 }
 637                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 638                 swap_xlate(ap, &vp, &off);
 639 
 640                 /*
 641                  * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
 642                  * the pages are not held SE_SHARED locked at this
 643                  * point.
 644                  *
 645                  * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 646                  * the pages are still held SE_SHARED locked from the
 647                  * original segspt_create().
 648                  *
 649                  * Our goal is to get the SE_EXCL lock on each page, remove
 650                  * the permanent lock on it and invalidate the page.
 651                  */
 652                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 653                         if (hat_flags == HAT_UNLOAD_UNMAP)
 654                                 pp = page_lookup(vp, off, SE_EXCL);
 655                         else {
 656                                 if ((pp = page_find(vp, off)) == NULL) {
 657                                         panic("segspt_free_pages: "
 658                                             "page not locked");
 659                                         /*NOTREACHED*/
 660                                 }
 661                                 if (!page_tryupgrade(pp)) {
 662                                         page_unlock(pp);
 663                                         pp = page_lookup(vp, off, SE_EXCL);
 664                                 }
 665                         }
 666                         if (pp == NULL) {
 667                                 panic("segspt_free_pages: "
 668                                     "page not in the system");
 669                                 /*NOTREACHED*/
 670                         }
 671                         ASSERT(pp->p_lckcnt > 0);
 672                         page_pp_unlock(pp, 0, 1);
 673                         if (pp->p_lckcnt == 0)
 674                                 unlocked_bytes += PAGESIZE;
 675                 } else {
 676                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 677                                 continue;
 678                 }
 679                 /*
 680                  * It's logical to invalidate the pages here as in most cases
 681                  * these were created by segspt.
 682                  */
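                /*
                 * Large pages are destroyed as a unit: we remember the
                 * root page and count down the constituent pages as we
                 * encounter them, calling page_destroy_pages() only once
                 * the last constituent has been seen.  Small pages are
                 * simply disposed of one at a time below.
                 */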
 683                 if (pp->p_szc != 0) {
 684                         if (root == 0) {
 685                                 ASSERT(curnpgs == 0);
 686                                 root = 1;
 687                                 rootpp = pp;
 688                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 689                                 ASSERT(pgs > 1);
 690                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 691                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 692                                 curnpgs--;
 693                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 694                                 ASSERT(curnpgs == 1);
 695                                 ASSERT(page_pptonum(pp) ==
 696                                     page_pptonum(rootpp) + (pgs - 1));
 697                                 page_destroy_pages(rootpp);
 698                                 root = 0;
 699                                 curnpgs = 0;
 700                         } else {
 701                                 ASSERT(curnpgs > 1);
 702                                 ASSERT(page_pptonum(pp) ==
 703                                     page_pptonum(rootpp) + (pgs - curnpgs));
 704                                 curnpgs--;
 705                         }
 706                 } else {
 707                         if (root != 0 || curnpgs != 0) {
 708                                 panic("segspt_free_pages: bad large page");
 709                                 /*NOTREACHED*/
 710                         }
 711                         /*
 712                          * Before destroying the pages, we need to take care
 713                          * of the rctl locked memory accounting. For that
 714                          * we need to calculate the unlocked_bytes.
 715                          */
 716                         if (pp->p_lckcnt > 0)
 717                                 unlocked_bytes += PAGESIZE;
 718                         /*LINTED: constant in conditional context */
 719                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 720                 }
 721         }
 722         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 723                 if (unlocked_bytes > 0)
 724                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 725                 mutex_exit(&sp->shm_mlock);
 726         }
 727         if (root != 0 || curnpgs != 0) {
 728                 panic("segspt_free_pages: bad large page");
 729                 /*NOTREACHED*/
 730         }
 731 
 732         /*
 733          * mark that pages have been released
 734          */
 735         sptd->spt_realsize = 0;
 736 
 737         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 738                 atomic_add_long(&spt_used, -npages);
 739                 anon_swap_restore(npages);
 740         }
 741 }
 742 
 743 /*
 744  * Get memory allocation policy info for specified address in given segment
 745  */
 746 static lgrp_mem_policy_info_t *
 747 segspt_getpolicy(struct seg *seg, caddr_t addr)
 748 {
 749         struct anon_map         *amp;
 750         ulong_t                 anon_index;
 751         lgrp_mem_policy_info_t  *policy_info;
 752         struct spt_data         *spt_data;
 753 
 754         ASSERT(seg != NULL);
 755 
 756         /*
 757          * Get anon_map from segspt
 758          *
 759          * Assume that no lock needs to be held on the anon_map, since
 760          * it should be protected by its reference count, which must be
 761          * nonzero for an existing segment.
 762          * We do need to grab the readers lock on the policy tree, though.
 763          */
 764         spt_data = (struct spt_data *)seg->s_data;
 765         if (spt_data == NULL)
 766                 return (NULL);
 767         amp = spt_data->spt_amp;
 768         ASSERT(amp->refcnt != 0);
 769 
 770         /*
 771          * Get policy info
 772          *
 773          * Assume starting anon index of 0
 774          */
 775         anon_index = seg_page(seg, addr);
 776         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 777 
 778         return (policy_info);
 779 }
 780 
 781 /*
 782  * DISM only.
 783  * Return locked pages over a given range.
 784  *
 785  * We will cache all DISM locked pages and save the pplist for the
 786  * entire segment in the ppa field of the underlying DISM segment structure.
 787  * Later, during a call to segspt_reclaim() we will use this ppa array
 788  * to page_unlock() all of the pages and then we will free this ppa list.
 789  */
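/*
 * Roughly, the protocol is: an L_PAGELOCK call returns (through *ppp) a
 * pointer into the cached ppa array, building the array and registering
 * it with seg_pinsert() on first use; the matching L_PAGEUNLOCK call
 * drops that reference via seg_pinactive(), and segspt_reclaim() later
 * unlocks the pages when the cache entry is reclaimed or purged.
 */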
 790 /*ARGSUSED*/
 791 static int
 792 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 793     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 794 {
 795         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 796         struct  seg     *sptseg = shmd->shm_sptseg;
 797         struct  spt_data *sptd = sptseg->s_data;
 798         pgcnt_t pg_idx, npages, tot_npages, npgs;
 799         struct  page **pplist, **pl, **ppa, *pp;
 800         struct  anon_map *amp;
 801         spgcnt_t        an_idx;
 802         int     ret = ENOTSUP;
 803         uint_t  pl_built = 0;
 804         struct  anon *ap;
 805         struct  vnode *vp;
 806         u_offset_t off;
 807         pgcnt_t claim_availrmem = 0;
 808         uint_t  szc;
 809 
 810         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 811         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 812 
 813         /*
 814          * We want to lock/unlock the entire ISM segment. Therefore,
 815          * we will be using the underlying sptseg and its base address
 816          * and length for the caching arguments.
 817          */
 818         ASSERT(sptseg);
 819         ASSERT(sptd);
 820 
 821         pg_idx = seg_page(seg, addr);
 822         npages = btopr(len);
 823 
 824         /*
 825          * check if the request is larger than number of pages covered
 826          * by amp
 827          */
 828         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 829                 *ppp = NULL;
 830                 return (ENOTSUP);
 831         }
 832 
 833         if (type == L_PAGEUNLOCK) {
 834                 ASSERT(sptd->spt_ppa != NULL);
 835 
 836                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 837                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 838 
 839                 /*
 840                  * If someone is blocked while unmapping, we purge
 841                  * segment page cache and thus reclaim pplist synchronously
 842                  * without waiting for seg_pasync_thread. This speeds up
 843                  * unmapping in cases where munmap(2) is called, while
 844                  * raw async i/o is still in progress or where a thread
 845                  * exits on data fault in a multithreaded application.
 846                  */
 847                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 848                     (AS_ISUNMAPWAIT(seg->s_as) &&
 849                     shmd->shm_softlockcnt > 0)) {
 850                         segspt_purge(seg);
 851                 }
 852                 return (0);
 853         }
 854 
 855         /* The L_PAGELOCK case ... */
 856 
 857         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 858                 segspt_purge(seg);
 859                 /*
 860                  * For DISM the ppa array needs to be rebuilt, since
 861                  * the number of locked pages could have changed.
 862                  */
 863                 *ppp = NULL;
 864                 return (ENOTSUP);
 865         }
 866 
 867         /*
 868          * First try to find pages in segment page cache, without
 869          * holding the segment lock.
 870          */
 871         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 872             S_WRITE, SEGP_FORCE_WIRED);
 873         if (pplist != NULL) {
 874                 ASSERT(sptd->spt_ppa != NULL);
 875                 ASSERT(sptd->spt_ppa == pplist);
 876                 ppa = sptd->spt_ppa;
 877                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 878                         if (ppa[an_idx] == NULL) {
 879                                 seg_pinactive(seg, NULL, seg->s_base,
 880                                     sptd->spt_amp->size, ppa,
 881                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 882                                 *ppp = NULL;
 883                                 return (ENOTSUP);
 884                         }
 885                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 886                                 npgs = page_get_pagecnt(szc);
 887                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 888                         } else {
 889                                 an_idx++;
 890                         }
 891                 }
 892                 /*
 893                  * Since we cache the entire DISM segment, we want to
 894                  * set ppp to point to the first slot that corresponds
 895                  * to the requested addr, i.e. pg_idx.
 896                  */
 897                 *ppp = &(sptd->spt_ppa[pg_idx]);
 898                 return (0);
 899         }
 900 
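        /*
         * The lock-free lookup above missed; repeat it while holding
         * spt_lock so that we either find a ppa array another thread has
         * just finished building, or can safely build one ourselves.
         */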
 901         mutex_enter(&sptd->spt_lock);
 902         /*
 903          * try to find pages in segment page cache with mutex
 904          */
 905         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 906             S_WRITE, SEGP_FORCE_WIRED);
 907         if (pplist != NULL) {
 908                 ASSERT(sptd->spt_ppa != NULL);
 909                 ASSERT(sptd->spt_ppa == pplist);
 910                 ppa = sptd->spt_ppa;
 911                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 912                         if (ppa[an_idx] == NULL) {
 913                                 mutex_exit(&sptd->spt_lock);
 914                                 seg_pinactive(seg, NULL, seg->s_base,
 915                                     sptd->spt_amp->size, ppa,
 916                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 917                                 *ppp = NULL;
 918                                 return (ENOTSUP);
 919                         }
 920                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 921                                 npgs = page_get_pagecnt(szc);
 922                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 923                         } else {
 924                                 an_idx++;
 925                         }
 926                 }
 927                 /*
 928                  * Since we cache the entire DISM segment, we want to
 929                  * set ppp to point to the first slot that corresponds
 930                  * to the requested addr, i.e. pg_idx.
 931                  */
 932                 mutex_exit(&sptd->spt_lock);
 933                 *ppp = &(sptd->spt_ppa[pg_idx]);
 934                 return (0);
 935         }
 936         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 937             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 938                 mutex_exit(&sptd->spt_lock);
 939                 *ppp = NULL;
 940                 return (ENOTSUP);
 941         }
 942 
 943         /*
 944          * No need to worry about protections because DISM pages are always rw.
 945          */
 946         pl = pplist = NULL;
 947         amp = sptd->spt_amp;
 948 
 949         /*
 950          * Do we need to build the ppa array?
 951          */
 952         if (sptd->spt_ppa == NULL) {
 953                 pgcnt_t lpg_cnt = 0;
 954 
 955                 pl_built = 1;
 956                 tot_npages = btopr(sptd->spt_amp->size);
 957 
 958                 ASSERT(sptd->spt_pcachecnt == 0);
 959                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 960                 pl = pplist;
 961 
 962                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 963                 for (an_idx = 0; an_idx < tot_npages; ) {
 964                         ap = anon_get_ptr(amp->ahp, an_idx);
 965                         /*
 966                          * Cache only mlocked pages. For large pages,
 967                          * if one (constituent) page is mlocked,
 968                          * all pages for that large page
 969                          * are cached as well. This allows quick
 970                          * lookups in the ppa array.
 971                          */
 972                         if ((ap != NULL) && (lpg_cnt != 0 ||
 973                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
 974 
 975                                 swap_xlate(ap, &vp, &off);
 976                                 pp = page_lookup(vp, off, SE_SHARED);
 977                                 ASSERT(pp != NULL);
 978                                 if (lpg_cnt == 0) {
 979                                         lpg_cnt++;
 980                                         /*
 981                                          * For a small page, we are done --
 982                                          * lpg_count is reset to 0 below.
 983                                          *
 984                                          * For a large page, we are guaranteed
 985                                          * to find the anon structures of all
 986                                          * constituent pages and a non-zero
 987                                          * lpg_cnt ensures that we don't test
 988                                          * for mlock for these. We are done
 989                                          * when lpg_count reaches (npgs + 1).
 990                                          * If we are not the first constituent
 991                                          * page, restart at the first one.
 992                                          */
 993                                         npgs = page_get_pagecnt(pp->p_szc);
 994                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
 995                                                 an_idx = P2ALIGN(an_idx, npgs);
 996                                                 page_unlock(pp);
 997                                                 continue;
 998                                         }
 999                                 }
1000                                 if (++lpg_cnt > npgs)
1001                                         lpg_cnt = 0;
1002 
1003                                 /*
1004                                  * availrmem is decremented only
1005                                  * for unlocked pages
1006                                  */
1007                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1008                                         claim_availrmem++;
1009                                 pplist[an_idx] = pp;
1010                         }
1011                         an_idx++;
1012                 }
1013                 ANON_LOCK_EXIT(&amp->a_rwlock);
1014 
1015                 if (claim_availrmem) {
1016                         mutex_enter(&freemem_lock);
1017                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1018                                 mutex_exit(&freemem_lock);
1019                                 ret = ENOTSUP;
1020                                 claim_availrmem = 0;
1021                                 goto insert_fail;
1022                         } else {
1023                                 availrmem -= claim_availrmem;
1024                         }
1025                         mutex_exit(&freemem_lock);
1026                 }
1027 
1028                 sptd->spt_ppa = pl;
1029         } else {
1030                 /*
1031                  * We already have a valid ppa[].
1032                  */
1033                 pl = sptd->spt_ppa;
1034         }
1035 
1036         ASSERT(pl != NULL);
1037 
1038         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1039             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1040             segspt_reclaim);
1041         if (ret == SEGP_FAIL) {
1042                 /*
1043                  * seg_pinsert failed. We return
1044                  * ENOTSUP, so that the as_pagelock() code will
1045                  * then try the slower F_SOFTLOCK path.
1046                  */
1047                 if (pl_built) {
1048                         /*
1049                          * No one else has referenced the ppa[].
1050                          * We created it and we need to destroy it.
1051                          */
1052                         sptd->spt_ppa = NULL;
1053                 }
1054                 ret = ENOTSUP;
1055                 goto insert_fail;
1056         }
1057 
1058         /*
1059          * In either case, we increment softlockcnt on the 'real' segment.
1060          */
1061         sptd->spt_pcachecnt++;
1062         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1063 
1064         ppa = sptd->spt_ppa;
1065         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1066                 if (ppa[an_idx] == NULL) {
1067                         mutex_exit(&sptd->spt_lock);
1068                         seg_pinactive(seg, NULL, seg->s_base,
1069                             sptd->spt_amp->size,
1070                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1071                         *ppp = NULL;
1072                         return (ENOTSUP);
1073                 }
1074                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1075                         npgs = page_get_pagecnt(szc);
1076                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1077                 } else {
1078                         an_idx++;
1079                 }
1080         }
1081         /*
1082          * We can now drop the sptd->spt_lock since the ppa[]
1083          * exists and we have incremented pcachecnt.
1084          */
1085         mutex_exit(&sptd->spt_lock);
1086 
1087         /*
1088          * Since we cache the entire segment, we want to
1089          * set ppp to point to the first slot that corresponds
1090          * to the requested addr, i.e. pg_idx.
1091          */
1092         *ppp = &(sptd->spt_ppa[pg_idx]);
1093         return (0);
1094 
1095 insert_fail:
1096         /*
1097          * We will only reach this code if we tried and failed.
1098          *
1099          * And we can drop the lock on the dummy seg, once we've failed
1100          * to set up a new ppa[].
1101          */
1102         mutex_exit(&sptd->spt_lock);
1103 
1104         if (pl_built) {
1105                 if (claim_availrmem) {
1106                         mutex_enter(&freemem_lock);
1107                         availrmem += claim_availrmem;
1108                         mutex_exit(&freemem_lock);
1109                 }
1110 
1111                 /*
1112                  * We created pl and we need to destroy it.
1113                  */
1114                 pplist = pl;
1115                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1116                         if (pplist[an_idx] != NULL)
1117                                 page_unlock(pplist[an_idx]);
1118                 }
1119                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1120         }
1121 
1122         if (shmd->shm_softlockcnt <= 0) {
1123                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1124                         mutex_enter(&seg->s_as->a_contents);
1125                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1126                                 AS_CLRUNMAPWAIT(seg->s_as);
1127                                 cv_broadcast(&seg->s_as->a_cv);
1128                         }
1129                         mutex_exit(&seg->s_as->a_contents);
1130                 }
1131         }
1132         *ppp = NULL;
1133         return (ret);
1134 }
1135 
1136 
1137 
1138 /*
1139  * return locked pages over a given range.
1140  *
1141  * We will cache the entire ISM segment and save the pplist for the
1142  * entire segment in the ppa field of the underlying ISM segment structure.
1143  * Later, during a call to segspt_reclaim() we will use this ppa array
1144  * to page_unlock() all of the pages and then we will free this ppa list.
1145  */
1146 /*ARGSUSED*/
1147 static int
1148 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1149     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1150 {
1151         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1152         struct seg      *sptseg = shmd->shm_sptseg;
1153         struct spt_data *sptd = sptseg->s_data;
1154         pgcnt_t np, page_index, npages;
1155         caddr_t a, spt_base;
1156         struct page **pplist, **pl, *pp;
1157         struct anon_map *amp;
1158         ulong_t anon_index;
1159         int ret = ENOTSUP;
1160         uint_t  pl_built = 0;
1161         struct anon *ap;
1162         struct vnode *vp;
1163         u_offset_t off;
1164 
1165         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1166         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1167 
1168 
1169         /*
1170          * We want to lock/unlock the entire ISM segment. Therefore,
1171          * we will be using the underlying sptseg and its base address
1172          * and length for the caching arguments.
1173          */
1174         ASSERT(sptseg);
1175         ASSERT(sptd);
1176 
1177         if (sptd->spt_flags & SHM_PAGEABLE) {
1178                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1179         }
1180 
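        /*
         * From here on this is the non-pageable ISM case: every page of
         * the segment was created and locked in segspt_create(), so the
         * ppa array built below always covers the entire segment.
         */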
1181         page_index = seg_page(seg, addr);
1182         npages = btopr(len);
1183 
1184         /*
1185          * check if the request is larger than number of pages covered
1186          * by amp
1187          */
1188         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1189                 *ppp = NULL;
1190                 return (ENOTSUP);
1191         }
1192 
1193         if (type == L_PAGEUNLOCK) {
1194 
1195                 ASSERT(sptd->spt_ppa != NULL);
1196 
1197                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1198                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1199 
1200                 /*
1201                  * If someone is blocked while unmapping, we purge
1202                  * segment page cache and thus reclaim pplist synchronously
1203                  * without waiting for seg_pasync_thread. This speeds up
1204                  * unmapping in cases where munmap(2) is called, while
1205                  * raw async i/o is still in progress or where a thread
1206                  * exits on data fault in a multithreaded application.
1207                  */
1208                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1209                         segspt_purge(seg);
1210                 }
1211                 return (0);
1212         }
1213 
1214         /* The L_PAGELOCK case... */
1215 
1216         /*
1217          * First try to find pages in segment page cache, without
1218          * holding the segment lock.
1219          */
1220         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1221             S_WRITE, SEGP_FORCE_WIRED);
1222         if (pplist != NULL) {
1223                 ASSERT(sptd->spt_ppa == pplist);
1224                 ASSERT(sptd->spt_ppa[page_index]);
1225                 /*
1226                  * Since we cache the entire ISM segment, we want to
1227                  * set ppp to point to the first slot that corresponds
1228                  * to the requested addr, i.e. page_index.
1229                  */
1230                 *ppp = &(sptd->spt_ppa[page_index]);
1231                 return (0);
1232         }
1233 
1234         mutex_enter(&sptd->spt_lock);
1235 
1236         /*
1237          * try to find pages in segment page cache
1238          */
1239         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1240             S_WRITE, SEGP_FORCE_WIRED);
1241         if (pplist != NULL) {
1242                 ASSERT(sptd->spt_ppa == pplist);
1243                 /*
1244                  * Since we cache the entire segment, we want to
1245                  * set ppp to point to the first slot that corresponds
1246                  * to the requested addr, i.e. page_index.
1247                  */
1248                 mutex_exit(&sptd->spt_lock);
1249                 *ppp = &(sptd->spt_ppa[page_index]);
1250                 return (0);
1251         }
1252 
1253         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1254             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1255                 mutex_exit(&sptd->spt_lock);
1256                 *ppp = NULL;
1257                 return (ENOTSUP);
1258         }
1259 
1260         /*
1261          * No need to worry about protections because ISM pages
1262          * are always rw.
1263          */
1264         pl = pplist = NULL;
1265 
1266         /*
1267          * Do we need to build the ppa array?
1268          */
1269         if (sptd->spt_ppa == NULL) {
1270                 ASSERT(sptd->spt_ppa == pplist);
1271 
1272                 spt_base = sptseg->s_base;
1273                 pl_built = 1;
1274 
1275                 /*
1276                  * availrmem is decremented once during anon_swap_adjust()
1277                  * and is incremented during the anon_unresv(), which is
1278                  * called from shm_rm_amp() when the segment is destroyed.
1279                  */
1280                 amp = sptd->spt_amp;
1281                 ASSERT(amp != NULL);
1282 
1283                 /* pcachecnt is protected by sptd->spt_lock */
1284                 ASSERT(sptd->spt_pcachecnt == 0);
1285                 pplist = kmem_zalloc(sizeof (page_t *)
1286                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1287                 pl = pplist;
1288 
1289                 anon_index = seg_page(sptseg, spt_base);
1290 
1291                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1292                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1293                     a += PAGESIZE, anon_index++, pplist++) {
1294                         ap = anon_get_ptr(amp->ahp, anon_index);
1295                         ASSERT(ap != NULL);
1296                         swap_xlate(ap, &vp, &off);
1297                         pp = page_lookup(vp, off, SE_SHARED);
1298                         ASSERT(pp != NULL);
1299                         *pplist = pp;
1300                 }
1301                 ANON_LOCK_EXIT(&amp->a_rwlock);
1302 
1303                 if (a < (spt_base + sptd->spt_amp->size)) {
1304                         ret = ENOTSUP;
1305                         goto insert_fail;
1306                 }
1307                 sptd->spt_ppa = pl;
1308         } else {
1309                 /*
1310                  * We already have a valid ppa[].
1311                  */
1312                 pl = sptd->spt_ppa;
1313         }
1314 
1315         ASSERT(pl != NULL);
1316 
1317         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1318             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1319             segspt_reclaim);
1320         if (ret == SEGP_FAIL) {
1321                 /*
1322                  * seg_pinsert failed. We return
1323                  * ENOTSUP, so that the as_pagelock() code will
1324                  * then try the slower F_SOFTLOCK path.
1325                  */
1326                 if (pl_built) {
1327                         /*
1328                          * No one else has referenced the ppa[].
1329                          * We created it and we need to destroy it.
1330                          */
1331                         sptd->spt_ppa = NULL;
1332                 }
1333                 ret = ENOTSUP;
1334                 goto insert_fail;
1335         }
1336 
1337         /*
1338          * In either case, we increment softlockcnt on the 'real' segment.
1339          */
1340         sptd->spt_pcachecnt++;
1341         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1342 
1343         /*
1344          * We can now drop the sptd->spt_lock since the ppa[]
1345          * exists and we have incremented pcachecnt.
1346          */
1347         mutex_exit(&sptd->spt_lock);
1348 
1349         /*
1350          * Since we cache the entire segment, we want to
1351          * set ppp to point to the first slot that corresponds
1352          * to the requested addr, i.e. page_index.
1353          */
1354         *ppp = &(sptd->spt_ppa[page_index]);
1355         return (0);
1356 
1357 insert_fail:
1358         /*
1359          * We only reach this code if we tried and failed.
1360          *
1361          * Since we failed to set up a new ppa[], we can now drop
1362          * the lock on the dummy seg.
1363          */
1364         mutex_exit(&sptd->spt_lock);
1365 
1366         if (pl_built) {
1367                 /*
1368                  * We created pl and we need to destroy it.
1369                  */
1370                 pplist = pl;
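                     /*
                      * Unlock only the pages that were actually looked up
                      * before the failure; 'a' records how far the loop got.
                      */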
1371                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1372                 while (np) {
1373                         page_unlock(*pplist);
1374                         np--;
1375                         pplist++;
1376                 }
1377                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1378         }
1379         if (shmd->shm_softlockcnt <= 0) {
1380                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1381                         mutex_enter(&seg->s_as->a_contents);
1382                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1383                                 AS_CLRUNMAPWAIT(seg->s_as);
1384                                 cv_broadcast(&seg->s_as->a_cv);
1385                         }
1386                         mutex_exit(&seg->s_as->a_contents);
1387                 }
1388         }
1389         *ppp = NULL;
1390         return (ret);
1391 }
1392 
1393 /*
1394  * Purge any cached pages in the I/O page cache.
1395  */
1396 static void
1397 segspt_purge(struct seg *seg)
1398 {
1399         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1400 }
1401 
1402 static int
1403 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1404         enum seg_rw rw, int async)
1405 {
1406         struct seg *seg = (struct seg *)ptag;
1407         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1408         struct  seg     *sptseg;
1409         struct  spt_data *sptd;
1410         pgcnt_t npages, i, free_availrmem = 0;
1411         int     done = 0;
1412 
1413 #ifdef lint
1414         addr = addr;
1415 #endif
1416         sptseg = shmd->shm_sptseg;
1417         sptd = sptseg->s_data;
1418         npages = (len >> PAGESHIFT);
1419         ASSERT(npages);
1420         ASSERT(sptd->spt_pcachecnt != 0);
1421         ASSERT(sptd->spt_ppa == pplist);
1422         ASSERT(npages == btopr(sptd->spt_amp->size));
1423         ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1424 
1425         /*
1426          * Acquire the lock on the dummy seg and destroy the
1427          * ppa array IF this is the last pcachecnt.
1428          */
1429         mutex_enter(&sptd->spt_lock);
1430         if (--sptd->spt_pcachecnt == 0) {
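                     /*
                      * This was the last pcache reference: set the hat ref/mod
                      * bits for each page, drop its shared lock, and (for DISM)
                      * count pages with no user locks so availrmem can be
                      * credited back below.
                      */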
1431                 for (i = 0; i < npages; i++) {
1432                         if (pplist[i] == NULL) {
1433                                 continue;
1434                         }
1435                         if (rw == S_WRITE) {
1436                                 hat_setrefmod(pplist[i]);
1437                         } else {
1438                                 hat_setref(pplist[i]);
1439                         }
1440                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1441                             (sptd->spt_ppa_lckcnt[i] == 0))
1442                                 free_availrmem++;
1443                         page_unlock(pplist[i]);
1444                 }
1445                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1446                         mutex_enter(&freemem_lock);
1447                         availrmem += free_availrmem;
1448                         mutex_exit(&freemem_lock);
1449                 }
1450                 /*
1451                  * Since we want to cache/uncache the entire ISM segment,
1452                  * we track the pplist in a segspt-specific field, ppa,
1453                  * which is initialized at the time we add an entry to
1454                  * the cache.
1455                  */
1456                 ASSERT(sptd->spt_pcachecnt == 0);
1457                 kmem_free(pplist, sizeof (page_t *) * npages);
1458                 sptd->spt_ppa = NULL;
1459                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1460                 sptd->spt_gen++;
1461                 cv_broadcast(&sptd->spt_cv);
1462                 done = 1;
1463         }
1464         mutex_exit(&sptd->spt_lock);
1465 
1466         /*
1467          * If we are the pcache async thread or were called via
1468          * seg_ppurge_wiredpp(), we may not hold the AS lock (in this case
1469          * the async argument is not 0). This means that if softlockcnt
1470          * drops to 0 after the decrement below, the address space may get
1471          * freed. We can't allow that, since after softlockcnt drops to 0
1472          * we still need to access the as structure for a possible wakeup
1473          * of unmap waiters. To prevent the as from disappearing we take
1474          * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1475          * this mutex as a barrier to make sure this routine completes
1476          * before the segment is freed.
1477          *
1478          * The second complication in the async case is a possible missed
1479          * wakeup of an unmap wait thread. Without the as lock we may take
1480          * the a_contents lock before the unmap wait thread that was first
1481          * to see that softlockcnt was still not 0, and so fail to wake it
1482          * up. To avoid this race we set the nounmapwait flag in the as
1483          * structure if we drop softlockcnt to 0 while async is not 0; the
1484          * unmap wait thread will not block if this flag is set.
1485          */
1486         if (async)
1487                 mutex_enter(&shmd->shm_segfree_syncmtx);
1488 
1489         /*
1490          * Now decrement softlockcnt.
1491          */
1492         ASSERT(shmd->shm_softlockcnt > 0);
1493         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1494 
1495         if (shmd->shm_softlockcnt <= 0) {
1496                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1497                         mutex_enter(&seg->s_as->a_contents);
1498                         if (async)
1499                                 AS_SETNOUNMAPWAIT(seg->s_as);
1500                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1501                                 AS_CLRUNMAPWAIT(seg->s_as);
1502                                 cv_broadcast(&seg->s_as->a_cv);
1503                         }
1504                         mutex_exit(&seg->s_as->a_contents);
1505                 }
1506         }
1507 
1508         if (async)
1509                 mutex_exit(&shmd->shm_segfree_syncmtx);
1510 
1511         return (done);
1512 }
1513 
1514 /*
1515  * Do a F_SOFTUNLOCK call over the range requested.
1516  * The range must have already been F_SOFTLOCK'ed.
1517  *
1518  * The calls to acquire and release the anon map lock mutex were
1519  * removed in order to avoid a deadly embrace during a DR
1520  * memory delete operation.  (E.g., DR blocks while waiting for an
1521  * exclusive lock on a page that is being used for kaio; the
1522  * thread that will complete the kaio and call segspt_softunlock
1523  * blocks on the anon map lock; another thread holding the anon
1524  * map lock blocks on another page lock via the segspt_shmfault
1525  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1526  *
1527  * The appropriateness of the removal is based upon the following:
1528  * 1. If we are holding a segment's reader lock and the page is held
1529  * shared, then the corresponding element in anonmap which points to
1530  * anon struct cannot change and there is no need to acquire the
1531  * anonymous map lock.
1532  * 2. Threads in segspt_softunlock have a reader lock on the segment
1533  * and already have the shared page lock, so we are guaranteed that
1534  * the anon map slot cannot change and therefore can call anon_get_ptr()
1535  * without grabbing the anonymous map lock.
1536  * 3. Threads that softlock a shared page break copy-on-write, even if
1537  * it's a read.  Thus cow faults can be ignored with respect to soft
1538  * unlocking, since the breaking of cow means that the anon slot(s) will
1539  * not be shared.
1540  */
1541 static void
1542 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1543         size_t len, enum seg_rw rw)
1544 {
1545         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1546         struct seg      *sptseg;
1547         struct spt_data *sptd;
1548         page_t *pp;
1549         caddr_t adr;
1550         struct vnode *vp;
1551         u_offset_t offset;
1552         ulong_t anon_index;
1553         struct anon_map *amp;           /* XXX - for locknest */
1554         struct anon *ap = NULL;
1555         pgcnt_t npages;
1556 
1557         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1558 
1559         sptseg = shmd->shm_sptseg;
1560         sptd = sptseg->s_data;
1561 
1562         /*
1563          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1564          * and therefore their pages are SE_SHARED locked
1565          * for the entire life of the segment.
1566          */
1567         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1568             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1569                 goto softlock_decrement;
1570         }
1571 
1572         /*
1573          * Any thread is free to do a page_find and
1574          * page_unlock() on the pages within this seg.
1575          *
1576          * We are already holding the as->a_lock on the user's
1577          * real segment, but we need to hold the a_lock on the
1578          * underlying dummy as. This is mostly to satisfy the
1579          * underlying HAT layer.
1580          */
1581         AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1582         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1583         AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1584 
1585         amp = sptd->spt_amp;
1586         ASSERT(amp != NULL);
1587         anon_index = seg_page(sptseg, sptseg_addr);
1588 
1589         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1590                 ap = anon_get_ptr(amp->ahp, anon_index++);
1591                 ASSERT(ap != NULL);
1592                 swap_xlate(ap, &vp, &offset);
1593 
1594                 /*
1595                  * Use page_find() instead of page_lookup() to
1596                  * find the page since we know that it has a
1597                  * "shared" lock.
1598                  */
1599                 pp = page_find(vp, offset);
1600                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1601                 if (pp == NULL) {
1602                         panic("segspt_softunlock: "
1603                             "addr %p, ap %p, vp %p, off %llx",
1604                             (void *)adr, (void *)ap, (void *)vp, offset);
1605                         /*NOTREACHED*/
1606                 }
1607 
1608                 if (rw == S_WRITE) {
1609                         hat_setrefmod(pp);
1610                 } else if (rw != S_OTHER) {
1611                         hat_setref(pp);
1612                 }
1613                 page_unlock(pp);
1614         }
1615 
1616 softlock_decrement:
1617         npages = btopr(len);
1618         ASSERT(shmd->shm_softlockcnt >= npages);
1619         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1620         if (shmd->shm_softlockcnt == 0) {
1621                 /*
1622                  * All SOFTLOCKS are gone. Wake up any waiting
1623                  * unmappers so they can try again to unmap.
1624                  * Check for waiters first without the mutex
1625                  * held so we don't always grab the mutex on
1626                  * softunlocks.
1627                  */
1628                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1629                         mutex_enter(&seg->s_as->a_contents);
1630                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1631                                 AS_CLRUNMAPWAIT(seg->s_as);
1632                                 cv_broadcast(&seg->s_as->a_cv);
1633                         }
1634                         mutex_exit(&seg->s_as->a_contents);
1635                 }
1636         }
1637 }
1638 
1639 int
1640 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1641 {
1642         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1643         struct shm_data *shmd;
1644         struct anon_map *shm_amp = shmd_arg->shm_amp;
1645         struct spt_data *sptd;
1646         int error = 0;
1647 
1648         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1649 
1650         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1651         if (shmd == NULL)
1652                 return (ENOMEM);
1653 
1654         shmd->shm_sptas = shmd_arg->shm_sptas;
1655         shmd->shm_amp = shm_amp;
1656         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1657 
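             /*
              * Set the default lgroup shared-memory allocation policy over
              * the entire shared anon_map.
              */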
1658         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1659             NULL, 0, seg->s_size);
1660 
1661         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1662 
1663         seg->s_data = (void *)shmd;
1664         seg->s_ops = &segspt_shmops;
1665         seg->s_szc = shmd->shm_sptseg->s_szc;
1666         sptd = shmd->shm_sptseg->s_data;
1667 
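             /*
              * For DISM (pageable) segments, allocate the per-page vpage
              * array used to track locked pages; share the page tables only
              * if the HAT supports dynamic ISM unmap.  For regular ISM,
              * always share the page tables with the spt HAT.
              */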
1668         if (sptd->spt_flags & SHM_PAGEABLE) {
1669                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1670                     KM_NOSLEEP)) == NULL) {
1671                         seg->s_data = (void *)NULL;
1672                         kmem_free(shmd, (sizeof (*shmd)));
1673                         return (ENOMEM);
1674                 }
1675                 shmd->shm_lckpgs = 0;
1676                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1677                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1678                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1679                             seg->s_size, seg->s_szc)) != 0) {
1680                                 kmem_free(shmd->shm_vpage,
1681                                     btopr(shm_amp->size));
1682                         }
1683                 }
1684         } else {
1685                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1686                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1687                     seg->s_size, seg->s_szc);
1688         }
1689         if (error) {
1690                 seg->s_szc = 0;
1691                 seg->s_data = (void *)NULL;
1692                 kmem_free(shmd, (sizeof (*shmd)));
1693         } else {
1694                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1695                 shm_amp->refcnt++;
1696                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1697         }
1698         return (error);
1699 }
1700 
1701 int
1702 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1703 {
1704         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1705         int reclaim = 1;
1706 
1707         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
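             /*
              * If there are outstanding softlocks, purge the pcache once (it
              * may be holding them) and retry; if they persist, fail with
              * EAGAIN.
              */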
1708 retry:
1709         if (shmd->shm_softlockcnt > 0) {
1710                 if (reclaim == 1) {
1711                         segspt_purge(seg);
1712                         reclaim = 0;
1713                         goto retry;
1714                 }
1715                 return (EAGAIN);
1716         }
1717 
1718         if (ssize != seg->s_size) {
1719 #ifdef DEBUG
1720                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1721                     ssize, seg->s_size);
1722 #endif
1723                 return (EINVAL);
1724         }
1725 
1726         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1727             NULL, 0);
1728         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1729 
1730         seg_free(seg);
1731 
1732         return (0);
1733 }
1734 
1735 void
1736 segspt_shmfree(struct seg *seg)
1737 {
1738         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1739         struct anon_map *shm_amp = shmd->shm_amp;
1740 
1741         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1742 
1743         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1744             MC_UNLOCK, NULL, 0);
1745 
1746         /*
1747          * Need to increment refcnt when attaching
1748          * and decrement when detaching because of dup().
1749          */
1750         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1751         shm_amp->refcnt--;
1752         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1753 
1754         if (shmd->shm_vpage) {       /* only for DISM */
1755                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1756                 shmd->shm_vpage = NULL;
1757         }
1758 
1759         /*
1760          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1761          * still working with this segment without holding as lock.
1762          */
1763         ASSERT(shmd->shm_softlockcnt == 0);
1764         mutex_enter(&shmd->shm_segfree_syncmtx);
1765         mutex_destroy(&shmd->shm_segfree_syncmtx);
1766 
1767         kmem_free(shmd, sizeof (*shmd));
1768 }
1769 
1770 /*ARGSUSED*/
1771 int
1772 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1773 {
1774         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1775 
1776         /*
1777          * A shared page table is more than a shared mapping.
1778          *  An individual process sharing the page tables can't change
1779          *  protections because there is only one set of page tables.
1780          *  This will be allowed once private page tables are
1781          *  supported.
1782          */
1783 /* Should this return a proper error status? */
1784         return (0);
1785 }
1786 
1787 
1788 faultcode_t
1789 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1790     size_t len, enum fault_type type, enum seg_rw rw)
1791 {
1792         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1793         struct  seg             *sptseg = shmd->shm_sptseg;
1794         struct  as              *curspt = shmd->shm_sptas;
1795         struct  spt_data        *sptd = sptseg->s_data;
1796         pgcnt_t npages;
1797         size_t  size;
1798         caddr_t segspt_addr, shm_addr;
1799         page_t  **ppa;
1800         int     i;
1801         ulong_t an_idx = 0;
1802         int     err = 0;
1803         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1804         size_t  pgsz;
1805         pgcnt_t pgcnt;
1806         caddr_t a;
1807         pgcnt_t pidx;
1808 
1809 #ifdef lint
1810         hat = hat;
1811 #endif
1812         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1813 
1814         /*
1815          * Because of the way spt is implemented
1816          * the realsize of the segment does not have to be
1817          * equal to the segment size itself. The segment size is
1818          * often in multiples of a page size larger than PAGESIZE.
1819          * The realsize is rounded up to the nearest PAGESIZE
1820          * based on what the user requested. This is a bit of
1821          * ugliness that is historical but not easily fixed
1822          * without re-designing the higher levels of ISM.
1823          */
1824         ASSERT(addr >= seg->s_base);
1825         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1826                 return (FC_NOMAP);
1827         /*
1828          * For all of the following cases except F_PROT, we need to
1829          * make any necessary adjustments to addr and len
1830          * and get all of the necessary page_t's into an array called ppa[].
1831          *
1832          * The code in shmat() forces base addr and len of ISM segment
1833          * to be aligned to largest page size supported. Therefore,
1834          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1835          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1836          * in large pagesize chunks, or else we will screw up the HAT
1837          * layer by calling hat_memload_array() with differing page sizes
1838          * over a given virtual range.
1839          */
1840         pgsz = page_get_pagesize(sptseg->s_szc);
1841         pgcnt = page_get_pagecnt(sptseg->s_szc);
1842         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1843         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1844         npages = btopr(size);
1845 
1846         /*
1847          * Now we need to convert from addr in segshm to addr in segspt.
1848          */
1849         an_idx = seg_page(seg, shm_addr);
1850         segspt_addr = sptseg->s_base + ptob(an_idx);
1851 
1852         ASSERT((segspt_addr + ptob(npages)) <=
1853             (sptseg->s_base + sptd->spt_realsize));
1854         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1855 
1856         switch (type) {
1857 
1858         case F_SOFTLOCK:
1859 
1860                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1861                 /*
1862                  * Fall through to the F_INVAL case to load up the hat layer
1863                  * entries with the HAT_LOAD_LOCK flag.
1864                  */
1865                 /* FALLTHRU */
1866         case F_INVAL:
1867 
1868                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1869                         return (FC_NOMAP);
1870 
1871                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1872 
1873                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1874                 if (err != 0) {
1875                         if (type == F_SOFTLOCK) {
1876                                 atomic_add_long((ulong_t *)(
1877                                     &(shmd->shm_softlockcnt)), -npages);
1878                         }
1879                         goto dism_err;
1880                 }
1881                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1882                 a = segspt_addr;
1883                 pidx = 0;
1884                 if (type == F_SOFTLOCK) {
1885 
1886                         /*
1887                          * Load up the translation keeping it
1888                          * locked and don't unlock the page.
1889                          */
1890                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1891                                 hat_memload_array(sptseg->s_as->a_hat,
1892                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1893                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1894                         }
1895                 } else {
1896                         /*
1897                          * Migrate pages marked for migration
1898                          */
1899                         if (lgrp_optimizations())
1900                                 page_migrate(seg, shm_addr, ppa, npages);
1901 
1902                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1903                                 hat_memload_array(sptseg->s_as->a_hat,
1904                                     a, pgsz, &ppa[pidx],
1905                                     sptd->spt_prot,
1906                                     HAT_LOAD_SHARE);
1907                         }
1908 
1909                         /*
1910                          * And now drop the SE_SHARED lock(s).
1911                          */
1912                         if (dyn_ism_unmap) {
1913                                 for (i = 0; i < npages; i++) {
1914                                         page_unlock(ppa[i]);
1915                                 }
1916                         }
1917                 }
1918 
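                     /*
                      * For HATs without dynamic ISM unmap support, share the
                      * translations for this range with the spt HAT via
                      * hat_share().  In that case the shared page locks were
                      * not dropped above, so drop them here for F_INVAL.
                      */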
1919                 if (!dyn_ism_unmap) {
1920                         if (hat_share(seg->s_as->a_hat, shm_addr,
1921                             curspt->a_hat, segspt_addr, ptob(npages),
1922                             seg->s_szc) != 0) {
1923                                 panic("hat_share err in DISM fault");
1924                                 /* NOTREACHED */
1925                         }
1926                         if (type == F_INVAL) {
1927                                 for (i = 0; i < npages; i++) {
1928                                         page_unlock(ppa[i]);
1929                                 }
1930                         }
1931                 }
1932                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1933 dism_err:
1934                 kmem_free(ppa, npages * sizeof (page_t *));
1935                 return (err);
1936 
1937         case F_SOFTUNLOCK:
1938 
1939                 /*
1940                  * This is a bit ugly: we pass in the real seg pointer,
1941                  * but the segspt_addr is the virtual address within the
1942                  * dummy seg.
1943                  */
1944                 segspt_softunlock(seg, segspt_addr, size, rw);
1945                 return (0);
1946 
1947         case F_PROT:
1948 
1949                 /*
1950                  * This takes care of the unusual case where a user
1951                  * allocates a stack in shared memory and a register
1952                  * window overflow is written to that stack page before
1953                  * it is otherwise modified.
1954                  *
1955                  * We can get away with this because ISM segments are
1956                  * always rw. Other than this unusual case, there
1957                  * should be no instances of protection violations.
1958                  */
1959                 return (0);
1960 
1961         default:
1962 #ifdef DEBUG
1963                 panic("segspt_dismfault default type?");
1964 #else
1965                 return (FC_NOMAP);
1966 #endif
1967         }
1968 }
1969 
1970 
1971 faultcode_t
1972 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1973     size_t len, enum fault_type type, enum seg_rw rw)
1974 {
1975         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
1976         struct seg              *sptseg = shmd->shm_sptseg;
1977         struct as               *curspt = shmd->shm_sptas;
1978         struct spt_data         *sptd   = sptseg->s_data;
1979         pgcnt_t npages;
1980         size_t size;
1981         caddr_t sptseg_addr, shm_addr;
1982         page_t *pp, **ppa;
1983         int     i;
1984         u_offset_t offset;
1985         ulong_t anon_index = 0;
1986         struct vnode *vp;
1987         struct anon_map *amp;           /* XXX - for locknest */
1988         struct anon *ap = NULL;
1989         size_t          pgsz;
1990         pgcnt_t         pgcnt;
1991         caddr_t         a;
1992         pgcnt_t         pidx;
1993         size_t          sz;
1994 
1995 #ifdef lint
1996         hat = hat;
1997 #endif
1998 
1999         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2000 
2001         if (sptd->spt_flags & SHM_PAGEABLE) {
2002                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2003         }
2004 
2005         /*
2006          * Because of the way spt is implemented
2007          * the realsize of the segment does not have to be
2008          * equal to the segment size itself. The segment size is
2009          * often in multiples of a page size larger than PAGESIZE.
2010          * The realsize is rounded up to the nearest PAGESIZE
2011          * based on what the user requested. This is a bit of
2012          * ugliness that is historical but not easily fixed
2013          * without re-designing the higher levels of ISM.
2014          */
2015         ASSERT(addr >= seg->s_base);
2016         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2017                 return (FC_NOMAP);
2018         /*
2019          * For all of the following cases except F_PROT, we need to
2020          * make any necessary adjustments to addr and len
2021          * and get all of the necessary page_t's into an array called ppa[].
2022          *
2023          * The code in shmat() forces base addr and len of ISM segment
2024          * to be aligned to largest page size supported. Therefore,
2025          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2026          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2027          * in large pagesize chunks, or else we will screw up the HAT
2028          * layer by calling hat_memload_array() with differing page sizes
2029          * over a given virtual range.
2030          */
2031         pgsz = page_get_pagesize(sptseg->s_szc);
2032         pgcnt = page_get_pagecnt(sptseg->s_szc);
2033         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2034         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2035         npages = btopr(size);
2036 
2037         /*
2038          * Now we need to convert from addr in segshm to addr in segspt.
2039          */
2040         anon_index = seg_page(seg, shm_addr);
2041         sptseg_addr = sptseg->s_base + ptob(anon_index);
2042 
2043         /*
2044          * And now we may have to adjust npages downward if we have
2045          * exceeded the realsize of the segment or initial anon
2046          * allocations.
2047          */
2048         if ((sptseg_addr + ptob(npages)) >
2049             (sptseg->s_base + sptd->spt_realsize))
2050                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2051 
2052         npages = btopr(size);
2053 
2054         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2055         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2056 
2057         switch (type) {
2058 
2059         case F_SOFTLOCK:
2060 
2061                 /*
2062                  * availrmem is decremented once during anon_swap_adjust()
2063                  * and is incremented during the anon_unresv(), which is
2064                  * called from shm_rm_amp() when the segment is destroyed.
2065                  */
2066                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2067                 /*
2068                  * Some platforms assume that ISM pages are SE_SHARED
2069                  * locked for the entire life of the segment.
2070                  */
2071                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2072                         return (0);
2073                 /*
2074                  * Fall through to the F_INVAL case to load up the hat layer
2075                  * entries with the HAT_LOAD_LOCK flag.
2076                  */
2077 
2078                 /* FALLTHRU */
2079         case F_INVAL:
2080 
2081                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2082                         return (FC_NOMAP);
2083 
2084                 /*
2085                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2086                  * may still rely on this call to hat_share(). That
2087                  * would imply that those HATs can fault on a
2088                  * HAT_LOAD_LOCK translation, which would seem
2089                  * contradictory.
2090                  */
2091                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2092                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2093                             curspt->a_hat, sptseg->s_base,
2094                             sptseg->s_size, sptseg->s_szc) != 0) {
2095                                 panic("hat_share error in ISM fault");
2096                                 /*NOTREACHED*/
2097                         }
2098                         return (0);
2099                 }
2100                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2101 
2102                 /*
2103                  * There is no need to lock the real seg here,
2104                  * because all of our work will be on the underlying
2105                  * dummy seg.
2106                  *
2107                  * sptseg_addr and npages now account for large pages.
2108                  */
2109                 amp = sptd->spt_amp;
2110                 ASSERT(amp != NULL);
2111                 anon_index = seg_page(sptseg, sptseg_addr);
2112 
2113                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2114                 for (i = 0; i < npages; i++) {
2115                         ap = anon_get_ptr(amp->ahp, anon_index++);
2116                         ASSERT(ap != NULL);
2117                         swap_xlate(ap, &vp, &offset);
2118                         pp = page_lookup(vp, offset, SE_SHARED);
2119                         ASSERT(pp != NULL);
2120                         ppa[i] = pp;
2121                 }
2122                 ANON_LOCK_EXIT(&amp->a_rwlock);
2123                 ASSERT(i == npages);
2124 
2125                 /*
2126                  * We are already holding the as->a_lock on the user's
2127                  * real segment, but we need to hold the a_lock on the
2128                  * underlying dummy as. This is mostly to satisfy the
2129                  * underlying HAT layer.
2130                  */
2131                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2132                 a = sptseg_addr;
2133                 pidx = 0;
2134                 if (type == F_SOFTLOCK) {
2135                         /*
2136                          * Load up the translation keeping it
2137                          * locked and don't unlock the page.
2138                          */
2139                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2140                                 sz = MIN(pgsz, ptob(npages - pidx));
2141                                 hat_memload_array(sptseg->s_as->a_hat, a,
2142                                     sz, &ppa[pidx], sptd->spt_prot,
2143                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2144                         }
2145                 } else {
2146                         /*
2147                          * Migrate pages marked for migration.
2148                          */
2149                         if (lgrp_optimizations())
2150                                 page_migrate(seg, shm_addr, ppa, npages);
2151 
2152                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2153                                 sz = MIN(pgsz, ptob(npages - pidx));
2154                                 hat_memload_array(sptseg->s_as->a_hat,
2155                                     a, sz, &ppa[pidx],
2156                                     sptd->spt_prot, HAT_LOAD_SHARE);
2157                         }
2158 
2159                         /*
2160                          * And now drop the SE_SHARED lock(s).
2161                          */
2162                         for (i = 0; i < npages; i++)
2163                                 page_unlock(ppa[i]);
2164                 }
2165                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2166 
2167                 kmem_free(ppa, sizeof (page_t *) * npages);
2168                 return (0);
2169         case F_SOFTUNLOCK:
2170 
2171                 /*
2172                  * This is a bit ugly: we pass in the real seg pointer,
2173                  * but the sptseg_addr is the virtual address within the
2174                  * dummy seg.
2175                  */
2176                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2177                 return (0);
2178 
2179         case F_PROT:
2180 
2181                 /*
2182                  * This takes care of the unusual case where a user
2183                  * allocates a stack in shared memory and a register
2184                  * window overflow is written to that stack page before
2185                  * it is otherwise modified.
2186                  *
2187                  * We can get away with this because ISM segments are
2188                  * always rw. Other than this unusual case, there
2189                  * should be no instances of protection violations.
2190                  */
2191                 return (0);
2192 
2193         default:
2194 #ifdef DEBUG
2195                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2196 #endif
2197                 return (FC_NOMAP);
2198         }
2199 }
2200 
2201 /*ARGSUSED*/
2202 static faultcode_t
2203 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2204 {
2205         return (0);
2206 }
2207 
2208 /*ARGSUSED*/
2209 static int
2210 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2211 {
2212         return (0);
2213 }
2214 
2215 /*
2216  * duplicate the shared page tables
2217  */
2218 int
2219 segspt_shmdup(struct seg *seg, struct seg *newseg)
2220 {
2221         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2222         struct anon_map         *amp = shmd->shm_amp;
2223         struct shm_data         *shmd_new;
2224         struct seg              *spt_seg = shmd->shm_sptseg;
2225         struct spt_data         *sptd = spt_seg->s_data;
2226         int                     error = 0;
2227 
2228         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2229 
2230         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2231         newseg->s_data = (void *)shmd_new;
2232         shmd_new->shm_sptas = shmd->shm_sptas;
2233         shmd_new->shm_amp = amp;
2234         shmd_new->shm_sptseg = shmd->shm_sptseg;
2235         newseg->s_ops = &segspt_shmops;
2236         newseg->s_szc = seg->s_szc;
2237         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2238 
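             /*
              * The new segment shares the same anon_map; take an extra
              * reference that is dropped in segspt_shmfree().
              */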
2239         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2240         amp->refcnt++;
2241         ANON_LOCK_EXIT(&amp->a_rwlock);
2242 
2243         if (sptd->spt_flags & SHM_PAGEABLE) {
2244                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2245                 shmd_new->shm_lckpgs = 0;
2246                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2247                         if ((error = hat_share(newseg->s_as->a_hat,
2248                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2249                             seg->s_size, seg->s_szc)) != 0) {
2250                                 kmem_free(shmd_new->shm_vpage,
2251                                     btopr(amp->size));
2252                         }
2253                 }
2254                 return (error);
2255         } else {
2256                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2257                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2258                     seg->s_szc));
2259 
2260         }
2261 }
2262 
2263 /*ARGSUSED*/
2264 int
2265 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2266 {
2267         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2268         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2269 
2270         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2271 
2272         /*
2273          * ISM segment is always rw.
2274          */
2275         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2276 }
2277 
2278 /*
2279  * Return an array of locked large pages; for empty slots, allocate
2280  * private zero-filled anon pages.
2281  */
2282 static int
2283 spt_anon_getpages(
2284         struct seg *sptseg,
2285         caddr_t sptaddr,
2286         size_t len,
2287         page_t *ppa[])
2288 {
2289         struct  spt_data *sptd = sptseg->s_data;
2290         struct  anon_map *amp = sptd->spt_amp;
2291         enum    seg_rw rw = sptd->spt_prot;
2292         uint_t  szc = sptseg->s_szc;
2293         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2294         pgcnt_t lp_npgs;
2295         caddr_t lp_addr, e_sptaddr;
2296         uint_t  vpprot, ppa_szc = 0;
2297         struct  vpage *vpage = NULL;
2298         ulong_t j, ppa_idx;
2299         int     err, ierr = 0;
2300         pgcnt_t an_idx;
2301         anon_sync_obj_t cookie;
2302         int anon_locked = 0;
2303         pgcnt_t amp_pgs;
2304 
2305 
2306         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2307         ASSERT(len != 0);
2308 
2309         pg_sz = share_sz;
2310         lp_npgs = btop(pg_sz);
2311         lp_addr = sptaddr;
2312         e_sptaddr = sptaddr + len;
2313         an_idx = seg_page(sptseg, sptaddr);
2314         ppa_idx = 0;
2315 
2316         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2317 
2318         amp_pgs = page_get_pagecnt(amp->a_szc);
2319 
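             /*
              * Get the pages one large page at a time.  If anon_map_getpages()
              * cannot satisfy the current page size, adjust szc as described
              * below and retry the remaining range.
              */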
2320         /*CONSTCOND*/
2321         while (1) {
2322                 for (; lp_addr < e_sptaddr;
2323                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2324 
2325                         /*
2326                          * If we're currently locked, and we get to a new
2327                          * page, unlock our current anon chunk.
2328                          */
2329                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2330                                 anon_array_exit(&cookie);
2331                                 anon_locked = 0;
2332                         }
2333                         if (!anon_locked) {
2334                                 anon_array_enter(amp, an_idx, &cookie);
2335                                 anon_locked = 1;
2336                         }
2337                         ppa_szc = (uint_t)-1;
2338                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2339                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2340                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2341 
2342                         if (ierr != 0) {
2343                                 if (ierr > 0) {
2344                                         err = FC_MAKE_ERR(ierr);
2345                                         goto lpgs_err;
2346                                 }
2347                                 break;
2348                         }
2349                 }
2350                 if (lp_addr == e_sptaddr) {
2351                         break;
2352                 }
2353                 ASSERT(lp_addr < e_sptaddr);
2354 
2355                 /*
2356                  * ierr == -1 means we failed to allocate a large page,
2357                  * so do a size down operation.
2358                  *
2359                  * ierr == -2 means some other process that privately shares
2360                  * pages with this process has allocated a larger page and we
2361                  * need to retry with larger pages. So do a size up
2362                  * operation. This relies on the fact that large pages are
2363                  * never partially shared i.e. if we share any constituent
2364                  * page of a large page with another process we must share the
2365                  * entire large page. Note this cannot happen for SOFTLOCK
2366                  * case, unless the current address (lp_addr) is at the beginning
2367                  * of the next page size boundary because the other process
2368                  * couldn't have relocated locked pages.
2369                  */
2370                 ASSERT(ierr == -1 || ierr == -2);
2371                 if (segvn_anypgsz) {
2372                         ASSERT(ierr == -2 || szc != 0);
2373                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2374                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2375                 } else {
2376                         /*
2377                          * For faults and segvn_anypgsz == 0
2378                          * we need to be careful not to loop forever
2379                          * if existing page is found with szc other
2380                          * than 0 or seg->s_szc. This could be due
2381                          * to page relocations on behalf of DR or
2382                          * more likely large page creation. For this
2383                          * case simply re-size to existing page's szc
2384                          * if returned by anon_map_getpages().
2385                          */
2386                         if (ppa_szc == (uint_t)-1) {
2387                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2388                         } else {
2389                                 ASSERT(ppa_szc <= sptseg->s_szc);
2390                                 ASSERT(ierr == -2 || ppa_szc < szc);
2391                                 ASSERT(ierr == -1 || ppa_szc > szc);
2392                                 szc = ppa_szc;
2393                         }
2394                 }
2395                 pg_sz = page_get_pagesize(szc);
2396                 lp_npgs = btop(pg_sz);
2397                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2398         }
2399         if (anon_locked) {
2400                 anon_array_exit(&cookie);
2401         }
2402         ANON_LOCK_EXIT(&amp->a_rwlock);
2403         return (0);
2404 
2405 lpgs_err:
2406         if (anon_locked) {
2407                 anon_array_exit(&cookie);
2408         }
2409         ANON_LOCK_EXIT(&amp->a_rwlock);
2410         for (j = 0; j < ppa_idx; j++)
2411                 page_unlock(ppa[j]);
2412         return (err);
2413 }
2414 
2415 /*
2416  * Count the number of bytes in a set of spt pages that are currently not
2417  * locked.
2418  */
2419 static rctl_qty_t
2420 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2421 {
2422         ulong_t i;
2423         rctl_qty_t unlocked = 0;
2424 
2425         for (i = 0; i < npages; i++) {
2426                 if (ppa[i]->p_lckcnt == 0)
2427                         unlocked += PAGESIZE;
2428         }
2429         return (unlocked);
2430 }
2431 
2432 extern  u_longlong_t randtick(void);
2433 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2434 #define NLCK    (NCPU_P2)
2435 /* Random number with a range [0, n-1], n must be power of two */
2436 #define RAND_P2(n)      \
2437         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2438 
2439 int
2440 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2441     page_t **ppa, ulong_t *lockmap, size_t pos,
2442     rctl_qty_t *locked)
2443 {
2444         struct  shm_data *shmd = seg->s_data;
2445         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2446         ulong_t i;
2447         int     kernel;
2448         pgcnt_t nlck = 0;
2449         int     rv = 0;
2450         int     use_reserved = 1;
2451 
2452         /* return the number of bytes actually locked */
2453         *locked = 0;
2454 
2455         /*
2456          * To avoid contention on freemem_lock, availrmem and pages_locked
2457          * global counters are updated only every nlck locked pages instead of
2458          * every time.  Reserve nlck locks up front and deduct from this
2459          * reservation for each page that requires a lock.  When the reservation
2460          * is consumed, reserve again.  nlck is randomized, so the competing
2461          * threads do not fall into a cyclic lock contention pattern. When
2462          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2463          * is used to lock pages.
2464          */
2465         for (i = 0; i < npages; anon_index++, pos++, i++) {
2466                 if (nlck == 0 && use_reserved == 1) {
2467                         nlck = NLCK + RAND_P2(NLCK);
2468                         /* if fewer loops left, decrease nlck */
2469                         nlck = MIN(nlck, npages - i);
2470                         /*
2471                          * Reserve nlck locks up front and deduct from this
2472                          * reservation for each page that requires a lock.  When
2473                          * the reservation is consumed, reserve again.
2474                          */
2475                         mutex_enter(&freemem_lock);
2476                         if ((availrmem - nlck) < pages_pp_maximum) {
2477                                 /* Do not do advance memory reserves */
2478                                 use_reserved = 0;
2479                         } else {
2480                                 availrmem       -= nlck;
2481                                 pages_locked    += nlck;
2482                         }
2483                         mutex_exit(&freemem_lock);
2484                 }
2485                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2486                         if (sptd->spt_ppa_lckcnt[anon_index] <
2487                             (ushort_t)DISM_LOCK_MAX) {
2488                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2489                                     (ushort_t)DISM_LOCK_MAX) {
2490                                         cmn_err(CE_WARN,
2491                                             "DISM page lock limit "
2492                                             "reached on DISM offset 0x%lx\n",
2493                                             anon_index << PAGESHIFT);
2494                                 }
2495                                 kernel = (sptd->spt_ppa &&
2496                                     sptd->spt_ppa[anon_index]);
2497                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2498                                     use_reserved)) {
2499                                         sptd->spt_ppa_lckcnt[anon_index]--;
2500                                         rv = EAGAIN;
2501                                         break;
2502                                 }
2503                                 /* if this is a newly locked page, count it */
2504                                 if (ppa[i]->p_lckcnt == 1) {
2505                                         if (kernel == 0 && use_reserved == 1)
2506                                                 nlck--;
2507                                         *locked += PAGESIZE;
2508                                 }
2509                                 shmd->shm_lckpgs++;
2510                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2511                                 if (lockmap != NULL)
2512                                         BT_SET(lockmap, pos);
2513                         }
2514                 }
2515         }
2516         /* Return unused lock reservation */
2517         if (nlck != 0 && use_reserved == 1) {
2518                 mutex_enter(&freemem_lock);
2519                 availrmem       += nlck;
2520                 pages_locked    -= nlck;
2521                 mutex_exit(&freemem_lock);
2522         }
2523 
2524         return (rv);
2525 }
2526 
2527 int
2528 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2529     rctl_qty_t *unlocked)
2530 {
2531         struct shm_data *shmd = seg->s_data;
2532         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2533         struct anon_map *amp = sptd->spt_amp;
2534         struct anon     *ap;
2535         struct vnode    *vp;
2536         u_offset_t      off;
2537         struct page     *pp;
2538         int             kernel;
2539         anon_sync_obj_t cookie;
2540         ulong_t         i;
2541         pgcnt_t         nlck = 0;
2542         pgcnt_t         nlck_limit = NLCK;
2543 
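             /*
              * For each page locked via MC_LOCK, look it up, drop its lock
              * count, and clear the DISM_PG_LOCKED bookkeeping; batch the
              * availrmem updates to reduce freemem_lock contention.
              */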
2544         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2545         for (i = 0; i < npages; i++, anon_index++) {
2546                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2547                         anon_array_enter(amp, anon_index, &cookie);
2548                         ap = anon_get_ptr(amp->ahp, anon_index);
2549                         ASSERT(ap);
2550 
2551                         swap_xlate(ap, &vp, &off);
2552                         anon_array_exit(&cookie);
2553                         pp = page_lookup(vp, off, SE_SHARED);
2554                         ASSERT(pp);
2555                         /*
2556                          * availrmem is decremented only for pages which are
2557                          * not in the seg pcache; for pages in the seg pcache,
2558                          * availrmem was already decremented in _dismpagelock().
2559                          */
2560                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2561                         ASSERT(pp->p_lckcnt > 0);
2562 
2563                         /*
2564                          * Unlock the page but do not change availrmem; we
2565                          * do it ourselves every nlck loops.
2566                          */
2567                         page_pp_unlock(pp, 0, 1);
2568                         if (pp->p_lckcnt == 0) {
2569                                 if (kernel == 0)
2570                                         nlck++;
2571                                 *unlocked += PAGESIZE;
2572                         }
2573                         page_unlock(pp);
2574                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2575                         sptd->spt_ppa_lckcnt[anon_index]--;
2576                         shmd->shm_lckpgs--;
2577                 }
2578 
2579                 /*
2580                  * To reduce freemem_lock contention, do not update availrmem
2581                  * until at least NLCK pages have been unlocked.
2582                  * 1. No need to update if nlck is zero
2583                  * 2. Always update if the last iteration
2584                  */
2585                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2586                         mutex_enter(&freemem_lock);
2587                         availrmem       += nlck;
2588                         pages_locked    -= nlck;
2589                         mutex_exit(&freemem_lock);
2590                         nlck = 0;
2591                         nlck_limit = NLCK + RAND_P2(NLCK);
2592                 }
2593         }
2594         ANON_LOCK_EXIT(&amp->a_rwlock);
2595 
2596         return (0);
2597 }
2598 
2599 /*ARGSUSED*/
2600 static int
2601 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2602     int attr, int op, ulong_t *lockmap, size_t pos)
2603 {
2604         struct shm_data *shmd = seg->s_data;
2605         struct seg      *sptseg = shmd->shm_sptseg;
2606         struct spt_data *sptd = sptseg->s_data;
2607         struct kshmid   *sp = sptd->spt_amp->a_sp;
2608         pgcnt_t         npages, a_npages;
2609         page_t          **ppa;
2610         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2611         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2612         size_t          a_len;                  /* aligned len */
2613         size_t          share_sz;
2614         ulong_t         i;
2615         int             sts = 0;
2616         rctl_qty_t      unlocked = 0;
2617         rctl_qty_t      locked = 0;
2618         struct proc     *p = curproc;
2619         kproject_t      *proj;
2620 
2621         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2622         ASSERT(sp != NULL);
2623 
2624         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2625                 return (0);
2626         }
2627 
2628         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2629         an_idx = seg_page(seg, addr);
2630         npages = btopr(len);
2631 
2632         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2633                 return (ENOMEM);
2634         }
2635 
2636         /*
2637          * A shm's project never changes, so no lock needed.
2638          * The shm has a hold on the project, so it will not go away.
2639          * Since we have a mapping to shm within this zone, we know
2640          * that the zone will not go away.
2641          */
2642         proj = sp->shm_perm.ipc_proj;
2643 
2644         if (op == MC_LOCK) {
2645 
2646                 /*
2647                  * Align the address and size if they are not already
2648                  * aligned so we can always allocate large page(s);
2649                  * however, we only lock what the caller requested.
2650                  */
2651                 share_sz = page_get_pagesize(sptseg->s_szc);
2652                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2653                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2654                     share_sz);
2655                 a_npages = btop(a_len);
2656                 a_an_idx = seg_page(seg, a_addr);
2657                 spt_addr = sptseg->s_base + ptob(a_an_idx);
2658                 ppa_idx = an_idx - a_an_idx;
2659 
2660                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2661                     KM_NOSLEEP)) == NULL) {
2662                         return (ENOMEM);
2663                 }
2664 
2665                 /*
2666                  * Don't cache any new pages for IO and
2667                  * flush any cached pages.
2668                  */
2669                 mutex_enter(&sptd->spt_lock);
2670                 if (sptd->spt_ppa != NULL)
2671                         sptd->spt_flags |= DISM_PPA_CHANGED;
2672 
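                     /*
                      * Allocate (if necessary) and hold the pages backing the
                      * aligned range; they remain held until we drop them
                      * below.
                      */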
2673                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2674                 if (sts != 0) {
2675                         mutex_exit(&sptd->spt_lock);
2676                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2677                         return (sts);
2678                 }
2679 
2680                 mutex_enter(&sp->shm_mlock);
2681                 /* enforce locked memory rctl */
2682                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2683 
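                     /*
                      * Charge the bytes that are not yet locked against the
                      * project's locked-memory rctl; if that fails, fail the
                      * whole MC_LOCK request with EAGAIN.
                      */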
2684                 mutex_enter(&p->p_lock);
2685                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2686                         mutex_exit(&p->p_lock);
2687                         sts = EAGAIN;
2688                 } else {
2689                         mutex_exit(&p->p_lock);
2690                         sts = spt_lockpages(seg, an_idx, npages,
2691                             &ppa[ppa_idx], lockmap, pos, &locked);
2692 
2693                         /*
2694                          * correct locked count if not all pages could be
2695                          * locked
2696                          */
2697                         if ((unlocked - locked) > 0) {
2698                                 rctl_decr_locked_mem(NULL, proj,
2699                                     (unlocked - locked), 0);
2700                         }
2701                 }
2702                 /*
2703                  * unlock pages
2704                  */
2705                 for (i = 0; i < a_npages; i++)
2706                         page_unlock(ppa[i]);
2707                 if (sptd->spt_ppa != NULL)
2708                         sptd->spt_flags |= DISM_PPA_CHANGED;
2709                 mutex_exit(&sp->shm_mlock);
2710                 mutex_exit(&sptd->spt_lock);
2711 
2712                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2713 
2714         } else if (op == MC_UNLOCK) { /* unlock */
2715                 page_t          **ppa;
2716 
2717                 mutex_enter(&sptd->spt_lock);
2718                 if (shmd->shm_lckpgs == 0) {
2719                         mutex_exit(&sptd->spt_lock);
2720                         return (0);
2721                 }
2722                 /*
2723                  * Don't cache new IO pages.
2724                  */
2725                 if (sptd->spt_ppa != NULL)
2726                         sptd->spt_flags |= DISM_PPA_CHANGED;
2727 
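                     /*
                      * Unlock the pages and return the now-unlocked bytes
                      * to the project's locked-memory rctl.
                      */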
2728                 mutex_enter(&sp->shm_mlock);
2729                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2730                 if ((ppa = sptd->spt_ppa) != NULL)
2731                         sptd->spt_flags |= DISM_PPA_CHANGED;
2732                 mutex_exit(&sptd->spt_lock);
2733 
2734                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2735                 mutex_exit(&sp->shm_mlock);
2736 
2737                 if (ppa != NULL)
2738                         seg_ppurge_wiredpp(ppa);
2739         }
2740         return (sts);
2741 }
2742 
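     /*
      * Return the protections for each page in the given range.  All pages
      * of an ISM/DISM segment share the segment-wide protections recorded
      * in spt_prot.
      */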
2743 /*ARGSUSED*/
2744 int
2745 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2746 {
2747         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2748         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2749         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2750 
2751         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2752 
2753         /*
2754          * ISM segment is always rw.
2755          */
2756         while (--pgno >= 0)
2757                 *protv++ = sptd->spt_prot;
2758         return (0);
2759 }
2760 
2761 /*ARGSUSED*/
2762 u_offset_t
2763 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2764 {
2765         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2766 
2767         /* Offset does not matter in ISM memory */
2768 
2769         return ((u_offset_t)0);
2770 }
2771 
2772 /* ARGSUSED */
2773 int
2774 segspt_shmgettype(struct seg *seg, caddr_t addr)
2775 {
2776         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2777         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2778 
2779         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2780 
2781         /*
2782          * The shared memory mapping is always MAP_SHARED; swap is only
2783          * reserved for DISM.
2784          */
2785         return (MAP_SHARED |
2786             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2787 }
2788 
2789 /*ARGSUSED*/
2790 int
2791 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2792 {
2793         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2794         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2795 
2796         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2797 
2798         *vpp = sptd->spt_vp;
2799         return (0);
2800 }
2801 
2802 /*
2803  * We need to wait for pending IO to a DISM segment to complete before
2804  * its pages can get kicked out of the seg_pcache.  120 seconds should be
2805  * more than enough time to wait.
2806  */
2807 static clock_t spt_pcache_wait = 120;
2808 
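     /*
      * Handle madvise(3C) advice on an ISM/DISM mapping.  MADV_FREE purges
      * any cached page list for the segment, waits (up to spt_pcache_wait
      * seconds) for the pages to leave the seg_pcache and then disclaims
      * the anon pages so they can be freed.  The MADV_ACCESS_* hints set
      * the lgroup memory allocation policy for the range and mark any
      * existing pages for migration.
      */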
2809 /*ARGSUSED*/
2810 static int
2811 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2812 {
2813         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2814         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2815         struct anon_map *amp;
2816         pgcnt_t pg_idx;
2817         ushort_t gen;
2818         clock_t end_lbolt;
2819         int writer;
2820         page_t **ppa;
2821 
2822         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2823 
2824         if (behav == MADV_FREE) {
2825                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2826                         return (0);
2827 
2828                 amp = sptd->spt_amp;
2829                 pg_idx = seg_page(seg, addr);
2830 
2831                 mutex_enter(&sptd->spt_lock);
2832                 if ((ppa = sptd->spt_ppa) == NULL) {
2833                         mutex_exit(&sptd->spt_lock);
2834                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2835                         anon_disclaim(amp, pg_idx, len);
2836                         ANON_LOCK_EXIT(&amp->a_rwlock);
2837                         return (0);
2838                 }
2839 
2840                 sptd->spt_flags |= DISM_PPA_CHANGED;
2841                 gen = sptd->spt_gen;
2842 
2843                 mutex_exit(&sptd->spt_lock);
2844 
2845                 /*
2846                  * Purge all DISM cached pages
2847                  */
2848                 seg_ppurge_wiredpp(ppa);
2849 
2850                 /*
2851                  * Drop the AS_LOCK so that other threads can grab it
2852                  * in the as_pageunlock path and hopefully get the segment
2853                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2854                  * to keep this segment resident.
2855                  */
2856                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2857                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2858                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2859 
2860                 mutex_enter(&sptd->spt_lock);
2861 
2862                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2863 
2864                 /*
2865                  * Try to wait for pages to get kicked out of the seg_pcache.
2866                  */
2867                 while (sptd->spt_gen == gen &&
2868                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2869                     ddi_get_lbolt() < end_lbolt) {
2870                         if (!cv_timedwait_sig(&sptd->spt_cv,
2871                             &sptd->spt_lock, end_lbolt)) {
2872                                 break;
2873                         }
2874                 }
2875 
2876                 mutex_exit(&sptd->spt_lock);
2877 
2878                 /* Regrab the AS_LOCK and release our hold on the segment */
2879                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2880                     writer ? RW_WRITER : RW_READER);
2881                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
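                     /*
                      * If ours was the last hold, wake up any thread waiting
                      * in as_unmap() for the soft lock count to drop to zero.
                      */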
2882                 if (shmd->shm_softlockcnt <= 0) {
2883                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2884                                 mutex_enter(&seg->s_as->a_contents);
2885                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2886                                         AS_CLRUNMAPWAIT(seg->s_as);
2887                                         cv_broadcast(&seg->s_as->a_cv);
2888                                 }
2889                                 mutex_exit(&seg->s_as->a_contents);
2890                         }
2891                 }
2892 
2893                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2894                 anon_disclaim(amp, pg_idx, len);
2895                 ANON_LOCK_EXIT(&amp->a_rwlock);
2896         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2897             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2898                 int                     already_set;
2899                 ulong_t                 anon_index;
2900                 lgrp_mem_policy_t       policy;
2901                 caddr_t                 shm_addr;
2902                 size_t                  share_size;
2903                 size_t                  size;
2904                 struct seg              *sptseg = shmd->shm_sptseg;
2905                 caddr_t                 sptseg_addr;
2906 
2907                 /*
2908                  * Align address and length to page size of underlying segment
2909                  */
2910                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2911                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2912                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2913                     share_size);
2914 
2915                 amp = shmd->shm_amp;
2916                 anon_index = seg_page(seg, shm_addr);
2917 
2918                 /*
2919                  * We may have to adjust the size downward if it would
2920                  * exceed the realsize of the segment or the initial anon
2921                  * allocations.
2922                  */
2923                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2924                 if ((sptseg_addr + size) >
2925                     (sptseg->s_base + sptd->spt_realsize))
2926                         size = (sptseg->s_base + sptd->spt_realsize) -
2927                             sptseg_addr;
2928 
2929                 /*
2930                  * Set memory allocation policy for this segment
2931                  */
2932                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2933                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2934                     NULL, 0, len);
2935 
2936                 /*
2937                  * If the policy was already set and is not one that
2938                  * needs to be reapplied, don't bother reapplying it.
2939                  */
2940                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2941                         return (0);
2942 
2943                 /*
2944                  * Mark any existing pages in the given range for
2945                  * migration, flushing the I/O page cache first and
2946                  * using the underlying segment to calculate the anon
2947                  * index and to obtain the anonmap and vnode pointer.
2948                  */
2949                 if (shmd->shm_softlockcnt > 0)
2950                         segspt_purge(seg);
2951 
2952                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2953         }
2954 
2955         return (0);
2956 }
2957 
2958 /*ARGSUSED*/
2959 void
2960 segspt_shmdump(struct seg *seg)
2961 {
2962         /* no-op for ISM segment */
2963 }
2964 
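     /*
      * Changing the page size of an ISM/DISM segment is not supported.
      */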
2965 /*ARGSUSED*/
2966 static faultcode_t
2967 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2968 {
2969         return (ENOTSUP);
2970 }
2971 
2972 /*
2973  * get a memory ID for an addr in a given segment
2974  */
2975 static int
2976 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2977 {
2978         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2979         struct anon     *ap;
2980         size_t          anon_index;
2981         struct anon_map *amp = shmd->shm_amp;
2982         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2983         struct seg      *sptseg = shmd->shm_sptseg;
2984         anon_sync_obj_t cookie;
2985 
2986         anon_index = seg_page(seg, addr);
2987 
2988         if (addr > (seg->s_base + sptd->spt_realsize)) {
2989                 return (EFAULT);
2990         }
2991 
2992         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2993         anon_array_enter(amp, anon_index, &cookie);
2994         ap = anon_get_ptr(amp->ahp, anon_index);
2995         if (ap == NULL) {
2996                 struct page *pp;
2997                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2998 
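                     /*
                      * No anon slot has been allocated for this page yet;
                      * create a zero-filled page so that the memid refers
                      * to a real anon structure.
                      */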
2999                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3000                 if (pp == NULL) {
3001                         anon_array_exit(&cookie);
3002                         ANON_LOCK_EXIT(&amp->a_rwlock);
3003                         return (ENOMEM);
3004                 }
3005                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3006                 page_unlock(pp);
3007         }
3008         anon_array_exit(&cookie);
3009         ANON_LOCK_EXIT(&amp->a_rwlock);
3010         memidp->val[0] = (uintptr_t)ap;
3011         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3012         return (0);
3013 }
3014 
3015 /*
3016  * Get memory allocation policy info for specified address in given segment
3017  */
3018 static lgrp_mem_policy_info_t *
3019 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3020 {
3021         struct anon_map         *amp;
3022         ulong_t                 anon_index;
3023         lgrp_mem_policy_info_t  *policy_info;
3024         struct shm_data         *shm_data;
3025 
3026         ASSERT(seg != NULL);
3027 
3028         /*
3029          * Get anon_map from segshm
3030          *
3031          * Assume that no lock needs to be held on the anon_map, since
3032          * it should be protected by its reference count, which must be
3033          * nonzero for an existing segment.
3034          * We do need to grab the readers lock on the policy tree, though.
3035          */
3036         shm_data = (struct shm_data *)seg->s_data;
3037         if (shm_data == NULL)
3038                 return (NULL);
3039         amp = shm_data->shm_amp;
3040         ASSERT(amp->refcnt != 0);
3041 
3042         /*
3043          * Get policy info
3044          *
3045          * Assume starting anon index of 0
3046          */
3047         anon_index = seg_page(seg, addr);
3048         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3049 
3050         return (policy_info);
3051 }
3052 
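     /*
      * No optional segment capabilities are supported for ISM/DISM segments.
      */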
3053 /*ARGSUSED*/
3054 static int
3055 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3056 {
3057         return (0);
3058 }