1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
  53 #define SEGSPTADDR      (caddr_t)0x0
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
  60 /*
   61  * segspt_minfree is the memory left for the system after ISM
   62  * has locked its pages; it is set to 5% of availrmem in
   63  * sptcreate() when ISM is created.  ISM should not use more
   64  * than ~90% of availrmem; if it does, the performance of the
   65  * system may decrease. Machines with large memories may be
   66  * able to use up more memory for ISM, so we set the default
   67  * segspt_minfree to 5% (which gives ISM a maximum of 95% of
   68  * availrmem). If somebody wants even more memory for ISM (risking
   69  * hanging the system) they can patch segspt_minfree to a smaller value.
  70  */
  71 pgcnt_t segspt_minfree = 0;
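      /*
       * Illustrative example (hypothetical numbers, not taken from this
       * file): if availrmem is 1,000,000 pages when the first ISM segment
       * is created, sptcreate() below sets
       *
       *    segspt_minfree = availrmem / 20 = 50,000 pages
       *
       * i.e. at least 5% of availrmem (as of that moment) is left for the
       * rest of the system.
       */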
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
  79 struct seg_ops segspt_ops = {
  80         .unmap          = segspt_unmap,
  81         .free           = segspt_free,
  82         .getpolicy      = segspt_getpolicy,
  83 };
  84 
  85 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
  86 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
  87 static void segspt_shmfree(struct seg *seg);
  88 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
  89                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
  90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
  91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
  92                         register size_t len, register uint_t prot);
  93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
  94                         uint_t prot);
  95 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
  96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
  97                         register char *vec);
  98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
  99                         int attr, uint_t flags);
 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 101                         int attr, int op, ulong_t *lockmap, size_t pos);
 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 103                         uint_t *protv);
 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 108                         uint_t behav);
 109 static void segspt_shmdump(struct seg *seg);
 110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 111                         struct page ***, enum lock_type, enum seg_rw);
 112 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
 113 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 114 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 115 static int segspt_shmcapable(struct seg *, segcapability_t);
 116 
 117 struct seg_ops segspt_shmops = {
 118         .dup            = segspt_shmdup,
 119         .unmap          = segspt_shmunmap,
 120         .free           = segspt_shmfree,
 121         .fault          = segspt_shmfault,
 122         .faulta         = segspt_shmfaulta,
 123         .setprot        = segspt_shmsetprot,
 124         .checkprot      = segspt_shmcheckprot,
 125         .kluster        = segspt_shmkluster,
 126         .sync           = segspt_shmsync,
 127         .incore         = segspt_shmincore,
 128         .lockop         = segspt_shmlockop,
 129         .getprot        = segspt_shmgetprot,
 130         .getoffset      = segspt_shmgetoffset,
 131         .gettype        = segspt_shmgettype,
 132         .getvp          = segspt_shmgetvp,
 133         .advise         = segspt_shmadvise,
 134         .dump           = segspt_shmdump,
 135         .pagelock       = segspt_shmpagelock,
 136         .setpagesize    = segspt_shmsetpgsz,
 137         .getmemid       = segspt_shmgetmemid,
 138         .getpolicy      = segspt_shmgetpolicy,
 139         .capable        = segspt_shmcapable,
 140 };
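      /*
       * Two ops vectors are defined here: segspt_ops (above) covers the
       * dummy segment created by segspt_create() for the shared page
       * table pages themselves, while segspt_shmops is assumed to be the
       * vector installed on each process's segment that attaches to the
       * shared memory (see the segspt_shm* routines below).
       */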
 141 
 142 static void segspt_purge(struct seg *seg);
 143 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 144                 enum seg_rw, int);
 145 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 146                 page_t **ppa);
 147 
 148 
 149 
 150 /*ARGSUSED*/
 151 int
 152 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 153         uint_t prot, uint_t flags, uint_t share_szc)
 154 {
 155         int     err;
 156         struct  as      *newas;
 157         struct  segspt_crargs sptcargs;
 158 
 159 #ifdef DEBUG
 160         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
  161                         tnf_ulong, size, size);
 162 #endif
  163         if (segspt_minfree == 0)        /* leave min 5% of availrmem */
 164                 segspt_minfree = availrmem/20;  /* for the system */
 165 
 166         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 167                 return (EINVAL);
 168 
 169         /*
 170          * get a new as for this shared memory segment
 171          */
 172         newas = as_alloc();
 173         newas->a_proc = NULL;
 174         sptcargs.amp = amp;
 175         sptcargs.prot = prot;
 176         sptcargs.flags = flags;
 177         sptcargs.szc = share_szc;
 178         /*
 179          * create a shared page table (spt) segment
 180          */
 181 
 182         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 183                 as_free(newas);
 184                 return (err);
 185         }
 186         *sptseg = sptcargs.seg_spt;
 187         return (0);
 188 }
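      /*
       * Illustrative usage sketch, not part of this file: a caller (for
       * example the SysV shared memory code) is assumed to create the
       * shared page table segment once per ISM anon_map and later tear it
       * down with sptdestroy(); names and error handling are simplified.
       *
       *    struct seg *sptseg;
       *    int err;
       *
       *    err = sptcreate(size, &sptseg, amp, prot, flags, share_szc);
       *    if (err != 0)
       *            return (err);
       *    ...
       *    sptdestroy(sptseg->s_as, amp);
       */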
 189 
 190 void
 191 sptdestroy(struct as *as, struct anon_map *amp)
 192 {
 193 
 194 #ifdef DEBUG
 195         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 196 #endif
 197         (void) as_unmap(as, SEGSPTADDR, amp->size);
 198         as_free(as);
 199 }
 200 
 201 /*
  202  * Called from seg_free().
  203  * Free (i.e., unlock, unmap, return to the free list)
  204  * all the pages in the given seg.
 205  */
 206 void
 207 segspt_free(struct seg  *seg)
 208 {
 209         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 210 
 211         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 212 
 213         if (sptd != NULL) {
 214                 if (sptd->spt_realsize)
 215                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 216 
  217                 if (sptd->spt_ppa_lckcnt)
  218                         kmem_free(sptd->spt_ppa_lckcnt,
  219                             sizeof (*sptd->spt_ppa_lckcnt)
  220                             * btopr(sptd->spt_amp->size));
 221                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 222                 cv_destroy(&sptd->spt_cv);
 223                 mutex_destroy(&sptd->spt_lock);
 224                 kmem_free(sptd, sizeof (*sptd));
 225         }
 226 }
 227 
 228 /*ARGSUSED*/
 229 static int
 230 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 231         uint_t flags)
 232 {
 233         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 234 
 235         return (0);
 236 }
 237 
 238 /*ARGSUSED*/
 239 static size_t
 240 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 241 {
 242         caddr_t eo_seg;
 243         pgcnt_t npages;
 244         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 245         struct seg      *sptseg;
 246         struct spt_data *sptd;
 247 
 248         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 249 #ifdef lint
 250         seg = seg;
 251 #endif
 252         sptseg = shmd->shm_sptseg;
 253         sptd = sptseg->s_data;
 254 
 255         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 256                 eo_seg = addr + len;
 257                 while (addr < eo_seg) {
 258                         /* page exists, and it's locked. */
 259                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 260                             SEG_PAGE_ANON;
 261                         addr += PAGESIZE;
 262                 }
 263                 return (len);
 264         } else {
 265                 struct  anon_map *amp = shmd->shm_amp;
 266                 struct  anon    *ap;
 267                 page_t          *pp;
 268                 pgcnt_t         anon_index;
 269                 struct vnode    *vp;
 270                 u_offset_t      off;
 271                 ulong_t         i;
 272                 int             ret;
 273                 anon_sync_obj_t cookie;
 274 
 275                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 276                 anon_index = seg_page(seg, addr);
 277                 npages = btopr(len);
 278                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 279                         return (EINVAL);
 280                 }
 281                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 282                 for (i = 0; i < npages; i++, anon_index++) {
 283                         ret = 0;
 284                         anon_array_enter(amp, anon_index, &cookie);
 285                         ap = anon_get_ptr(amp->ahp, anon_index);
 286                         if (ap != NULL) {
 287                                 swap_xlate(ap, &vp, &off);
 288                                 anon_array_exit(&cookie);
 289                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 290                                 if (pp != NULL) {
 291                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 292                                         page_unlock(pp);
 293                                 }
 294                         } else {
 295                                 anon_array_exit(&cookie);
 296                         }
 297                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 298                                 ret |= SEG_PAGE_LOCKED;
 299                         }
 300                         *vec++ = (char)ret;
 301                 }
 302                 ANON_LOCK_EXIT(&amp->a_rwlock);
 303                 return (len);
 304         }
 305 }
 306 
 307 static int
 308 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 309 {
 310         size_t share_size;
 311 
 312         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 313 
 314         /*
 315          * seg.s_size may have been rounded up to the largest page size
 316          * in shmat().
  317          * XXX This should be cleaned up. sptdestroy should take a length
 318          * argument which should be the same as sptcreate. Then
 319          * this rounding would not be needed (or is done in shm.c)
 320          * Only the check for full segment will be needed.
 321          *
 322          * XXX -- shouldn't raddr == 0 always? These tests don't seem
 323          * to be useful at all.
 324          */
 325         share_size = page_get_pagesize(seg->s_szc);
 326         ssize = P2ROUNDUP(ssize, share_size);
 327 
 328         if (raddr == seg->s_base && ssize == seg->s_size) {
 329                 seg_free(seg);
 330                 return (0);
 331         } else
 332                 return (EINVAL);
 333 }
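      /*
       * Illustrative arithmetic for the rounding above (hypothetical
       * numbers): if seg->s_szc selects a 4 MB large page,
       * page_get_pagesize() returns 0x400000, and an ssize of 5 MB
       * (0x500000) is rounded up by P2ROUNDUP to 8 MB (0x800000) before
       * being compared against seg->s_size.
       */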
 334 
 335 int
 336 segspt_create(struct seg *seg, caddr_t argsp)
 337 {
 338         int             err;
 339         caddr_t         addr = seg->s_base;
 340         struct spt_data *sptd;
 341         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 342         struct anon_map *amp = sptcargs->amp;
 343         struct kshmid   *sp = amp->a_sp;
 344         struct  cred    *cred = CRED();
 345         ulong_t         i, j, anon_index = 0;
 346         pgcnt_t         npages = btopr(amp->size);
 347         struct vnode    *vp;
 348         page_t          **ppa;
 349         uint_t          hat_flags;
 350         size_t          pgsz;
 351         pgcnt_t         pgcnt;
 352         caddr_t         a;
 353         pgcnt_t         pidx;
 354         size_t          sz;
 355         proc_t          *procp = curproc;
 356         rctl_qty_t      lockedbytes = 0;
 357         kproject_t      *proj;
 358 
 359         /*
 360          * We are holding the a_lock on the underlying dummy as,
 361          * so we can make calls to the HAT layer.
 362          */
 363         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 364         ASSERT(sp != NULL);
 365 
 366 #ifdef DEBUG
 367         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 368             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 369 #endif
 370         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 371                 if (err = anon_swap_adjust(npages))
 372                         return (err);
 373         }
 374         err = ENOMEM;
 375 
 376         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 377                 goto out1;
 378 
 379         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 380                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 381                     KM_NOSLEEP)) == NULL)
 382                         goto out2;
 383         }
 384 
 385         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 386 
 387         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 388                 goto out3;
 389 
 390         seg->s_ops = &segspt_ops;
 391         sptd->spt_vp = vp;
 392         sptd->spt_amp = amp;
 393         sptd->spt_prot = sptcargs->prot;
 394         sptd->spt_flags = sptcargs->flags;
 395         seg->s_data = (caddr_t)sptd;
 396         sptd->spt_ppa = NULL;
 397         sptd->spt_ppa_lckcnt = NULL;
 398         seg->s_szc = sptcargs->szc;
 399         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 400         sptd->spt_gen = 0;
 401 
 402         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 403         if (seg->s_szc > amp->a_szc) {
 404                 amp->a_szc = seg->s_szc;
 405         }
 406         ANON_LOCK_EXIT(&amp->a_rwlock);
 407 
 408         /*
 409          * Set policy to affect initial allocation of pages in
 410          * anon_map_createpages()
 411          */
 412         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 413             NULL, 0, ptob(npages));
 414 
 415         if (sptcargs->flags & SHM_PAGEABLE) {
 416                 size_t  share_sz;
 417                 pgcnt_t new_npgs, more_pgs;
 418                 struct anon_hdr *nahp;
 419                 zone_t *zone;
 420 
 421                 share_sz = page_get_pagesize(seg->s_szc);
 422                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
 423                         /*
  424                          * We round the size of the anon array up to a
  425                          * share_sz (typically 4 M) boundary because we
  426                          * always create a full 4 M worth of pages when
  427                          * locking and faulting, so we don't have to
  428                          * check corner cases such as whether there is
  429                          * enough room left to allocate a full 4 M page.
 430                          */
 431                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 432                         more_pgs = new_npgs - npages;
 433 
 434                         /*
 435                          * The zone will never be NULL, as a fully created
 436                          * shm always has an owning zone.
 437                          */
 438                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 439                         ASSERT(zone != NULL);
 440                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 441                                 err = ENOMEM;
 442                                 goto out4;
 443                         }
 444 
 445                         nahp = anon_create(new_npgs, ANON_SLEEP);
 446                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 447                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 448                             ANON_SLEEP);
 449                         anon_release(amp->ahp, npages);
 450                         amp->ahp = nahp;
 451                         ASSERT(amp->swresv == ptob(npages));
 452                         amp->swresv = amp->size = ptob(new_npgs);
 453                         ANON_LOCK_EXIT(&amp->a_rwlock);
 454                         npages = new_npgs;
 455                 }
 456 
 457                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 458                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 459                 sptd->spt_pcachecnt = 0;
 460                 sptd->spt_realsize = ptob(npages);
 461                 sptcargs->seg_spt = seg;
 462                 return (0);
 463         }
 464 
 465         /*
 466          * get array of pages for each anon slot in amp
 467          */
 468         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 469             seg, addr, S_CREATE, cred)) != 0)
 470                 goto out4;
 471 
 472         mutex_enter(&sp->shm_mlock);
 473 
  474         /* May be partially locked, so count bytes to charge for locking */
 475         for (i = 0; i < npages; i++)
 476                 if (ppa[i]->p_lckcnt == 0)
 477                         lockedbytes += PAGESIZE;
 478 
 479         proj = sp->shm_perm.ipc_proj;
 480 
 481         if (lockedbytes > 0) {
 482                 mutex_enter(&procp->p_lock);
 483                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 484                         mutex_exit(&procp->p_lock);
 485                         mutex_exit(&sp->shm_mlock);
 486                         for (i = 0; i < npages; i++)
 487                                 page_unlock(ppa[i]);
 488                         err = ENOMEM;
 489                         goto out4;
 490                 }
 491                 mutex_exit(&procp->p_lock);
 492         }
 493 
 494         /*
  495          * addr is the initial address for the first page in the ppa list
 496          */
 497         for (i = 0; i < npages; i++) {
 498                 /* attempt to lock all pages */
 499                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 500                         /*
 501                          * if unable to lock any page, unlock all
 502                          * of them and return error
 503                          */
 504                         for (j = 0; j < i; j++)
 505                                 page_pp_unlock(ppa[j], 0, 1);
 506                         for (i = 0; i < npages; i++)
 507                                 page_unlock(ppa[i]);
 508                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 509                         mutex_exit(&sp->shm_mlock);
 510                         err = ENOMEM;
 511                         goto out4;
 512                 }
 513         }
 514         mutex_exit(&sp->shm_mlock);
 515 
 516         /*
 517          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
  518          * for the entire life of the segment, for example platforms
 519          * that do not support Dynamic Reconfiguration.
 520          */
 521         hat_flags = HAT_LOAD_SHARE;
 522         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 523                 hat_flags |= HAT_LOAD_LOCK;
 524 
 525         /*
  526          * Load translations one large page at a time
  527          * to make sure we don't create mappings bigger than
  528          * the segment's size code, in case the underlying pages
  529          * are shared with a segvn segment that uses a bigger
  530          * size code than we do.
 531          */
 532         pgsz = page_get_pagesize(seg->s_szc);
 533         pgcnt = page_get_pagecnt(seg->s_szc);
 534         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 535                 sz = MIN(pgsz, ptob(npages - pidx));
 536                 hat_memload_array(seg->s_as->a_hat, a, sz,
 537                     &ppa[pidx], sptd->spt_prot, hat_flags);
 538         }
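              /*
               * Illustrative numbers (assumptions, not derived from this
               * file): with an 8 KB PAGESIZE and a size code selecting
               * 4 MB pages, pgsz is 4 MB and pgcnt is 512, so each pass
               * above loads 512 ppa[] entries; the last pass loads fewer
               * when npages is not a multiple of pgcnt, since sz is
               * clamped by MIN().
               */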
 539 
 540         /*
 541          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 542          * we will leave the pages locked SE_SHARED for the life
 543          * of the ISM segment. This will prevent any calls to
 544          * hat_pageunload() on this ISM segment for those platforms.
 545          */
 546         if (!(hat_flags & HAT_LOAD_LOCK)) {
 547                 /*
 548                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 549                  * we no longer need to hold the SE_SHARED lock on the pages,
 550                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 551                  * SE_SHARED lock on the pages as necessary.
 552                  */
 553                 for (i = 0; i < npages; i++)
 554                         page_unlock(ppa[i]);
 555         }
 556         sptd->spt_pcachecnt = 0;
 557         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 558         sptd->spt_realsize = ptob(npages);
 559         atomic_add_long(&spt_used, npages);
 560         sptcargs->seg_spt = seg;
 561         return (0);
 562 
 563 out4:
 564         seg->s_data = NULL;
 565         kmem_free(vp, sizeof (*vp));
 566         cv_destroy(&sptd->spt_cv);
 567 out3:
 568         mutex_destroy(&sptd->spt_lock);
 569         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 570                 kmem_free(ppa, (sizeof (*ppa) * npages));
 571 out2:
 572         kmem_free(sptd, sizeof (*sptd));
 573 out1:
 574         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 575                 anon_swap_restore(npages);
 576         return (err);
 577 }
 578 
 579 /*ARGSUSED*/
 580 void
 581 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 582 {
 583         struct page     *pp;
 584         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 585         pgcnt_t         npages;
 586         ulong_t         anon_idx;
 587         struct anon_map *amp;
 588         struct anon     *ap;
 589         struct vnode    *vp;
 590         u_offset_t      off;
 591         uint_t          hat_flags;
 592         int             root = 0;
 593         pgcnt_t         pgs, curnpgs = 0;
 594         page_t          *rootpp;
 595         rctl_qty_t      unlocked_bytes = 0;
 596         kproject_t      *proj;
 597         kshmid_t        *sp;
 598 
 599         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 600 
 601         len = P2ROUNDUP(len, PAGESIZE);
 602 
 603         npages = btop(len);
 604 
 605         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 606         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 607             (sptd->spt_flags & SHM_PAGEABLE)) {
 608                 hat_flags = HAT_UNLOAD_UNMAP;
 609         }
 610 
 611         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 612 
 613         amp = sptd->spt_amp;
 614         if (sptd->spt_flags & SHM_PAGEABLE)
 615                 npages = btop(amp->size);
 616 
 617         ASSERT(amp != NULL);
 618 
 619         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 620                 sp = amp->a_sp;
 621                 proj = sp->shm_perm.ipc_proj;
 622                 mutex_enter(&sp->shm_mlock);
 623         }
 624         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 625                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 626                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 627                                 panic("segspt_free_pages: null app");
 628                                 /*NOTREACHED*/
 629                         }
 630                 } else {
 631                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 632                             == NULL)
 633                                 continue;
 634                 }
 635                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 636                 swap_xlate(ap, &vp, &off);
 637 
 638                 /*
 639                  * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
  640                  * the pages will not be held SE_SHARED locked at this
 641                  * point.
 642                  *
 643                  * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 644                  * the pages are still held SE_SHARED locked from the
 645                  * original segspt_create()
 646                  *
 647                  * Our goal is to get SE_EXCL lock on each page, remove
 648                  * permanent lock on it and invalidate the page.
 649                  */
 650                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 651                         if (hat_flags == HAT_UNLOAD_UNMAP)
 652                                 pp = page_lookup(vp, off, SE_EXCL);
 653                         else {
 654                                 if ((pp = page_find(vp, off)) == NULL) {
 655                                         panic("segspt_free_pages: "
 656                                             "page not locked");
 657                                         /*NOTREACHED*/
 658                                 }
 659                                 if (!page_tryupgrade(pp)) {
 660                                         page_unlock(pp);
 661                                         pp = page_lookup(vp, off, SE_EXCL);
 662                                 }
 663                         }
 664                         if (pp == NULL) {
 665                                 panic("segspt_free_pages: "
 666                                     "page not in the system");
 667                                 /*NOTREACHED*/
 668                         }
 669                         ASSERT(pp->p_lckcnt > 0);
 670                         page_pp_unlock(pp, 0, 1);
 671                         if (pp->p_lckcnt == 0)
 672                                 unlocked_bytes += PAGESIZE;
 673                 } else {
 674                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 675                                 continue;
 676                 }
 677                 /*
 678                  * It's logical to invalidate the pages here as in most cases
 679                  * these were created by segspt.
 680                  */
 681                 if (pp->p_szc != 0) {
 682                         if (root == 0) {
 683                                 ASSERT(curnpgs == 0);
 684                                 root = 1;
 685                                 rootpp = pp;
 686                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 687                                 ASSERT(pgs > 1);
 688                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 689                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 690                                 curnpgs--;
 691                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 692                                 ASSERT(curnpgs == 1);
 693                                 ASSERT(page_pptonum(pp) ==
 694                                     page_pptonum(rootpp) + (pgs - 1));
 695                                 page_destroy_pages(rootpp);
 696                                 root = 0;
 697                                 curnpgs = 0;
 698                         } else {
 699                                 ASSERT(curnpgs > 1);
 700                                 ASSERT(page_pptonum(pp) ==
 701                                     page_pptonum(rootpp) + (pgs - curnpgs));
 702                                 curnpgs--;
 703                         }
 704                 } else {
 705                         if (root != 0 || curnpgs != 0) {
 706                                 panic("segspt_free_pages: bad large page");
 707                                 /*NOTREACHED*/
 708                         }
 709                         /*
 710                          * Before destroying the pages, we need to take care
 711                          * of the rctl locked memory accounting. For that
  712                          * we need to calculate the unlocked_bytes.
 713                          */
 714                         if (pp->p_lckcnt > 0)
 715                                 unlocked_bytes += PAGESIZE;
 716                         /*LINTED: constant in conditional context */
 717                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 718                 }
 719         }
 720         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 721                 if (unlocked_bytes > 0)
 722                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 723                 mutex_exit(&sp->shm_mlock);
 724         }
 725         if (root != 0 || curnpgs != 0) {
 726                 panic("segspt_free_pages: bad large page");
 727                 /*NOTREACHED*/
 728         }
 729 
 730         /*
 731          * mark that pages have been released
 732          */
 733         sptd->spt_realsize = 0;
 734 
 735         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 736                 atomic_add_long(&spt_used, -npages);
 737                 anon_swap_restore(npages);
 738         }
 739 }
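      /*
       * Illustrative walk-through of the large-page bookkeeping above,
       * assuming a hypothetical 8-page large page: the first constituent
       * page sets rootpp and leaves curnpgs at 7; the next six
       * constituents only decrement curnpgs; when the eighth constituent
       * (page number rootpp + 7) is seen with curnpgs == 1,
       * page_destroy_pages(rootpp) frees the whole large page and
       * root/curnpgs are reset to 0.
       */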
 740 
 741 /*
 742  * Get memory allocation policy info for specified address in given segment
 743  */
 744 static lgrp_mem_policy_info_t *
 745 segspt_getpolicy(struct seg *seg, caddr_t addr)
 746 {
 747         struct anon_map         *amp;
 748         ulong_t                 anon_index;
 749         lgrp_mem_policy_info_t  *policy_info;
 750         struct spt_data         *spt_data;
 751 
 752         ASSERT(seg != NULL);
 753 
 754         /*
 755          * Get anon_map from segspt
 756          *
  757          * Assume that no lock needs to be held on the anon_map, since
  758          * it should be protected by its reference count, which must be
  759          * nonzero for an existing segment.
  760          * We do need to grab the readers lock on the policy tree, though.
 761          */
 762         spt_data = (struct spt_data *)seg->s_data;
 763         if (spt_data == NULL)
 764                 return (NULL);
 765         amp = spt_data->spt_amp;
 766         ASSERT(amp->refcnt != 0);
 767 
 768         /*
 769          * Get policy info
 770          *
 771          * Assume starting anon index of 0
 772          */
 773         anon_index = seg_page(seg, addr);
 774         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 775 
 776         return (policy_info);
 777 }
 778 
 779 /*
 780  * DISM only.
 781  * Return locked pages over a given range.
 782  *
 783  * We will cache all DISM locked pages and save the pplist for the
 784  * entire segment in the ppa field of the underlying DISM segment structure.
 785  * Later, during a call to segspt_reclaim() we will use this ppa array
 786  * to page_unlock() all of the pages and then we will free this ppa list.
 787  */
 788 /*ARGSUSED*/
 789 static int
 790 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 791     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 792 {
 793         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 794         struct  seg     *sptseg = shmd->shm_sptseg;
 795         struct  spt_data *sptd = sptseg->s_data;
 796         pgcnt_t pg_idx, npages, tot_npages, npgs;
 797         struct  page **pplist, **pl, **ppa, *pp;
 798         struct  anon_map *amp;
 799         spgcnt_t        an_idx;
 800         int     ret = ENOTSUP;
 801         uint_t  pl_built = 0;
 802         struct  anon *ap;
 803         struct  vnode *vp;
 804         u_offset_t off;
 805         pgcnt_t claim_availrmem = 0;
 806         uint_t  szc;
 807 
 808         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 809         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 810 
 811         /*
 812          * We want to lock/unlock the entire ISM segment. Therefore,
  813          * we will be using the underlying sptseg and its base address
 814          * and length for the caching arguments.
 815          */
 816         ASSERT(sptseg);
 817         ASSERT(sptd);
 818 
 819         pg_idx = seg_page(seg, addr);
 820         npages = btopr(len);
 821 
 822         /*
  823          * check if the request is larger than the number of pages covered
 824          * by amp
 825          */
 826         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 827                 *ppp = NULL;
 828                 return (ENOTSUP);
 829         }
 830 
 831         if (type == L_PAGEUNLOCK) {
 832                 ASSERT(sptd->spt_ppa != NULL);
 833 
 834                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 835                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 836 
 837                 /*
 838                  * If someone is blocked while unmapping, we purge
 839                  * segment page cache and thus reclaim pplist synchronously
 840                  * without waiting for seg_pasync_thread. This speeds up
 841                  * unmapping in cases where munmap(2) is called, while
 842                  * raw async i/o is still in progress or where a thread
 843                  * exits on data fault in a multithreaded application.
 844                  */
 845                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 846                     (AS_ISUNMAPWAIT(seg->s_as) &&
 847                     shmd->shm_softlockcnt > 0)) {
 848                         segspt_purge(seg);
 849                 }
 850                 return (0);
 851         }
 852 
 853         /* The L_PAGELOCK case ... */
 854 
 855         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 856                 segspt_purge(seg);
 857                 /*
  858                  * for DISM the ppa array needs to be rebuilt since the
  859                  * number of locked pages could have changed
 860                  */
 861                 *ppp = NULL;
 862                 return (ENOTSUP);
 863         }
 864 
 865         /*
 866          * First try to find pages in segment page cache, without
 867          * holding the segment lock.
 868          */
 869         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 870             S_WRITE, SEGP_FORCE_WIRED);
 871         if (pplist != NULL) {
 872                 ASSERT(sptd->spt_ppa != NULL);
 873                 ASSERT(sptd->spt_ppa == pplist);
 874                 ppa = sptd->spt_ppa;
 875                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 876                         if (ppa[an_idx] == NULL) {
 877                                 seg_pinactive(seg, NULL, seg->s_base,
 878                                     sptd->spt_amp->size, ppa,
 879                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 880                                 *ppp = NULL;
 881                                 return (ENOTSUP);
 882                         }
 883                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 884                                 npgs = page_get_pagecnt(szc);
 885                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 886                         } else {
 887                                 an_idx++;
 888                         }
 889                 }
 890                 /*
 891                  * Since we cache the entire DISM segment, we want to
 892                  * set ppp to point to the first slot that corresponds
 893                  * to the requested addr, i.e. pg_idx.
 894                  */
 895                 *ppp = &(sptd->spt_ppa[pg_idx]);
 896                 return (0);
 897         }
 898 
 899         mutex_enter(&sptd->spt_lock);
 900         /*
  901          * retry the lookup in the segment page cache, now holding spt_lock
 902          */
 903         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 904             S_WRITE, SEGP_FORCE_WIRED);
 905         if (pplist != NULL) {
 906                 ASSERT(sptd->spt_ppa != NULL);
 907                 ASSERT(sptd->spt_ppa == pplist);
 908                 ppa = sptd->spt_ppa;
 909                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 910                         if (ppa[an_idx] == NULL) {
 911                                 mutex_exit(&sptd->spt_lock);
 912                                 seg_pinactive(seg, NULL, seg->s_base,
 913                                     sptd->spt_amp->size, ppa,
 914                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 915                                 *ppp = NULL;
 916                                 return (ENOTSUP);
 917                         }
 918                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 919                                 npgs = page_get_pagecnt(szc);
 920                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 921                         } else {
 922                                 an_idx++;
 923                         }
 924                 }
 925                 /*
 926                  * Since we cache the entire DISM segment, we want to
 927                  * set ppp to point to the first slot that corresponds
 928                  * to the requested addr, i.e. pg_idx.
 929                  */
 930                 mutex_exit(&sptd->spt_lock);
 931                 *ppp = &(sptd->spt_ppa[pg_idx]);
 932                 return (0);
 933         }
 934         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 935             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 936                 mutex_exit(&sptd->spt_lock);
 937                 *ppp = NULL;
 938                 return (ENOTSUP);
 939         }
 940 
 941         /*
 942          * No need to worry about protections because DISM pages are always rw.
 943          */
 944         pl = pplist = NULL;
 945         amp = sptd->spt_amp;
 946 
 947         /*
 948          * Do we need to build the ppa array?
 949          */
 950         if (sptd->spt_ppa == NULL) {
 951                 pgcnt_t lpg_cnt = 0;
 952 
 953                 pl_built = 1;
 954                 tot_npages = btopr(sptd->spt_amp->size);
 955 
 956                 ASSERT(sptd->spt_pcachecnt == 0);
 957                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 958                 pl = pplist;
 959 
 960                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 961                 for (an_idx = 0; an_idx < tot_npages; ) {
 962                         ap = anon_get_ptr(amp->ahp, an_idx);
 963                         /*
  964                          * Cache only mlocked pages. For large pages,
  965                          * if one (constituent) page is mlocked, all
  966                          * pages of that large page are cached as well.
  967                          * This allows for quick lookups in the ppa
  968                          * array.
 969                          */
 970                         if ((ap != NULL) && (lpg_cnt != 0 ||
 971                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
 972 
 973                                 swap_xlate(ap, &vp, &off);
 974                                 pp = page_lookup(vp, off, SE_SHARED);
 975                                 ASSERT(pp != NULL);
 976                                 if (lpg_cnt == 0) {
 977                                         lpg_cnt++;
 978                                         /*
 979                                          * For a small page, we are done --
  980                                          * lpg_cnt is reset to 0 below.
 981                                          *
 982                                          * For a large page, we are guaranteed
 983                                          * to find the anon structures of all
 984                                          * constituent pages and a non-zero
 985                                          * lpg_cnt ensures that we don't test
 986                                          * for mlock for these. We are done
  987                                          * when lpg_cnt reaches (npgs + 1).
 988                                          * If we are not the first constituent
 989                                          * page, restart at the first one.
 990                                          */
 991                                         npgs = page_get_pagecnt(pp->p_szc);
 992                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
 993                                                 an_idx = P2ALIGN(an_idx, npgs);
 994                                                 page_unlock(pp);
 995                                                 continue;
 996                                         }
 997                                 }
 998                                 if (++lpg_cnt > npgs)
 999                                         lpg_cnt = 0;
1000 
1001                                 /*
1002                                  * availrmem is decremented only
1003                                  * for unlocked pages
1004                                  */
1005                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1006                                         claim_availrmem++;
1007                                 pplist[an_idx] = pp;
1008                         }
1009                         an_idx++;
1010                 }
1011                 ANON_LOCK_EXIT(&amp->a_rwlock);
1012 
1013                 if (claim_availrmem) {
1014                         mutex_enter(&freemem_lock);
1015                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1016                                 mutex_exit(&freemem_lock);
1017                                 ret = ENOTSUP;
1018                                 claim_availrmem = 0;
1019                                 goto insert_fail;
1020                         } else {
1021                                 availrmem -= claim_availrmem;
1022                         }
1023                         mutex_exit(&freemem_lock);
1024                 }
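                      /*
                       * Illustrative accounting (hypothetical numbers): if
                       * 100 of the pages cached above were not individually
                       * DISM-locked (spt_ppa_lckcnt[] == 0), claim_availrmem
                       * is 100 and availrmem is reduced by 100 for as long
                       * as this ppa list stays cached; the insert_fail path
                       * below returns it if caching fails.
                       */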
1025 
1026                 sptd->spt_ppa = pl;
1027         } else {
1028                 /*
1029                  * We already have a valid ppa[].
1030                  */
1031                 pl = sptd->spt_ppa;
1032         }
1033 
1034         ASSERT(pl != NULL);
1035 
1036         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1037             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1038             segspt_reclaim);
1039         if (ret == SEGP_FAIL) {
1040                 /*
1041                  * seg_pinsert failed. We return
1042                  * ENOTSUP, so that the as_pagelock() code will
1043                  * then try the slower F_SOFTLOCK path.
1044                  */
1045                 if (pl_built) {
1046                         /*
1047                          * No one else has referenced the ppa[].
1048                          * We created it and we need to destroy it.
1049                          */
1050                         sptd->spt_ppa = NULL;
1051                 }
1052                 ret = ENOTSUP;
1053                 goto insert_fail;
1054         }
1055 
1056         /*
1057          * In either case, we increment softlockcnt on the 'real' segment.
1058          */
1059         sptd->spt_pcachecnt++;
1060         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1061 
1062         ppa = sptd->spt_ppa;
1063         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1064                 if (ppa[an_idx] == NULL) {
1065                         mutex_exit(&sptd->spt_lock);
1066                         seg_pinactive(seg, NULL, seg->s_base,
1067                             sptd->spt_amp->size,
1068                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1069                         *ppp = NULL;
1070                         return (ENOTSUP);
1071                 }
1072                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1073                         npgs = page_get_pagecnt(szc);
1074                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1075                 } else {
1076                         an_idx++;
1077                 }
1078         }
1079         /*
1080          * We can now drop the sptd->spt_lock since the ppa[]
 1081          * exists and we have incremented pcachecnt.
1082          */
1083         mutex_exit(&sptd->spt_lock);
1084 
1085         /*
1086          * Since we cache the entire segment, we want to
1087          * set ppp to point to the first slot that corresponds
1088          * to the requested addr, i.e. pg_idx.
1089          */
1090         *ppp = &(sptd->spt_ppa[pg_idx]);
1091         return (0);
1092 
1093 insert_fail:
1094         /*
1095          * We will only reach this code if we tried and failed.
1096          *
 1097          * We can drop the lock on the dummy seg now that we have failed
1098          * to set up a new ppa[].
1099          */
1100         mutex_exit(&sptd->spt_lock);
1101 
1102         if (pl_built) {
1103                 if (claim_availrmem) {
1104                         mutex_enter(&freemem_lock);
1105                         availrmem += claim_availrmem;
1106                         mutex_exit(&freemem_lock);
1107                 }
1108 
1109                 /*
1110                  * We created pl and we need to destroy it.
1111                  */
1112                 pplist = pl;
1113                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1114                         if (pplist[an_idx] != NULL)
1115                                 page_unlock(pplist[an_idx]);
1116                 }
1117                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1118         }
1119 
1120         if (shmd->shm_softlockcnt <= 0) {
1121                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1122                         mutex_enter(&seg->s_as->a_contents);
1123                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1124                                 AS_CLRUNMAPWAIT(seg->s_as);
1125                                 cv_broadcast(&seg->s_as->a_cv);
1126                         }
1127                         mutex_exit(&seg->s_as->a_contents);
1128                 }
1129         }
1130         *ppp = NULL;
1131         return (ret);
1132 }
1133 
1134 
1135 
1136 /*
 1137  * Return locked pages over a given range.
1138  *
1139  * We will cache the entire ISM segment and save the pplist for the
1140  * entire segment in the ppa field of the underlying ISM segment structure.
1141  * Later, during a call to segspt_reclaim() we will use this ppa array
1142  * to page_unlock() all of the pages and then we will free this ppa list.
1143  */
1144 /*ARGSUSED*/
1145 static int
1146 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1147     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1148 {
1149         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1150         struct seg      *sptseg = shmd->shm_sptseg;
1151         struct spt_data *sptd = sptseg->s_data;
1152         pgcnt_t np, page_index, npages;
1153         caddr_t a, spt_base;
1154         struct page **pplist, **pl, *pp;
1155         struct anon_map *amp;
1156         ulong_t anon_index;
1157         int ret = ENOTSUP;
1158         uint_t  pl_built = 0;
1159         struct anon *ap;
1160         struct vnode *vp;
1161         u_offset_t off;
1162 
1163         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1164         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1165 
1166 
1167         /*
1168          * We want to lock/unlock the entire ISM segment. Therefore,
 1169          * we will be using the underlying sptseg and its base address
1170          * and length for the caching arguments.
1171          */
1172         ASSERT(sptseg);
1173         ASSERT(sptd);
1174 
1175         if (sptd->spt_flags & SHM_PAGEABLE) {
1176                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1177         }
1178 
1179         page_index = seg_page(seg, addr);
1180         npages = btopr(len);
1181 
1182         /*
 1183          * check if the request is larger than the number of pages covered
1184          * by amp
1185          */
1186         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1187                 *ppp = NULL;
1188                 return (ENOTSUP);
1189         }
1190 
1191         if (type == L_PAGEUNLOCK) {
1192 
1193                 ASSERT(sptd->spt_ppa != NULL);
1194 
1195                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1196                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1197 
1198                 /*
1199                  * If someone is blocked while unmapping, we purge
1200                  * segment page cache and thus reclaim pplist synchronously
1201                  * without waiting for seg_pasync_thread. This speeds up
1202                  * unmapping in cases where munmap(2) is called, while
1203                  * raw async i/o is still in progress or where a thread
1204                  * exits on data fault in a multithreaded application.
1205                  */
1206                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1207                         segspt_purge(seg);
1208                 }
1209                 return (0);
1210         }
1211 
1212         /* The L_PAGELOCK case... */
1213 
1214         /*
1215          * First try to find pages in segment page cache, without
1216          * holding the segment lock.
1217          */
1218         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1219             S_WRITE, SEGP_FORCE_WIRED);
1220         if (pplist != NULL) {
1221                 ASSERT(sptd->spt_ppa == pplist);
1222                 ASSERT(sptd->spt_ppa[page_index]);
1223                 /*
1224                  * Since we cache the entire ISM segment, we want to
1225                  * set ppp to point to the first slot that corresponds
1226                  * to the requested addr, i.e. page_index.
1227                  */
1228                 *ppp = &(sptd->spt_ppa[page_index]);
1229                 return (0);
1230         }
1231 
1232         mutex_enter(&sptd->spt_lock);
1233 
1234         /*
 1235          * retry the lookup in the segment page cache, now holding spt_lock
1236          */
1237         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1238             S_WRITE, SEGP_FORCE_WIRED);
1239         if (pplist != NULL) {
1240                 ASSERT(sptd->spt_ppa == pplist);
1241                 /*
1242                  * Since we cache the entire segment, we want to
1243                  * set ppp to point to the first slot that corresponds
1244                  * to the requested addr, i.e. page_index.
1245                  */
1246                 mutex_exit(&sptd->spt_lock);
1247                 *ppp = &(sptd->spt_ppa[page_index]);
1248                 return (0);
1249         }
1250 
1251         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1253                 mutex_exit(&sptd->spt_lock);
1254                 *ppp = NULL;
1255                 return (ENOTSUP);
1256         }
1257 
1258         /*
1259          * No need to worry about protections because ISM pages
1260          * are always rw.
1261          */
1262         pl = pplist = NULL;
1263 
1264         /*
1265          * Do we need to build the ppa array?
1266          */
1267         if (sptd->spt_ppa == NULL) {
1268                 ASSERT(sptd->spt_ppa == pplist);
1269 
1270                 spt_base = sptseg->s_base;
1271                 pl_built = 1;
1272 
1273                 /*
1274                  * availrmem is decremented once during anon_swap_adjust()
1275                  * and is incremented during the anon_unresv(), which is
1276                  * called from shm_rm_amp() when the segment is destroyed.
1277                  */
1278                 amp = sptd->spt_amp;
1279                 ASSERT(amp != NULL);
1280 
1281                 /* pcachecnt is protected by sptd->spt_lock */
1282                 ASSERT(sptd->spt_pcachecnt == 0);
1283                 pplist = kmem_zalloc(sizeof (page_t *)
1284                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1285                 pl = pplist;
1286 
1287                 anon_index = seg_page(sptseg, spt_base);
1288 
1289                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1290                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1291                     a += PAGESIZE, anon_index++, pplist++) {
1292                         ap = anon_get_ptr(amp->ahp, anon_index);
1293                         ASSERT(ap != NULL);
1294                         swap_xlate(ap, &vp, &off);
1295                         pp = page_lookup(vp, off, SE_SHARED);
1296                         ASSERT(pp != NULL);
1297                         *pplist = pp;
1298                 }
1299                 ANON_LOCK_EXIT(&amp->a_rwlock);
1300 
1301                 if (a < (spt_base + sptd->spt_amp->size)) {
1302                         ret = ENOTSUP;
1303                         goto insert_fail;
1304                 }
1305                 sptd->spt_ppa = pl;
1306         } else {
1307                 /*
1308                  * We already have a valid ppa[].
1309                  */
1310                 pl = sptd->spt_ppa;
1311         }
1312 
1313         ASSERT(pl != NULL);
1314 
1315         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1316             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1317             segspt_reclaim);
1318         if (ret == SEGP_FAIL) {
1319                 /*
1320                  * seg_pinsert failed. We return
1321                  * ENOTSUP, so that the as_pagelock() code will
1322                  * then try the slower F_SOFTLOCK path.
1323                  */
1324                 if (pl_built) {
1325                         /*
1326                          * No one else has referenced the ppa[].
1327                          * We created it and we need to destroy it.
1328                          */
1329                         sptd->spt_ppa = NULL;
1330                 }
1331                 ret = ENOTSUP;
1332                 goto insert_fail;
1333         }
1334 
1335         /*
1336          * In either case, we increment softlockcnt on the 'real' segment.
1337          */
1338         sptd->spt_pcachecnt++;
1339         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1340 
1341         /*
1342          * We can now drop the sptd->spt_lock since the ppa[]
1343          * exists and we have incremented pcachecnt.
1344          */
1345         mutex_exit(&sptd->spt_lock);
1346 
1347         /*
1348          * Since we cache the entire segment, we want to
1349          * set ppp to point to the first slot that corresponds
1350          * to the requested addr, i.e. page_index.
1351          */
1352         *ppp = &(sptd->spt_ppa[page_index]);
1353         return (0);
1354 
1355 insert_fail:
1356         /*
1357          * We only reach this code if we tried and failed.
1358          *
1359          * We can drop the lock on the dummy seg once we've failed
1360          * to set up a new ppa[].
1361          */
1362         mutex_exit(&sptd->spt_lock);
1363 
1364         if (pl_built) {
1365                 /*
1366                  * We created pl and we need to destroy it.
1367                  */
1368                 pplist = pl;
1369                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1370                 while (np) {
1371                         page_unlock(*pplist);
1372                         np--;
1373                         pplist++;
1374                 }
1375                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1376         }
1377         if (shmd->shm_softlockcnt <= 0) {
1378                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1379                         mutex_enter(&seg->s_as->a_contents);
1380                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1381                                 AS_CLRUNMAPWAIT(seg->s_as);
1382                                 cv_broadcast(&seg->s_as->a_cv);
1383                         }
1384                         mutex_exit(&seg->s_as->a_contents);
1385                 }
1386         }
1387         *ppp = NULL;
1388         return (ret);
1389 }
1390 
1391 /*
1392  * purge any cached pages in the I/O page cache
1393  */
1394 static void
1395 segspt_purge(struct seg *seg)
1396 {
1397         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1398 }
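
/*
 * Usage sketch (informal): segspt_shmunmap() below calls segspt_purge()
 * at most once when it finds shm_softlockcnt still nonzero, forcing any
 * cached pplists out of pcache (which calls back into segspt_reclaim())
 * before retrying the unmap; if the count is still nonzero it returns
 * EAGAIN.
 */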
1399 
1400 static int
1401 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1402         enum seg_rw rw, int async)
1403 {
1404         struct seg *seg = (struct seg *)ptag;
1405         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1406         struct  seg     *sptseg;
1407         struct  spt_data *sptd;
1408         pgcnt_t npages, i, free_availrmem = 0;
1409         int     done = 0;
1410 
1411 #ifdef lint
1412         addr = addr;
1413 #endif
1414         sptseg = shmd->shm_sptseg;
1415         sptd = sptseg->s_data;
1416         npages = (len >> PAGESHIFT);
1417         ASSERT(npages);
1418         ASSERT(sptd->spt_pcachecnt != 0);
1419         ASSERT(sptd->spt_ppa == pplist);
1420         ASSERT(npages == btopr(sptd->spt_amp->size));
1421         ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1422 
1423         /*
1424          * Acquire the lock on the dummy seg and destroy the
1425          * ppa array IF this is the last pcachecnt.
1426          */
1427         mutex_enter(&sptd->spt_lock);
1428         if (--sptd->spt_pcachecnt == 0) {
1429                 for (i = 0; i < npages; i++) {
1430                         if (pplist[i] == NULL) {
1431                                 continue;
1432                         }
1433                         if (rw == S_WRITE) {
1434                                 hat_setrefmod(pplist[i]);
1435                         } else {
1436                                 hat_setref(pplist[i]);
1437                         }
1438                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1439                             (sptd->spt_ppa_lckcnt[i] == 0))
1440                                 free_availrmem++;
1441                         page_unlock(pplist[i]);
1442                 }
1443                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1444                         mutex_enter(&freemem_lock);
1445                         availrmem += free_availrmem;
1446                         mutex_exit(&freemem_lock);
1447                 }
1448                 /*
1449                  * Since we want to cache/uncache the entire ISM segment,
1450                  * we will track the pplist in a segspt specific field
1451                  * ppa, that is initialized at the time we add an entry to
1452                  * the cache.
1453                  */
1454                 ASSERT(sptd->spt_pcachecnt == 0);
1455                 kmem_free(pplist, sizeof (page_t *) * npages);
1456                 sptd->spt_ppa = NULL;
1457                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1458                 sptd->spt_gen++;
1459                 cv_broadcast(&sptd->spt_cv);
1460                 done = 1;
1461         }
1462         mutex_exit(&sptd->spt_lock);
1463 
1464         /*
1465          * If we are the pcache async thread or were called via
1466          * seg_ppurge_wiredpp() we may not hold the AS lock (in this case the
1467          * async argument is not 0). This means that if softlockcnt drops
1468          * to 0 after the decrement below, the address space may get freed.
1469          * We can't allow that: after softlockcnt drops to 0 we still need
1470          * the as structure to wake up unmap waiters; to keep it around we take
1471          * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1472          * this mutex as a barrier to make sure this routine completes before
1473          * segment is freed.
1474          *
1475          * The second complication we have to deal with in the async case is
1476          * the possibility of a missed wakeup of the unmap wait thread. When
1477          * we don't hold the as lock here we may take the a_contents lock
1478          * before an unmap wait thread that was first to see that softlockcnt
1479          * was still not 0. As a result we'll fail to wake up that unmap wait
1480          * thread. To avoid this race we set the nounmapwait flag in the as
1481          * structure if we drop softlockcnt to 0 while async is not 0. The
1482          * unmapwait thread will not block if this flag is set.
1483          */
1484         if (async)
1485                 mutex_enter(&shmd->shm_segfree_syncmtx);
1486 
1487         /*
1488          * Now decrement softlockcnt.
1489          */
1490         ASSERT(shmd->shm_softlockcnt > 0);
1491         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1492 
1493         if (shmd->shm_softlockcnt <= 0) {
1494                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1495                         mutex_enter(&seg->s_as->a_contents);
1496                         if (async)
1497                                 AS_SETNOUNMAPWAIT(seg->s_as);
1498                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1499                                 AS_CLRUNMAPWAIT(seg->s_as);
1500                                 cv_broadcast(&seg->s_as->a_cv);
1501                         }
1502                         mutex_exit(&seg->s_as->a_contents);
1503                 }
1504         }
1505 
1506         if (async)
1507                 mutex_exit(&shmd->shm_segfree_syncmtx);
1508 
1509         return (done);
1510 }
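
/*
 * Informal sketch of the pcache interaction, based on the code above: the
 * pagelock path registers this routine with seg_pinsert() and bumps
 * spt_pcachecnt and shm_softlockcnt; when pcache later drops the entry
 * (via seg_ppurge(), the pcache async thread, or an unmap) this routine
 * drops both counts again and, once spt_pcachecnt reaches 0, frees the
 * cached ppa[] and wakes any waiters.
 */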
1511 
1512 /*
1513  * Do a F_SOFTUNLOCK call over the range requested.
1514  * The range must have already been F_SOFTLOCK'ed.
1515  *
1516  * The calls to acquire and release the anon map lock mutex were
1517  * removed in order to avoid a deadly embrace during a DR
1518  * memory delete operation.  (E.g., DR blocks while waiting for an
1519  * exclusive lock on a page that is being used for kaio; the
1520  * thread that will complete the kaio and call segspt_softunlock
1521  * blocks on the anon map lock; another thread holding the anon
1522  * map lock blocks on another page lock via the segspt_shmfault
1523  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1524  *
1525  * The appropriateness of the removal is based upon the following:
1526  * 1. If we are holding a segment's reader lock and the page is held
1527  * shared, then the corresponding element in anonmap which points to
1528  * anon struct cannot change and there is no need to acquire the
1529  * anonymous map lock.
1530  * 2. Threads in segspt_softunlock have a reader lock on the segment
1531  * and already have the shared page lock, so we are guaranteed that
1532  * the anon map slot cannot change and therefore can call anon_get_ptr()
1533  * without grabbing the anonymous map lock.
1534  * 3. Threads that softlock a shared page break copy-on-write, even if
1535  * it's a read.  Thus cow faults can be ignored with respect to soft
1536  * unlocking, since the breaking of cow means that the anon slot(s) will
1537  * not be shared.
1538  */
1539 static void
1540 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1541         size_t len, enum seg_rw rw)
1542 {
1543         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1544         struct seg      *sptseg;
1545         struct spt_data *sptd;
1546         page_t *pp;
1547         caddr_t adr;
1548         struct vnode *vp;
1549         u_offset_t offset;
1550         ulong_t anon_index;
1551         struct anon_map *amp;           /* XXX - for locknest */
1552         struct anon *ap = NULL;
1553         pgcnt_t npages;
1554 
1555         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1556 
1557         sptseg = shmd->shm_sptseg;
1558         sptd = sptseg->s_data;
1559 
1560         /*
1561          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1562          * and therefore their pages are SE_SHARED locked
1563          * for the entire life of the segment.
1564          */
1565         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1566             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1567                 goto softlock_decrement;
1568         }
1569 
1570         /*
1571          * Any thread is free to do a page_find and
1572          * page_unlock() on the pages within this seg.
1573          *
1574          * We are already holding the as->a_lock on the user's
1575          * real segment, but we need to hold the a_lock on the
1576          * underlying dummy as. This is mostly to satisfy the
1577          * underlying HAT layer.
1578          */
1579         AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1580         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1581         AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1582 
1583         amp = sptd->spt_amp;
1584         ASSERT(amp != NULL);
1585         anon_index = seg_page(sptseg, sptseg_addr);
1586 
1587         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1588                 ap = anon_get_ptr(amp->ahp, anon_index++);
1589                 ASSERT(ap != NULL);
1590                 swap_xlate(ap, &vp, &offset);
1591 
1592                 /*
1593                  * Use page_find() instead of page_lookup() to
1594                  * find the page since we know that it has a
1595                  * "shared" lock.
1596                  */
1597                 pp = page_find(vp, offset);
1598                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1599                 if (pp == NULL) {
1600                         panic("segspt_softunlock: "
1601                             "addr %p, ap %p, vp %p, off %llx",
1602                             (void *)adr, (void *)ap, (void *)vp, offset);
1603                         /*NOTREACHED*/
1604                 }
1605 
1606                 if (rw == S_WRITE) {
1607                         hat_setrefmod(pp);
1608                 } else if (rw != S_OTHER) {
1609                         hat_setref(pp);
1610                 }
1611                 page_unlock(pp);
1612         }
1613 
1614 softlock_decrement:
1615         npages = btopr(len);
1616         ASSERT(shmd->shm_softlockcnt >= npages);
1617         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1618         if (shmd->shm_softlockcnt == 0) {
1619                 /*
1620                  * All SOFTLOCKS are gone. Wakeup any waiting
1621                  * unmappers so they can try again to unmap.
1622                  * Check for waiters first without the mutex
1623                  * held so we don't always grab the mutex on
1624                  * softunlocks.
1625                  */
1626                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1627                         mutex_enter(&seg->s_as->a_contents);
1628                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1629                                 AS_CLRUNMAPWAIT(seg->s_as);
1630                                 cv_broadcast(&seg->s_as->a_cv);
1631                         }
1632                         mutex_exit(&seg->s_as->a_contents);
1633                 }
1634         }
1635 }
1636 
1637 int
1638 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1639 {
1640         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1641         struct shm_data *shmd;
1642         struct anon_map *shm_amp = shmd_arg->shm_amp;
1643         struct spt_data *sptd;
1644         int error = 0;
1645 
1646         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1647 
1648         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1649         if (shmd == NULL)
1650                 return (ENOMEM);
1651 
1652         shmd->shm_sptas = shmd_arg->shm_sptas;
1653         shmd->shm_amp = shm_amp;
1654         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1655 
1656         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1657             NULL, 0, seg->s_size);
1658 
1659         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1660 
1661         seg->s_data = (void *)shmd;
1662         seg->s_ops = &segspt_shmops;
1663         seg->s_szc = shmd->shm_sptseg->s_szc;
1664         sptd = shmd->shm_sptseg->s_data;
1665 
1666         if (sptd->spt_flags & SHM_PAGEABLE) {
1667                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1668                     KM_NOSLEEP)) == NULL) {
1669                         seg->s_data = (void *)NULL;
1670                         kmem_free(shmd, (sizeof (*shmd)));
1671                         return (ENOMEM);
1672                 }
1673                 shmd->shm_lckpgs = 0;
1674                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1675                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1676                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1677                             seg->s_size, seg->s_szc)) != 0) {
1678                                 kmem_free(shmd->shm_vpage,
1679                                     btopr(shm_amp->size));
1680                         }
1681                 }
1682         } else {
1683                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1684                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1685                     seg->s_size, seg->s_szc);
1686         }
1687         if (error) {
1688                 seg->s_szc = 0;
1689                 seg->s_data = (void *)NULL;
1690                 kmem_free(shmd, (sizeof (*shmd)));
1691         } else {
1692                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1693                 shm_amp->refcnt++;
1694                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1695         }
1696         return (error);
1697 }
1698 
1699 int
1700 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1701 {
1702         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1703         int reclaim = 1;
1704 
1705         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1706 retry:
1707         if (shmd->shm_softlockcnt > 0) {
1708                 if (reclaim == 1) {
1709                         segspt_purge(seg);
1710                         reclaim = 0;
1711                         goto retry;
1712                 }
1713                 return (EAGAIN);
1714         }
1715 
1716         if (ssize != seg->s_size) {
1717 #ifdef DEBUG
1718                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1719                     ssize, seg->s_size);
1720 #endif
1721                 return (EINVAL);
1722         }
1723 
1724         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1725             NULL, 0);
1726         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1727 
1728         seg_free(seg);
1729 
1730         return (0);
1731 }
1732 
1733 void
1734 segspt_shmfree(struct seg *seg)
1735 {
1736         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1737         struct anon_map *shm_amp = shmd->shm_amp;
1738 
1739         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1740 
1741         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1742             MC_UNLOCK, NULL, 0);
1743 
1744         /*
1745          * Need to increment refcnt when attaching
1746          * and decrement when detaching because of dup().
1747          */
1748         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1749         shm_amp->refcnt--;
1750         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1751 
1752         if (shmd->shm_vpage) {       /* only for DISM */
1753                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1754                 shmd->shm_vpage = NULL;
1755         }
1756 
1757         /*
1758          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1759          * still working with this segment without holding as lock.
1760          */
1761         ASSERT(shmd->shm_softlockcnt == 0);
1762         mutex_enter(&shmd->shm_segfree_syncmtx);
1763         mutex_destroy(&shmd->shm_segfree_syncmtx);
1764 
1765         kmem_free(shmd, sizeof (*shmd));
1766 }
1767 
1768 /*ARGSUSED*/
1769 int
1770 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1771 {
1772         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1773 
1774         /*
1775          * A shared page table is more than a shared mapping:
1776          *  an individual process sharing the page tables can't change
1777          *  prot because there is only one set of page tables.
1778          *  This will be allowed once private page tables are
1779          *  supported.
1780          */
1781 /* need to return the correct error status? */
1782         return (0);
1783 }
1784 
1785 
1786 faultcode_t
1787 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1788     size_t len, enum fault_type type, enum seg_rw rw)
1789 {
1790         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1791         struct  seg             *sptseg = shmd->shm_sptseg;
1792         struct  as              *curspt = shmd->shm_sptas;
1793         struct  spt_data        *sptd = sptseg->s_data;
1794         pgcnt_t npages;
1795         size_t  size;
1796         caddr_t segspt_addr, shm_addr;
1797         page_t  **ppa;
1798         int     i;
1799         ulong_t an_idx = 0;
1800         int     err = 0;
1801         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1802         size_t  pgsz;
1803         pgcnt_t pgcnt;
1804         caddr_t a;
1805         pgcnt_t pidx;
1806 
1807 #ifdef lint
1808         hat = hat;
1809 #endif
1810         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1811 
1812         /*
1813          * Because of the way spt is implemented
1814          * the realsize of the segment does not have to be
1815          * equal to the segment size itself. The segment size is
1816          * often in multiples of a page size larger than PAGESIZE.
1817          * The realsize is rounded up to the nearest PAGESIZE
1818          * based on what the user requested. This is a bit of
1819          * ugliness that is historical but not easily fixed
1820          * without re-designing the higher levels of ISM.
1821          */
1822         ASSERT(addr >= seg->s_base);
1823         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1824                 return (FC_NOMAP);
1825         /*
1826          * For all of the following cases except F_PROT, we need to
1827          * make any necessary adjustments to addr and len
1828          * and get all of the necessary page_t's into an array called ppa[].
1829          *
1830          * The code in shmat() forces base addr and len of ISM segment
1831          * to be aligned to largest page size supported. Therefore,
1832          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1833          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1834          * in large pagesize chunks, or else we will screw up the HAT
1835          * layer by calling hat_memload_array() with differing page sizes
1836          * over a given virtual range.
1837          */
1838         pgsz = page_get_pagesize(sptseg->s_szc);
1839         pgcnt = page_get_pagecnt(sptseg->s_szc);
1840         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1841         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1842         npages = btopr(size);
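
        /*
         * For illustration (hypothetical numbers, assuming a 4M large
         * page, i.e. pgsz == 0x400000): a fault at addr == seg->s_base +
         * 0x401000 with len == 0x2000 is widened to shm_addr == seg->s_base +
         * 0x400000 and size == 0x400000, so the whole large page is faulted
         * in and loaded with a single hat_memload_array() call.
         */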
1843 
1844         /*
1845          * Now we need to convert from addr in segshm to addr in segspt.
1846          */
1847         an_idx = seg_page(seg, shm_addr);
1848         segspt_addr = sptseg->s_base + ptob(an_idx);
1849 
1850         ASSERT((segspt_addr + ptob(npages)) <=
1851             (sptseg->s_base + sptd->spt_realsize));
1852         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1853 
1854         switch (type) {
1855 
1856         case F_SOFTLOCK:
1857 
1858                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1859                 /*
1860                  * Fall through to the F_INVAL case to load up the hat layer
1861                  * entries with the HAT_LOAD_LOCK flag.
1862                  */
1863                 /* FALLTHRU */
1864         case F_INVAL:
1865 
1866                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1867                         return (FC_NOMAP);
1868 
1869                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1870 
1871                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1872                 if (err != 0) {
1873                         if (type == F_SOFTLOCK) {
1874                                 atomic_add_long((ulong_t *)(
1875                                     &(shmd->shm_softlockcnt)), -npages);
1876                         }
1877                         goto dism_err;
1878                 }
1879                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1880                 a = segspt_addr;
1881                 pidx = 0;
1882                 if (type == F_SOFTLOCK) {
1883 
1884                         /*
1885                          * Load up the translation keeping it
1886                          * locked and don't unlock the page.
1887                          */
1888                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1889                                 hat_memload_array(sptseg->s_as->a_hat,
1890                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1891                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1892                         }
1893                 } else {
1894                         /*
1895                          * Migrate pages marked for migration
1896                          */
1897                         if (lgrp_optimizations())
1898                                 page_migrate(seg, shm_addr, ppa, npages);
1899 
1900                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1901                                 hat_memload_array(sptseg->s_as->a_hat,
1902                                     a, pgsz, &ppa[pidx],
1903                                     sptd->spt_prot,
1904                                     HAT_LOAD_SHARE);
1905                         }
1906 
1907                         /*
1908                          * And now drop the SE_SHARED lock(s).
1909                          */
1910                         if (dyn_ism_unmap) {
1911                                 for (i = 0; i < npages; i++) {
1912                                         page_unlock(ppa[i]);
1913                                 }
1914                         }
1915                 }
1916 
1917                 if (!dyn_ism_unmap) {
1918                         if (hat_share(seg->s_as->a_hat, shm_addr,
1919                             curspt->a_hat, segspt_addr, ptob(npages),
1920                             seg->s_szc) != 0) {
1921                                 panic("hat_share err in DISM fault");
1922                                 /* NOTREACHED */
1923                         }
1924                         if (type == F_INVAL) {
1925                                 for (i = 0; i < npages; i++) {
1926                                         page_unlock(ppa[i]);
1927                                 }
1928                         }
1929                 }
1930                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1931 dism_err:
1932                 kmem_free(ppa, npages * sizeof (page_t *));
1933                 return (err);
1934 
1935         case F_SOFTUNLOCK:
1936 
1937                 /*
1938                  * This is a bit ugly: we pass in the real seg pointer,
1939                  * but the segspt_addr is the virtual address within the
1940                  * dummy seg.
1941                  */
1942                 segspt_softunlock(seg, segspt_addr, size, rw);
1943                 return (0);
1944 
1945         case F_PROT:
1946 
1947                 /*
1948                  * This takes care of the unusual case where a user
1949                  * allocates a stack in shared memory and a register
1950                  * window overflow is written to that stack page before
1951                  * it is otherwise modified.
1952                  *
1953                  * We can get away with this because ISM segments are
1954                  * always rw. Other than this unusual case, there
1955                  * should be no instances of protection violations.
1956                  */
1957                 return (0);
1958 
1959         default:
1960 #ifdef DEBUG
1961                 panic("segspt_dismfault default type?");
1962 #else
1963                 return (FC_NOMAP);
1964 #endif
1965         }
1966 }
1967 
1968 
1969 faultcode_t
1970 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1971     size_t len, enum fault_type type, enum seg_rw rw)
1972 {
1973         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
1974         struct seg              *sptseg = shmd->shm_sptseg;
1975         struct as               *curspt = shmd->shm_sptas;
1976         struct spt_data         *sptd   = sptseg->s_data;
1977         pgcnt_t npages;
1978         size_t size;
1979         caddr_t sptseg_addr, shm_addr;
1980         page_t *pp, **ppa;
1981         int     i;
1982         u_offset_t offset;
1983         ulong_t anon_index = 0;
1984         struct vnode *vp;
1985         struct anon_map *amp;           /* XXX - for locknest */
1986         struct anon *ap = NULL;
1987         size_t          pgsz;
1988         pgcnt_t         pgcnt;
1989         caddr_t         a;
1990         pgcnt_t         pidx;
1991         size_t          sz;
1992 
1993 #ifdef lint
1994         hat = hat;
1995 #endif
1996 
1997         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1998 
1999         if (sptd->spt_flags & SHM_PAGEABLE) {
2000                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2001         }
2002 
2003         /*
2004          * Because of the way spt is implemented
2005          * the realsize of the segment does not have to be
2006          * equal to the segment size itself. The segment size is
2007          * often in multiples of a page size larger than PAGESIZE.
2008          * The realsize is rounded up to the nearest PAGESIZE
2009          * based on what the user requested. This is a bit of
2010          * ugliness that is historical but not easily fixed
2011          * without re-designing the higher levels of ISM.
2012          */
2013         ASSERT(addr >= seg->s_base);
2014         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2015                 return (FC_NOMAP);
2016         /*
2017          * For all of the following cases except F_PROT, we need to
2018          * make any necessary adjustments to addr and len
2019          * and get all of the necessary page_t's into an array called ppa[].
2020          *
2021          * The code in shmat() forces base addr and len of ISM segment
2022          * to be aligned to largest page size supported. Therefore,
2023          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2024          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2025          * in large pagesize chunks, or else we will screw up the HAT
2026          * layer by calling hat_memload_array() with differing page sizes
2027          * over a given virtual range.
2028          */
2029         pgsz = page_get_pagesize(sptseg->s_szc);
2030         pgcnt = page_get_pagecnt(sptseg->s_szc);
2031         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2032         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2033         npages = btopr(size);
2034 
2035         /*
2036          * Now we need to convert from addr in segshm to addr in segspt.
2037          */
2038         anon_index = seg_page(seg, shm_addr);
2039         sptseg_addr = sptseg->s_base + ptob(anon_index);
2040 
2041         /*
2042          * And now we may have to adjust npages downward if we have
2043          * exceeded the realsize of the segment or initial anon
2044          * allocations.
2045          */
2046         if ((sptseg_addr + ptob(npages)) >
2047             (sptseg->s_base + sptd->spt_realsize))
2048                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2049 
2050         npages = btopr(size);
2051 
2052         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2053         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2054 
2055         switch (type) {
2056 
2057         case F_SOFTLOCK:
2058 
2059                 /*
2060                  * availrmem is decremented once during anon_swap_adjust()
2061                  * and is incremented during the anon_unresv(), which is
2062                  * called from shm_rm_amp() when the segment is destroyed.
2063                  */
2064                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2065                 /*
2066                  * Some platforms assume that ISM pages are SE_SHARED
2067                  * locked for the entire life of the segment.
2068                  */
2069                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2070                         return (0);
2071                 /*
2072                  * Fall through to the F_INVAL case to load up the hat layer
2073                  * entries with the HAT_LOAD_LOCK flag.
2074                  */
2075 
2076                 /* FALLTHRU */
2077         case F_INVAL:
2078 
2079                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2080                         return (FC_NOMAP);
2081 
2082                 /*
2083                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2084                  * may still rely on this call to hat_share(). That
2085                  * would imply that those HATs can fault on a
2086                  * HAT_LOAD_LOCK translation, which would seem
2087                  * contradictory.
2088                  */
2089                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2090                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2091                             curspt->a_hat, sptseg->s_base,
2092                             sptseg->s_size, sptseg->s_szc) != 0) {
2093                                 panic("hat_share error in ISM fault");
2094                                 /*NOTREACHED*/
2095                         }
2096                         return (0);
2097                 }
2098                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2099 
2100                  * I see no need to lock the real seg
2101                  * I see no need to lock the real seg,
2102                  * here, because all of our work will be on the underlying
2103                  * dummy seg.
2104                  *
2105                  * sptseg_addr and npages now account for large pages.
2106                  */
2107                 amp = sptd->spt_amp;
2108                 ASSERT(amp != NULL);
2109                 anon_index = seg_page(sptseg, sptseg_addr);
2110 
2111                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2112                 for (i = 0; i < npages; i++) {
2113                         ap = anon_get_ptr(amp->ahp, anon_index++);
2114                         ASSERT(ap != NULL);
2115                         swap_xlate(ap, &vp, &offset);
2116                         pp = page_lookup(vp, offset, SE_SHARED);
2117                         ASSERT(pp != NULL);
2118                         ppa[i] = pp;
2119                 }
2120                 ANON_LOCK_EXIT(&amp->a_rwlock);
2121                 ASSERT(i == npages);
2122 
2123                 /*
2124                  * We are already holding the as->a_lock on the user's
2125                  * real segment, but we need to hold the a_lock on the
2126                  * underlying dummy as. This is mostly to satisfy the
2127                  * underlying HAT layer.
2128                  */
2129                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2130                 a = sptseg_addr;
2131                 pidx = 0;
2132                 if (type == F_SOFTLOCK) {
2133                         /*
2134                          * Load up the translation keeping it
2135                          * locked and don't unlock the page.
2136                          */
2137                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2138                                 sz = MIN(pgsz, ptob(npages - pidx));
2139                                 hat_memload_array(sptseg->s_as->a_hat, a,
2140                                     sz, &ppa[pidx], sptd->spt_prot,
2141                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2142                         }
2143                 } else {
2144                         /*
2145                          * Migrate pages marked for migration.
2146                          */
2147                         if (lgrp_optimizations())
2148                                 page_migrate(seg, shm_addr, ppa, npages);
2149 
2150                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2151                                 sz = MIN(pgsz, ptob(npages - pidx));
2152                                 hat_memload_array(sptseg->s_as->a_hat,
2153                                     a, sz, &ppa[pidx],
2154                                     sptd->spt_prot, HAT_LOAD_SHARE);
2155                         }
2156 
2157                         /*
2158                          * And now drop the SE_SHARED lock(s).
2159                          */
2160                         for (i = 0; i < npages; i++)
2161                                 page_unlock(ppa[i]);
2162                 }
2163                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2164 
2165                 kmem_free(ppa, sizeof (page_t *) * npages);
2166                 return (0);
2167         case F_SOFTUNLOCK:
2168 
2169                 /*
2170                  * This is a bit ugly: we pass in the real seg pointer,
2171                  * but the sptseg_addr is the virtual address within the
2172                  * dummy seg.
2173                  */
2174                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2175                 return (0);
2176 
2177         case F_PROT:
2178 
2179                 /*
2180                  * This takes care of the unusual case where a user
2181                  * allocates a stack in shared memory and a register
2182                  * window overflow is written to that stack page before
2183                  * it is otherwise modified.
2184                  *
2185                  * We can get away with this because ISM segments are
2186                  * always rw. Other than this unusual case, there
2187                  * should be no instances of protection violations.
2188                  */
2189                 return (0);
2190 
2191         default:
2192 #ifdef DEBUG
2193                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2194 #endif
2195                 return (FC_NOMAP);
2196         }
2197 }
2198 
2199 /*ARGSUSED*/
2200 static faultcode_t
2201 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2202 {
2203         return (0);
2204 }
2205 
2206 /*ARGSUSED*/
2207 static int
2208 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2209 {
2210         return (0);
2211 }
2212 
2213 /*
2214  * duplicate the shared page tables
2215  */
2216 int
2217 segspt_shmdup(struct seg *seg, struct seg *newseg)
2218 {
2219         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2220         struct anon_map         *amp = shmd->shm_amp;
2221         struct shm_data         *shmd_new;
2222         struct seg              *spt_seg = shmd->shm_sptseg;
2223         struct spt_data         *sptd = spt_seg->s_data;
2224         int                     error = 0;
2225 
2226         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2227 
2228         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2229         newseg->s_data = (void *)shmd_new;
2230         shmd_new->shm_sptas = shmd->shm_sptas;
2231         shmd_new->shm_amp = amp;
2232         shmd_new->shm_sptseg = shmd->shm_sptseg;
2233         newseg->s_ops = &segspt_shmops;
2234         newseg->s_szc = seg->s_szc;
2235         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2236 
2237         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2238         amp->refcnt++;
2239         ANON_LOCK_EXIT(&amp->a_rwlock);
2240 
2241         if (sptd->spt_flags & SHM_PAGEABLE) {
2242                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2243                 shmd_new->shm_lckpgs = 0;
2244                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2245                         if ((error = hat_share(newseg->s_as->a_hat,
2246                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2247                             seg->s_size, seg->s_szc)) != 0) {
2248                                 kmem_free(shmd_new->shm_vpage,
2249                                     btopr(amp->size));
2250                         }
2251                 }
2252                 return (error);
2253         } else {
2254                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2255                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2256                     seg->s_szc));
2257 
2258         }
2259 }
2260 
2261 /*ARGSUSED*/
2262 int
2263 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2264 {
2265         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2266         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2267 
2268         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2269 
2270         /*
2271          * ISM segment is always rw.
2272          */
2273         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2274 }
2275 
2276 /*
2277  * Return an array of locked large pages; for empty slots allocate
2278  * private zero-filled anon pages.
2279  */
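/*
 * As used above in segspt_dismfault() and below in segspt_shmlockop(),
 * callers size ppa[] for btop(len) pages and must page_unlock() the
 * SE_SHARED pages returned here once they are done with them.
 */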
2280 static int
2281 spt_anon_getpages(
2282         struct seg *sptseg,
2283         caddr_t sptaddr,
2284         size_t len,
2285         page_t *ppa[])
2286 {
2287         struct  spt_data *sptd = sptseg->s_data;
2288         struct  anon_map *amp = sptd->spt_amp;
2289         enum    seg_rw rw = sptd->spt_prot;
2290         uint_t  szc = sptseg->s_szc;
2291         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2292         pgcnt_t lp_npgs;
2293         caddr_t lp_addr, e_sptaddr;
2294         uint_t  vpprot, ppa_szc = 0;
2295         struct  vpage *vpage = NULL;
2296         ulong_t j, ppa_idx;
2297         int     err, ierr = 0;
2298         pgcnt_t an_idx;
2299         anon_sync_obj_t cookie;
2300         int anon_locked = 0;
2301         pgcnt_t amp_pgs;
2302 
2303 
2304         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2305         ASSERT(len != 0);
2306 
2307         pg_sz = share_sz;
2308         lp_npgs = btop(pg_sz);
2309         lp_addr = sptaddr;
2310         e_sptaddr = sptaddr + len;
2311         an_idx = seg_page(sptseg, sptaddr);
2312         ppa_idx = 0;
2313 
2314         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2315 
2316         amp_pgs = page_get_pagecnt(amp->a_szc);
2317 
2318         /*CONSTCOND*/
2319         while (1) {
2320                 for (; lp_addr < e_sptaddr;
2321                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2322 
2323                         /*
2324                          * If we're currently locked, and we get to a new
2325                          * page, unlock our current anon chunk.
2326                          */
2327                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2328                                 anon_array_exit(&cookie);
2329                                 anon_locked = 0;
2330                         }
2331                         if (!anon_locked) {
2332                                 anon_array_enter(amp, an_idx, &cookie);
2333                                 anon_locked = 1;
2334                         }
2335                         ppa_szc = (uint_t)-1;
2336                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2337                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2338                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2339 
2340                         if (ierr != 0) {
2341                                 if (ierr > 0) {
2342                                         err = FC_MAKE_ERR(ierr);
2343                                         goto lpgs_err;
2344                                 }
2345                                 break;
2346                         }
2347                 }
2348                 if (lp_addr == e_sptaddr) {
2349                         break;
2350                 }
2351                 ASSERT(lp_addr < e_sptaddr);
2352 
2353                 /*
2354                  * ierr == -1 means we failed to allocate a large page.
2355                  * So do a size down operation.
2356                  *
2357                  * ierr == -2 means some other process that privately shares
2358                  * pages with this process has allocated a larger page and we
2359                  * need to retry with larger pages. So do a size up
2360                  * operation. This relies on the fact that large pages are
2361                  * never partially shared i.e. if we share any constituent
2362                  * page of a large page with another process we must share the
2363                  * entire large page. Note this cannot happen in the SOFTLOCK
2364                  * case, unless the current address (lp_addr) is at the start
2365                  * of the next page size boundary because the other process
2366                  * couldn't have relocated locked pages.
2367                  */
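                /*
                 * For illustration (hypothetical szc values): with
                 * segvn_anypgsz != 0 and szc == 2, ierr == -1 retries the
                 * loop with szc == 1, while ierr == -2 retries with szc == 3;
                 * the ASSERTs below keep the result within [0, sptseg->s_szc].
                 */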
2368                 ASSERT(ierr == -1 || ierr == -2);
2369                 if (segvn_anypgsz) {
2370                         ASSERT(ierr == -2 || szc != 0);
2371                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2372                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2373                 } else {
2374                         /*
2375                          * For faults and segvn_anypgsz == 0
2376                          * we need to be careful not to loop forever
2377                          * if existing page is found with szc other
2378                          * than 0 or seg->s_szc. This could be due
2379                          * to page relocations on behalf of DR or
2380                          * more likely large page creation. For this
2381                          * case simply re-size to existing page's szc
2382                          * if returned by anon_map_getpages().
2383                          */
2384                         if (ppa_szc == (uint_t)-1) {
2385                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2386                         } else {
2387                                 ASSERT(ppa_szc <= sptseg->s_szc);
2388                                 ASSERT(ierr == -2 || ppa_szc < szc);
2389                                 ASSERT(ierr == -1 || ppa_szc > szc);
2390                                 szc = ppa_szc;
2391                         }
2392                 }
2393                 pg_sz = page_get_pagesize(szc);
2394                 lp_npgs = btop(pg_sz);
2395                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2396         }
2397         if (anon_locked) {
2398                 anon_array_exit(&cookie);
2399         }
2400         ANON_LOCK_EXIT(&amp->a_rwlock);
2401         return (0);
2402 
2403 lpgs_err:
2404         if (anon_locked) {
2405                 anon_array_exit(&cookie);
2406         }
2407         ANON_LOCK_EXIT(&amp->a_rwlock);
2408         for (j = 0; j < ppa_idx; j++)
2409                 page_unlock(ppa[j]);
2410         return (err);
2411 }
2412 
2413 /*
2414  * count the number of bytes in a set of spt pages that are currently not
2415  * locked
2416  */
2417 static rctl_qty_t
2418 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2419 {
2420         ulong_t i;
2421         rctl_qty_t unlocked = 0;
2422 
2423         for (i = 0; i < npages; i++) {
2424                 if (ppa[i]->p_lckcnt == 0)
2425                         unlocked += PAGESIZE;
2426         }
2427         return (unlocked);
2428 }
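
/*
 * For example (hypothetical counts): if 3 of 8 pages already have a
 * nonzero p_lckcnt, this returns 5 * PAGESIZE, which segspt_shmlockop()
 * charges against the project's locked-memory rctl before calling
 * spt_lockpages().
 */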
2429 
2430 extern  u_longlong_t randtick(void);
2431 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2432 #define NLCK    (NCPU_P2)
2433 /* Random number with a range [0, n-1], n must be power of two */
2434 #define RAND_P2(n)      \
2435         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
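
/*
 * For example (assuming NCPU_P2 == 16): RAND_P2(NLCK) yields a value in
 * [0, 15], so the NLCK + RAND_P2(NLCK) batch sizes used by spt_lockpages()
 * and spt_unlockpages() fall in [16, 31], staggering the points at which
 * competing threads take freemem_lock.
 */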
2436 
2437 int
2438 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2439     page_t **ppa, ulong_t *lockmap, size_t pos,
2440     rctl_qty_t *locked)
2441 {
2442         struct  shm_data *shmd = seg->s_data;
2443         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2444         ulong_t i;
2445         int     kernel;
2446         pgcnt_t nlck = 0;
2447         int     rv = 0;
2448         int     use_reserved = 1;
2449 
2450         /* return the number of bytes actually locked */
2451         *locked = 0;
2452 
2453         /*
2454          * To avoid contention on freemem_lock, availrmem and pages_locked
2455          * global counters are updated only every nlck locked pages instead of
2456          * every time.  Reserve nlck locks up front and deduct from this
2457          * reservation for each page that requires a lock.  When the reservation
2458          * is consumed, reserve again.  nlck is randomized, so the competing
2459          * threads do not fall into a cyclic lock contention pattern. When
2460          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2461          * is used to lock pages.
2462          */
2463         for (i = 0; i < npages; anon_index++, pos++, i++) {
2464                 if (nlck == 0 && use_reserved == 1) {
2465                         nlck = NLCK + RAND_P2(NLCK);
2466                         /* if fewer loops left, decrease nlck */
2467                         nlck = MIN(nlck, npages - i);
2468                         /*
2469                          * Reserve nlck locks up front and deduct from this
2470                          * reservation for each page that requires a lock.  When
2471                          * the reservation is consumed, reserve again.
2472                          */
2473                         mutex_enter(&freemem_lock);
2474                         if ((availrmem - nlck) < pages_pp_maximum) {
2475                                 /* Do not do advance memory reserves */
2476                                 use_reserved = 0;
2477                         } else {
2478                                 availrmem       -= nlck;
2479                                 pages_locked    += nlck;
2480                         }
2481                         mutex_exit(&freemem_lock);
2482                 }
2483                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2484                         if (sptd->spt_ppa_lckcnt[anon_index] <
2485                             (ushort_t)DISM_LOCK_MAX) {
2486                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2487                                     (ushort_t)DISM_LOCK_MAX) {
2488                                         cmn_err(CE_WARN,
2489                                             "DISM page lock limit "
2490                                             "reached on DISM offset 0x%lx\n",
2491                                             anon_index << PAGESHIFT);
2492                                 }
2493                                 kernel = (sptd->spt_ppa &&
2494                                     sptd->spt_ppa[anon_index]);
2495                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2496                                     use_reserved)) {
2497                                         sptd->spt_ppa_lckcnt[anon_index]--;
2498                                         rv = EAGAIN;
2499                                         break;
2500                                 }
2501                                 /* if this is a newly locked page, count it */
2502                                 if (ppa[i]->p_lckcnt == 1) {
2503                                         if (kernel == 0 && use_reserved == 1)
2504                                                 nlck--;
2505                                         *locked += PAGESIZE;
2506                                 }
2507                                 shmd->shm_lckpgs++;
2508                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2509                                 if (lockmap != NULL)
2510                                         BT_SET(lockmap, pos);
2511                         }
2512                 }
2513         }
2514         /* Return unused lock reservation */
2515         if (nlck != 0 && use_reserved == 1) {
2516                 mutex_enter(&freemem_lock);
2517                 availrmem       += nlck;
2518                 pages_locked    -= nlck;
2519                 mutex_exit(&freemem_lock);
2520         }
2521 
2522         return (rv);
2523 }
2524 
2525 int
2526 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2527     rctl_qty_t *unlocked)
2528 {
2529         struct shm_data *shmd = seg->s_data;
2530         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2531         struct anon_map *amp = sptd->spt_amp;
2532         struct anon     *ap;
2533         struct vnode    *vp;
2534         u_offset_t      off;
2535         struct page     *pp;
2536         int             kernel;
2537         anon_sync_obj_t cookie;
2538         ulong_t         i;
2539         pgcnt_t         nlck = 0;
2540         pgcnt_t         nlck_limit = NLCK;
2541 
2542         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2543         for (i = 0; i < npages; i++, anon_index++) {
2544                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2545                         anon_array_enter(amp, anon_index, &cookie);
2546                         ap = anon_get_ptr(amp->ahp, anon_index);
2547                         ASSERT(ap);
2548 
2549                         swap_xlate(ap, &vp, &off);
2550                         anon_array_exit(&cookie);
2551                         pp = page_lookup(vp, off, SE_SHARED);
2552                         ASSERT(pp);
2553                         /*
2554                          * availrmem is decremented only for pages which are not
2555                          * in seg pcache; for pages in seg pcache availrmem was
2556                          * already decremented in _dismpagelock().
2557                          */
2558                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2559                         ASSERT(pp->p_lckcnt > 0);
2560 
2561                         /*
2562                          * unlock page but do not change availrmem; we do it
2563                          * ourselves every nlck loops.
2564                          */
2565                         page_pp_unlock(pp, 0, 1);
2566                         if (pp->p_lckcnt == 0) {
2567                                 if (kernel == 0)
2568                                         nlck++;
2569                                 *unlocked += PAGESIZE;
2570                         }
2571                         page_unlock(pp);
2572                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2573                         sptd->spt_ppa_lckcnt[anon_index]--;
2574                         shmd->shm_lckpgs--;
2575                 }
2576 
2577                 /*
2578                  * To reduce freemem_lock contention, do not update availrmem
2579                  * until at least NLCK pages have been unlocked.
2580                  * 1. No need to update if nlck is zero
2581                  * 2. Always update on the last iteration
2582                  */
2583                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2584                         mutex_enter(&freemem_lock);
2585                         availrmem       += nlck;
2586                         pages_locked    -= nlck;
2587                         mutex_exit(&freemem_lock);
2588                         nlck = 0;
2589                         nlck_limit = NLCK + RAND_P2(NLCK);
2590                 }
2591         }
2592         ANON_LOCK_EXIT(&amp->a_rwlock);
2593 
2594         return (0);
2595 }
2596 
2597 /*ARGSUSED*/
2598 static int
2599 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2600     int attr, int op, ulong_t *lockmap, size_t pos)
2601 {
2602         struct shm_data *shmd = seg->s_data;
2603         struct seg      *sptseg = shmd->shm_sptseg;
2604         struct spt_data *sptd = sptseg->s_data;
2605         struct kshmid   *sp = sptd->spt_amp->a_sp;
2606         pgcnt_t         npages, a_npages;
2607         page_t          **ppa;
2608         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2609         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2610         size_t          a_len;                  /* aligned len */
2611         size_t          share_sz;
2612         ulong_t         i;
2613         int             sts = 0;
2614         rctl_qty_t      unlocked = 0;
2615         rctl_qty_t      locked = 0;
2616         struct proc     *p = curproc;
2617         kproject_t      *proj;
2618 
2619         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2620         ASSERT(sp != NULL);
2621 
2622         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2623                 return (0);
2624         }
2625 
2626         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2627         an_idx = seg_page(seg, addr);
2628         npages = btopr(len);
2629 
2630         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2631                 return (ENOMEM);
2632         }
2633 
2634         /*
2635          * A shm's project never changes, so no lock needed.
2636          * The shm has a hold on the project, so it will not go away.
2637          * Since we have a mapping to shm within this zone, we know
2638          * that the zone will not go away.
2639          */
2640         proj = sp->shm_perm.ipc_proj;
2641 
2642         if (op == MC_LOCK) {
2643 
2644                 /*
2645                  * Need to align the addr and size request if they are not
2646                  * aligned so we can always allocate large page(s); however,
2647                  * we only lock what was requested in the initial request.
2648                  */
2649                 share_sz = page_get_pagesize(sptseg->s_szc);
2650                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2651                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2652                     share_sz);
2653                 a_npages = btop(a_len);
2654                 a_an_idx = seg_page(seg, a_addr);
2655                 spt_addr = sptseg->s_base + ptob(a_an_idx);
2656                 ppa_idx = an_idx - a_an_idx;
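                      /*
                       * ppa_idx is the offset of the originally requested
                       * range within the aligned page array; only the npages
                       * entries starting at ppa[ppa_idx] are locked below.
                       */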
2657 
2658                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2659                     KM_NOSLEEP)) == NULL) {
2660                         return (ENOMEM);
2661                 }
2662 
2663                 /*
2664                  * Don't cache any new pages for IO and
2665                  * flush any cached pages.
2666                  */
2667                 mutex_enter(&sptd->spt_lock);
2668                 if (sptd->spt_ppa != NULL)
2669                         sptd->spt_flags |= DISM_PPA_CHANGED;
2670 
2671                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2672                 if (sts != 0) {
2673                         mutex_exit(&sptd->spt_lock);
2674                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2675                         return (sts);
2676                 }
2677 
2678                 mutex_enter(&sp->shm_mlock);
2679                 /* enforce locked memory rctl */
2680                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2681 
2682                 mutex_enter(&p->p_lock);
2683                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2684                         mutex_exit(&p->p_lock);
2685                         sts = EAGAIN;
2686                 } else {
2687                         mutex_exit(&p->p_lock);
2688                         sts = spt_lockpages(seg, an_idx, npages,
2689                             &ppa[ppa_idx], lockmap, pos, &locked);
2690 
2691                         /*
2692                          * correct locked count if not all pages could be
2693                          * locked
2694                          */
2695                         if ((unlocked - locked) > 0) {
2696                                 rctl_decr_locked_mem(NULL, proj,
2697                                     (unlocked - locked), 0);
2698                         }
2699                 }
2700                 /*
2701                  * unlock pages
2702                  */
2703                 for (i = 0; i < a_npages; i++)
2704                         page_unlock(ppa[i]);
2705                 if (sptd->spt_ppa != NULL)
2706                         sptd->spt_flags |= DISM_PPA_CHANGED;
2707                 mutex_exit(&sp->shm_mlock);
2708                 mutex_exit(&sptd->spt_lock);
2709 
2710                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2711 
2712         } else if (op == MC_UNLOCK) { /* unlock */
2713                 page_t          **ppa;
2714 
2715                 mutex_enter(&sptd->spt_lock);
2716                 if (shmd->shm_lckpgs == 0) {
2717                         mutex_exit(&sptd->spt_lock);
2718                         return (0);
2719                 }
2720                 /*
2721                  * Don't cache new IO pages.
2722                  */
2723                 if (sptd->spt_ppa != NULL)
2724                         sptd->spt_flags |= DISM_PPA_CHANGED;
2725 
2726                 mutex_enter(&sp->shm_mlock);
2727                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2728                 if ((ppa = sptd->spt_ppa) != NULL)
2729                         sptd->spt_flags |= DISM_PPA_CHANGED;
2730                 mutex_exit(&sptd->spt_lock);
2731 
2732                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2733                 mutex_exit(&sp->shm_mlock);
2734 
2735                 if (ppa != NULL)
2736                         seg_ppurge_wiredpp(ppa);
2737         }
2738         return (sts);
2739 }
2740 
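     /*
      * Return the protection bits for each page in the given range.  The
      * spt segment keeps a single protection value for the whole segment,
      * so every entry in protv gets sptd->spt_prot.
      */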
2741 /*ARGSUSED*/
2742 int
2743 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2744 {
2745         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2746         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2747         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2748 
2749         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2750 
2751         /*
2752          * ISM segment is always rw.
2753          */
2754         while (--pgno >= 0)
2755                 *protv++ = sptd->spt_prot;
2756         return (0);
2757 }
2758 
2759 /*ARGSUSED*/
2760 u_offset_t
2761 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2762 {
2763         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2764 
2765         /* Offset does not matter in ISM memory */
2766 
2767         return ((u_offset_t)0);
2768 }
2769 
2770 /* ARGSUSED */
2771 int
2772 segspt_shmgettype(struct seg *seg, caddr_t addr)
2773 {
2774         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2775         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2776 
2777         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2778 
2779         /*
2780          * The shared memory mapping is always MAP_SHARED; swap is only
2781          * reserved for DISM, so ISM segments are mapped MAP_NORESERVE.
2782          */
2783         return (MAP_SHARED |
2784             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2785 }
2786 
2787 /*ARGSUSED*/
2788 int
2789 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2790 {
2791         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2792         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2793 
2794         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2795 
2796         *vpp = sptd->spt_vp;
2797         return (0);
2798 }
2799 
2800 /*
2801  * We need to wait for pending IO to complete to a DISM segment in order for
2802  * pages to get kicked out of the seg_pcache.  120 seconds should be more
2803  * than enough time to wait.
2804  */
2805 static clock_t spt_pcache_wait = 120;
2806 
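     /*
      * Advise the kernel how a shared memory range will be used.
      * MADV_FREE disclaims the backing anon pages of a DISM segment,
      * first purging any pages cached in seg_pcache.  The MADV_ACCESS_*
      * hints set the lgroup memory allocation policy for the range and
      * mark existing pages for migration.  All other advice is ignored.
      */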
2807 /*ARGSUSED*/
2808 static int
2809 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2810 {
2811         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2812         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2813         struct anon_map *amp;
2814         pgcnt_t pg_idx;
2815         ushort_t gen;
2816         clock_t end_lbolt;
2817         int writer;
2818         page_t **ppa;
2819 
2820         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2821 
2822         if (behav == MADV_FREE) {
2823                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2824                         return (0);
2825 
2826                 amp = sptd->spt_amp;
2827                 pg_idx = seg_page(seg, addr);
2828 
2829                 mutex_enter(&sptd->spt_lock);
2830                 if ((ppa = sptd->spt_ppa) == NULL) {
2831                         mutex_exit(&sptd->spt_lock);
2832                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2833                         anon_disclaim(amp, pg_idx, len);
2834                         ANON_LOCK_EXIT(&amp->a_rwlock);
2835                         return (0);
2836                 }
2837 
2838                 sptd->spt_flags |= DISM_PPA_CHANGED;
2839                 gen = sptd->spt_gen;
2840 
2841                 mutex_exit(&sptd->spt_lock);
2842 
2843                 /*
2844                  * Purge all DISM cached pages
2845                  */
2846                 seg_ppurge_wiredpp(ppa);
2847 
2848                 /*
2849                  * Drop the AS_LOCK so that other threads can grab it
2850                  * in the as_pageunlock path and hopefully get the segment
2851                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2852                  * to keep this segment resident.
2853                  */
2854                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2855                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2856                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2857 
2858                 mutex_enter(&sptd->spt_lock);
2859 
2860                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2861 
2862                 /*
2863                  * Try to wait for pages to get kicked out of the seg_pcache.
2864                  */
2865                 while (sptd->spt_gen == gen &&
2866                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2867                     ddi_get_lbolt() < end_lbolt) {
2868                         if (!cv_timedwait_sig(&sptd->spt_cv,
2869                             &sptd->spt_lock, end_lbolt)) {
2870                                 break;
2871                         }
2872                 }
2873 
2874                 mutex_exit(&sptd->spt_lock);
2875 
2876                 /* Regrab the AS_LOCK and release our hold on the segment */
2877                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2878                     writer ? RW_WRITER : RW_READER);
2879                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
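                      /*
                       * If we dropped the last softlock hold, wake up anyone
                       * blocked in as_unmap() waiting on this segment.
                       */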
2880                 if (shmd->shm_softlockcnt <= 0) {
2881                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2882                                 mutex_enter(&seg->s_as->a_contents);
2883                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2884                                         AS_CLRUNMAPWAIT(seg->s_as);
2885                                         cv_broadcast(&seg->s_as->a_cv);
2886                                 }
2887                                 mutex_exit(&seg->s_as->a_contents);
2888                         }
2889                 }
2890 
2891                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2892                 anon_disclaim(amp, pg_idx, len);
2893                 ANON_LOCK_EXIT(&amp->a_rwlock);
2894         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2895             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2896                 int                     already_set;
2897                 ulong_t                 anon_index;
2898                 lgrp_mem_policy_t       policy;
2899                 caddr_t                 shm_addr;
2900                 size_t                  share_size;
2901                 size_t                  size;
2902                 struct seg              *sptseg = shmd->shm_sptseg;
2903                 caddr_t                 sptseg_addr;
2904 
2905                 /*
2906                  * Align address and length to page size of underlying segment
2907                  */
2908                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2909                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2910                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2911                     share_size);
2912 
2913                 amp = shmd->shm_amp;
2914                 anon_index = seg_page(seg, shm_addr);
2915 
2916                 /*
2917                  * And now we may have to adjust size downward if we have
2918                  * exceeded the realsize of the segment or initial anon
2919                  * allocations.
2920                  */
2921                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2922                 if ((sptseg_addr + size) >
2923                     (sptseg->s_base + sptd->spt_realsize))
2924                         size = (sptseg->s_base + sptd->spt_realsize) -
2925                             sptseg_addr;
2926 
2927                 /*
2928                  * Set memory allocation policy for this segment
2929                  */
2930                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2931                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2932                     NULL, 0, len);
2933 
2934                 /*
2935                  * If the policy is already set and is not re-applicable,
2936                  * don't bother reapplying it.
2937                  */
2938                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2939                         return (0);
2940 
2941                 /*
2942                  * Mark any existing pages in the given range for
2943                  * migration, flushing the I/O page cache and using the
2944                  * underlying segment to calculate the anon index and to
2945                  * look up the anon map and vnode pointer.
2946                  */
2947                 if (shmd->shm_softlockcnt > 0)
2948                         segspt_purge(seg);
2949 
2950                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2951         }
2952 
2953         return (0);
2954 }
2955 
2956 /*ARGSUSED*/
2957 void
2958 segspt_shmdump(struct seg *seg)
2959 {
2960         /* no-op for ISM segment */
2961 }
2962 
2963 /*ARGSUSED*/
2964 static int
2965 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2966 {
2967         return (ENOTSUP);
2968 }
2969 
2970 /*
2971  * get a memory ID for an addr in a given segment
2972  */
2973 static int
2974 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2975 {
2976         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2977         struct anon     *ap;
2978         size_t          anon_index;
2979         struct anon_map *amp = shmd->shm_amp;
2980         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2981         struct seg      *sptseg = shmd->shm_sptseg;
2982         anon_sync_obj_t cookie;
2983 
2984         anon_index = seg_page(seg, addr);
2985 
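              /*
               * Fail if the address lies beyond the portion of the spt
               * segment that has actually been created (spt_realsize).
               */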
2986         if (addr > (seg->s_base + sptd->spt_realsize)) {
2987                 return (EFAULT);
2988         }
2989 
2990         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2991         anon_array_enter(amp, anon_index, &cookie);
2992         ap = anon_get_ptr(amp->ahp, anon_index);
2993         if (ap == NULL) {
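                      /*
                       * No anon slot exists for this page yet; allocate a
                       * zero-filled anon page so there is a stable anon
                       * pointer to use as the memory ID.
                       */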
2994                 struct page *pp;
2995                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2996 
2997                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2998                 if (pp == NULL) {
2999                         anon_array_exit(&cookie);
3000                         ANON_LOCK_EXIT(&amp->a_rwlock);
3001                         return (ENOMEM);
3002                 }
3003                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3004                 page_unlock(pp);
3005         }
3006         anon_array_exit(&cookie);
3007         ANON_LOCK_EXIT(&amp->a_rwlock);
3008         memidp->val[0] = (uintptr_t)ap;
3009         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3010         return (0);
3011 }
3012 
3013 /*
3014  * Get memory allocation policy info for specified address in given segment
3015  */
3016 static lgrp_mem_policy_info_t *
3017 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3018 {
3019         struct anon_map         *amp;
3020         ulong_t                 anon_index;
3021         lgrp_mem_policy_info_t  *policy_info;
3022         struct shm_data         *shm_data;
3023 
3024         ASSERT(seg != NULL);
3025 
3026         /*
3027          * Get anon_map from segshm.
3028          *
3029          * Assume that no lock needs to be held on the anon_map, since
3030          * it should be protected by its reference count, which must be
3031          * nonzero for an existing segment.  We do, however, need to
3032          * grab the readers lock on the policy tree.
3033          */
3034         shm_data = (struct shm_data *)seg->s_data;
3035         if (shm_data == NULL)
3036                 return (NULL);
3037         amp = shm_data->shm_amp;
3038         ASSERT(amp->refcnt != 0);
3039 
3040         /*
3041          * Get policy info
3042          *
3043          * Assume starting anon index of 0
3044          */
3045         anon_index = seg_page(seg, addr);
3046         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3047 
3048         return (policy_info);
3049 }
3050 
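     /*
      * ISM/DISM segments advertise no optional segment capabilities.
      */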
3051 /*ARGSUSED*/
3052 static int
3053 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3054 {
3055         return (0);
3056 }