1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
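     /*
      * Fixed base address at which every SPT segment is mapped within its
      * own "dummy" address space (see sptcreate() and sptdestroy() below).
      */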
  53 #define SEGSPTADDR      (caddr_t)0x0
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
  60 /*
  61  * segspt_minfree is the memory left for the system after ISM
  62  * locked its pages; it is set to 5% of availrmem in
  63  * sptcreate when ISM is created.  ISM should not use more
  64  * than ~90% of availrmem; if it does, then the performance
  65  * of the system may decrease. Machines with large memories may
  66  * be able to use up more memory for ISM so we set the default
  67  * segspt_minfree to 5% (which gives ISM a max of 95% of availrmem).
  68  * If somebody wants even more memory for ISM (risking hanging
  69  * the system) they can patch segspt_minfree to a smaller number.
  70  */
  71 pgcnt_t segspt_minfree = 0;
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
  79 struct seg_ops segspt_ops = {
  80         .unmap          = segspt_unmap,
  81         .free           = segspt_free,
  82         .getpolicy      = segspt_getpolicy,
  83 };
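     /*
      * segspt_ops (above) is installed on the underlying SPT ("dummy")
      * segment built by segspt_create().  The per-process shared memory
      * segments that attach to it use segspt_shmops (below) and reach the
      * SPT segment through their shm_data->shm_sptseg pointer.
      */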
  84 
  85 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
  86 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
  87 static void segspt_shmfree(struct seg *seg);
  88 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
  89                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
  90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
  91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
  92                         register size_t len, register uint_t prot);
  93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
  94                         uint_t prot);
  95 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
  96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
  97                         register char *vec);
  98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
  99                         int attr, uint_t flags);
 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 101                         int attr, int op, ulong_t *lockmap, size_t pos);
 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 103                         uint_t *protv);
 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 108                         uint_t behav);
 109 static void segspt_shmdump(struct seg *seg);
 110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 111                         struct page ***, enum lock_type, enum seg_rw);
 112 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
 113 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 114 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 115 
 116 struct seg_ops segspt_shmops = {
 117         .dup            = segspt_shmdup,
 118         .unmap          = segspt_shmunmap,
 119         .free           = segspt_shmfree,
 120         .fault          = segspt_shmfault,
 121         .faulta         = segspt_shmfaulta,
 122         .setprot        = segspt_shmsetprot,
 123         .checkprot      = segspt_shmcheckprot,
 124         .kluster        = segspt_shmkluster,
 125         .sync           = segspt_shmsync,
 126         .incore         = segspt_shmincore,
 127         .lockop         = segspt_shmlockop,
 128         .getprot        = segspt_shmgetprot,
 129         .getoffset      = segspt_shmgetoffset,
 130         .gettype        = segspt_shmgettype,
 131         .getvp          = segspt_shmgetvp,
 132         .advise         = segspt_shmadvise,
 133         .dump           = segspt_shmdump,
 134         .pagelock       = segspt_shmpagelock,
 135         .setpagesize    = segspt_shmsetpgsz,
 136         .getmemid       = segspt_shmgetmemid,
 137         .getpolicy      = segspt_shmgetpolicy,
 138 };
 139 
 140 static void segspt_purge(struct seg *seg);
 141 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 142                 enum seg_rw, int);
 143 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 144                 page_t **ppa);
 145 
 146 
 147 
 148 /*ARGSUSED*/
 149 int
 150 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 151         uint_t prot, uint_t flags, uint_t share_szc)
 152 {
 153         int     err;
 154         struct  as      *newas;
 155         struct  segspt_crargs sptcargs;
 156 
 157 #ifdef DEBUG
 158         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
 159                         tnf_ulong, size, size );
 160 #endif
 161         if (segspt_minfree == 0)        /* leave min 5% of availrmem */
 162                 segspt_minfree = availrmem/20;  /* for the system */
 163 
 164         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 165                 return (EINVAL);
 166 
 167         /*
 168          * get a new as for this shared memory segment
 169          */
 170         newas = as_alloc();
 171         newas->a_proc = NULL;
 172         sptcargs.amp = amp;
 173         sptcargs.prot = prot;
 174         sptcargs.flags = flags;
 175         sptcargs.szc = share_szc;
 176         /*
 177          * create a shared page table (spt) segment
 178          */
 179 
 180         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 181                 as_free(newas);
 182                 return (err);
 183         }
 184         *sptseg = sptcargs.seg_spt;
 185         return (0);
 186 }
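     /*
      * Rough usage sketch (for orientation only; the authoritative callers
      * live in the System V shared memory code): the attach path is expected
      * to call sptcreate() once per ISM/DISM identifier, keep the returned
      * sptseg (and the dummy address space it lives in) associated with that
      * identifier, and call sptdestroy() on the address space when the last
      * reference to the identifier goes away.
      */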
 187 
 188 void
 189 sptdestroy(struct as *as, struct anon_map *amp)
 190 {
 191 
 192 #ifdef DEBUG
 193         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 194 #endif
 195         (void) as_unmap(as, SEGSPTADDR, amp->size);
 196         as_free(as);
 197 }
 198 
 199 /*
 200  * called from seg_free().
 201  * free (i.e., unlock, unmap, return to free list)
 202  *  all the pages in the given seg.
 203  */
 204 void
 205 segspt_free(struct seg  *seg)
 206 {
 207         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 208 
 209         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 210 
 211         if (sptd != NULL) {
 212                 if (sptd->spt_realsize)
 213                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 214 
 215                 if (sptd->spt_ppa_lckcnt)
 216                         kmem_free(sptd->spt_ppa_lckcnt,
 217                             sizeof (*sptd->spt_ppa_lckcnt)
 218                             * btopr(sptd->spt_amp->size));
 219                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 220                 cv_destroy(&sptd->spt_cv);
 221                 mutex_destroy(&sptd->spt_lock);
 222                 kmem_free(sptd, sizeof (*sptd));
 223         }
 224 }
 225 
 226 /*ARGSUSED*/
 227 static int
 228 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 229         uint_t flags)
 230 {
 231         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 232 
 233         return (0);
 234 }
 235 
 236 /*ARGSUSED*/
 237 static size_t
 238 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 239 {
 240         caddr_t eo_seg;
 241         pgcnt_t npages;
 242         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 243         struct seg      *sptseg;
 244         struct spt_data *sptd;
 245 
 246         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 247 #ifdef lint
 248         seg = seg;
 249 #endif
 250         sptseg = shmd->shm_sptseg;
 251         sptd = sptseg->s_data;
 252 
 253         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 254                 eo_seg = addr + len;
 255                 while (addr < eo_seg) {
 256                         /* page exists, and it's locked. */
 257                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 258                             SEG_PAGE_ANON;
 259                         addr += PAGESIZE;
 260                 }
 261                 return (len);
 262         } else {
 263                 struct  anon_map *amp = shmd->shm_amp;
 264                 struct  anon    *ap;
 265                 page_t          *pp;
 266                 pgcnt_t         anon_index;
 267                 struct vnode    *vp;
 268                 u_offset_t      off;
 269                 ulong_t         i;
 270                 int             ret;
 271                 anon_sync_obj_t cookie;
 272 
 273                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 274                 anon_index = seg_page(seg, addr);
 275                 npages = btopr(len);
 276                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 277                         return (EINVAL);
 278                 }
 279                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 280                 for (i = 0; i < npages; i++, anon_index++) {
 281                         ret = 0;
 282                         anon_array_enter(amp, anon_index, &cookie);
 283                         ap = anon_get_ptr(amp->ahp, anon_index);
 284                         if (ap != NULL) {
 285                                 swap_xlate(ap, &vp, &off);
 286                                 anon_array_exit(&cookie);
 287                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 288                                 if (pp != NULL) {
 289                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 290                                         page_unlock(pp);
 291                                 }
 292                         } else {
 293                                 anon_array_exit(&cookie);
 294                         }
 295                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 296                                 ret |= SEG_PAGE_LOCKED;
 297                         }
 298                         *vec++ = (char)ret;
 299                 }
 300                 ANON_LOCK_EXIT(&amp->a_rwlock);
 301                 return (len);
 302         }
 303 }
 304 
 305 static int
 306 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 307 {
 308         size_t share_size;
 309 
 310         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 311 
 312         /*
 313          * seg->s_size may have been rounded up to the largest page size
 314          * in shmat().
 315          * XXX This should be cleaned up. sptdestroy should take a length
 316          * argument which should be the same as sptcreate's. Then
 317          * this rounding would not be needed (or would be done in shm.c)
 318          * and only the check for the full segment would be needed.
 319          *
 320          * XXX -- shouldn't raddr always be 0? These tests don't seem
 321          * to be useful at all.
 322          */
 323         share_size = page_get_pagesize(seg->s_szc);
 324         ssize = P2ROUNDUP(ssize, share_size);
 325 
 326         if (raddr == seg->s_base && ssize == seg->s_size) {
 327                 seg_free(seg);
 328                 return (0);
 329         } else
 330                 return (EINVAL);
 331 }
 332 
 333 int
 334 segspt_create(struct seg *seg, caddr_t argsp)
 335 {
 336         int             err;
 337         caddr_t         addr = seg->s_base;
 338         struct spt_data *sptd;
 339         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 340         struct anon_map *amp = sptcargs->amp;
 341         struct kshmid   *sp = amp->a_sp;
 342         struct  cred    *cred = CRED();
 343         ulong_t         i, j, anon_index = 0;
 344         pgcnt_t         npages = btopr(amp->size);
 345         struct vnode    *vp;
 346         page_t          **ppa;
 347         uint_t          hat_flags;
 348         size_t          pgsz;
 349         pgcnt_t         pgcnt;
 350         caddr_t         a;
 351         pgcnt_t         pidx;
 352         size_t          sz;
 353         proc_t          *procp = curproc;
 354         rctl_qty_t      lockedbytes = 0;
 355         kproject_t      *proj;
 356 
 357         /*
 358          * We are holding the a_lock on the underlying dummy as,
 359          * so we can make calls to the HAT layer.
 360          */
 361         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 362         ASSERT(sp != NULL);
 363 
 364 #ifdef DEBUG
 365         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 366             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 367 #endif
 368         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 369                 if (err = anon_swap_adjust(npages))
 370                         return (err);
 371         }
 372         err = ENOMEM;
 373 
 374         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 375                 goto out1;
 376 
 377         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 378                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 379                     KM_NOSLEEP)) == NULL)
 380                         goto out2;
 381         }
 382 
 383         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 384 
 385         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 386                 goto out3;
 387 
 388         seg->s_ops = &segspt_ops;
 389         sptd->spt_vp = vp;
 390         sptd->spt_amp = amp;
 391         sptd->spt_prot = sptcargs->prot;
 392         sptd->spt_flags = sptcargs->flags;
 393         seg->s_data = (caddr_t)sptd;
 394         sptd->spt_ppa = NULL;
 395         sptd->spt_ppa_lckcnt = NULL;
 396         seg->s_szc = sptcargs->szc;
 397         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 398         sptd->spt_gen = 0;
 399 
 400         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 401         if (seg->s_szc > amp->a_szc) {
 402                 amp->a_szc = seg->s_szc;
 403         }
 404         ANON_LOCK_EXIT(&amp->a_rwlock);
 405 
 406         /*
 407          * Set policy to affect initial allocation of pages in
 408          * anon_map_createpages()
 409          */
 410         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 411             NULL, 0, ptob(npages));
 412 
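             /*
              * For DISM (SHM_PAGEABLE) segments no physical pages are
              * allocated or locked here; we only resize the anon array to a
              * large page boundary if needed and set up the per-page
              * lock-count array.  Pages are created and locked later, on
              * demand.
              */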
 413         if (sptcargs->flags & SHM_PAGEABLE) {
 414                 size_t  share_sz;
 415                 pgcnt_t new_npgs, more_pgs;
 416                 struct anon_hdr *nahp;
 417                 zone_t *zone;
 418 
 419                 share_sz = page_get_pagesize(seg->s_szc);
 420                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
 421                         /*
 422                          * We round the size of the anon array up to a
 423                          * 4 M boundary because we always create 4 M of
 424                          * page(s) when locking and faulting pages, so we
 425                          * don't have to check for all the corner cases,
 426                          * e.g. whether there is enough space to allocate
 427                          * a 4 M page.
 428                          */
 429                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 430                         more_pgs = new_npgs - npages;
 431 
 432                         /*
 433                          * The zone will never be NULL, as a fully created
 434                          * shm always has an owning zone.
 435                          */
 436                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 437                         ASSERT(zone != NULL);
 438                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 439                                 err = ENOMEM;
 440                                 goto out4;
 441                         }
 442 
 443                         nahp = anon_create(new_npgs, ANON_SLEEP);
 444                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 445                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 446                             ANON_SLEEP);
 447                         anon_release(amp->ahp, npages);
 448                         amp->ahp = nahp;
 449                         ASSERT(amp->swresv == ptob(npages));
 450                         amp->swresv = amp->size = ptob(new_npgs);
 451                         ANON_LOCK_EXIT(&amp->a_rwlock);
 452                         npages = new_npgs;
 453                 }
 454 
 455                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 456                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 457                 sptd->spt_pcachecnt = 0;
 458                 sptd->spt_realsize = ptob(npages);
 459                 sptcargs->seg_spt = seg;
 460                 return (0);
 461         }
 462 
 463         /*
 464          * get array of pages for each anon slot in amp
 465          */
 466         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 467             seg, addr, S_CREATE, cred)) != 0)
 468                 goto out4;
 469 
 470         mutex_enter(&sp->shm_mlock);
 471 
 472         /* May be partially locked, so count bytes to charge for locking */
 473         for (i = 0; i < npages; i++)
 474                 if (ppa[i]->p_lckcnt == 0)
 475                         lockedbytes += PAGESIZE;
 476 
 477         proj = sp->shm_perm.ipc_proj;
 478 
 479         if (lockedbytes > 0) {
 480                 mutex_enter(&procp->p_lock);
 481                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 482                         mutex_exit(&procp->p_lock);
 483                         mutex_exit(&sp->shm_mlock);
 484                         for (i = 0; i < npages; i++)
 485                                 page_unlock(ppa[i]);
 486                         err = ENOMEM;
 487                         goto out4;
 488                 }
 489                 mutex_exit(&procp->p_lock);
 490         }
 491 
 492         /*
 493          * addr is the initial address of the first page in the ppa list
 494          */
 495         for (i = 0; i < npages; i++) {
 496                 /* attempt to lock all pages */
 497                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 498                         /*
 499                          * if unable to lock any page, unlock all
 500                          * of them and return error
 501                          */
 502                         for (j = 0; j < i; j++)
 503                                 page_pp_unlock(ppa[j], 0, 1);
 504                         for (i = 0; i < npages; i++)
 505                                 page_unlock(ppa[i]);
 506                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 507                         mutex_exit(&sp->shm_mlock);
 508                         err = ENOMEM;
 509                         goto out4;
 510                 }
 511         }
 512         mutex_exit(&sp->shm_mlock);
 513 
 514         /*
 515          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
 516          * for the entire life of the segment, for example platforms
 517          * that do not support Dynamic Reconfiguration.
 518          */
 519         hat_flags = HAT_LOAD_SHARE;
 520         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 521                 hat_flags |= HAT_LOAD_LOCK;
 522 
 523         /*
 524          * Load translations one large page at a time
 525          * to make sure we don't create mappings bigger than
 526          * segment's size code in case underlying pages
 527          * are shared with segvn's segment that uses bigger
 528          * size code than we do.
 529          */
 530         pgsz = page_get_pagesize(seg->s_szc);
 531         pgcnt = page_get_pagecnt(seg->s_szc);
 532         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 533                 sz = MIN(pgsz, ptob(npages - pidx));
 534                 hat_memload_array(seg->s_as->a_hat, a, sz,
 535                     &ppa[pidx], sptd->spt_prot, hat_flags);
 536         }
 537 
 538         /*
 539          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 540          * we will leave the pages locked SE_SHARED for the life
 541          * of the ISM segment. This will prevent any calls to
 542          * hat_pageunload() on this ISM segment for those platforms.
 543          */
 544         if (!(hat_flags & HAT_LOAD_LOCK)) {
 545                 /*
 546                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 547                  * we no longer need to hold the SE_SHARED lock on the pages,
 548                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 549                  * SE_SHARED lock on the pages as necessary.
 550                  */
 551                 for (i = 0; i < npages; i++)
 552                         page_unlock(ppa[i]);
 553         }
 554         sptd->spt_pcachecnt = 0;
 555         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 556         sptd->spt_realsize = ptob(npages);
 557         atomic_add_long(&spt_used, npages);
 558         sptcargs->seg_spt = seg;
 559         return (0);
 560 
 561 out4:
 562         seg->s_data = NULL;
 563         kmem_free(vp, sizeof (*vp));
 564         cv_destroy(&sptd->spt_cv);
 565 out3:
 566         mutex_destroy(&sptd->spt_lock);
 567         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 568                 kmem_free(ppa, (sizeof (*ppa) * npages));
 569 out2:
 570         kmem_free(sptd, sizeof (*sptd));
 571 out1:
 572         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 573                 anon_swap_restore(npages);
 574         return (err);
 575 }
 576 
 577 /*ARGSUSED*/
 578 void
 579 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 580 {
 581         struct page     *pp;
 582         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 583         pgcnt_t         npages;
 584         ulong_t         anon_idx;
 585         struct anon_map *amp;
 586         struct anon     *ap;
 587         struct vnode    *vp;
 588         u_offset_t      off;
 589         uint_t          hat_flags;
 590         int             root = 0;
 591         pgcnt_t         pgs, curnpgs = 0;
 592         page_t          *rootpp;
 593         rctl_qty_t      unlocked_bytes = 0;
 594         kproject_t      *proj;
 595         kshmid_t        *sp;
 596 
 597         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
 598 
 599         len = P2ROUNDUP(len, PAGESIZE);
 600 
 601         npages = btop(len);
 602 
 603         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 604         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 605             (sptd->spt_flags & SHM_PAGEABLE)) {
 606                 hat_flags = HAT_UNLOAD_UNMAP;
 607         }
 608 
 609         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 610 
 611         amp = sptd->spt_amp;
 612         if (sptd->spt_flags & SHM_PAGEABLE)
 613                 npages = btop(amp->size);
 614 
 615         ASSERT(amp != NULL);
 616 
 617         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 618                 sp = amp->a_sp;
 619                 proj = sp->shm_perm.ipc_proj;
 620                 mutex_enter(&sp->shm_mlock);
 621         }
 622         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 623                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 624                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 625                                 panic("segspt_free_pages: null app");
 626                                 /*NOTREACHED*/
 627                         }
 628                 } else {
 629                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 630                             == NULL)
 631                                 continue;
 632                 }
 633                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 634                 swap_xlate(ap, &vp, &off);
 635 
 636                 /*
 637                  * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
 638                  * the pages will not be holding the SE_SHARED lock at
 639                  * this point.
 640                  *
 641                  * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 642                  * the pages are still held SE_SHARED locked from the
 643                  * original segspt_create()
 644                  *
 645                  * Our goal is to get SE_EXCL lock on each page, remove
 646                  * permanent lock on it and invalidate the page.
 647                  */
 648                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 649                         if (hat_flags == HAT_UNLOAD_UNMAP)
 650                                 pp = page_lookup(vp, off, SE_EXCL);
 651                         else {
 652                                 if ((pp = page_find(vp, off)) == NULL) {
 653                                         panic("segspt_free_pages: "
 654                                             "page not locked");
 655                                         /*NOTREACHED*/
 656                                 }
 657                                 if (!page_tryupgrade(pp)) {
 658                                         page_unlock(pp);
 659                                         pp = page_lookup(vp, off, SE_EXCL);
 660                                 }
 661                         }
 662                         if (pp == NULL) {
 663                                 panic("segspt_free_pages: "
 664                                     "page not in the system");
 665                                 /*NOTREACHED*/
 666                         }
 667                         ASSERT(pp->p_lckcnt > 0);
 668                         page_pp_unlock(pp, 0, 1);
 669                         if (pp->p_lckcnt == 0)
 670                                 unlocked_bytes += PAGESIZE;
 671                 } else {
 672                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 673                                 continue;
 674                 }
 675                 /*
 676                  * It's logical to invalidate the pages here as in most cases
 677                  * these were created by segspt.
 678                  */
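                     /*
                      * Large pages must be destroyed as a unit: remember the
                      * root page when we see the first constituent page and
                      * call page_destroy_pages() only after all of its
                      * constituent pages have been walked.
                      */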
 679                 if (pp->p_szc != 0) {
 680                         if (root == 0) {
 681                                 ASSERT(curnpgs == 0);
 682                                 root = 1;
 683                                 rootpp = pp;
 684                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 685                                 ASSERT(pgs > 1);
 686                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 687                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 688                                 curnpgs--;
 689                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 690                                 ASSERT(curnpgs == 1);
 691                                 ASSERT(page_pptonum(pp) ==
 692                                     page_pptonum(rootpp) + (pgs - 1));
 693                                 page_destroy_pages(rootpp);
 694                                 root = 0;
 695                                 curnpgs = 0;
 696                         } else {
 697                                 ASSERT(curnpgs > 1);
 698                                 ASSERT(page_pptonum(pp) ==
 699                                     page_pptonum(rootpp) + (pgs - curnpgs));
 700                                 curnpgs--;
 701                         }
 702                 } else {
 703                         if (root != 0 || curnpgs != 0) {
 704                                 panic("segspt_free_pages: bad large page");
 705                                 /*NOTREACHED*/
 706                         }
 707                         /*
 708                          * Before destroying the pages, we need to take care
 709                          * of the rctl locked memory accounting. For that
 710                          * we need to calculate the unlocked_bytes.
 711                          */
 712                         if (pp->p_lckcnt > 0)
 713                                 unlocked_bytes += PAGESIZE;
 714                         /*LINTED: constant in conditional context */
 715                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 716                 }
 717         }
 718         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 719                 if (unlocked_bytes > 0)
 720                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 721                 mutex_exit(&sp->shm_mlock);
 722         }
 723         if (root != 0 || curnpgs != 0) {
 724                 panic("segspt_free_pages: bad large page");
 725                 /*NOTREACHED*/
 726         }
 727 
 728         /*
 729          * mark that pages have been released
 730          */
 731         sptd->spt_realsize = 0;
 732 
 733         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 734                 atomic_add_long(&spt_used, -npages);
 735                 anon_swap_restore(npages);
 736         }
 737 }
 738 
 739 /*
 740  * Get memory allocation policy info for specified address in given segment
 741  */
 742 static lgrp_mem_policy_info_t *
 743 segspt_getpolicy(struct seg *seg, caddr_t addr)
 744 {
 745         struct anon_map         *amp;
 746         ulong_t                 anon_index;
 747         lgrp_mem_policy_info_t  *policy_info;
 748         struct spt_data         *spt_data;
 749 
 750         ASSERT(seg != NULL);
 751 
 752         /*
 753          * Get anon_map from segspt
 754          *
 755          * Assume that no lock needs to be held on anon_map, since
 756          * it should be protected by its reference count which must be
 757          * nonzero for an existing segment
 758          * Need to grab readers lock on policy tree though
 759          */
 760         spt_data = (struct spt_data *)seg->s_data;
 761         if (spt_data == NULL)
 762                 return (NULL);
 763         amp = spt_data->spt_amp;
 764         ASSERT(amp->refcnt != 0);
 765 
 766         /*
 767          * Get policy info
 768          *
 769          * Assume starting anon index of 0
 770          */
 771         anon_index = seg_page(seg, addr);
 772         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 773 
 774         return (policy_info);
 775 }
 776 
 777 /*
 778  * DISM only.
 779  * Return locked pages over a given range.
 780  *
 781  * We will cache all DISM locked pages and save the pplist for the
 782  * entire segment in the ppa field of the underlying DISM segment structure.
 783  * Later, during a call to segspt_reclaim() we will use this ppa array
 784  * to page_unlock() all of the pages and then we will free this ppa list.
 785  */
 786 /*ARGSUSED*/
 787 static int
 788 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 789     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 790 {
 791         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 792         struct  seg     *sptseg = shmd->shm_sptseg;
 793         struct  spt_data *sptd = sptseg->s_data;
 794         pgcnt_t pg_idx, npages, tot_npages, npgs;
 795         struct  page **pplist, **pl, **ppa, *pp;
 796         struct  anon_map *amp;
 797         spgcnt_t        an_idx;
 798         int     ret = ENOTSUP;
 799         uint_t  pl_built = 0;
 800         struct  anon *ap;
 801         struct  vnode *vp;
 802         u_offset_t off;
 803         pgcnt_t claim_availrmem = 0;
 804         uint_t  szc;
 805 
 806         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
 807         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 808 
 809         /*
 810          * We want to lock/unlock the entire ISM segment. Therefore,
 811          * we will be using the underlying sptseg and its base address
 812          * and length for the caching arguments.
 813          */
 814         ASSERT(sptseg);
 815         ASSERT(sptd);
 816 
 817         pg_idx = seg_page(seg, addr);
 818         npages = btopr(len);
 819 
 820         /*
 821          * check if the request is larger than the number of pages
 822          * covered by amp
 823          */
 824         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 825                 *ppp = NULL;
 826                 return (ENOTSUP);
 827         }
 828 
 829         if (type == L_PAGEUNLOCK) {
 830                 ASSERT(sptd->spt_ppa != NULL);
 831 
 832                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 833                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 834 
 835                 /*
 836                  * If someone is blocked while unmapping, we purge
 837                  * segment page cache and thus reclaim pplist synchronously
 838                  * without waiting for seg_pasync_thread. This speeds up
 839                  * unmapping in cases where munmap(2) is called, while
 840                  * raw async i/o is still in progress or where a thread
 841                  * exits on data fault in a multithreaded application.
 842                  */
 843                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 844                     (AS_ISUNMAPWAIT(seg->s_as) &&
 845                     shmd->shm_softlockcnt > 0)) {
 846                         segspt_purge(seg);
 847                 }
 848                 return (0);
 849         }
 850 
 851         /* The L_PAGELOCK case ... */
 852 
 853         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 854                 segspt_purge(seg);
 855                 /*
 856                  * for DISM the ppa array needs to be rebuilt since
 857                  * the number of locked pages could have changed
 858                  */
 859                 *ppp = NULL;
 860                 return (ENOTSUP);
 861         }
 862 
 863         /*
 864          * First try to find pages in segment page cache, without
 865          * holding the segment lock.
 866          */
 867         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 868             S_WRITE, SEGP_FORCE_WIRED);
 869         if (pplist != NULL) {
 870                 ASSERT(sptd->spt_ppa != NULL);
 871                 ASSERT(sptd->spt_ppa == pplist);
 872                 ppa = sptd->spt_ppa;
 873                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 874                         if (ppa[an_idx] == NULL) {
 875                                 seg_pinactive(seg, NULL, seg->s_base,
 876                                     sptd->spt_amp->size, ppa,
 877                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 878                                 *ppp = NULL;
 879                                 return (ENOTSUP);
 880                         }
 881                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 882                                 npgs = page_get_pagecnt(szc);
 883                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 884                         } else {
 885                                 an_idx++;
 886                         }
 887                 }
 888                 /*
 889                  * Since we cache the entire DISM segment, we want to
 890                  * set ppp to point to the first slot that corresponds
 891                  * to the requested addr, i.e. pg_idx.
 892                  */
 893                 *ppp = &(sptd->spt_ppa[pg_idx]);
 894                 return (0);
 895         }
 896 
 897         mutex_enter(&sptd->spt_lock);
 898         /*
 899          * try to find pages in segment page cache with mutex
 900          */
 901         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 902             S_WRITE, SEGP_FORCE_WIRED);
 903         if (pplist != NULL) {
 904                 ASSERT(sptd->spt_ppa != NULL);
 905                 ASSERT(sptd->spt_ppa == pplist);
 906                 ppa = sptd->spt_ppa;
 907                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 908                         if (ppa[an_idx] == NULL) {
 909                                 mutex_exit(&sptd->spt_lock);
 910                                 seg_pinactive(seg, NULL, seg->s_base,
 911                                     sptd->spt_amp->size, ppa,
 912                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 913                                 *ppp = NULL;
 914                                 return (ENOTSUP);
 915                         }
 916                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 917                                 npgs = page_get_pagecnt(szc);
 918                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 919                         } else {
 920                                 an_idx++;
 921                         }
 922                 }
 923                 /*
 924                  * Since we cache the entire DISM segment, we want to
 925                  * set ppp to point to the first slot that corresponds
 926                  * to the requested addr, i.e. pg_idx.
 927                  */
 928                 mutex_exit(&sptd->spt_lock);
 929                 *ppp = &(sptd->spt_ppa[pg_idx]);
 930                 return (0);
 931         }
 932         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 933             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 934                 mutex_exit(&sptd->spt_lock);
 935                 *ppp = NULL;
 936                 return (ENOTSUP);
 937         }
 938 
 939         /*
 940          * No need to worry about protections because DISM pages are always rw.
 941          */
 942         pl = pplist = NULL;
 943         amp = sptd->spt_amp;
 944 
 945         /*
 946          * Do we need to build the ppa array?
 947          */
 948         if (sptd->spt_ppa == NULL) {
 949                 pgcnt_t lpg_cnt = 0;
 950 
 951                 pl_built = 1;
 952                 tot_npages = btopr(sptd->spt_amp->size);
 953 
 954                 ASSERT(sptd->spt_pcachecnt == 0);
 955                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 956                 pl = pplist;
 957 
 958                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 959                 for (an_idx = 0; an_idx < tot_npages; ) {
 960                         ap = anon_get_ptr(amp->ahp, an_idx);
 961                         /*
 962                          * Cache only mlocked pages. For large pages,
 963                          * if one (constituent) page is mlocked then
 964                          * all pages of that large page are cached
 965                          * as well. This allows quick lookups in the
 966                          * ppa array.
 967                          */
 968                         if ((ap != NULL) && (lpg_cnt != 0 ||
 969                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
 970 
 971                                 swap_xlate(ap, &vp, &off);
 972                                 pp = page_lookup(vp, off, SE_SHARED);
 973                                 ASSERT(pp != NULL);
 974                                 if (lpg_cnt == 0) {
 975                                         lpg_cnt++;
 976                                         /*
 977                                          * For a small page, we are done --
 978                                          * lpg_cnt is reset to 0 below.
 979                                          *
 980                                          * For a large page, we are guaranteed
 981                                          * to find the anon structures of all
 982                                          * constituent pages and a non-zero
 983                                          * lpg_cnt ensures that we don't test
 984                                          * for mlock for these. We are done
 985                                          * when lpg_cnt reaches (npgs + 1).
 986                                          * If we are not the first constituent
 987                                          * page, restart at the first one.
 988                                          */
 989                                         npgs = page_get_pagecnt(pp->p_szc);
 990                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
 991                                                 an_idx = P2ALIGN(an_idx, npgs);
 992                                                 page_unlock(pp);
 993                                                 continue;
 994                                         }
 995                                 }
 996                                 if (++lpg_cnt > npgs)
 997                                         lpg_cnt = 0;
 998 
 999                                 /*
1000                                  * availrmem is decremented only
1001                                  * for unlocked pages
1002                                  */
1003                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1004                                         claim_availrmem++;
1005                                 pplist[an_idx] = pp;
1006                         }
1007                         an_idx++;
1008                 }
1009                 ANON_LOCK_EXIT(&amp->a_rwlock);
1010 
1011                 if (claim_availrmem) {
1012                         mutex_enter(&freemem_lock);
1013                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1014                                 mutex_exit(&freemem_lock);
1015                                 ret = ENOTSUP;
1016                                 claim_availrmem = 0;
1017                                 goto insert_fail;
1018                         } else {
1019                                 availrmem -= claim_availrmem;
1020                         }
1021                         mutex_exit(&freemem_lock);
1022                 }
1023 
1024                 sptd->spt_ppa = pl;
1025         } else {
1026                 /*
1027                  * We already have a valid ppa[].
1028                  */
1029                 pl = sptd->spt_ppa;
1030         }
1031 
1032         ASSERT(pl != NULL);
1033 
1034         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1035             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1036             segspt_reclaim);
1037         if (ret == SEGP_FAIL) {
1038                 /*
1039                  * seg_pinsert failed. We return
1040                  * ENOTSUP, so that the as_pagelock() code will
1041                  * then try the slower F_SOFTLOCK path.
1042                  */
1043                 if (pl_built) {
1044                         /*
1045                          * No one else has referenced the ppa[].
1046                          * We created it and we need to destroy it.
1047                          */
1048                         sptd->spt_ppa = NULL;
1049                 }
1050                 ret = ENOTSUP;
1051                 goto insert_fail;
1052         }
1053 
1054         /*
1055          * In either case, we increment softlockcnt on the 'real' segment.
1056          */
1057         sptd->spt_pcachecnt++;
1058         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1059 
1060         ppa = sptd->spt_ppa;
1061         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1062                 if (ppa[an_idx] == NULL) {
1063                         mutex_exit(&sptd->spt_lock);
1064                         seg_pinactive(seg, NULL, seg->s_base,
1065                             sptd->spt_amp->size,
1066                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1067                         *ppp = NULL;
1068                         return (ENOTSUP);
1069                 }
1070                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1071                         npgs = page_get_pagecnt(szc);
1072                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1073                 } else {
1074                         an_idx++;
1075                 }
1076         }
1077         /*
1078          * We can now drop the sptd->spt_lock since the ppa[]
1079          * exists and we have incremented pcachecnt.
1080          */
1081         mutex_exit(&sptd->spt_lock);
1082 
1083         /*
1084          * Since we cache the entire segment, we want to
1085          * set ppp to point to the first slot that corresponds
1086          * to the requested addr, i.e. pg_idx.
1087          */
1088         *ppp = &(sptd->spt_ppa[pg_idx]);
1089         return (0);
1090 
1091 insert_fail:
1092         /*
1093          * We will only reach this code if we tried and failed.
1094          *
1095          * And we can drop the lock on the dummy seg, once we've failed
1096          * to set up a new ppa[].
1097          */
1098         mutex_exit(&sptd->spt_lock);
1099 
1100         if (pl_built) {
1101                 if (claim_availrmem) {
1102                         mutex_enter(&freemem_lock);
1103                         availrmem += claim_availrmem;
1104                         mutex_exit(&freemem_lock);
1105                 }
1106 
1107                 /*
1108                  * We created pl and we need to destroy it.
1109                  */
1110                 pplist = pl;
1111                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1112                         if (pplist[an_idx] != NULL)
1113                                 page_unlock(pplist[an_idx]);
1114                 }
1115                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1116         }
1117 
1118         if (shmd->shm_softlockcnt <= 0) {
1119                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1120                         mutex_enter(&seg->s_as->a_contents);
1121                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1122                                 AS_CLRUNMAPWAIT(seg->s_as);
1123                                 cv_broadcast(&seg->s_as->a_cv);
1124                         }
1125                         mutex_exit(&seg->s_as->a_contents);
1126                 }
1127         }
1128         *ppp = NULL;
1129         return (ret);
1130 }
1131 
1132 
1133 
1134 /*
1135  * return locked pages over a given range.
1136  *
1137  * We will cache the entire ISM segment and save the pplist for the
1138  * entire segment in the ppa field of the underlying ISM segment structure.
1139  * Later, during a call to segspt_reclaim() we will use this ppa array
1140  * to page_unlock() all of the pages and then we will free this ppa list.
1141  */
1142 /*ARGSUSED*/
1143 static int
1144 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1145     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1146 {
1147         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1148         struct seg      *sptseg = shmd->shm_sptseg;
1149         struct spt_data *sptd = sptseg->s_data;
1150         pgcnt_t np, page_index, npages;
1151         caddr_t a, spt_base;
1152         struct page **pplist, **pl, *pp;
1153         struct anon_map *amp;
1154         ulong_t anon_index;
1155         int ret = ENOTSUP;
1156         uint_t  pl_built = 0;
1157         struct anon *ap;
1158         struct vnode *vp;
1159         u_offset_t off;
1160 
1161         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1162         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1163 
1164 
1165         /*
1166          * We want to lock/unlock the entire ISM segment. Therefore,
1167          * we will be using the underlying sptseg and its base address
1168          * and length for the caching arguments.
1169          */
1170         ASSERT(sptseg);
1171         ASSERT(sptd);
1172 
1173         if (sptd->spt_flags & SHM_PAGEABLE) {
1174                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1175         }
1176 
1177         page_index = seg_page(seg, addr);
1178         npages = btopr(len);
1179 
1180         /*
1181          * check if the request is larger than the number of pages
1182          * covered by amp
1183          */
1184         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1185                 *ppp = NULL;
1186                 return (ENOTSUP);
1187         }
1188 
1189         if (type == L_PAGEUNLOCK) {
1190 
1191                 ASSERT(sptd->spt_ppa != NULL);
1192 
1193                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1194                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1195 
1196                 /*
1197                  * If someone is blocked while unmapping, we purge
1198                  * segment page cache and thus reclaim pplist synchronously
1199                  * without waiting for seg_pasync_thread. This speeds up
1200                  * unmapping in cases where munmap(2) is called, while
1201                  * raw async i/o is still in progress or where a thread
1202                  * exits on data fault in a multithreaded application.
1203                  */
1204                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1205                         segspt_purge(seg);
1206                 }
1207                 return (0);
1208         }
1209 
1210         /* The L_PAGELOCK case... */
1211 
1212         /*
1213          * First try to find pages in segment page cache, without
1214          * holding the segment lock.
1215          */
1216         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1217             S_WRITE, SEGP_FORCE_WIRED);
1218         if (pplist != NULL) {
1219                 ASSERT(sptd->spt_ppa == pplist);
1220                 ASSERT(sptd->spt_ppa[page_index]);
1221                 /*
1222                  * Since we cache the entire ISM segment, we want to
1223                  * set ppp to point to the first slot that corresponds
1224                  * to the requested addr, i.e. page_index.
1225                  */
1226                 *ppp = &(sptd->spt_ppa[page_index]);
1227                 return (0);
1228         }
1229 
1230         mutex_enter(&sptd->spt_lock);
1231 
1232         /*
1233          * try to find pages in segment page cache
1234          */
1235         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1236             S_WRITE, SEGP_FORCE_WIRED);
1237         if (pplist != NULL) {
1238                 ASSERT(sptd->spt_ppa == pplist);
1239                 /*
1240                  * Since we cache the entire segment, we want to
1241                  * set ppp to point to the first slot that corresponds
1242                  * to the requested addr, i.e. page_index.
1243                  */
1244                 mutex_exit(&sptd->spt_lock);
1245                 *ppp = &(sptd->spt_ppa[page_index]);
1246                 return (0);
1247         }
1248 
1249         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1250             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1251                 mutex_exit(&sptd->spt_lock);
1252                 *ppp = NULL;
1253                 return (ENOTSUP);
1254         }
1255 
1256         /*
1257          * No need to worry about protections because ISM pages
1258          * are always rw.
1259          */
1260         pl = pplist = NULL;
1261 
1262         /*
1263          * Do we need to build the ppa array?
1264          */
1265         if (sptd->spt_ppa == NULL) {
1266                 ASSERT(sptd->spt_ppa == pplist);
1267 
1268                 spt_base = sptseg->s_base;
1269                 pl_built = 1;
1270 
1271                 /*
1272                  * availrmem is decremented once during anon_swap_adjust()
1273                  * and is incremented during the anon_unresv(), which is
1274                  * called from shm_rm_amp() when the segment is destroyed.
1275                  */
1276                 amp = sptd->spt_amp;
1277                 ASSERT(amp != NULL);
1278 
1279                 /* pcachecnt is protected by sptd->spt_lock */
1280                 ASSERT(sptd->spt_pcachecnt == 0);
1281                 pplist = kmem_zalloc(sizeof (page_t *)
1282                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1283                 pl = pplist;
1284 
1285                 anon_index = seg_page(sptseg, spt_base);
1286 
1287                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1288                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1289                     a += PAGESIZE, anon_index++, pplist++) {
1290                         ap = anon_get_ptr(amp->ahp, anon_index);
1291                         ASSERT(ap != NULL);
1292                         swap_xlate(ap, &vp, &off);
1293                         pp = page_lookup(vp, off, SE_SHARED);
1294                         ASSERT(pp != NULL);
1295                         *pplist = pp;
1296                 }
1297                 ANON_LOCK_EXIT(&amp->a_rwlock);
1298 
1299                 if (a < (spt_base + sptd->spt_amp->size)) {
1300                         ret = ENOTSUP;
1301                         goto insert_fail;
1302                 }
1303                 sptd->spt_ppa = pl;
1304         } else {
1305                 /*
1306                  * We already have a valid ppa[].
1307                  */
1308                 pl = sptd->spt_ppa;
1309         }
1310 
1311         ASSERT(pl != NULL);
1312 
1313         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1314             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1315             segspt_reclaim);
1316         if (ret == SEGP_FAIL) {
1317                 /*
1318                  * seg_pinsert failed. We return
1319                  * ENOTSUP, so that the as_pagelock() code will
1320                  * then try the slower F_SOFTLOCK path.
1321                  */
1322                 if (pl_built) {
1323                         /*
1324                          * No one else has referenced the ppa[].
1325                          * We created it and we need to destroy it.
1326                          */
1327                         sptd->spt_ppa = NULL;
1328                 }
1329                 ret = ENOTSUP;
1330                 goto insert_fail;
1331         }
1332 
1333         /*
1334          * In either case, we increment softlockcnt on the 'real' segment.
1335          */
1336         sptd->spt_pcachecnt++;
1337         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1338 
1339         /*
1340          * We can now drop the sptd->spt_lock since the ppa[]
1341          * exists and we have incremented pcachecnt.
1342          */
1343         mutex_exit(&sptd->spt_lock);
1344 
1345         /*
1346          * Since we cache the entire segment, we want to
1347          * set ppp to point to the first slot that corresponds
1348          * to the requested addr, i.e. page_index.
1349          */
1350         *ppp = &(sptd->spt_ppa[page_index]);
1351         return (0);
1352 
1353 insert_fail:
1354         /*
1355          * We will only reach this code if we tried and failed.
1356          *
1357          * Now that we have failed to set up a new ppa[], we can drop
1358          * the lock on the dummy seg.
1359          */
1360         mutex_exit(&sptd->spt_lock);
1361 
1362         if (pl_built) {
1363                 /*
1364                  * We created pl and we need to destroy it.
1365                  */
1366                 pplist = pl;
1367                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1368                 while (np) {
1369                         page_unlock(*pplist);
1370                         np--;
1371                         pplist++;
1372                 }
1373                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1374         }
1375         if (shmd->shm_softlockcnt <= 0) {
1376                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1377                         mutex_enter(&seg->s_as->a_contents);
1378                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1379                                 AS_CLRUNMAPWAIT(seg->s_as);
1380                                 cv_broadcast(&seg->s_as->a_cv);
1381                         }
1382                         mutex_exit(&seg->s_as->a_contents);
1383                 }
1384         }
1385         *ppp = NULL;
1386         return (ret);
1387 }
1388 
1389 /*
1390  * Purge any cached pages in the I/O page cache.
1391  */
1392 static void
1393 segspt_purge(struct seg *seg)
1394 {
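             /*
              * seg_ppurge() invalidates this segment's entries in the seg
              * pcache; as each wired entry is reclaimed, the segspt_reclaim()
              * callback registered by seg_pinsert() runs and releases the
              * cached page list.
              */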
1395         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1396 }
1397 
1398 static int
1399 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1400         enum seg_rw rw, int async)
1401 {
1402         struct seg *seg = (struct seg *)ptag;
1403         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1404         struct  seg     *sptseg;
1405         struct  spt_data *sptd;
1406         pgcnt_t npages, i, free_availrmem = 0;
1407         int     done = 0;
1408 
1409 #ifdef lint
1410         addr = addr;
1411 #endif
1412         sptseg = shmd->shm_sptseg;
1413         sptd = sptseg->s_data;
1414         npages = (len >> PAGESHIFT);
1415         ASSERT(npages);
1416         ASSERT(sptd->spt_pcachecnt != 0);
1417         ASSERT(sptd->spt_ppa == pplist);
1418         ASSERT(npages == btopr(sptd->spt_amp->size));
1419         ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1420 
1421         /*
1422          * Acquire the lock on the dummy seg and destroy the
1423          * ppa array IF this is the last pcachecnt.
1424          */
1425         mutex_enter(&sptd->spt_lock);
1426         if (--sptd->spt_pcachecnt == 0) {
1427                 for (i = 0; i < npages; i++) {
1428                         if (pplist[i] == NULL) {
1429                                 continue;
1430                         }
1431                         if (rw == S_WRITE) {
1432                                 hat_setrefmod(pplist[i]);
1433                         } else {
1434                                 hat_setref(pplist[i]);
1435                         }
1436                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1437                             (sptd->spt_ppa_lckcnt[i] == 0))
1438                                 free_availrmem++;
1439                         page_unlock(pplist[i]);
1440                 }
1441                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1442                         mutex_enter(&freemem_lock);
1443                         availrmem += free_availrmem;
1444                         mutex_exit(&freemem_lock);
1445                 }
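                     /*
                      * The availrmem returned above was decremented when
                      * these unlocked DISM pages were put in the pcache (see
                      * the comment referencing _dismpagelock() in
                      * spt_unlockpages()).
                      */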
1446                 /*
1447                  * Since we want to cache/uncache the entire ISM segment,
1448                  * we will track the pplist in a segspt-specific field,
1449                  * ppa, which is initialized at the time we add an entry to
1450                  * the cache.
1451                  */
1452                 ASSERT(sptd->spt_pcachecnt == 0);
1453                 kmem_free(pplist, sizeof (page_t *) * npages);
1454                 sptd->spt_ppa = NULL;
1455                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1456                 sptd->spt_gen++;
1457                 cv_broadcast(&sptd->spt_cv);
1458                 done = 1;
1459         }
1460         mutex_exit(&sptd->spt_lock);
1461 
1462         /*
1463          * If we are the pcache async thread, or were called via
1464          * seg_ppurge_wiredpp(), we may not hold the AS lock (in that case
1465          * the async argument is not 0). This means that if softlockcnt
1466          * drops to 0 after the decrement below, the address space may get
1467          * freed. We can't allow that: after softlockcnt drops to 0 we
1468          * still need to access the as structure for a possible wakeup of
1469          * unmap waiters. To keep the as from disappearing we take this
1470          * segment's shm_segfree_syncmtx; segspt_shmfree() takes the same
1471          * mutex as a barrier so that this routine completes before the
1472          * segment is freed.
1473          *
1474          * The second complication in the async case is a possible missed
1475          * wakeup of an unmap wait thread. Without the as lock we may take
1476          * the a_contents lock before an unmap wait thread that was first
1477          * to see a non-zero softlockcnt, and so fail to wake it up. To
1478          * avoid this race, if async is not 0 and we drop softlockcnt to
1479          * 0, we set the nounmapwait flag in the as structure; an unmap
1480          * wait thread will not block if this flag is set.
1481          */
1482         if (async)
1483                 mutex_enter(&shmd->shm_segfree_syncmtx);
1484 
1485         /*
1486          * Now decrement softlockcnt.
1487          */
1488         ASSERT(shmd->shm_softlockcnt > 0);
1489         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1490 
1491         if (shmd->shm_softlockcnt <= 0) {
1492                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1493                         mutex_enter(&seg->s_as->a_contents);
1494                         if (async)
1495                                 AS_SETNOUNMAPWAIT(seg->s_as);
1496                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1497                                 AS_CLRUNMAPWAIT(seg->s_as);
1498                                 cv_broadcast(&seg->s_as->a_cv);
1499                         }
1500                         mutex_exit(&seg->s_as->a_contents);
1501                 }
1502         }
1503 
1504         if (async)
1505                 mutex_exit(&shmd->shm_segfree_syncmtx);
1506 
1507         return (done);
1508 }
1509 
1510 /*
1511  * Do a F_SOFTUNLOCK call over the range requested.
1512  * The range must have already been F_SOFTLOCK'ed.
1513  *
1514  * The calls to acquire and release the anon map lock mutex were
1515  * removed in order to avoid a deadly embrace during a DR
1516  * memory delete operation.  (E.g., DR blocks while waiting for an
1517  * exclusive lock on a page that is being used for kaio; the
1518  * thread that will complete the kaio and call segspt_softunlock
1519  * blocks on the anon map lock; another thread holding the anon
1520  * map lock blocks on another page lock via the segspt_shmfault
1521  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1522  *
1523  * The appropriateness of the removal is based upon the following:
1524  * 1. If we are holding a segment's reader lock and the page is held
1525  * shared, then the corresponding element in anonmap which points to
1526  * anon struct cannot change and there is no need to acquire the
1527  * anonymous map lock.
1528  * 2. Threads in segspt_softunlock have a reader lock on the segment
1529  * and already have the shared page lock, so we are guaranteed that
1530  * the anon map slot cannot change and therefore can call anon_get_ptr()
1531  * without grabbing the anonymous map lock.
1532  * 3. Threads that softlock a shared page break copy-on-write, even if
1533  * it's a read.  Thus cow faults can be ignored with respect to soft
1534  * unlocking, since the breaking of cow means that the anon slot(s) will
1535  * not be shared.
1536  */
1537 static void
1538 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1539         size_t len, enum seg_rw rw)
1540 {
1541         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1542         struct seg      *sptseg;
1543         struct spt_data *sptd;
1544         page_t *pp;
1545         caddr_t adr;
1546         struct vnode *vp;
1547         u_offset_t offset;
1548         ulong_t anon_index;
1549         struct anon_map *amp;           /* XXX - for locknest */
1550         struct anon *ap = NULL;
1551         pgcnt_t npages;
1552 
1553         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1554 
1555         sptseg = shmd->shm_sptseg;
1556         sptd = sptseg->s_data;
1557 
1558         /*
1559          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1560          * and therefore their pages are SE_SHARED locked
1561          * for the entire life of the segment.
1562          */
1563         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1564             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1565                 goto softlock_decrement;
1566         }
1567 
1568         /*
1569          * Any thread is free to do a page_find and
1570          * page_unlock() on the pages within this seg.
1571          *
1572          * We are already holding the as->a_lock on the user's
1573          * real segment, but we need to hold the a_lock on the
1574          * underlying dummy as. This is mostly to satisfy the
1575          * underlying HAT layer.
1576          */
1577         AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1578         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1579         AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1580 
1581         amp = sptd->spt_amp;
1582         ASSERT(amp != NULL);
1583         anon_index = seg_page(sptseg, sptseg_addr);
1584 
1585         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1586                 ap = anon_get_ptr(amp->ahp, anon_index++);
1587                 ASSERT(ap != NULL);
1588                 swap_xlate(ap, &vp, &offset);
1589 
1590                 /*
1591                  * Use page_find() instead of page_lookup() to
1592                  * find the page since we know that it has a
1593                  * "shared" lock.
1594                  */
1595                 pp = page_find(vp, offset);
1596                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1597                 if (pp == NULL) {
1598                         panic("segspt_softunlock: "
1599                             "addr %p, ap %p, vp %p, off %llx",
1600                             (void *)adr, (void *)ap, (void *)vp, offset);
1601                         /*NOTREACHED*/
1602                 }
1603 
1604                 if (rw == S_WRITE) {
1605                         hat_setrefmod(pp);
1606                 } else if (rw != S_OTHER) {
1607                         hat_setref(pp);
1608                 }
1609                 page_unlock(pp);
1610         }
1611 
1612 softlock_decrement:
1613         npages = btopr(len);
1614         ASSERT(shmd->shm_softlockcnt >= npages);
1615         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1616         if (shmd->shm_softlockcnt == 0) {
1617                 /*
1618                  * All SOFTLOCKS are gone. Wakeup any waiting
1619                  * unmappers so they can try again to unmap.
1620                  * Check for waiters first without the mutex
1621                  * held so we don't always grab the mutex on
1622                  * softunlocks.
1623                  */
1624                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1625                         mutex_enter(&seg->s_as->a_contents);
1626                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1627                                 AS_CLRUNMAPWAIT(seg->s_as);
1628                                 cv_broadcast(&seg->s_as->a_cv);
1629                         }
1630                         mutex_exit(&seg->s_as->a_contents);
1631                 }
1632         }
1633 }
1634 
1635 int
1636 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1637 {
1638         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1639         struct shm_data *shmd;
1640         struct anon_map *shm_amp = shmd_arg->shm_amp;
1641         struct spt_data *sptd;
1642         int error = 0;
1643 
1644         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1645 
1646         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1647         if (shmd == NULL)
1648                 return (ENOMEM);
1649 
1650         shmd->shm_sptas = shmd_arg->shm_sptas;
1651         shmd->shm_amp = shm_amp;
1652         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1653 
1654         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1655             NULL, 0, seg->s_size);
1656 
1657         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1658 
1659         seg->s_data = (void *)shmd;
1660         seg->s_ops = &segspt_shmops;
1661         seg->s_szc = shmd->shm_sptseg->s_szc;
1662         sptd = shmd->shm_sptseg->s_data;
1663 
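             /*
              * For DISM (SHM_PAGEABLE) we also allocate a per-page
              * shm_vpage[] array to track which pages this process has
              * locked, and we share the HAT mappings up front only when the
              * HAT supports dynamic ISM unmap; for regular ISM the HAT
              * mappings are always shared here via hat_share().
              */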
1664         if (sptd->spt_flags & SHM_PAGEABLE) {
1665                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1666                     KM_NOSLEEP)) == NULL) {
1667                         seg->s_data = (void *)NULL;
1668                         kmem_free(shmd, (sizeof (*shmd)));
1669                         return (ENOMEM);
1670                 }
1671                 shmd->shm_lckpgs = 0;
1672                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1673                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1674                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1675                             seg->s_size, seg->s_szc)) != 0) {
1676                                 kmem_free(shmd->shm_vpage,
1677                                     btopr(shm_amp->size));
1678                         }
1679                 }
1680         } else {
1681                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1682                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1683                     seg->s_size, seg->s_szc);
1684         }
1685         if (error) {
1686                 seg->s_szc = 0;
1687                 seg->s_data = (void *)NULL;
1688                 kmem_free(shmd, (sizeof (*shmd)));
1689         } else {
1690                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1691                 shm_amp->refcnt++;
1692                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1693         }
1694         return (error);
1695 }
1696 
1697 int
1698 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1699 {
1700         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1701         int reclaim = 1;
1702 
1703         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1704 retry:
1705         if (shmd->shm_softlockcnt > 0) {
1706                 if (reclaim == 1) {
1707                         segspt_purge(seg);
1708                         reclaim = 0;
1709                         goto retry;
1710                 }
1711                 return (EAGAIN);
1712         }
1713 
1714         if (ssize != seg->s_size) {
1715 #ifdef DEBUG
1716                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1717                     ssize, seg->s_size);
1718 #endif
1719                 return (EINVAL);
1720         }
1721 
1722         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1723             NULL, 0);
1724         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1725 
1726         seg_free(seg);
1727 
1728         return (0);
1729 }
1730 
1731 void
1732 segspt_shmfree(struct seg *seg)
1733 {
1734         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1735         struct anon_map *shm_amp = shmd->shm_amp;
1736 
1737         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1738 
1739         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1740             MC_UNLOCK, NULL, 0);
1741 
1742         /*
1743          * Need to increment refcnt when attaching
1744          * and decrement when detaching because of dup().
1745          */
1746         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1747         shm_amp->refcnt--;
1748         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1749 
1750         if (shmd->shm_vpage) {       /* only for DISM */
1751                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1752                 shmd->shm_vpage = NULL;
1753         }
1754 
1755         /*
1756          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1757          * still working with this segment without holding as lock.
1758          */
1759         ASSERT(shmd->shm_softlockcnt == 0);
1760         mutex_enter(&shmd->shm_segfree_syncmtx);
1761         mutex_destroy(&shmd->shm_segfree_syncmtx);
1762 
1763         kmem_free(shmd, sizeof (*shmd));
1764 }
1765 
1766 /*ARGSUSED*/
1767 int
1768 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1769 {
1770         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1771 
1772         /*
1773          * Shared page table is more than shared mapping.
1774          *  Individual process sharing page tables can't change prot
1775          *  because there is only one set of page tables.
1776          *  This will be allowed after private page table is
1777          *  supported.
1778          */
1779 /* need to return correct status error? */
1780         return (0);
1781 }
1782 
1783 
1784 faultcode_t
1785 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1786     size_t len, enum fault_type type, enum seg_rw rw)
1787 {
1788         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1789         struct  seg             *sptseg = shmd->shm_sptseg;
1790         struct  as              *curspt = shmd->shm_sptas;
1791         struct  spt_data        *sptd = sptseg->s_data;
1792         pgcnt_t npages;
1793         size_t  size;
1794         caddr_t segspt_addr, shm_addr;
1795         page_t  **ppa;
1796         int     i;
1797         ulong_t an_idx = 0;
1798         int     err = 0;
1799         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1800         size_t  pgsz;
1801         pgcnt_t pgcnt;
1802         caddr_t a;
1803         pgcnt_t pidx;
1804 
1805 #ifdef lint
1806         hat = hat;
1807 #endif
1808         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1809 
1810         /*
1811          * Because of the way spt is implemented
1812          * the realsize of the segment does not have to be
1813          * equal to the segment size itself. The segment size is
1814          * often in multiples of a page size larger than PAGESIZE.
1815          * The realsize is rounded up to the nearest PAGESIZE
1816          * based on what the user requested. This is a bit of
1817          * ugliness that is historical but not easily fixed
1818          * without re-designing the higher levels of ISM.
1819          */
1820         ASSERT(addr >= seg->s_base);
1821         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1822                 return (FC_NOMAP);
1823         /*
1824          * For all of the following cases except F_PROT, we need to
1825          * make any necessary adjustments to addr and len
1826          * and get all of the necessary page_t's into an array called ppa[].
1827          *
1828          * The code in shmat() forces base addr and len of ISM segment
1829          * to be aligned to largest page size supported. Therefore,
1830          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1831          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1832          * in large pagesize chunks, or else we will screw up the HAT
1833          * layer by calling hat_memload_array() with differing page sizes
1834          * over a given virtual range.
1835          */
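             /*
              * For example, with a hypothetical 4 MB large page, a fault at
              * seg->s_base + 5 MB for 8 KB of data is expanded below to
              * shm_addr = seg->s_base + 4 MB and size = 4 MB, so the whole
              * large page containing the fault is handled as a single
              * hat_memload_array() chunk.
              */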
1836         pgsz = page_get_pagesize(sptseg->s_szc);
1837         pgcnt = page_get_pagecnt(sptseg->s_szc);
1838         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1839         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1840         npages = btopr(size);
1841 
1842         /*
1843          * Now we need to convert from addr in segshm to addr in segspt.
1844          */
1845         an_idx = seg_page(seg, shm_addr);
1846         segspt_addr = sptseg->s_base + ptob(an_idx);
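             /*
              * Both segments map the same anon array one page per slot, so
              * the page index within the shm segment is also the page offset
              * of the corresponding address within the dummy spt segment.
              */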
1847 
1848         ASSERT((segspt_addr + ptob(npages)) <=
1849             (sptseg->s_base + sptd->spt_realsize));
1850         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1851 
1852         switch (type) {
1853 
1854         case F_SOFTLOCK:
1855 
1856                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1857                 /*
1858                  * Fall through to the F_INVAL case to load up the hat layer
1859                  * entries with the HAT_LOAD_LOCK flag.
1860                  */
1861                 /* FALLTHRU */
1862         case F_INVAL:
1863 
1864                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1865                         return (FC_NOMAP);
1866 
1867                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1868 
1869                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1870                 if (err != 0) {
1871                         if (type == F_SOFTLOCK) {
1872                                 atomic_add_long((ulong_t *)(
1873                                     &(shmd->shm_softlockcnt)), -npages);
1874                         }
1875                         goto dism_err;
1876                 }
1877                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1878                 a = segspt_addr;
1879                 pidx = 0;
1880                 if (type == F_SOFTLOCK) {
1881 
1882                         /*
1883                          * Load up the translation keeping it
1884                          * locked and don't unlock the page.
1885                          */
1886                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1887                                 hat_memload_array(sptseg->s_as->a_hat,
1888                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1889                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1890                         }
1891                 } else {
1892                         /*
1893                          * Migrate pages marked for migration
1894                          */
1895                         if (lgrp_optimizations())
1896                                 page_migrate(seg, shm_addr, ppa, npages);
1897 
1898                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1899                                 hat_memload_array(sptseg->s_as->a_hat,
1900                                     a, pgsz, &ppa[pidx],
1901                                     sptd->spt_prot,
1902                                     HAT_LOAD_SHARE);
1903                         }
1904 
1905                         /*
1906                          * And now drop the SE_SHARED lock(s).
1907                          */
1908                         if (dyn_ism_unmap) {
1909                                 for (i = 0; i < npages; i++) {
1910                                         page_unlock(ppa[i]);
1911                                 }
1912                         }
1913                 }
1914 
1915                 if (!dyn_ism_unmap) {
1916                         if (hat_share(seg->s_as->a_hat, shm_addr,
1917                             curspt->a_hat, segspt_addr, ptob(npages),
1918                             seg->s_szc) != 0) {
1919                                 panic("hat_share err in DISM fault");
1920                                 /* NOTREACHED */
1921                         }
1922                         if (type == F_INVAL) {
1923                                 for (i = 0; i < npages; i++) {
1924                                         page_unlock(ppa[i]);
1925                                 }
1926                         }
1927                 }
1928                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1929 dism_err:
1930                 kmem_free(ppa, npages * sizeof (page_t *));
1931                 return (err);
1932 
1933         case F_SOFTUNLOCK:
1934 
1935                 /*
1936                  * This is a bit ugly: we pass in the real seg pointer,
1937                  * but the segspt_addr is the virtual address within the
1938                  * dummy seg.
1939                  */
1940                 segspt_softunlock(seg, segspt_addr, size, rw);
1941                 return (0);
1942 
1943         case F_PROT:
1944 
1945                 /*
1946                  * This takes care of the unusual case where a user
1947                  * allocates a stack in shared memory and a register
1948                  * window overflow is written to that stack page before
1949                  * it is otherwise modified.
1950                  *
1951                  * We can get away with this because ISM segments are
1952                  * always rw. Other than this unusual case, there
1953                  * should be no instances of protection violations.
1954                  */
1955                 return (0);
1956 
1957         default:
1958 #ifdef DEBUG
1959                 panic("segspt_dismfault default type?");
1960 #else
1961                 return (FC_NOMAP);
1962 #endif
1963         }
1964 }
1965 
1966 
1967 faultcode_t
1968 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1969     size_t len, enum fault_type type, enum seg_rw rw)
1970 {
1971         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
1972         struct seg              *sptseg = shmd->shm_sptseg;
1973         struct as               *curspt = shmd->shm_sptas;
1974         struct spt_data         *sptd   = sptseg->s_data;
1975         pgcnt_t npages;
1976         size_t size;
1977         caddr_t sptseg_addr, shm_addr;
1978         page_t *pp, **ppa;
1979         int     i;
1980         u_offset_t offset;
1981         ulong_t anon_index = 0;
1982         struct vnode *vp;
1983         struct anon_map *amp;           /* XXX - for locknest */
1984         struct anon *ap = NULL;
1985         size_t          pgsz;
1986         pgcnt_t         pgcnt;
1987         caddr_t         a;
1988         pgcnt_t         pidx;
1989         size_t          sz;
1990 
1991 #ifdef lint
1992         hat = hat;
1993 #endif
1994 
1995         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1996 
1997         if (sptd->spt_flags & SHM_PAGEABLE) {
1998                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
1999         }
2000 
2001         /*
2002          * Because of the way spt is implemented
2003          * the realsize of the segment does not have to be
2004          * equal to the segment size itself. The segment size is
2005          * often in multiples of a page size larger than PAGESIZE.
2006          * The realsize is rounded up to the nearest PAGESIZE
2007          * based on what the user requested. This is a bit of
2008          * ugliness that is historical but not easily fixed
2009          * without re-designing the higher levels of ISM.
2010          */
2011         ASSERT(addr >= seg->s_base);
2012         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2013                 return (FC_NOMAP);
2014         /*
2015          * For all of the following cases except F_PROT, we need to
2016          * make any necessary adjustments to addr and len
2017          * and get all of the necessary page_t's into an array called ppa[].
2018          *
2019          * The code in shmat() forces base addr and len of ISM segment
2020          * to be aligned to largest page size supported. Therefore,
2021          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2022          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2023          * in large pagesize chunks, or else we will screw up the HAT
2024          * layer by calling hat_memload_array() with differing page sizes
2025          * over a given virtual range.
2026          */
2027         pgsz = page_get_pagesize(sptseg->s_szc);
2028         pgcnt = page_get_pagecnt(sptseg->s_szc);
2029         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2030         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2031         npages = btopr(size);
2032 
2033         /*
2034          * Now we need to convert from addr in segshm to addr in segspt.
2035          */
2036         anon_index = seg_page(seg, shm_addr);
2037         sptseg_addr = sptseg->s_base + ptob(anon_index);
2038 
2039         /*
2040          * And now we may have to adjust npages downward if we have
2041          * exceeded the realsize of the segment or initial anon
2042          * allocations.
2043          */
2044         if ((sptseg_addr + ptob(npages)) >
2045             (sptseg->s_base + sptd->spt_realsize))
2046                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2047 
2048         npages = btopr(size);
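             /*
              * Because npages may have been trimmed to spt_realsize above,
              * the last hat_memload_array() chunk below can be smaller than
              * a full large page, which is why sz is recomputed as
              * MIN(pgsz, ptob(npages - pidx)) on every iteration.
              */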
2049 
2050         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2051         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2052 
2053         switch (type) {
2054 
2055         case F_SOFTLOCK:
2056 
2057                 /*
2058                  * availrmem is decremented once during anon_swap_adjust()
2059                  * and is incremented during the anon_unresv(), which is
2060                  * called from shm_rm_amp() when the segment is destroyed.
2061                  */
2062                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2063                 /*
2064                  * Some platforms assume that ISM pages are SE_SHARED
2065                  * locked for the entire life of the segment.
2066                  */
2067                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2068                         return (0);
2069                 /*
2070                  * Fall through to the F_INVAL case to load up the hat layer
2071                  * entries with the HAT_LOAD_LOCK flag.
2072                  */
2073 
2074                 /* FALLTHRU */
2075         case F_INVAL:
2076 
2077                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2078                         return (FC_NOMAP);
2079 
2080                 /*
2081                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2082                  * may still rely on this call to hat_share(). That
2083          * would imply that those HATs can fault on a
2084                  * HAT_LOAD_LOCK translation, which would seem
2085                  * contradictory.
2086                  */
2087                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2088                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2089                             curspt->a_hat, sptseg->s_base,
2090                             sptseg->s_size, sptseg->s_szc) != 0) {
2091                                 panic("hat_share error in ISM fault");
2092                                 /*NOTREACHED*/
2093                         }
2094                         return (0);
2095                 }
2096                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2097 
2098                 /*
2099                  * There is no need to lock the real seg here, because
2100                  * all of our work will be on the underlying
2101                  * dummy seg.
2102                  *
2103                  * sptseg_addr and npages now account for large pages.
2104                  */
2105                 amp = sptd->spt_amp;
2106                 ASSERT(amp != NULL);
2107                 anon_index = seg_page(sptseg, sptseg_addr);
2108 
2109                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2110                 for (i = 0; i < npages; i++) {
2111                         ap = anon_get_ptr(amp->ahp, anon_index++);
2112                         ASSERT(ap != NULL);
2113                         swap_xlate(ap, &vp, &offset);
2114                         pp = page_lookup(vp, offset, SE_SHARED);
2115                         ASSERT(pp != NULL);
2116                         ppa[i] = pp;
2117                 }
2118                 ANON_LOCK_EXIT(&amp->a_rwlock);
2119                 ASSERT(i == npages);
2120 
2121                 /*
2122                  * We are already holding the as->a_lock on the user's
2123                  * real segment, but we need to hold the a_lock on the
2124                  * underlying dummy as. This is mostly to satisfy the
2125                  * underlying HAT layer.
2126                  */
2127                 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2128                 a = sptseg_addr;
2129                 pidx = 0;
2130                 if (type == F_SOFTLOCK) {
2131                         /*
2132                          * Load up the translation keeping it
2133                          * locked and don't unlock the page.
2134                          */
2135                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2136                                 sz = MIN(pgsz, ptob(npages - pidx));
2137                                 hat_memload_array(sptseg->s_as->a_hat, a,
2138                                     sz, &ppa[pidx], sptd->spt_prot,
2139                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2140                         }
2141                 } else {
2142                         /*
2143                          * Migrate pages marked for migration.
2144                          */
2145                         if (lgrp_optimizations())
2146                                 page_migrate(seg, shm_addr, ppa, npages);
2147 
2148                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2149                                 sz = MIN(pgsz, ptob(npages - pidx));
2150                                 hat_memload_array(sptseg->s_as->a_hat,
2151                                     a, sz, &ppa[pidx],
2152                                     sptd->spt_prot, HAT_LOAD_SHARE);
2153                         }
2154 
2155                         /*
2156                          * And now drop the SE_SHARED lock(s).
2157                          */
2158                         for (i = 0; i < npages; i++)
2159                                 page_unlock(ppa[i]);
2160                 }
2161                 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2162 
2163                 kmem_free(ppa, sizeof (page_t *) * npages);
2164                 return (0);
2165         case F_SOFTUNLOCK:
2166 
2167                 /*
2168                  * This is a bit ugly: we pass in the real seg pointer,
2169                  * but the sptseg_addr is the virtual address within the
2170                  * dummy seg.
2171                  */
2172                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2173                 return (0);
2174 
2175         case F_PROT:
2176 
2177                 /*
2178                  * This takes care of the unusual case where a user
2179                  * allocates a stack in shared memory and a register
2180                  * window overflow is written to that stack page before
2181                  * it is otherwise modified.
2182                  *
2183                  * We can get away with this because ISM segments are
2184                  * always rw. Other than this unusual case, there
2185                  * should be no instances of protection violations.
2186                  */
2187                 return (0);
2188 
2189         default:
2190 #ifdef DEBUG
2191                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2192 #endif
2193                 return (FC_NOMAP);
2194         }
2195 }
2196 
2197 /*ARGSUSED*/
2198 static faultcode_t
2199 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2200 {
2201         return (0);
2202 }
2203 
2204 /*ARGSUSED*/
2205 static int
2206 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2207 {
2208         return (0);
2209 }
2210 
2211 /*
2212  * duplicate the shared page tables
2213  */
2214 int
2215 segspt_shmdup(struct seg *seg, struct seg *newseg)
2216 {
2217         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2218         struct anon_map         *amp = shmd->shm_amp;
2219         struct shm_data         *shmd_new;
2220         struct seg              *spt_seg = shmd->shm_sptseg;
2221         struct spt_data         *sptd = spt_seg->s_data;
2222         int                     error = 0;
2223 
2224         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2225 
2226         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2227         newseg->s_data = (void *)shmd_new;
2228         shmd_new->shm_sptas = shmd->shm_sptas;
2229         shmd_new->shm_amp = amp;
2230         shmd_new->shm_sptseg = shmd->shm_sptseg;
2231         newseg->s_ops = &segspt_shmops;
2232         newseg->s_szc = seg->s_szc;
2233         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2234 
2235         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2236         amp->refcnt++;
2237         ANON_LOCK_EXIT(&amp->a_rwlock);
2238 
2239         if (sptd->spt_flags & SHM_PAGEABLE) {
2240                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2241                 shmd_new->shm_lckpgs = 0;
2242                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2243                         if ((error = hat_share(newseg->s_as->a_hat,
2244                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2245                             seg->s_size, seg->s_szc)) != 0) {
2246                                 kmem_free(shmd_new->shm_vpage,
2247                                     btopr(amp->size));
2248                         }
2249                 }
2250                 return (error);
2251         } else {
2252                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2253                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2254                     seg->s_szc));
2255 
2256         }
2257 }
2258 
2259 /*ARGSUSED*/
2260 int
2261 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2262 {
2263         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2264         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2265 
2266         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2267 
2268         /*
2269          * ISM segment is always rw.
2270          */
2271         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2272 }
2273 
2274 /*
2275  * Return an array of locked large pages, for empty slots allocate
2276  * private zero-filled anon pages.
2277  */
2278 static int
2279 spt_anon_getpages(
2280         struct seg *sptseg,
2281         caddr_t sptaddr,
2282         size_t len,
2283         page_t *ppa[])
2284 {
2285         struct  spt_data *sptd = sptseg->s_data;
2286         struct  anon_map *amp = sptd->spt_amp;
2287         enum    seg_rw rw = sptd->spt_prot;
2288         uint_t  szc = sptseg->s_szc;
2289         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2290         pgcnt_t lp_npgs;
2291         caddr_t lp_addr, e_sptaddr;
2292         uint_t  vpprot, ppa_szc = 0;
2293         struct  vpage *vpage = NULL;
2294         ulong_t j, ppa_idx;
2295         int     err, ierr = 0;
2296         pgcnt_t an_idx;
2297         anon_sync_obj_t cookie;
2298         int anon_locked = 0;
2299         pgcnt_t amp_pgs;
2300 
2301 
2302         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2303         ASSERT(len != 0);
2304 
2305         pg_sz = share_sz;
2306         lp_npgs = btop(pg_sz);
2307         lp_addr = sptaddr;
2308         e_sptaddr = sptaddr + len;
2309         an_idx = seg_page(sptseg, sptaddr);
2310         ppa_idx = 0;
2311 
2312         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2313 
2314         amp_pgs = page_get_pagecnt(amp->a_szc);
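             /*
              * amp_pgs is the number of PAGESIZE pages in one large page at
              * the anon map's page size (a_szc); the loop below holds the
              * anon array lock for one such chunk at a time, dropping and
              * re-entering it whenever an_idx crosses a chunk boundary.
              */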
2315 
2316         /*CONSTCOND*/
2317         while (1) {
2318                 for (; lp_addr < e_sptaddr;
2319                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2320 
2321                         /*
2322                          * If we're currently locked and we cross into a
2323                          * new anon chunk, unlock the current anon chunk.
2324                          */
2325                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2326                                 anon_array_exit(&cookie);
2327                                 anon_locked = 0;
2328                         }
2329                         if (!anon_locked) {
2330                                 anon_array_enter(amp, an_idx, &cookie);
2331                                 anon_locked = 1;
2332                         }
2333                         ppa_szc = (uint_t)-1;
2334                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2335                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2336                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2337 
2338                         if (ierr != 0) {
2339                                 if (ierr > 0) {
2340                                         err = FC_MAKE_ERR(ierr);
2341                                         goto lpgs_err;
2342                                 }
2343                                 break;
2344                         }
2345                 }
2346                 if (lp_addr == e_sptaddr) {
2347                         break;
2348                 }
2349                 ASSERT(lp_addr < e_sptaddr);
2350 
2351                 /*
2352                  * ierr == -1 means we failed to allocate a large page,
2353                  * so do a size down operation.
2354                  *
2355                  * ierr == -2 means some other process that privately shares
2356                  * pages with this process has allocated a larger page and we
2357                  * need to retry with larger pages. So do a size up
2358                  * operation. This relies on the fact that large pages are
2359                  * never partially shared i.e. if we share any constituent
2360                  * page of a large page with another process we must share the
2361                  * entire large page. Note this cannot happen for SOFTLOCK
2362                  * case, unless the current address (lp_addr) is at the beginning
2363                  * of the next page size boundary because the other process
2364                  * couldn't have relocated locked pages.
2365                  */
2366                 ASSERT(ierr == -1 || ierr == -2);
2367                 if (segvn_anypgsz) {
2368                         ASSERT(ierr == -2 || szc != 0);
2369                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2370                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2371                 } else {
2372                         /*
2373                          * For faults and segvn_anypgsz == 0
2374                          * we need to be careful not to loop forever
2375                          * if an existing page is found with a szc other
2376                          * than 0 or seg->s_szc. This could be due
2377                          * to page relocations on behalf of DR or
2378                          * more likely large page creation. For this
2379                          * case simply re-size to existing page's szc
2380                          * if returned by anon_map_getpages().
2381                          */
2382                         if (ppa_szc == (uint_t)-1) {
2383                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2384                         } else {
2385                                 ASSERT(ppa_szc <= sptseg->s_szc);
2386                                 ASSERT(ierr == -2 || ppa_szc < szc);
2387                                 ASSERT(ierr == -1 || ppa_szc > szc);
2388                                 szc = ppa_szc;
2389                         }
2390                 }
2391                 pg_sz = page_get_pagesize(szc);
2392                 lp_npgs = btop(pg_sz);
2393                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2394         }
2395         if (anon_locked) {
2396                 anon_array_exit(&cookie);
2397         }
2398         ANON_LOCK_EXIT(&amp->a_rwlock);
2399         return (0);
2400 
2401 lpgs_err:
2402         if (anon_locked) {
2403                 anon_array_exit(&cookie);
2404         }
2405         ANON_LOCK_EXIT(&amp->a_rwlock);
2406         for (j = 0; j < ppa_idx; j++)
2407                 page_unlock(ppa[j]);
2408         return (err);
2409 }
2410 
2411 /*
2412  * count the number of bytes in a set of spt pages that are currently not
2413  * locked
2414  */
2415 static rctl_qty_t
2416 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2417 {
2418         ulong_t i;
2419         rctl_qty_t unlocked = 0;
2420 
2421         for (i = 0; i < npages; i++) {
2422                 if (ppa[i]->p_lckcnt == 0)
2423                         unlocked += PAGESIZE;
2424         }
2425         return (unlocked);
2426 }
2427 
2428 extern  u_longlong_t randtick(void);
2429 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2430 #define NLCK    (NCPU_P2)
2431 /* Random number with a range [0, n-1], n must be power of two */
2432 #define RAND_P2(n)      \
2433         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
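     /*
      * spt_lockpages() below reserves a randomized batch of
      * NLCK + RAND_P2(NLCK) locks (i.e. between NLCK and 2 * NLCK - 1) from
      * availrmem at a time, so that concurrent lockers update the global
      * counters under freemem_lock only once per batch instead of once per
      * page.
      */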
2434 
2435 int
2436 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2437     page_t **ppa, ulong_t *lockmap, size_t pos,
2438     rctl_qty_t *locked)
2439 {
2440         struct  shm_data *shmd = seg->s_data;
2441         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2442         ulong_t i;
2443         int     kernel;
2444         pgcnt_t nlck = 0;
2445         int     rv = 0;
2446         int     use_reserved = 1;
2447 
2448         /* return the number of bytes actually locked */
2449         *locked = 0;
2450 
2451         /*
2452          * To avoid contention on freemem_lock, availrmem and pages_locked
2453          * global counters are updated only every nlck locked pages instead of
2454          * every time.  Reserve nlck locks up front and deduct from this
2455          * reservation for each page that requires a lock.  When the reservation
2456          * is consumed, reserve again.  nlck is randomized, so the competing
2457          * threads do not fall into a cyclic lock contention pattern. When
2458          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2459          * is used to lock pages.
2460          */
2461         for (i = 0; i < npages; anon_index++, pos++, i++) {
2462                 if (nlck == 0 && use_reserved == 1) {
2463                         nlck = NLCK + RAND_P2(NLCK);
2464                         /* if fewer loops left, decrease nlck */
2465                         nlck = MIN(nlck, npages - i);
2466                         /*
2467                          * Reserve nlck locks up front and deduct from this
2468                          * reservation for each page that requires a lock.  When
2469                          * the reservation is consumed, reserve again.
2470                          */
2471                         mutex_enter(&freemem_lock);
2472                         if ((availrmem - nlck) < pages_pp_maximum) {
2473                                 /* Do not do advance memory reserves */
2474                                 use_reserved = 0;
2475                         } else {
2476                                 availrmem       -= nlck;
2477                                 pages_locked    += nlck;
2478                         }
2479                         mutex_exit(&freemem_lock);
2480                 }
2481                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2482                         if (sptd->spt_ppa_lckcnt[anon_index] <
2483                             (ushort_t)DISM_LOCK_MAX) {
2484                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2485                                     (ushort_t)DISM_LOCK_MAX) {
2486                                         cmn_err(CE_WARN,
2487                                             "DISM page lock limit "
2488                                             "reached on DISM offset 0x%lx\n",
2489                                             anon_index << PAGESHIFT);
2490                                 }
2491                                 kernel = (sptd->spt_ppa &&
2492                                     sptd->spt_ppa[anon_index]);
2493                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2494                                     use_reserved)) {
2495                                         sptd->spt_ppa_lckcnt[anon_index]--;
2496                                         rv = EAGAIN;
2497                                         break;
2498                                 }
2499                                 /* if this is a newly locked page, count it */
2500                                 if (ppa[i]->p_lckcnt == 1) {
2501                                         if (kernel == 0 && use_reserved == 1)
2502                                                 nlck--;
2503                                         *locked += PAGESIZE;
2504                                 }
2505                                 shmd->shm_lckpgs++;
2506                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2507                                 if (lockmap != NULL)
2508                                         BT_SET(lockmap, pos);
2509                         }
2510                 }
2511         }
2512         /* Return unused lock reservation */
2513         if (nlck != 0 && use_reserved == 1) {
2514                 mutex_enter(&freemem_lock);
2515                 availrmem       += nlck;
2516                 pages_locked    -= nlck;
2517                 mutex_exit(&freemem_lock);
2518         }
2519 
2520         return (rv);
2521 }
2522 
2523 int
2524 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2525     rctl_qty_t *unlocked)
2526 {
2527         struct shm_data *shmd = seg->s_data;
2528         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2529         struct anon_map *amp = sptd->spt_amp;
2530         struct anon     *ap;
2531         struct vnode    *vp;
2532         u_offset_t      off;
2533         struct page     *pp;
2534         int             kernel;
2535         anon_sync_obj_t cookie;
2536         ulong_t         i;
2537         pgcnt_t         nlck = 0;
2538         pgcnt_t         nlck_limit = NLCK;
2539 
2540         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2541         for (i = 0; i < npages; i++, anon_index++) {
2542                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2543                         anon_array_enter(amp, anon_index, &cookie);
2544                         ap = anon_get_ptr(amp->ahp, anon_index);
2545                         ASSERT(ap);
2546 
2547                         swap_xlate(ap, &vp, &off);
2548                         anon_array_exit(&cookie);
2549                         pp = page_lookup(vp, off, SE_SHARED);
2550                         ASSERT(pp);
2551                         /*
2552                          * availrmem is decremented only for pages which are
2553                          * not in the seg pcache; for pages in the seg pcache,
2554                          * availrmem was already decremented in _dismpagelock().
2555                          */
2556                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2557                         ASSERT(pp->p_lckcnt > 0);
2558 
2559                         /*
2560                          * Unlock the page but do not change availrmem;
2561                          * we do it ourselves every nlck loops.
2562                          */
2563                         page_pp_unlock(pp, 0, 1);
2564                         if (pp->p_lckcnt == 0) {
2565                                 if (kernel == 0)
2566                                         nlck++;
2567                                 *unlocked += PAGESIZE;
2568                         }
2569                         page_unlock(pp);
2570                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2571                         sptd->spt_ppa_lckcnt[anon_index]--;
2572                         shmd->shm_lckpgs--;
2573                 }
2574 
2575                 /*
2576                  * To reduce freemem_lock contention, do not update availrmem
2577                  * until at least NLCK pages have been unlocked.
2578                  * 1. No need to update if nlck is zero
2579                  * 2. Always update on the last iteration
2580                  */
2581                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2582                         mutex_enter(&freemem_lock);
2583                         availrmem       += nlck;
2584                         pages_locked    -= nlck;
2585                         mutex_exit(&freemem_lock);
2586                         nlck = 0;
2587                         nlck_limit = NLCK + RAND_P2(NLCK);
2588                 }
2589         }
2590         ANON_LOCK_EXIT(&amp->a_rwlock);
2591 
2592         return (0);
2593 }
2594 
2595 /*ARGSUSED*/
2596 static int
2597 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2598     int attr, int op, ulong_t *lockmap, size_t pos)
2599 {
2600         struct shm_data *shmd = seg->s_data;
2601         struct seg      *sptseg = shmd->shm_sptseg;
2602         struct spt_data *sptd = sptseg->s_data;
2603         struct kshmid   *sp = sptd->spt_amp->a_sp;
2604         pgcnt_t         npages, a_npages;
2605         page_t          **ppa;
2606         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2607         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2608         size_t          a_len;                  /* aligned len */
2609         size_t          share_sz;
2610         ulong_t         i;
2611         int             sts = 0;
2612         rctl_qty_t      unlocked = 0;
2613         rctl_qty_t      locked = 0;
2614         struct proc     *p = curproc;
2615         kproject_t      *proj;
2616 
2617         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2618         ASSERT(sp != NULL);
2619 
2620         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2621                 return (0);
2622         }
2623 
2624         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2625         an_idx = seg_page(seg, addr);
2626         npages = btopr(len);
2627 
2628         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2629                 return (ENOMEM);
2630         }
2631 
2632         /*
2633          * A shm's project never changes, so no lock needed.
2634          * The shm has a hold on the project, so it will not go away.
2635          * Since we have a mapping to shm within this zone, we know
2636          * that the zone will not go away.
2637          */
2638         proj = sp->shm_perm.ipc_proj;
2639 
2640         if (op == MC_LOCK) {
2641 
2642                 /*
2643                  * Need to align the addr and size request if they are not
2644                  * aligned, so we can always allocate large page(s); however,
2645                  * we only lock what was asked for in the initial request.
2646                  */
2647                 share_sz = page_get_pagesize(sptseg->s_szc);
2648                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2649                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2650                     share_sz);
2651                 a_npages = btop(a_len);
2652                 a_an_idx = seg_page(seg, a_addr);
2653                 spt_addr = sptseg->s_base + ptob(a_an_idx);
2654                 ppa_idx = an_idx - a_an_idx;
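                     /*
                      * Worked example (hypothetical numbers): with a 4 MB
                      * underlying page size, a request of addr = base + 5 MB,
                      * len = 1 MB is widened to a_addr = base + 4 MB and
                      * a_len = 4 MB so a large page can be faulted in, while
                      * ppa_idx skips the leading 1 MB of that range so only
                      * the originally requested pages are locked below.
                      */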
2655 
2656                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2657                     KM_NOSLEEP)) == NULL) {
2658                         return (ENOMEM);
2659                 }
2660 
2661                 /*
2662                  * Don't cache any new pages for I/O and flush any
2663                  * pages that are already cached.
2664                  */
2665                 mutex_enter(&sptd->spt_lock);
2666                 if (sptd->spt_ppa != NULL)
2667                         sptd->spt_flags |= DISM_PPA_CHANGED;
2668 
2669                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2670                 if (sts != 0) {
2671                         mutex_exit(&sptd->spt_lock);
2672                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2673                         return (sts);
2674                 }
2675 
2676                 mutex_enter(&sp->shm_mlock);
2677                 /* enforce locked memory rctl */
2678                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2679 
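                     /*
                      * Charge the project's locked-memory rctl up front for
                      * every byte that is not yet locked; if some pages later
                      * fail to lock, the difference is refunded after
                      * spt_lockpages() below.
                      */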
2680                 mutex_enter(&p->p_lock);
2681                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2682                         mutex_exit(&p->p_lock);
2683                         sts = EAGAIN;
2684                 } else {
2685                         mutex_exit(&p->p_lock);
2686                         sts = spt_lockpages(seg, an_idx, npages,
2687                             &ppa[ppa_idx], lockmap, pos, &locked);
2688 
2689                         /*
2690                          * Refund the rctl charge for any pages that
2691                          * could not be locked.
2692                          */
2693                         if ((unlocked - locked) > 0) {
2694                                 rctl_decr_locked_mem(NULL, proj,
2695                                     (unlocked - locked), 0);
2696                         }
2697                 }
2698                 /*
2699                  * unlock pages
2700                  */
2701                 for (i = 0; i < a_npages; i++)
2702                         page_unlock(ppa[i]);
2703                 if (sptd->spt_ppa != NULL)
2704                         sptd->spt_flags |= DISM_PPA_CHANGED;
2705                 mutex_exit(&sp->shm_mlock);
2706                 mutex_exit(&sptd->spt_lock);
2707 
2708                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2709 
2710         } else if (op == MC_UNLOCK) { /* unlock */
2711                 page_t          **ppa;
2712 
2713                 mutex_enter(&sptd->spt_lock);
2714                 if (shmd->shm_lckpgs == 0) {
2715                         mutex_exit(&sptd->spt_lock);
2716                         return (0);
2717                 }
2718                 /*
2719                  * Don't cache new I/O pages.
2720                  */
2721                 if (sptd->spt_ppa != NULL)
2722                         sptd->spt_flags |= DISM_PPA_CHANGED;
2723 
2724                 mutex_enter(&sp->shm_mlock);
2725                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2726                 if ((ppa = sptd->spt_ppa) != NULL)
2727                         sptd->spt_flags |= DISM_PPA_CHANGED;
2728                 mutex_exit(&sptd->spt_lock);
2729 
2730                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2731                 mutex_exit(&sp->shm_mlock);
2732 
2733                 if (ppa != NULL)
2734                         seg_ppurge_wiredpp(ppa);
2735         }
2736         return (sts);
2737 }
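
     /*
      * Illustrative only: segspt_shmlockop() is the lock/unlock backend for
      * DISM pages.  A user process would typically reach it roughly as
      * follows (shmid obtained elsewhere via shmget(2); error handling
      * omitted):
      *
      *      addr = shmat(shmid, NULL, SHM_PAGEABLE);         attach as DISM
      *      (void) memcntl(addr, len, MC_LOCK, 0, 0, 0);     -> op == MC_LOCK
      *      (void) memcntl(addr, len, MC_UNLOCK, 0, 0, 0);   -> op == MC_UNLOCK
      *
      * The requests travel through memcntl(2) (or mlock(3C)/munlock(3C)) and
      * as_ctl() before arriving at this segment driver.
      */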
2738 
2739 /*ARGSUSED*/
2740 int
2741 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2742 {
2743         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2744         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2745         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2746 
2747         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2748 
2749         /*
2750          * All pages of an ISM segment share the same protections (spt_prot).
2751          */
2752         while (--pgno >= 0)
2753                 *protv++ = sptd->spt_prot;
2754         return (0);
2755 }
2756 
2757 /*ARGSUSED*/
2758 u_offset_t
2759 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2760 {
2761         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2762 
2763         /* Offset does not matter in ISM memory */
2764 
2765         return ((u_offset_t)0);
2766 }
2767 
2768 /* ARGSUSED */
2769 int
2770 segspt_shmgettype(struct seg *seg, caddr_t addr)
2771 {
2772         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2773         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2774 
2775         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2776 
2777         /*
2778          * The shared memory mapping is always MAP_SHARED; swap is only
2779          * reserved for DISM, so ISM also reports MAP_NORESERVE.
2780          */
2781         return (MAP_SHARED |
2782             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2783 }
2784 
2785 /*ARGSUSED*/
2786 int
2787 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2788 {
2789         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2790         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2791 
2792         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2793 
2794         *vpp = sptd->spt_vp;
2795         return (0);
2796 }
2797 
2798 /*
2799  * We need to wait for pending I/O to a DISM segment to complete so that its
2800  * pages can get kicked out of the seg_pcache.  120 seconds should be more
2801  * than enough time to wait.
2802  */
2803 static clock_t spt_pcache_wait = 120;
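
     /*
      * The value is in seconds; segspt_shmadvise() below bounds its wait by
      * converting it to clock ticks (hz * spt_pcache_wait).  It is a tunable
      * that can be patched if the default proves too short.
      */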
2804 
2805 /*ARGSUSED*/
2806 static int
2807 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2808 {
2809         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2810         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2811         struct anon_map *amp;
2812         pgcnt_t pg_idx;
2813         ushort_t gen;
2814         clock_t end_lbolt;
2815         int writer;
2816         page_t **ppa;
2817 
2818         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2819 
2820         if (behav == MADV_FREE) {
2821                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2822                         return (0);
2823 
2824                 amp = sptd->spt_amp;
2825                 pg_idx = seg_page(seg, addr);
2826 
2827                 mutex_enter(&sptd->spt_lock);
2828                 if ((ppa = sptd->spt_ppa) == NULL) {
2829                         mutex_exit(&sptd->spt_lock);
2830                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2831                         anon_disclaim(amp, pg_idx, len);
2832                         ANON_LOCK_EXIT(&amp->a_rwlock);
2833                         return (0);
2834                 }
2835 
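                     /*
                      * A cached page array exists: flag it stale and remember
                      * the current generation so the wait loop below can tell
                      * when seg_pcache has actually dropped its hold.
                      */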
2836                 sptd->spt_flags |= DISM_PPA_CHANGED;
2837                 gen = sptd->spt_gen;
2838 
2839                 mutex_exit(&sptd->spt_lock);
2840 
2841                 /*
2842                  * Purge all DISM cached pages
2843                  */
2844                 seg_ppurge_wiredpp(ppa);
2845 
2846                 /*
2847                  * Drop the AS_LOCK so that other threads can grab it
2848                  * in the as_pageunlock path and hopefully get the segment
2849                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2850                  * to keep this segment resident.
2851                  */
2852                 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2853                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2854                 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2855 
2856                 mutex_enter(&sptd->spt_lock);
2857 
2858                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2859 
2860                 /*
2861                  * Try to wait for pages to get kicked out of the seg_pcache.
2862                  */
2863                 while (sptd->spt_gen == gen &&
2864                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2865                     ddi_get_lbolt() < end_lbolt) {
2866                         if (!cv_timedwait_sig(&sptd->spt_cv,
2867                             &sptd->spt_lock, end_lbolt)) {
2868                                 break;
2869                         }
2870                 }
2871 
2872                 mutex_exit(&sptd->spt_lock);
2873 
2874                 /* Regrab the AS_LOCK and release our hold on the segment */
2875                 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2876                     writer ? RW_WRITER : RW_READER);
2877                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
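                     /*
                      * If that was the last softlock hold, wake up any thread
                      * blocked in as_unmap(); AS_ISUNMAPWAIT is rechecked
                      * under a_contents so the wakeup cannot be missed.
                      */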
2878                 if (shmd->shm_softlockcnt <= 0) {
2879                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2880                                 mutex_enter(&seg->s_as->a_contents);
2881                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2882                                         AS_CLRUNMAPWAIT(seg->s_as);
2883                                         cv_broadcast(&seg->s_as->a_cv);
2884                                 }
2885                                 mutex_exit(&seg->s_as->a_contents);
2886                         }
2887                 }
2888 
2889                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2890                 anon_disclaim(amp, pg_idx, len);
2891                 ANON_LOCK_EXIT(&amp->a_rwlock);
2892         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2893             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2894                 int                     already_set;
2895                 ulong_t                 anon_index;
2896                 lgrp_mem_policy_t       policy;
2897                 caddr_t                 shm_addr;
2898                 size_t                  share_size;
2899                 size_t                  size;
2900                 struct seg              *sptseg = shmd->shm_sptseg;
2901                 caddr_t                 sptseg_addr;
2902 
2903                 /*
2904                  * Align address and length to page size of underlying segment
2905                  */
2906                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2907                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2908                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2909                     share_size);
2910 
2911                 amp = shmd->shm_amp;
2912                 anon_index = seg_page(seg, shm_addr);
2913 
2914                 /*
2915                  * Adjust size downward if the aligned request extends
2916                  * past the realsize of the segment (the initial anon
2917                  * allocation).
2918                  */
2919                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2920                 if ((sptseg_addr + size) >
2921                     (sptseg->s_base + sptd->spt_realsize))
2922                         size = (sptseg->s_base + sptd->spt_realsize) -
2923                             sptseg_addr;
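                     /*
                      * (spt_realsize can be smaller than the sptseg's size
                      * when the latter was rounded up to a large-page
                      * boundary, so avoid acting on that padding.)
                      */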
2924 
2925                 /*
2926                  * Set memory allocation policy for this segment
2927                  */
2928                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2929                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2930                     NULL, 0, len);
2931 
2932                 /*
2933                  * If the policy is already set and is not one that must
2934                  * be reapplied, don't bother setting it again.
2935                  */
2936                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2937                         return (0);
2938 
2939                 /*
2940                  * Mark any existing pages in the given range for
2941                  * migration, first flushing the I/O page cache if
2942                  * needed, and use the underlying segment to calculate
2943                  * the anon index and obtain the anon map and vnode.
2944                  */
2945                 if (shmd->shm_softlockcnt > 0)
2946                         segspt_purge(seg);
2947 
2948                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2949         }
2950 
2951         return (0);
2952 }
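
     /*
      * Illustrative only: both branches of segspt_shmadvise() above are
      * driven by madvise(3C) on an attached DISM/ISM segment, e.g.:
      *
      *      (void) madvise(addr, len, MADV_FREE);        give back unneeded pages
      *      (void) madvise(addr, len, MADV_ACCESS_LWP);  lgroup placement hint
      *
      * MADV_FREE is honored only for DISM (SHM_PAGEABLE); the MADV_ACCESS_*
      * hints set the lgroup allocation policy and mark existing pages for
      * migration.
      */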
2953 
2954 /*ARGSUSED*/
2955 void
2956 segspt_shmdump(struct seg *seg)
2957 {
2958         /* no-op for ISM segment */
2959 }
2960 
2961 /*ARGSUSED*/
2962 static faultcode_t
2963 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2964 {
2965         return (ENOTSUP);
2966 }
2967 
2968 /*
2969  * Get a memory ID for an address in a given segment.
2970  */
2971 static int
2972 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2973 {
2974         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2975         struct anon     *ap;
2976         size_t          anon_index;
2977         struct anon_map *amp = shmd->shm_amp;
2978         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2979         struct seg      *sptseg = shmd->shm_sptseg;
2980         anon_sync_obj_t cookie;
2981 
2982         anon_index = seg_page(seg, addr);
2983 
2984         if (addr > (seg->s_base + sptd->spt_realsize)) {
2985                 return (EFAULT);
2986         }
2987 
2988         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2989         anon_array_enter(amp, anon_index, &cookie);
2990         ap = anon_get_ptr(amp->ahp, anon_index);
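             /*
              * If no anon slot exists yet (the page has never been touched),
              * allocate a zero-filled page now so that a stable memory ID
              * can still be returned for this address.
              */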
2991         if (ap == NULL) {
2992                 struct page *pp;
2993                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2994 
2995                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2996                 if (pp == NULL) {
2997                         anon_array_exit(&cookie);
2998                         ANON_LOCK_EXIT(&amp->a_rwlock);
2999                         return (ENOMEM);
3000                 }
3001                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3002                 page_unlock(pp);
3003         }
3004         anon_array_exit(&cookie);
3005         ANON_LOCK_EXIT(&amp->a_rwlock);
3006         memidp->val[0] = (uintptr_t)ap;
3007         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3008         return (0);
3009 }
3010 
3011 /*
3012  * Get memory allocation policy info for specified address in given segment
3013  */
3014 static lgrp_mem_policy_info_t *
3015 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3016 {
3017         struct anon_map         *amp;
3018         ulong_t                 anon_index;
3019         lgrp_mem_policy_info_t  *policy_info;
3020         struct shm_data         *shm_data;
3021 
3022         ASSERT(seg != NULL);
3023 
3024         /*
3025          * Get the anon_map from the segshm private data.
3026          *
3027          * Assume that no lock needs to be held on the anon_map, since
3028          * it is protected by its reference count, which must be
3029          * nonzero for an existing segment.
3030          * The readers lock on the policy tree still needs to be taken.
3031          */
3032         shm_data = (struct shm_data *)seg->s_data;
3033         if (shm_data == NULL)
3034                 return (NULL);
3035         amp = shm_data->shm_amp;
3036         ASSERT(amp->refcnt != 0);
3037 
3038         /*
3039          * Get policy info
3040          *
3041          * Assume starting anon index of 0
3042          */
3043         anon_index = seg_page(seg, addr);
3044         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3045 
3046         return (policy_info);
3047 }