1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 
  25 #include <sys/param.h>
  26 #include <sys/user.h>
  27 #include <sys/mman.h>
  28 #include <sys/kmem.h>
  29 #include <sys/sysmacros.h>
  30 #include <sys/cmn_err.h>
  31 #include <sys/systm.h>
  32 #include <sys/tuneable.h>
  33 #include <vm/hat.h>
  34 #include <vm/seg.h>
  35 #include <vm/as.h>
  36 #include <vm/anon.h>
  37 #include <vm/page.h>
  38 #include <sys/buf.h>
  39 #include <sys/swap.h>
  40 #include <sys/atomic.h>
  41 #include <vm/seg_spt.h>
  42 #include <sys/debug.h>
  43 #include <sys/vtrace.h>
  44 #include <sys/shm.h>
  45 #include <sys/shm_impl.h>
  46 #include <sys/lgrp.h>
  47 #include <sys/vmsystm.h>
  48 #include <sys/policy.h>
  49 #include <sys/project.h>
  50 #include <sys/tnf_probe.h>
  51 #include <sys/zone.h>
  52 
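/*
 * All spt segments are mapped at a fixed (dummy) address of 0 within the
 * private "dummy" address space created for them by sptcreate() below.
 */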
  53 #define SEGSPTADDR      (caddr_t)0x0
  54 
  55 /*
  56  * # pages used for spt
  57  */
  58 size_t  spt_used;
  59 
/*
 * segspt_minfree is the memory left for the system after ISM
 * locks its pages; it is set to 5% of availrmem in sptcreate()
 * when ISM is created.  ISM should not use more than ~90% of
 * availrmem; if it does, the performance of the system may
 * decrease.  Machines with large memories may be able to use more
 * memory for ISM, so we set the default segspt_minfree to 5%
 * (which gives ISM at most 95% of availrmem).  If somebody wants
 * even more memory for ISM (risking hanging the system) they can
 * patch segspt_minfree to a smaller number.
 */
  71 pgcnt_t segspt_minfree = 0;
  72 
  73 static int segspt_create(struct seg *seg, caddr_t argsp);
  74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
  75 static void segspt_free(struct seg *seg);
  76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
  77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
  78 
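/*
 * Operations that should never be called on the underlying spt segment
 * itself are routed to segspt_badop(), which panics; see segspt_ops below.
 */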
  79 static void
  80 segspt_badop()
  81 {
  82         panic("segspt_badop called");
  83         /*NOTREACHED*/
  84 }
  85 
  86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
  87 
  88 struct seg_ops segspt_ops = {
  89         SEGSPT_BADOP(int),              /* dup */
  90         segspt_unmap,
  91         segspt_free,
  92         SEGSPT_BADOP(int),              /* fault */
  93         SEGSPT_BADOP(faultcode_t),      /* faulta */
  94         SEGSPT_BADOP(int),              /* setprot */
  95         SEGSPT_BADOP(int),              /* checkprot */
  96         SEGSPT_BADOP(int),              /* kluster */
  97         SEGSPT_BADOP(int),              /* sync */
  98         SEGSPT_BADOP(size_t),           /* incore */
  99         SEGSPT_BADOP(int),              /* lockop */
 100         SEGSPT_BADOP(int),              /* getprot */
 101         SEGSPT_BADOP(u_offset_t),       /* getoffset */
 102         SEGSPT_BADOP(int),              /* gettype */
 103         SEGSPT_BADOP(int),              /* getvp */
 104         SEGSPT_BADOP(int),              /* advise */
 105         SEGSPT_BADOP(void),             /* dump */
 106         SEGSPT_BADOP(int),              /* pagelock */
 107         SEGSPT_BADOP(int),              /* setpgsz */
 108         SEGSPT_BADOP(int),              /* getmemid */
 109         segspt_getpolicy,               /* getpolicy */
 110         SEGSPT_BADOP(int),              /* capable */
 111         seg_inherit_notsup              /* inherit */
 112 };
 113 
 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
 116 static void segspt_shmfree(struct seg *seg);
 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
 118                 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
 121                         register size_t len, register uint_t prot);
 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
 123                         uint_t prot);
 124 static int      segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
 125 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
 126                         register char *vec);
 127 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
 128                         int attr, uint_t flags);
 129 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
 130                         int attr, int op, ulong_t *lockmap, size_t pos);
 131 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
 132                         uint_t *protv);
 133 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
 134 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
 135 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
 136 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
 137                         uint_t behav);
 138 static void segspt_shmdump(struct seg *seg);
 139 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
 140                         struct page ***, enum lock_type, enum seg_rw);
 141 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
 142 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
 143 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
 144 static int segspt_shmcapable(struct seg *, segcapability_t);
 145 
 146 struct seg_ops segspt_shmops = {
 147         segspt_shmdup,
 148         segspt_shmunmap,
 149         segspt_shmfree,
 150         segspt_shmfault,
 151         segspt_shmfaulta,
 152         segspt_shmsetprot,
 153         segspt_shmcheckprot,
 154         segspt_shmkluster,
 155         segspt_shmsync,
 156         segspt_shmincore,
 157         segspt_shmlockop,
 158         segspt_shmgetprot,
 159         segspt_shmgetoffset,
 160         segspt_shmgettype,
 161         segspt_shmgetvp,
 162         segspt_shmadvise,       /* advise */
 163         segspt_shmdump,
 164         segspt_shmpagelock,
 165         segspt_shmsetpgsz,
 166         segspt_shmgetmemid,
 167         segspt_shmgetpolicy,
 168         segspt_shmcapable,
 169         seg_inherit_notsup
 170 };
 171 
 172 static void segspt_purge(struct seg *seg);
 173 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
 174                 enum seg_rw, int);
 175 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
 176                 page_t **ppa);
 177 
 178 
 179 
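/*
 * Create the dummy address space and the shared page table (spt) segment
 * that will back an ISM/DISM shared memory segment.  On success the new
 * segment is returned through *sptseg.  A typical call (simplified) is:
 *
 *	err = sptcreate(size, &sptseg, amp, prot, flags, share_szc);
 */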
 180 /*ARGSUSED*/
 181 int
 182 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
 183         uint_t prot, uint_t flags, uint_t share_szc)
 184 {
 185         int     err;
 186         struct  as      *newas;
 187         struct  segspt_crargs sptcargs;
 188 
 189 #ifdef DEBUG
 190         TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
 191                         tnf_ulong, size, size );
 192 #endif
        if (segspt_minfree == 0)        /* leave min 5% of availrmem */
                segspt_minfree = availrmem/20;  /* for the system */
 195 
 196         if (!hat_supported(HAT_SHARED_PT, (void *)0))
 197                 return (EINVAL);
 198 
 199         /*
 200          * get a new as for this shared memory segment
 201          */
 202         newas = as_alloc();
 203         newas->a_proc = NULL;
 204         sptcargs.amp = amp;
 205         sptcargs.prot = prot;
 206         sptcargs.flags = flags;
 207         sptcargs.szc = share_szc;
 208         /*
 209          * create a shared page table (spt) segment
 210          */
 211 
 212         if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
 213                 as_free(newas);
 214                 return (err);
 215         }
 216         *sptseg = sptcargs.seg_spt;
 217         return (0);
 218 }
 219 
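/*
 * Tear down the dummy address space and spt segment created by sptcreate().
 */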
 220 void
 221 sptdestroy(struct as *as, struct anon_map *amp)
 222 {
 223 
 224 #ifdef DEBUG
 225         TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
 226 #endif
 227         (void) as_unmap(as, SEGSPTADDR, amp->size);
 228         as_free(as);
 229 }
 230 
 231 /*
 232  * called from seg_free().
 233  * free (i.e., unlock, unmap, return to free list)
 234  *  all the pages in the given seg.
 235  */
 236 void
 237 segspt_free(struct seg  *seg)
 238 {
 239         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 240 
 241         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 242 
 243         if (sptd != NULL) {
 244                 if (sptd->spt_realsize)
 245                         segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
 246 
                if (sptd->spt_ppa_lckcnt) {
                        kmem_free(sptd->spt_ppa_lckcnt,
                            sizeof (*sptd->spt_ppa_lckcnt)
                            * btopr(sptd->spt_amp->size));
                }
 251                 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
 252                 cv_destroy(&sptd->spt_cv);
 253                 mutex_destroy(&sptd->spt_lock);
 254                 kmem_free(sptd, sizeof (*sptd));
 255         }
 256 }
 257 
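/*
 * ISM/DISM pages are anonymous and never written back to a backing store,
 * so sync is a no-op for segments attached to a shared page table.
 */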
 258 /*ARGSUSED*/
 259 static int
 260 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
 261         uint_t flags)
 262 {
 263         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
 264 
 265         return (0);
 266 }
 267 
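/*
 * Report residency for mincore(2).  For ISM every page is resident and
 * locked, so the answer is immediate; for DISM (SHM_PAGEABLE) we walk the
 * anon map and check each page (and its DISM_PG_LOCKED state) individually.
 */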
 268 /*ARGSUSED*/
 269 static size_t
 270 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
 271 {
 272         caddr_t eo_seg;
 273         pgcnt_t npages;
 274         struct shm_data *shmd = (struct shm_data *)seg->s_data;
 275         struct seg      *sptseg;
 276         struct spt_data *sptd;
 277 
 278         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
 279 #ifdef lint
 280         seg = seg;
 281 #endif
 282         sptseg = shmd->shm_sptseg;
 283         sptd = sptseg->s_data;
 284 
 285         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 286                 eo_seg = addr + len;
 287                 while (addr < eo_seg) {
 288                         /* page exists, and it's locked. */
 289                         *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
 290                             SEG_PAGE_ANON;
 291                         addr += PAGESIZE;
 292                 }
 293                 return (len);
 294         } else {
 295                 struct  anon_map *amp = shmd->shm_amp;
 296                 struct  anon    *ap;
 297                 page_t          *pp;
 298                 pgcnt_t         anon_index;
 299                 struct vnode    *vp;
 300                 u_offset_t      off;
 301                 ulong_t         i;
 302                 int             ret;
 303                 anon_sync_obj_t cookie;
 304 
 305                 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 306                 anon_index = seg_page(seg, addr);
 307                 npages = btopr(len);
 308                 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
 309                         return (EINVAL);
 310                 }
 311                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
 312                 for (i = 0; i < npages; i++, anon_index++) {
 313                         ret = 0;
 314                         anon_array_enter(amp, anon_index, &cookie);
 315                         ap = anon_get_ptr(amp->ahp, anon_index);
 316                         if (ap != NULL) {
 317                                 swap_xlate(ap, &vp, &off);
 318                                 anon_array_exit(&cookie);
 319                                 pp = page_lookup_nowait(vp, off, SE_SHARED);
 320                                 if (pp != NULL) {
 321                                         ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
 322                                         page_unlock(pp);
 323                                 }
 324                         } else {
 325                                 anon_array_exit(&cookie);
 326                         }
 327                         if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
 328                                 ret |= SEG_PAGE_LOCKED;
 329                         }
 330                         *vec++ = (char)ret;
 331                 }
 332                 ANON_LOCK_EXIT(&amp->a_rwlock);
 333                 return (len);
 334         }
 335 }
 336 
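/*
 * Unmap the spt segment from the dummy address space.  Only unmapping of
 * the entire segment is supported; partial unmaps return EINVAL.
 */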
 337 static int
 338 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
 339 {
 340         size_t share_size;
 341 
 342         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 343 
        /*
         * seg.s_size may have been rounded up to the largest page size
         * in shmat().
         * XXX This should be cleaned up.  sptdestroy should take a length
         * argument, the same one passed to sptcreate.  Then this rounding
         * would not be needed (or would be done in shm.c) and only the
         * check for the full segment would remain.
         *
         * XXX -- shouldn't raddr == 0 always?  These tests don't seem
         * to be useful at all.
         */
 355         share_size = page_get_pagesize(seg->s_szc);
 356         ssize = P2ROUNDUP(ssize, share_size);
 357 
 358         if (raddr == seg->s_base && ssize == seg->s_size) {
 359                 seg_free(seg);
 360                 return (0);
 361         } else
 362                 return (EINVAL);
 363 }
 364 
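/*
 * Create the spt segment proper (called back from as_map() in sptcreate()).
 * For DISM (SHM_PAGEABLE) we only size the anon array and allocate the
 * per-page lock-count array; pages are created and locked later, on demand.
 * For ISM we create all of the anonymous pages up front, charge and lock
 * them, and load shared (and possibly locked) translations for the whole
 * segment.
 */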
 365 int
 366 segspt_create(struct seg *seg, caddr_t argsp)
 367 {
 368         int             err;
 369         caddr_t         addr = seg->s_base;
 370         struct spt_data *sptd;
 371         struct  segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
 372         struct anon_map *amp = sptcargs->amp;
 373         struct kshmid   *sp = amp->a_sp;
 374         struct  cred    *cred = CRED();
 375         ulong_t         i, j, anon_index = 0;
 376         pgcnt_t         npages = btopr(amp->size);
 377         struct vnode    *vp;
 378         page_t          **ppa;
 379         uint_t          hat_flags;
 380         size_t          pgsz;
 381         pgcnt_t         pgcnt;
 382         caddr_t         a;
 383         pgcnt_t         pidx;
 384         size_t          sz;
 385         proc_t          *procp = curproc;
 386         rctl_qty_t      lockedbytes = 0;
 387         kproject_t      *proj;
 388 
 389         /*
 390          * We are holding the a_lock on the underlying dummy as,
 391          * so we can make calls to the HAT layer.
 392          */
 393         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 394         ASSERT(sp != NULL);
 395 
 396 #ifdef DEBUG
 397         TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
 398             tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
 399 #endif
 400         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 401                 if (err = anon_swap_adjust(npages))
 402                         return (err);
 403         }
 404         err = ENOMEM;
 405 
 406         if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
 407                 goto out1;
 408 
 409         if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
 410                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
 411                     KM_NOSLEEP)) == NULL)
 412                         goto out2;
 413         }
 414 
 415         mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
 416 
 417         if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
 418                 goto out3;
 419 
 420         seg->s_ops = &segspt_ops;
 421         sptd->spt_vp = vp;
 422         sptd->spt_amp = amp;
 423         sptd->spt_prot = sptcargs->prot;
 424         sptd->spt_flags = sptcargs->flags;
 425         seg->s_data = (caddr_t)sptd;
 426         sptd->spt_ppa = NULL;
 427         sptd->spt_ppa_lckcnt = NULL;
 428         seg->s_szc = sptcargs->szc;
 429         cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
 430         sptd->spt_gen = 0;
 431 
 432         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 433         if (seg->s_szc > amp->a_szc) {
 434                 amp->a_szc = seg->s_szc;
 435         }
 436         ANON_LOCK_EXIT(&amp->a_rwlock);
 437 
 438         /*
 439          * Set policy to affect initial allocation of pages in
 440          * anon_map_createpages()
 441          */
 442         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
 443             NULL, 0, ptob(npages));
 444 
 445         if (sptcargs->flags & SHM_PAGEABLE) {
 446                 size_t  share_sz;
 447                 pgcnt_t new_npgs, more_pgs;
 448                 struct anon_hdr *nahp;
 449                 zone_t *zone;
 450 
 451                 share_sz = page_get_pagesize(seg->s_szc);
 452                 if (!IS_P2ALIGNED(amp->size, share_sz)) {
                        /*
                         * We round the size of the anon array up to a
                         * 4M boundary because we always create 4M worth
                         * of pages when locking and faulting pages; this
                         * way we don't have to check all the corner
                         * cases, e.g. whether there is enough space left
                         * to allocate a full 4M page.
                         */
 461                         new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
 462                         more_pgs = new_npgs - npages;
 463 
 464                         /*
 465                          * The zone will never be NULL, as a fully created
 466                          * shm always has an owning zone.
 467                          */
 468                         zone = sp->shm_perm.ipc_zone_ref.zref_zone;
 469                         ASSERT(zone != NULL);
 470                         if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
 471                                 err = ENOMEM;
 472                                 goto out4;
 473                         }
 474 
 475                         nahp = anon_create(new_npgs, ANON_SLEEP);
 476                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 477                         (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
 478                             ANON_SLEEP);
 479                         anon_release(amp->ahp, npages);
 480                         amp->ahp = nahp;
 481                         ASSERT(amp->swresv == ptob(npages));
 482                         amp->swresv = amp->size = ptob(new_npgs);
 483                         ANON_LOCK_EXIT(&amp->a_rwlock);
 484                         npages = new_npgs;
 485                 }
 486 
 487                 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
 488                     sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
 489                 sptd->spt_pcachecnt = 0;
 490                 sptd->spt_realsize = ptob(npages);
 491                 sptcargs->seg_spt = seg;
 492                 return (0);
 493         }
 494 
 495         /*
 496          * get array of pages for each anon slot in amp
 497          */
 498         if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
 499             seg, addr, S_CREATE, cred)) != 0)
 500                 goto out4;
 501 
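        /*
         * shm_mlock serializes the locked-memory (p_lckcnt) accounting
         * for this shmid while we charge and lock the pages below.
         */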
 502         mutex_enter(&sp->shm_mlock);
 503 
        /* May be partially locked, so count bytes to charge for locking */
 505         for (i = 0; i < npages; i++)
 506                 if (ppa[i]->p_lckcnt == 0)
 507                         lockedbytes += PAGESIZE;
 508 
 509         proj = sp->shm_perm.ipc_proj;
 510 
 511         if (lockedbytes > 0) {
 512                 mutex_enter(&procp->p_lock);
 513                 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
 514                         mutex_exit(&procp->p_lock);
 515                         mutex_exit(&sp->shm_mlock);
 516                         for (i = 0; i < npages; i++)
 517                                 page_unlock(ppa[i]);
 518                         err = ENOMEM;
 519                         goto out4;
 520                 }
 521                 mutex_exit(&procp->p_lock);
 522         }
 523 
 524         /*
 525          * addr is initial address corresponding to the first page on ppa list
 526          */
 527         for (i = 0; i < npages; i++) {
 528                 /* attempt to lock all pages */
 529                 if (page_pp_lock(ppa[i], 0, 1) == 0) {
 530                         /*
 531                          * if unable to lock any page, unlock all
 532                          * of them and return error
 533                          */
 534                         for (j = 0; j < i; j++)
 535                                 page_pp_unlock(ppa[j], 0, 1);
 536                         for (i = 0; i < npages; i++)
 537                                 page_unlock(ppa[i]);
 538                         rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
 539                         mutex_exit(&sp->shm_mlock);
 540                         err = ENOMEM;
 541                         goto out4;
 542                 }
 543         }
 544         mutex_exit(&sp->shm_mlock);
 545 
        /*
         * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
         * for the entire life of the segment; for example, platforms
         * that do not support Dynamic Reconfiguration.
         */
 551         hat_flags = HAT_LOAD_SHARE;
 552         if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
 553                 hat_flags |= HAT_LOAD_LOCK;
 554 
        /*
         * Load translations one large page at a time to make sure we
         * don't create mappings bigger than the segment's size code, in
         * case the underlying pages are shared with a segvn segment
         * that uses a bigger size code than we do.
         */
 562         pgsz = page_get_pagesize(seg->s_szc);
 563         pgcnt = page_get_pagecnt(seg->s_szc);
 564         for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
 565                 sz = MIN(pgsz, ptob(npages - pidx));
 566                 hat_memload_array(seg->s_as->a_hat, a, sz,
 567                     &ppa[pidx], sptd->spt_prot, hat_flags);
 568         }
 569 
 570         /*
 571          * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
 572          * we will leave the pages locked SE_SHARED for the life
 573          * of the ISM segment. This will prevent any calls to
 574          * hat_pageunload() on this ISM segment for those platforms.
 575          */
 576         if (!(hat_flags & HAT_LOAD_LOCK)) {
 577                 /*
 578                  * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
 579                  * we no longer need to hold the SE_SHARED lock on the pages,
 580                  * since L_PAGELOCK and F_SOFTLOCK calls will grab the
 581                  * SE_SHARED lock on the pages as necessary.
 582                  */
 583                 for (i = 0; i < npages; i++)
 584                         page_unlock(ppa[i]);
 585         }
 586         sptd->spt_pcachecnt = 0;
 587         kmem_free(ppa, ((sizeof (page_t *)) * npages));
 588         sptd->spt_realsize = ptob(npages);
 589         atomic_add_long(&spt_used, npages);
 590         sptcargs->seg_spt = seg;
 591         return (0);
 592 
 593 out4:
 594         seg->s_data = NULL;
 595         kmem_free(vp, sizeof (*vp));
 596         cv_destroy(&sptd->spt_cv);
 597 out3:
 598         mutex_destroy(&sptd->spt_lock);
 599         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 600                 kmem_free(ppa, (sizeof (*ppa) * npages));
 601 out2:
 602         kmem_free(sptd, sizeof (*sptd));
 603 out1:
 604         if ((sptcargs->flags & SHM_PAGEABLE) == 0)
 605                 anon_swap_restore(npages);
 606         return (err);
 607 }
 608 
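/*
 * Free all pages backing the spt segment: unload the HAT translations,
 * drop the per-page locks and the rctl locked-memory charge (ISM only),
 * and destroy the anonymous pages themselves.
 */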
 609 /*ARGSUSED*/
 610 void
 611 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
 612 {
 613         struct page     *pp;
 614         struct spt_data *sptd = (struct spt_data *)seg->s_data;
 615         pgcnt_t         npages;
 616         ulong_t         anon_idx;
 617         struct anon_map *amp;
 618         struct anon     *ap;
 619         struct vnode    *vp;
 620         u_offset_t      off;
 621         uint_t          hat_flags;
 622         int             root = 0;
 623         pgcnt_t         pgs, curnpgs = 0;
 624         page_t          *rootpp;
 625         rctl_qty_t      unlocked_bytes = 0;
 626         kproject_t      *proj;
 627         kshmid_t        *sp;
 628 
 629         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
 630 
 631         len = P2ROUNDUP(len, PAGESIZE);
 632 
 633         npages = btop(len);
 634 
 635         hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
 636         if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
 637             (sptd->spt_flags & SHM_PAGEABLE)) {
 638                 hat_flags = HAT_UNLOAD_UNMAP;
 639         }
 640 
 641         hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
 642 
 643         amp = sptd->spt_amp;
 644         if (sptd->spt_flags & SHM_PAGEABLE)
 645                 npages = btop(amp->size);
 646 
 647         ASSERT(amp != NULL);
 648 
 649         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 650                 sp = amp->a_sp;
 651                 proj = sp->shm_perm.ipc_proj;
 652                 mutex_enter(&sp->shm_mlock);
 653         }
 654         for (anon_idx = 0; anon_idx < npages; anon_idx++) {
 655                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 656                         if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
 657                                 panic("segspt_free_pages: null app");
 658                                 /*NOTREACHED*/
 659                         }
 660                 } else {
 661                         if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
 662                             == NULL)
 663                                 continue;
 664                 }
 665                 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
 666                 swap_xlate(ap, &vp, &off);
 667 
                /*
                 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
                 * the pages won't be holding the SE_SHARED lock at this
                 * point.
                 *
                 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
                 * the pages are still held SE_SHARED locked from the
                 * original segspt_create().
                 *
                 * Our goal is to get an SE_EXCL lock on each page, remove
                 * the permanent lock on it and invalidate the page.
                 */
 680                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 681                         if (hat_flags == HAT_UNLOAD_UNMAP)
 682                                 pp = page_lookup(vp, off, SE_EXCL);
 683                         else {
 684                                 if ((pp = page_find(vp, off)) == NULL) {
 685                                         panic("segspt_free_pages: "
 686                                             "page not locked");
 687                                         /*NOTREACHED*/
 688                                 }
 689                                 if (!page_tryupgrade(pp)) {
 690                                         page_unlock(pp);
 691                                         pp = page_lookup(vp, off, SE_EXCL);
 692                                 }
 693                         }
 694                         if (pp == NULL) {
 695                                 panic("segspt_free_pages: "
 696                                     "page not in the system");
 697                                 /*NOTREACHED*/
 698                         }
 699                         ASSERT(pp->p_lckcnt > 0);
 700                         page_pp_unlock(pp, 0, 1);
 701                         if (pp->p_lckcnt == 0)
 702                                 unlocked_bytes += PAGESIZE;
 703                 } else {
 704                         if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
 705                                 continue;
 706                 }
 707                 /*
 708                  * It's logical to invalidate the pages here as in most cases
 709                  * these were created by segspt.
 710                  */
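                /*
                 * Constituent pages of a large page must be freed as a
                 * unit: remember the root page and call
                 * page_destroy_pages() once the last constituent has been
                 * processed.  Small pages are disposed of individually.
                 */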
 711                 if (pp->p_szc != 0) {
 712                         if (root == 0) {
 713                                 ASSERT(curnpgs == 0);
 714                                 root = 1;
 715                                 rootpp = pp;
 716                                 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
 717                                 ASSERT(pgs > 1);
 718                                 ASSERT(IS_P2ALIGNED(pgs, pgs));
 719                                 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
 720                                 curnpgs--;
 721                         } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
 722                                 ASSERT(curnpgs == 1);
 723                                 ASSERT(page_pptonum(pp) ==
 724                                     page_pptonum(rootpp) + (pgs - 1));
 725                                 page_destroy_pages(rootpp);
 726                                 root = 0;
 727                                 curnpgs = 0;
 728                         } else {
 729                                 ASSERT(curnpgs > 1);
 730                                 ASSERT(page_pptonum(pp) ==
 731                                     page_pptonum(rootpp) + (pgs - curnpgs));
 732                                 curnpgs--;
 733                         }
 734                 } else {
 735                         if (root != 0 || curnpgs != 0) {
 736                                 panic("segspt_free_pages: bad large page");
 737                                 /*NOTREACHED*/
 738                         }
                        /*
                         * Before destroying the pages, we need to take care
                         * of the rctl locked-memory accounting.  For that
                         * we need to calculate the unlocked_bytes.
                         */
 744                         if (pp->p_lckcnt > 0)
 745                                 unlocked_bytes += PAGESIZE;
 746                         /*LINTED: constant in conditional context */
 747                         VN_DISPOSE(pp, B_INVAL, 0, kcred);
 748                 }
 749         }
 750         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 751                 if (unlocked_bytes > 0)
 752                         rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
 753                 mutex_exit(&sp->shm_mlock);
 754         }
 755         if (root != 0 || curnpgs != 0) {
 756                 panic("segspt_free_pages: bad large page");
 757                 /*NOTREACHED*/
 758         }
 759 
 760         /*
 761          * mark that pages have been released
 762          */
 763         sptd->spt_realsize = 0;
 764 
 765         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
 766                 atomic_add_long(&spt_used, -npages);
 767                 anon_swap_restore(npages);
 768         }
 769 }
 770 
 771 /*
 772  * Get memory allocation policy info for specified address in given segment
 773  */
 774 static lgrp_mem_policy_info_t *
 775 segspt_getpolicy(struct seg *seg, caddr_t addr)
 776 {
 777         struct anon_map         *amp;
 778         ulong_t                 anon_index;
 779         lgrp_mem_policy_info_t  *policy_info;
 780         struct spt_data         *spt_data;
 781 
 782         ASSERT(seg != NULL);
 783 
 784         /*
 785          * Get anon_map from segspt
 786          *
 787          * Assume that no lock needs to be held on anon_map, since
 788          * it should be protected by its reference count which must be
 789          * nonzero for an existing segment
 790          * Need to grab readers lock on policy tree though
 791          */
 792         spt_data = (struct spt_data *)seg->s_data;
 793         if (spt_data == NULL)
 794                 return (NULL);
 795         amp = spt_data->spt_amp;
 796         ASSERT(amp->refcnt != 0);
 797 
 798         /*
 799          * Get policy info
 800          *
 801          * Assume starting anon index of 0
 802          */
 803         anon_index = seg_page(seg, addr);
 804         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
 805 
 806         return (policy_info);
 807 }
 808 
 809 /*
 810  * DISM only.
 811  * Return locked pages over a given range.
 812  *
 813  * We will cache all DISM locked pages and save the pplist for the
 814  * entire segment in the ppa field of the underlying DISM segment structure.
 815  * Later, during a call to segspt_reclaim() we will use this ppa array
 816  * to page_unlock() all of the pages and then we will free this ppa list.
 817  */
 818 /*ARGSUSED*/
 819 static int
 820 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
 821     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 822 {
 823         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
 824         struct  seg     *sptseg = shmd->shm_sptseg;
 825         struct  spt_data *sptd = sptseg->s_data;
 826         pgcnt_t pg_idx, npages, tot_npages, npgs;
 827         struct  page **pplist, **pl, **ppa, *pp;
 828         struct  anon_map *amp;
 829         spgcnt_t        an_idx;
 830         int     ret = ENOTSUP;
 831         uint_t  pl_built = 0;
 832         struct  anon *ap;
 833         struct  vnode *vp;
 834         u_offset_t off;
 835         pgcnt_t claim_availrmem = 0;
 836         uint_t  szc;
 837 
 838         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
 839         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
 840 
        /*
         * We want to lock/unlock the entire ISM segment.  Therefore,
         * we will be using the underlying sptseg and its base address
         * and length for the caching arguments.
         */
 846         ASSERT(sptseg);
 847         ASSERT(sptd);
 848 
 849         pg_idx = seg_page(seg, addr);
 850         npages = btopr(len);
 851 
 852         /*
 853          * check if the request is larger than number of pages covered
 854          * by amp
 855          */
 856         if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
 857                 *ppp = NULL;
 858                 return (ENOTSUP);
 859         }
 860 
 861         if (type == L_PAGEUNLOCK) {
 862                 ASSERT(sptd->spt_ppa != NULL);
 863 
 864                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
 865                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 866 
 867                 /*
 868                  * If someone is blocked while unmapping, we purge
 869                  * segment page cache and thus reclaim pplist synchronously
 870                  * without waiting for seg_pasync_thread. This speeds up
 871                  * unmapping in cases where munmap(2) is called, while
 872                  * raw async i/o is still in progress or where a thread
 873                  * exits on data fault in a multithreaded application.
 874                  */
 875                 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
 876                     (AS_ISUNMAPWAIT(seg->s_as) &&
 877                     shmd->shm_softlockcnt > 0)) {
 878                         segspt_purge(seg);
 879                 }
 880                 return (0);
 881         }
 882 
 883         /* The L_PAGELOCK case ... */
 884 
 885         if (sptd->spt_flags & DISM_PPA_CHANGED) {
 886                 segspt_purge(seg);
                /*
                 * For DISM, the ppa array needs to be rebuilt since the
                 * number of locked pages could have changed.
                 */
 891                 *ppp = NULL;
 892                 return (ENOTSUP);
 893         }
 894 
 895         /*
 896          * First try to find pages in segment page cache, without
 897          * holding the segment lock.
 898          */
 899         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 900             S_WRITE, SEGP_FORCE_WIRED);
 901         if (pplist != NULL) {
 902                 ASSERT(sptd->spt_ppa != NULL);
 903                 ASSERT(sptd->spt_ppa == pplist);
 904                 ppa = sptd->spt_ppa;
 905                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 906                         if (ppa[an_idx] == NULL) {
 907                                 seg_pinactive(seg, NULL, seg->s_base,
 908                                     sptd->spt_amp->size, ppa,
 909                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 910                                 *ppp = NULL;
 911                                 return (ENOTSUP);
 912                         }
 913                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 914                                 npgs = page_get_pagecnt(szc);
 915                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 916                         } else {
 917                                 an_idx++;
 918                         }
 919                 }
 920                 /*
 921                  * Since we cache the entire DISM segment, we want to
 922                  * set ppp to point to the first slot that corresponds
 923                  * to the requested addr, i.e. pg_idx.
 924                  */
 925                 *ppp = &(sptd->spt_ppa[pg_idx]);
 926                 return (0);
 927         }
 928 
 929         mutex_enter(&sptd->spt_lock);
 930         /*
 931          * try to find pages in segment page cache with mutex
 932          */
 933         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
 934             S_WRITE, SEGP_FORCE_WIRED);
 935         if (pplist != NULL) {
 936                 ASSERT(sptd->spt_ppa != NULL);
 937                 ASSERT(sptd->spt_ppa == pplist);
 938                 ppa = sptd->spt_ppa;
 939                 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
 940                         if (ppa[an_idx] == NULL) {
 941                                 mutex_exit(&sptd->spt_lock);
 942                                 seg_pinactive(seg, NULL, seg->s_base,
 943                                     sptd->spt_amp->size, ppa,
 944                                     S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
 945                                 *ppp = NULL;
 946                                 return (ENOTSUP);
 947                         }
 948                         if ((szc = ppa[an_idx]->p_szc) != 0) {
 949                                 npgs = page_get_pagecnt(szc);
 950                                 an_idx = P2ROUNDUP(an_idx + 1, npgs);
 951                         } else {
 952                                 an_idx++;
 953                         }
 954                 }
 955                 /*
 956                  * Since we cache the entire DISM segment, we want to
 957                  * set ppp to point to the first slot that corresponds
 958                  * to the requested addr, i.e. pg_idx.
 959                  */
 960                 mutex_exit(&sptd->spt_lock);
 961                 *ppp = &(sptd->spt_ppa[pg_idx]);
 962                 return (0);
 963         }
 964         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
 965             SEGP_FORCE_WIRED) == SEGP_FAIL) {
 966                 mutex_exit(&sptd->spt_lock);
 967                 *ppp = NULL;
 968                 return (ENOTSUP);
 969         }
 970 
 971         /*
 972          * No need to worry about protections because DISM pages are always rw.
 973          */
 974         pl = pplist = NULL;
 975         amp = sptd->spt_amp;
 976 
 977         /*
 978          * Do we need to build the ppa array?
 979          */
 980         if (sptd->spt_ppa == NULL) {
 981                 pgcnt_t lpg_cnt = 0;
 982 
 983                 pl_built = 1;
 984                 tot_npages = btopr(sptd->spt_amp->size);
 985 
 986                 ASSERT(sptd->spt_pcachecnt == 0);
 987                 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
 988                 pl = pplist;
 989 
 990                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
 991                 for (an_idx = 0; an_idx < tot_npages; ) {
 992                         ap = anon_get_ptr(amp->ahp, an_idx);
                        /*
                         * Cache only mlocked pages.  For large pages, if
                         * one (constituent) page is mlocked then all pages
                         * for that large page are cached as well.  This
                         * allows for quick lookups in the ppa array.
                         */
1000                         if ((ap != NULL) && (lpg_cnt != 0 ||
1001                             (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1002 
1003                                 swap_xlate(ap, &vp, &off);
1004                                 pp = page_lookup(vp, off, SE_SHARED);
1005                                 ASSERT(pp != NULL);
1006                                 if (lpg_cnt == 0) {
1007                                         lpg_cnt++;
                                        /*
                                         * For a small page, we are done --
                                         * lpg_cnt is reset to 0 below.
                                         *
                                         * For a large page, we are
                                         * guaranteed to find the anon
                                         * structures of all constituent
                                         * pages and a non-zero lpg_cnt
                                         * ensures that we don't test for
                                         * mlock for these.  We are done
                                         * when lpg_cnt reaches (npgs + 1).
                                         * If we are not the first
                                         * constituent page, restart at the
                                         * first one.
                                         */
1021                                         npgs = page_get_pagecnt(pp->p_szc);
1022                                         if (!IS_P2ALIGNED(an_idx, npgs)) {
1023                                                 an_idx = P2ALIGN(an_idx, npgs);
1024                                                 page_unlock(pp);
1025                                                 continue;
1026                                         }
1027                                 }
1028                                 if (++lpg_cnt > npgs)
1029                                         lpg_cnt = 0;
1030 
1031                                 /*
1032                                  * availrmem is decremented only
1033                                  * for unlocked pages
1034                                  */
1035                                 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1036                                         claim_availrmem++;
1037                                 pplist[an_idx] = pp;
1038                         }
1039                         an_idx++;
1040                 }
1041                 ANON_LOCK_EXIT(&amp->a_rwlock);
1042 
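                /*
                 * Charge availrmem for the pages that were not already
                 * mlocked; fail the pagelock if that would leave less than
                 * tune.t_minarmem available.
                 */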
1043                 if (claim_availrmem) {
1044                         mutex_enter(&freemem_lock);
1045                         if (availrmem < tune.t_minarmem + claim_availrmem) {
1046                                 mutex_exit(&freemem_lock);
1047                                 ret = ENOTSUP;
1048                                 claim_availrmem = 0;
1049                                 goto insert_fail;
1050                         } else {
1051                                 availrmem -= claim_availrmem;
1052                         }
1053                         mutex_exit(&freemem_lock);
1054                 }
1055 
1056                 sptd->spt_ppa = pl;
1057         } else {
1058                 /*
1059                  * We already have a valid ppa[].
1060                  */
1061                 pl = sptd->spt_ppa;
1062         }
1063 
1064         ASSERT(pl != NULL);
1065 
1066         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1067             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1068             segspt_reclaim);
1069         if (ret == SEGP_FAIL) {
1070                 /*
1071                  * seg_pinsert failed. We return
1072                  * ENOTSUP, so that the as_pagelock() code will
1073                  * then try the slower F_SOFTLOCK path.
1074                  */
1075                 if (pl_built) {
1076                         /*
1077                          * No one else has referenced the ppa[].
1078                          * We created it and we need to destroy it.
1079                          */
1080                         sptd->spt_ppa = NULL;
1081                 }
1082                 ret = ENOTSUP;
1083                 goto insert_fail;
1084         }
1085 
1086         /*
1087          * In either case, we increment softlockcnt on the 'real' segment.
1088          */
1089         sptd->spt_pcachecnt++;
1090         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1091 
1092         ppa = sptd->spt_ppa;
1093         for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1094                 if (ppa[an_idx] == NULL) {
1095                         mutex_exit(&sptd->spt_lock);
1096                         seg_pinactive(seg, NULL, seg->s_base,
1097                             sptd->spt_amp->size,
1098                             pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1099                         *ppp = NULL;
1100                         return (ENOTSUP);
1101                 }
1102                 if ((szc = ppa[an_idx]->p_szc) != 0) {
1103                         npgs = page_get_pagecnt(szc);
1104                         an_idx = P2ROUNDUP(an_idx + 1, npgs);
1105                 } else {
1106                         an_idx++;
1107                 }
1108         }
        /*
         * We can now drop the sptd->spt_lock since the ppa[]
         * exists and we have incremented pcachecnt.
         */
1113         mutex_exit(&sptd->spt_lock);
1114 
1115         /*
1116          * Since we cache the entire segment, we want to
1117          * set ppp to point to the first slot that corresponds
1118          * to the requested addr, i.e. pg_idx.
1119          */
1120         *ppp = &(sptd->spt_ppa[pg_idx]);
1121         return (0);
1122 
1123 insert_fail:
1124         /*
1125          * We will only reach this code if we tried and failed.
1126          *
1127          * And we can drop the lock on the dummy seg, once we've failed
1128          * to set up a new ppa[].
1129          */
1130         mutex_exit(&sptd->spt_lock);
1131 
1132         if (pl_built) {
1133                 if (claim_availrmem) {
1134                         mutex_enter(&freemem_lock);
1135                         availrmem += claim_availrmem;
1136                         mutex_exit(&freemem_lock);
1137                 }
1138 
1139                 /*
1140                  * We created pl and we need to destroy it.
1141                  */
1142                 pplist = pl;
1143                 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1144                         if (pplist[an_idx] != NULL)
1145                                 page_unlock(pplist[an_idx]);
1146                 }
1147                 kmem_free(pl, sizeof (page_t *) * tot_npages);
1148         }
1149 
1150         if (shmd->shm_softlockcnt <= 0) {
1151                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1152                         mutex_enter(&seg->s_as->a_contents);
1153                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1154                                 AS_CLRUNMAPWAIT(seg->s_as);
1155                                 cv_broadcast(&seg->s_as->a_cv);
1156                         }
1157                         mutex_exit(&seg->s_as->a_contents);
1158                 }
1159         }
1160         *ppp = NULL;
1161         return (ret);
1162 }
1163 
1164 
1165 
1166 /*
1167  * return locked pages over a given range.
1168  *
1169  * We will cache the entire ISM segment and save the pplist for the
1170  * entire segment in the ppa field of the underlying ISM segment structure.
1171  * Later, during a call to segspt_reclaim() we will use this ppa array
1172  * to page_unlock() all of the pages and then we will free this ppa list.
1173  */
1174 /*ARGSUSED*/
1175 static int
1176 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1177     struct page ***ppp, enum lock_type type, enum seg_rw rw)
1178 {
1179         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1180         struct seg      *sptseg = shmd->shm_sptseg;
1181         struct spt_data *sptd = sptseg->s_data;
1182         pgcnt_t np, page_index, npages;
1183         caddr_t a, spt_base;
1184         struct page **pplist, **pl, *pp;
1185         struct anon_map *amp;
1186         ulong_t anon_index;
1187         int ret = ENOTSUP;
1188         uint_t  pl_built = 0;
1189         struct anon *ap;
1190         struct vnode *vp;
1191         u_offset_t off;
1192 
1193         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1194         ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1195 
1196 
        /*
         * We want to lock/unlock the entire ISM segment.  Therefore,
         * we will be using the underlying sptseg and its base address
         * and length for the caching arguments.
         */
1202         ASSERT(sptseg);
1203         ASSERT(sptd);
1204 
1205         if (sptd->spt_flags & SHM_PAGEABLE) {
1206                 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1207         }
1208 
1209         page_index = seg_page(seg, addr);
1210         npages = btopr(len);
1211 
1212         /*
1213          * check if the request is larger than number of pages covered
1214          * by amp
1215          */
1216         if (page_index + npages > btopr(sptd->spt_amp->size)) {
1217                 *ppp = NULL;
1218                 return (ENOTSUP);
1219         }
1220 
1221         if (type == L_PAGEUNLOCK) {
1222 
1223                 ASSERT(sptd->spt_ppa != NULL);
1224 
1225                 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1226                     sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1227 
1228                 /*
1229                  * If someone is blocked while unmapping, we purge
1230                  * segment page cache and thus reclaim pplist synchronously
1231                  * without waiting for seg_pasync_thread. This speeds up
1232                  * unmapping in cases where munmap(2) is called, while
1233                  * raw async i/o is still in progress or where a thread
1234                  * exits on data fault in a multithreaded application.
1235                  */
1236                 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1237                         segspt_purge(seg);
1238                 }
1239                 return (0);
1240         }
1241 
1242         /* The L_PAGELOCK case... */
1243 
1244         /*
1245          * First try to find pages in segment page cache, without
1246          * holding the segment lock.
1247          */
1248         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1249             S_WRITE, SEGP_FORCE_WIRED);
1250         if (pplist != NULL) {
1251                 ASSERT(sptd->spt_ppa == pplist);
1252                 ASSERT(sptd->spt_ppa[page_index]);
1253                 /*
1254                  * Since we cache the entire ISM segment, we want to
1255                  * set ppp to point to the first slot that corresponds
1256                  * to the requested addr, i.e. page_index.
1257                  */
1258                 *ppp = &(sptd->spt_ppa[page_index]);
1259                 return (0);
1260         }
1261 
1262         mutex_enter(&sptd->spt_lock);
1263 
1264         /*
1265          * try to find pages in segment page cache
1266          */
1267         pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1268             S_WRITE, SEGP_FORCE_WIRED);
1269         if (pplist != NULL) {
1270                 ASSERT(sptd->spt_ppa == pplist);
1271                 /*
1272                  * Since we cache the entire segment, we want to
1273                  * set ppp to point to the first slot that corresponds
1274                  * to the requested addr, i.e. page_index.
1275                  */
1276                 mutex_exit(&sptd->spt_lock);
1277                 *ppp = &(sptd->spt_ppa[page_index]);
1278                 return (0);
1279         }
1280 
1281         if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1282             SEGP_FORCE_WIRED) == SEGP_FAIL) {
1283                 mutex_exit(&sptd->spt_lock);
1284                 *ppp = NULL;
1285                 return (ENOTSUP);
1286         }
1287 
1288         /*
1289          * No need to worry about protections because ISM pages
1290          * are always rw.
1291          */
1292         pl = pplist = NULL;
1293 
1294         /*
1295          * Do we need to build the ppa array?
1296          */
1297         if (sptd->spt_ppa == NULL) {
1298                 ASSERT(sptd->spt_ppa == pplist);
1299 
1300                 spt_base = sptseg->s_base;
1301                 pl_built = 1;
1302 
1303                 /*
1304                  * availrmem is decremented once during anon_swap_adjust()
1305                  * and is incremented during the anon_unresv(), which is
1306                  * called from shm_rm_amp() when the segment is destroyed.
1307                  */
1308                 amp = sptd->spt_amp;
1309                 ASSERT(amp != NULL);
1310 
1311                 /* pcachecnt is protected by sptd->spt_lock */
1312                 ASSERT(sptd->spt_pcachecnt == 0);
1313                 pplist = kmem_zalloc(sizeof (page_t *)
1314                     * btopr(sptd->spt_amp->size), KM_SLEEP);
1315                 pl = pplist;
1316 
1317                 anon_index = seg_page(sptseg, spt_base);
1318 
1319                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1320                 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1321                     a += PAGESIZE, anon_index++, pplist++) {
1322                         ap = anon_get_ptr(amp->ahp, anon_index);
1323                         ASSERT(ap != NULL);
1324                         swap_xlate(ap, &vp, &off);
1325                         pp = page_lookup(vp, off, SE_SHARED);
1326                         ASSERT(pp != NULL);
1327                         *pplist = pp;
1328                 }
1329                 ANON_LOCK_EXIT(&amp->a_rwlock);
1330 
1331                 if (a < (spt_base + sptd->spt_amp->size)) {
1332                         ret = ENOTSUP;
1333                         goto insert_fail;
1334                 }
1335                 sptd->spt_ppa = pl;
1336         } else {
1337                 /*
1338                  * We already have a valid ppa[].
1339                  */
1340                 pl = sptd->spt_ppa;
1341         }
1342 
1343         ASSERT(pl != NULL);
1344 
1345         ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1346             sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1347             segspt_reclaim);
1348         if (ret == SEGP_FAIL) {
1349                 /*
1350                  * seg_pinsert failed. We return
1351                  * ENOTSUP, so that the as_pagelock() code will
1352                  * then try the slower F_SOFTLOCK path.
1353                  */
1354                 if (pl_built) {
1355                         /*
1356                          * No one else has referenced the ppa[].
1357                          * We created it and we need to destroy it.
1358                          */
1359                         sptd->spt_ppa = NULL;
1360                 }
1361                 ret = ENOTSUP;
1362                 goto insert_fail;
1363         }
1364 
1365         /*
1366          * In either case, we increment softlockcnt on the 'real' segment.
1367          */
1368         sptd->spt_pcachecnt++;
1369         atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1370 
1371         /*
1372          * We can now drop the sptd->spt_lock since the ppa[]
1373          * exists and we have incremented pcachecnt.
1374          */
1375         mutex_exit(&sptd->spt_lock);
1376 
1377         /*
1378          * Since we cache the entire segment, we want to
1379          * set ppp to point to the first slot that corresponds
1380          * to the requested addr, i.e. page_index.
1381          */
1382         *ppp = &(sptd->spt_ppa[page_index]);
1383         return (0);
1384 
1385 insert_fail:
1386         /*
1387          * We only reach this code if we tried and failed.
1388          *
1389          * We can drop the lock on the dummy seg now that we have
1390          * failed to set up a new ppa[].
1391          */
1392         mutex_exit(&sptd->spt_lock);
1393 
1394         if (pl_built) {
1395                 /*
1396                  * We created pl and we need to destroy it.
1397                  */
1398                 pplist = pl;
1399                 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1400                 while (np) {
1401                         page_unlock(*pplist);
1402                         np--;
1403                         pplist++;
1404                 }
1405                 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1406         }
1407         if (shmd->shm_softlockcnt <= 0) {
1408                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1409                         mutex_enter(&seg->s_as->a_contents);
1410                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1411                                 AS_CLRUNMAPWAIT(seg->s_as);
1412                                 cv_broadcast(&seg->s_as->a_cv);
1413                         }
1414                         mutex_exit(&seg->s_as->a_contents);
1415                 }
1416         }
1417         *ppp = NULL;
1418         return (ret);
1419 }
1420 
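/*
 * Illustrative sketch (an assumption about typical usage, not an interface
 * defined in this file): callers that need the wired page list typically go
 * through as_pagelock()/as_pageunlock(); the unlock side eventually drives
 * segspt_reclaim() below via the seg pcache.
 *
 *	struct page **pplist;
 *
 *	if (as_pagelock(as, &pplist, uaddr, len, S_WRITE) == 0) {
 *		... perform I/O against the locked pages ...
 *		as_pageunlock(as, pplist, uaddr, len, S_WRITE);
 *	}
 */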
1421 /*
1422  * purge any cached pages in the I/O page cache
1423  */
1424 static void
1425 segspt_purge(struct seg *seg)
1426 {
1427         seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1428 }
1429 
1430 static int
1431 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1432         enum seg_rw rw, int async)
1433 {
1434         struct seg *seg = (struct seg *)ptag;
1435         struct  shm_data *shmd = (struct shm_data *)seg->s_data;
1436         struct  seg     *sptseg;
1437         struct  spt_data *sptd;
1438         pgcnt_t npages, i, free_availrmem = 0;
1439         int     done = 0;
1440 
1441 #ifdef lint
1442         addr = addr;
1443 #endif
1444         sptseg = shmd->shm_sptseg;
1445         sptd = sptseg->s_data;
1446         npages = (len >> PAGESHIFT);
1447         ASSERT(npages);
1448         ASSERT(sptd->spt_pcachecnt != 0);
1449         ASSERT(sptd->spt_ppa == pplist);
1450         ASSERT(npages == btopr(sptd->spt_amp->size));
1451         ASSERT(async || AS_LOCK_HELD(seg->s_as));
1452 
1453         /*
1454          * Acquire the lock on the dummy seg and destroy the
1455          * ppa array IF this is the last pcachecnt.
1456          */
1457         mutex_enter(&sptd->spt_lock);
1458         if (--sptd->spt_pcachecnt == 0) {
1459                 for (i = 0; i < npages; i++) {
1460                         if (pplist[i] == NULL) {
1461                                 continue;
1462                         }
1463                         if (rw == S_WRITE) {
1464                                 hat_setrefmod(pplist[i]);
1465                         } else {
1466                                 hat_setref(pplist[i]);
1467                         }
1468                         if ((sptd->spt_flags & SHM_PAGEABLE) &&
1469                             (sptd->spt_ppa_lckcnt[i] == 0))
1470                                 free_availrmem++;
1471                         page_unlock(pplist[i]);
1472                 }
1473                 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1474                         mutex_enter(&freemem_lock);
1475                         availrmem += free_availrmem;
1476                         mutex_exit(&freemem_lock);
1477                 }
1478                 /*
1479                  * Since we want to cache/uncache the entire ISM segment,
1480                  * we track the pplist in a segspt-specific field, ppa,
1481                  * which is initialized at the time we add an entry to
1482                  * the cache.
1483                  */
1484                 ASSERT(sptd->spt_pcachecnt == 0);
1485                 kmem_free(pplist, sizeof (page_t *) * npages);
1486                 sptd->spt_ppa = NULL;
1487                 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1488                 sptd->spt_gen++;
1489                 cv_broadcast(&sptd->spt_cv);
1490                 done = 1;
1491         }
1492         mutex_exit(&sptd->spt_lock);
1493 
1494         /*
1495          * If we are the pcache async thread or were called via
1496          * seg_ppurge_wiredpp() we may not hold the AS lock (in this case
1497          * the async argument is not 0). This means that if softlockcnt
1498          * drops to 0 after the decrement below, the address space may get
1499          * freed. We can't allow that, since after softlockcnt drops to 0
1500          * we still need to access the as structure for a possible wakeup
1501          * of unmap waiters. To prevent the as from disappearing we take
1502          * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1503          * this mutex as a barrier to make sure this routine completes
1504          * before the segment is freed.
1505          *
1506          * The second complication in the async case is the possibility of
1507          * a missed wakeup of an unmap wait thread. When we don't hold the
1508          * as lock here, we may take the a_contents lock before an unmap
1509          * wait thread that was first to see that softlockcnt was still not
1510          * 0. As a result we'd fail to wake it up. To avoid this race we
1511          * set the nounmapwait flag in the as structure if we drop
1512          * softlockcnt to 0 when async is not 0; the unmapwait thread will
                 * not block if this flag is set.
1513          */
1514         if (async)
1515                 mutex_enter(&shmd->shm_segfree_syncmtx);
1516 
1517         /*
1518          * Now decrement softlockcnt.
1519          */
1520         ASSERT(shmd->shm_softlockcnt > 0);
1521         atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1522 
1523         if (shmd->shm_softlockcnt <= 0) {
1524                 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1525                         mutex_enter(&seg->s_as->a_contents);
1526                         if (async)
1527                                 AS_SETNOUNMAPWAIT(seg->s_as);
1528                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1529                                 AS_CLRUNMAPWAIT(seg->s_as);
1530                                 cv_broadcast(&seg->s_as->a_cv);
1531                         }
1532                         mutex_exit(&seg->s_as->a_contents);
1533                 }
1534         }
1535 
1536         if (async)
1537                 mutex_exit(&shmd->shm_segfree_syncmtx);
1538 
1539         return (done);
1540 }
1541 
1542 /*
1543  * Do a F_SOFTUNLOCK call over the range requested.
1544  * The range must have already been F_SOFTLOCK'ed.
1545  *
1546  * The calls to acquire and release the anon map lock mutex were
1547  * removed in order to avoid a deadly embrace during a DR
1548  * memory delete operation.  (E.g., DR blocks while waiting for an
1549  * exclusive lock on a page that is being used for kaio; the
1550  * thread that will complete the kaio and call segspt_softunlock
1551  * blocks on the anon map lock; another thread holding the anon
1552  * map lock blocks on another page lock via the segspt_shmfault
1553  * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1554  *
1555  * The appropriateness of the removal is based upon the following:
1556  * 1. If we are holding a segment's reader lock and the page is held
1557  * shared, then the corresponding element in anonmap which points to
1558  * anon struct cannot change and there is no need to acquire the
1559  * anonymous map lock.
1560  * 2. Threads in segspt_softunlock have a reader lock on the segment
1561  * and already have the shared page lock, so we are guaranteed that
1562  * the anon map slot cannot change and therefore can call anon_get_ptr()
1563  * without grabbing the anonymous map lock.
1564  * 3. Threads that softlock a shared page break copy-on-write, even if
1565  * it's a read.  Thus cow faults can be ignored with respect to soft
1566  * unlocking, since the breaking of cow means that the anon slot(s) will
1567  * not be shared.
1568  */
1569 static void
1570 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1571         size_t len, enum seg_rw rw)
1572 {
1573         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1574         struct seg      *sptseg;
1575         struct spt_data *sptd;
1576         page_t *pp;
1577         caddr_t adr;
1578         struct vnode *vp;
1579         u_offset_t offset;
1580         ulong_t anon_index;
1581         struct anon_map *amp;           /* XXX - for locknest */
1582         struct anon *ap = NULL;
1583         pgcnt_t npages;
1584 
1585         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1586 
1587         sptseg = shmd->shm_sptseg;
1588         sptd = sptseg->s_data;
1589 
1590         /*
1591          * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1592          * and therefore their pages are SE_SHARED locked
1593          * for the entire life of the segment.
1594          */
1595         if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1596             ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1597                 goto softlock_decrement;
1598         }
1599 
1600         /*
1601          * Any thread is free to do a page_find and
1602          * page_unlock() on the pages within this seg.
1603          *
1604          * We are already holding the as->a_lock on the user's
1605          * real segment, but we need to hold the a_lock on the
1606          * underlying dummy as. This is mostly to satisfy the
1607          * underlying HAT layer.
1608          */
1609         AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1610         hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1611         AS_LOCK_EXIT(sptseg->s_as);
1612 
1613         amp = sptd->spt_amp;
1614         ASSERT(amp != NULL);
1615         anon_index = seg_page(sptseg, sptseg_addr);
1616 
1617         for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1618                 ap = anon_get_ptr(amp->ahp, anon_index++);
1619                 ASSERT(ap != NULL);
1620                 swap_xlate(ap, &vp, &offset);
1621 
1622                 /*
1623                  * Use page_find() instead of page_lookup() to
1624                  * find the page since we know that it has a
1625                  * "shared" lock.
1626                  */
1627                 pp = page_find(vp, offset);
1628                 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1629                 if (pp == NULL) {
1630                         panic("segspt_softunlock: "
1631                             "addr %p, ap %p, vp %p, off %llx",
1632                             (void *)adr, (void *)ap, (void *)vp, offset);
1633                         /*NOTREACHED*/
1634                 }
1635 
1636                 if (rw == S_WRITE) {
1637                         hat_setrefmod(pp);
1638                 } else if (rw != S_OTHER) {
1639                         hat_setref(pp);
1640                 }
1641                 page_unlock(pp);
1642         }
1643 
1644 softlock_decrement:
1645         npages = btopr(len);
1646         ASSERT(shmd->shm_softlockcnt >= npages);
1647         atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1648         if (shmd->shm_softlockcnt == 0) {
1649                 /*
1650                  * All SOFTLOCKS are gone. Wakeup any waiting
1651                  * unmappers so they can try again to unmap.
1652                  * Check for waiters first without the mutex
1653                  * held so we don't always grab the mutex on
1654                  * softunlocks.
1655                  */
1656                 if (AS_ISUNMAPWAIT(seg->s_as)) {
1657                         mutex_enter(&seg->s_as->a_contents);
1658                         if (AS_ISUNMAPWAIT(seg->s_as)) {
1659                                 AS_CLRUNMAPWAIT(seg->s_as);
1660                                 cv_broadcast(&seg->s_as->a_cv);
1661                         }
1662                         mutex_exit(&seg->s_as->a_contents);
1663                 }
1664         }
1665 }
1666 
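/*
 * Attach a process to an ISM/DISM segment: allocate the per-process
 * shm_data, point it at the shared SPT segment, share the SPT HAT
 * mappings where appropriate, and take a hold on the shared anon map.
 */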
1667 int
1668 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1669 {
1670         struct shm_data *shmd_arg = (struct shm_data *)argsp;
1671         struct shm_data *shmd;
1672         struct anon_map *shm_amp = shmd_arg->shm_amp;
1673         struct spt_data *sptd;
1674         int error = 0;
1675 
1676         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1677 
1678         shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1679         if (shmd == NULL)
1680                 return (ENOMEM);
1681 
1682         shmd->shm_sptas = shmd_arg->shm_sptas;
1683         shmd->shm_amp = shm_amp;
1684         shmd->shm_sptseg = shmd_arg->shm_sptseg;
1685 
1686         (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1687             NULL, 0, seg->s_size);
1688 
1689         mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1690 
1691         seg->s_data = (void *)shmd;
1692         seg->s_ops = &segspt_shmops;
1693         seg->s_szc = shmd->shm_sptseg->s_szc;
1694         sptd = shmd->shm_sptseg->s_data;
1695 
1696         if (sptd->spt_flags & SHM_PAGEABLE) {
1697                 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1698                     KM_NOSLEEP)) == NULL) {
1699                         seg->s_data = (void *)NULL;
1700                         kmem_free(shmd, (sizeof (*shmd)));
1701                         return (ENOMEM);
1702                 }
1703                 shmd->shm_lckpgs = 0;
1704                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1705                         if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1706                             shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1707                             seg->s_size, seg->s_szc)) != 0) {
1708                                 kmem_free(shmd->shm_vpage,
1709                                     btopr(shm_amp->size));
1710                         }
1711                 }
1712         } else {
1713                 error = hat_share(seg->s_as->a_hat, seg->s_base,
1714                     shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1715                     seg->s_size, seg->s_szc);
1716         }
1717         if (error) {
1718                 seg->s_szc = 0;
1719                 seg->s_data = (void *)NULL;
1720                 kmem_free(shmd, (sizeof (*shmd)));
1721         } else {
1722                 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1723                 shm_amp->refcnt++;
1724                 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1725         }
1726         return (error);
1727 }
1728 
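/*
 * Detach a process from the segment. If there are outstanding softlocks
 * (e.g. cached page lists), purge the pcache and retry once; if softlocks
 * remain, fail with EAGAIN so the caller can try the unmap again later.
 */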
1729 int
1730 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1731 {
1732         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1733         int reclaim = 1;
1734 
1735         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1736 retry:
1737         if (shmd->shm_softlockcnt > 0) {
1738                 if (reclaim == 1) {
1739                         segspt_purge(seg);
1740                         reclaim = 0;
1741                         goto retry;
1742                 }
1743                 return (EAGAIN);
1744         }
1745 
1746         if (ssize != seg->s_size) {
1747 #ifdef DEBUG
1748                 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1749                     ssize, seg->s_size);
1750 #endif
1751                 return (EINVAL);
1752         }
1753 
1754         (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1755             NULL, 0);
1756         hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1757 
1758         seg_free(seg);
1759 
1760         return (0);
1761 }
1762 
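/*
 * Free the per-process shm_data when the segment is torn down and drop
 * the anon map reference taken at attach/dup time.
 */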
1763 void
1764 segspt_shmfree(struct seg *seg)
1765 {
1766         struct shm_data *shmd = (struct shm_data *)seg->s_data;
1767         struct anon_map *shm_amp = shmd->shm_amp;
1768 
1769         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
1770 
1771         (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1772             MC_UNLOCK, NULL, 0);
1773 
1774         /*
1775          * Need to increment refcnt when attaching
1776          * and decrement when detaching because of dup().
1777          */
1778         ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1779         shm_amp->refcnt--;
1780         ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1781 
1782         if (shmd->shm_vpage) {       /* only for DISM */
1783                 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1784                 shmd->shm_vpage = NULL;
1785         }
1786 
1787         /*
1788          * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1789          * still working with this segment without holding as lock.
1790          */
1791         ASSERT(shmd->shm_softlockcnt == 0);
1792         mutex_enter(&shmd->shm_segfree_syncmtx);
1793         mutex_destroy(&shmd->shm_segfree_syncmtx);
1794 
1795         kmem_free(shmd, sizeof (*shmd));
1796 }
1797 
1798 /*ARGSUSED*/
1799 int
1800 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1801 {
1802         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1803 
1804         /*
1805          * Shared page table is more than shared mapping.
1806          *  An individual process sharing the page tables can't change
1807          *  the protections because there is only one set of page tables.
1808          *  This will be allowed after private page tables are
1809          *  supported.
1810          */
1811 /* need to return correct status error? */
1812         return (0);
1813 }
1814 
1815 
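/*
 * Fault handler for pageable (DISM) segments: gather the needed anon pages
 * and load the HAT translations in large-page-sized chunks.
 */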
1816 faultcode_t
1817 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1818     size_t len, enum fault_type type, enum seg_rw rw)
1819 {
1820         struct  shm_data        *shmd = (struct shm_data *)seg->s_data;
1821         struct  seg             *sptseg = shmd->shm_sptseg;
1822         struct  as              *curspt = shmd->shm_sptas;
1823         struct  spt_data        *sptd = sptseg->s_data;
1824         pgcnt_t npages;
1825         size_t  size;
1826         caddr_t segspt_addr, shm_addr;
1827         page_t  **ppa;
1828         int     i;
1829         ulong_t an_idx = 0;
1830         int     err = 0;
1831         int     dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1832         size_t  pgsz;
1833         pgcnt_t pgcnt;
1834         caddr_t a;
1835         pgcnt_t pidx;
1836 
1837 #ifdef lint
1838         hat = hat;
1839 #endif
1840         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
1841 
1842         /*
1843          * Because of the way spt is implemented,
1844          * the realsize of the segment does not have to be
1845          * equal to the segment size itself. The segment size is
1846          * often a multiple of a page size larger than PAGESIZE.
1847          * The realsize is rounded up to the nearest PAGESIZE
1848          * based on what the user requested. This is a bit of
1849          * ugliness that is historical but not easily fixed
1850          * without re-designing the higher levels of ISM.
1851          */
1852         ASSERT(addr >= seg->s_base);
1853         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1854                 return (FC_NOMAP);
1855         /*
1856          * For all of the following cases except F_PROT, we need to
1857          * make any necessary adjustments to addr and len
1858          * and get all of the necessary page_t's into an array called ppa[].
1859          *
1860          * The code in shmat() forces base addr and len of ISM segment
1861          * to be aligned to largest page size supported. Therefore,
1862          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1863          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1864          * in large pagesize chunks, or else we will screw up the HAT
1865          * layer by calling hat_memload_array() with differing page sizes
1866          * over a given virtual range.
1867          */
1868         pgsz = page_get_pagesize(sptseg->s_szc);
1869         pgcnt = page_get_pagecnt(sptseg->s_szc);
1870         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1871         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1872         npages = btopr(size);
1873 
1874         /*
1875          * Now we need to convert from addr in segshm to addr in segspt.
1876          */
1877         an_idx = seg_page(seg, shm_addr);
1878         segspt_addr = sptseg->s_base + ptob(an_idx);
1879 
1880         ASSERT((segspt_addr + ptob(npages)) <=
1881             (sptseg->s_base + sptd->spt_realsize));
1882         ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1883 
1884         switch (type) {
1885 
1886         case F_SOFTLOCK:
1887 
1888                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1889                 /*
1890                  * Fall through to the F_INVAL case to load up the hat layer
1891                  * entries with the HAT_LOAD_LOCK flag.
1892                  */
1893                 /* FALLTHRU */
1894         case F_INVAL:
1895 
1896                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1897                         return (FC_NOMAP);
1898 
1899                 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1900 
1901                 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1902                 if (err != 0) {
1903                         if (type == F_SOFTLOCK) {
1904                                 atomic_add_long((ulong_t *)(
1905                                     &(shmd->shm_softlockcnt)), -npages);
1906                         }
1907                         goto dism_err;
1908                 }
1909                 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
1910                 a = segspt_addr;
1911                 pidx = 0;
1912                 if (type == F_SOFTLOCK) {
1913 
1914                         /*
1915                          * Load up the translation keeping it
1916                          * locked and don't unlock the page.
1917                          */
1918                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1919                                 hat_memload_array(sptseg->s_as->a_hat,
1920                                     a, pgsz, &ppa[pidx], sptd->spt_prot,
1921                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1922                         }
1923                 } else {
1924                         /*
1925                          * Migrate pages marked for migration
1926                          */
1927                         if (lgrp_optimizations())
1928                                 page_migrate(seg, shm_addr, ppa, npages);
1929 
1930                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1931                                 hat_memload_array(sptseg->s_as->a_hat,
1932                                     a, pgsz, &ppa[pidx],
1933                                     sptd->spt_prot,
1934                                     HAT_LOAD_SHARE);
1935                         }
1936 
1937                         /*
1938                          * And now drop the SE_SHARED lock(s).
1939                          */
1940                         if (dyn_ism_unmap) {
1941                                 for (i = 0; i < npages; i++) {
1942                                         page_unlock(ppa[i]);
1943                                 }
1944                         }
1945                 }
1946 
1947                 if (!dyn_ism_unmap) {
1948                         if (hat_share(seg->s_as->a_hat, shm_addr,
1949                             curspt->a_hat, segspt_addr, ptob(npages),
1950                             seg->s_szc) != 0) {
1951                                 panic("hat_share err in DISM fault");
1952                                 /* NOTREACHED */
1953                         }
1954                         if (type == F_INVAL) {
1955                                 for (i = 0; i < npages; i++) {
1956                                         page_unlock(ppa[i]);
1957                                 }
1958                         }
1959                 }
1960                 AS_LOCK_EXIT(sptseg->s_as);
1961 dism_err:
1962                 kmem_free(ppa, npages * sizeof (page_t *));
1963                 return (err);
1964 
1965         case F_SOFTUNLOCK:
1966 
1967                 /*
1968                  * This is a bit ugly: we pass in the real seg pointer,
1969                  * but the segspt_addr is the virtual address within the
1970                  * dummy seg.
1971                  */
1972                 segspt_softunlock(seg, segspt_addr, size, rw);
1973                 return (0);
1974 
1975         case F_PROT:
1976 
1977                 /*
1978                  * This takes care of the unusual case where a user
1979                  * allocates a stack in shared memory and a register
1980                  * window overflow is written to that stack page before
1981                  * it is otherwise modified.
1982                  *
1983                  * We can get away with this because ISM segments are
1984                  * always rw. Other than this unusual case, there
1985                  * should be no instances of protection violations.
1986                  */
1987                 return (0);
1988 
1989         default:
1990 #ifdef DEBUG
1991                 panic("segspt_dismfault default type?");
1992 #else
1993                 return (FC_NOMAP);
1994 #endif
1995         }
1996 }
1997 
1998 
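/*
 * Fault handler for the per-process shm segment; DISM (SHM_PAGEABLE)
 * segments are handed off to segspt_dismfault().
 */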
1999 faultcode_t
2000 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2001     size_t len, enum fault_type type, enum seg_rw rw)
2002 {
2003         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2004         struct seg              *sptseg = shmd->shm_sptseg;
2005         struct as               *curspt = shmd->shm_sptas;
2006         struct spt_data         *sptd   = sptseg->s_data;
2007         pgcnt_t npages;
2008         size_t size;
2009         caddr_t sptseg_addr, shm_addr;
2010         page_t *pp, **ppa;
2011         int     i;
2012         u_offset_t offset;
2013         ulong_t anon_index = 0;
2014         struct vnode *vp;
2015         struct anon_map *amp;           /* XXX - for locknest */
2016         struct anon *ap = NULL;
2017         size_t          pgsz;
2018         pgcnt_t         pgcnt;
2019         caddr_t         a;
2020         pgcnt_t         pidx;
2021         size_t          sz;
2022 
2023 #ifdef lint
2024         hat = hat;
2025 #endif
2026 
2027         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2028 
2029         if (sptd->spt_flags & SHM_PAGEABLE) {
2030                 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2031         }
2032 
2033         /*
2034          * Because of the way spt is implemented,
2035          * the realsize of the segment does not have to be
2036          * equal to the segment size itself. The segment size is
2037          * often a multiple of a page size larger than PAGESIZE.
2038          * The realsize is rounded up to the nearest PAGESIZE
2039          * based on what the user requested. This is a bit of
2040          * ugliness that is historical but not easily fixed
2041          * without re-designing the higher levels of ISM.
2042          */
2043         ASSERT(addr >= seg->s_base);
2044         if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2045                 return (FC_NOMAP);
2046         /*
2047          * For all of the following cases except F_PROT, we need to
2048          * make any necessary adjustments to addr and len
2049          * and get all of the necessary page_t's into an array called ppa[].
2050          *
2051          * The code in shmat() forces base addr and len of ISM segment
2052          * to be aligned to largest page size supported. Therefore,
2053          * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2054          * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2055          * in large pagesize chunks, or else we will screw up the HAT
2056          * layer by calling hat_memload_array() with differing page sizes
2057          * over a given virtual range.
2058          */
2059         pgsz = page_get_pagesize(sptseg->s_szc);
2060         pgcnt = page_get_pagecnt(sptseg->s_szc);
2061         shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2062         size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2063         npages = btopr(size);
2064 
2065         /*
2066          * Now we need to convert from addr in segshm to addr in segspt.
2067          */
2068         anon_index = seg_page(seg, shm_addr);
2069         sptseg_addr = sptseg->s_base + ptob(anon_index);
2070 
2071         /*
2072          * And now we may have to adjust npages downward if we have
2073          * exceeded the realsize of the segment or initial anon
2074          * allocations.
2075          */
2076         if ((sptseg_addr + ptob(npages)) >
2077             (sptseg->s_base + sptd->spt_realsize))
2078                 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2079 
2080         npages = btopr(size);
2081 
2082         ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2083         ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2084 
2085         switch (type) {
2086 
2087         case F_SOFTLOCK:
2088 
2089                 /*
2090                  * availrmem is decremented once during anon_swap_adjust()
2091                  * and is incremented during the anon_unresv(), which is
2092                  * called from shm_rm_amp() when the segment is destroyed.
2093                  */
2094                 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2095                 /*
2096                  * Some platforms assume that ISM pages are SE_SHARED
2097                  * locked for the entire life of the segment.
2098                  */
2099                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2100                         return (0);
2101                 /*
2102                  * Fall through to the F_INVAL case to load up the hat layer
2103                  * entries with the HAT_LOAD_LOCK flag.
2104                  */
2105 
2106                 /* FALLTHRU */
2107         case F_INVAL:
2108 
2109                 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2110                         return (FC_NOMAP);
2111 
2112                 /*
2113                  * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2114                  * may still rely on this call to hat_share(). That
2115                  * would imply that those HATs can fault on a
2116                  * HAT_LOAD_LOCK translation, which would seem
2117                  * contradictory.
2118                  */
2119                 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2120                         if (hat_share(seg->s_as->a_hat, seg->s_base,
2121                             curspt->a_hat, sptseg->s_base,
2122                             sptseg->s_size, sptseg->s_szc) != 0) {
2123                                 panic("hat_share error in ISM fault");
2124                                 /*NOTREACHED*/
2125                         }
2126                         return (0);
2127                 }
2128                 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2129 
2130                 /*
2131                  * I see no need to lock the real seg
2132                  * here, because all of our work will be on the underlying
2133                  * dummy seg.
2134                  *
2135                  * sptseg_addr and npages now account for large pages.
2136                  */
2137                 amp = sptd->spt_amp;
2138                 ASSERT(amp != NULL);
2139                 anon_index = seg_page(sptseg, sptseg_addr);
2140 
2141                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2142                 for (i = 0; i < npages; i++) {
2143                         ap = anon_get_ptr(amp->ahp, anon_index++);
2144                         ASSERT(ap != NULL);
2145                         swap_xlate(ap, &vp, &offset);
2146                         pp = page_lookup(vp, offset, SE_SHARED);
2147                         ASSERT(pp != NULL);
2148                         ppa[i] = pp;
2149                 }
2150                 ANON_LOCK_EXIT(&amp->a_rwlock);
2151                 ASSERT(i == npages);
2152 
2153                 /*
2154                  * We are already holding the as->a_lock on the user's
2155                  * real segment, but we need to hold the a_lock on the
2156                  * underlying dummy as. This is mostly to satisfy the
2157                  * underlying HAT layer.
2158                  */
2159                 AS_LOCK_ENTER(sptseg->s_as, RW_READER);
2160                 a = sptseg_addr;
2161                 pidx = 0;
2162                 if (type == F_SOFTLOCK) {
2163                         /*
2164                          * Load up the translation keeping it
2165                          * locked and don't unlock the page.
2166                          */
2167                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2168                                 sz = MIN(pgsz, ptob(npages - pidx));
2169                                 hat_memload_array(sptseg->s_as->a_hat, a,
2170                                     sz, &ppa[pidx], sptd->spt_prot,
2171                                     HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2172                         }
2173                 } else {
2174                         /*
2175                          * Migrate pages marked for migration.
2176                          */
2177                         if (lgrp_optimizations())
2178                                 page_migrate(seg, shm_addr, ppa, npages);
2179 
2180                         for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2181                                 sz = MIN(pgsz, ptob(npages - pidx));
2182                                 hat_memload_array(sptseg->s_as->a_hat,
2183                                     a, sz, &ppa[pidx],
2184                                     sptd->spt_prot, HAT_LOAD_SHARE);
2185                         }
2186 
2187                         /*
2188                          * And now drop the SE_SHARED lock(s).
2189                          */
2190                         for (i = 0; i < npages; i++)
2191                                 page_unlock(ppa[i]);
2192                 }
2193                 AS_LOCK_EXIT(sptseg->s_as);
2194 
2195                 kmem_free(ppa, sizeof (page_t *) * npages);
2196                 return (0);
2197         case F_SOFTUNLOCK:
2198 
2199                 /*
2200                  * This is a bit ugly: we pass in the real seg pointer,
2201                  * but the sptseg_addr is the virtual address within the
2202                  * dummy seg.
2203                  */
2204                 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2205                 return (0);
2206 
2207         case F_PROT:
2208 
2209                 /*
2210                  * This takes care of the unusual case where a user
2211                  * allocates a stack in shared memory and a register
2212                  * window overflow is written to that stack page before
2213                  * it is otherwise modified.
2214                  *
2215                  * We can get away with this because ISM segments are
2216                  * always rw. Other than this unusual case, there
2217                  * should be no instances of protection violations.
2218                  */
2219                 return (0);
2220 
2221         default:
2222 #ifdef DEBUG
2223                 cmn_err(CE_WARN, "segspt_shmfault default type?");
2224 #endif
2225                 return (FC_NOMAP);
2226         }
2227 }
2228 
2229 /*ARGSUSED*/
2230 static faultcode_t
2231 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2232 {
2233         return (0);
2234 }
2235 
2236 /*ARGSUSED*/
2237 static int
2238 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2239 {
2240         return (0);
2241 }
2242 
2243 /*
2244  * duplicate the shared page tables
2245  */
2246 int
2247 segspt_shmdup(struct seg *seg, struct seg *newseg)
2248 {
2249         struct shm_data         *shmd = (struct shm_data *)seg->s_data;
2250         struct anon_map         *amp = shmd->shm_amp;
2251         struct shm_data         *shmd_new;
2252         struct seg              *spt_seg = shmd->shm_sptseg;
2253         struct spt_data         *sptd = spt_seg->s_data;
2254         int                     error = 0;
2255 
2256         ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as));
2257 
2258         shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2259         newseg->s_data = (void *)shmd_new;
2260         shmd_new->shm_sptas = shmd->shm_sptas;
2261         shmd_new->shm_amp = amp;
2262         shmd_new->shm_sptseg = shmd->shm_sptseg;
2263         newseg->s_ops = &segspt_shmops;
2264         newseg->s_szc = seg->s_szc;
2265         ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2266 
2267         ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2268         amp->refcnt++;
2269         ANON_LOCK_EXIT(&amp->a_rwlock);
2270 
2271         if (sptd->spt_flags & SHM_PAGEABLE) {
2272                 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2273                 shmd_new->shm_lckpgs = 0;
2274                 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2275                         if ((error = hat_share(newseg->s_as->a_hat,
2276                             newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2277                             seg->s_size, seg->s_szc)) != 0) {
2278                                 kmem_free(shmd_new->shm_vpage,
2279                                     btopr(amp->size));
2280                         }
2281                 }
2282                 return (error);
2283         } else {
2284                 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2285                     shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2286                     seg->s_szc));
2287 
2288         }
2289 }
2290 
2291 /*ARGSUSED*/
2292 int
2293 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2294 {
2295         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2296         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2297 
2298         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2299 
2300         /*
2301          * ISM segment is always rw.
2302          */
2303         return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2304 }
2305 
2306 /*
2307  * Return an array of locked large pages, for empty slots allocate
2308  * private zero-filled anon pages.
2309  */
2310 static int
2311 spt_anon_getpages(
2312         struct seg *sptseg,
2313         caddr_t sptaddr,
2314         size_t len,
2315         page_t *ppa[])
2316 {
2317         struct  spt_data *sptd = sptseg->s_data;
2318         struct  anon_map *amp = sptd->spt_amp;
2319         enum    seg_rw rw = sptd->spt_prot;
2320         uint_t  szc = sptseg->s_szc;
2321         size_t  pg_sz, share_sz = page_get_pagesize(szc);
2322         pgcnt_t lp_npgs;
2323         caddr_t lp_addr, e_sptaddr;
2324         uint_t  vpprot, ppa_szc = 0;
2325         struct  vpage *vpage = NULL;
2326         ulong_t j, ppa_idx;
2327         int     err, ierr = 0;
2328         pgcnt_t an_idx;
2329         anon_sync_obj_t cookie;
2330         int anon_locked = 0;
2331         pgcnt_t amp_pgs;
2332 
2333 
2334         ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2335         ASSERT(len != 0);
2336 
2337         pg_sz = share_sz;
2338         lp_npgs = btop(pg_sz);
2339         lp_addr = sptaddr;
2340         e_sptaddr = sptaddr + len;
2341         an_idx = seg_page(sptseg, sptaddr);
2342         ppa_idx = 0;
2343 
2344         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2345 
2346         amp_pgs = page_get_pagecnt(amp->a_szc);
2347 
2348         /*CONSTCOND*/
2349         while (1) {
2350                 for (; lp_addr < e_sptaddr;
2351                     an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2352 
2353                         /*
2354                          * If we're currently locked, and we get to a new
2355                          * page, unlock our current anon chunk.
2356                          */
2357                         if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2358                                 anon_array_exit(&cookie);
2359                                 anon_locked = 0;
2360                         }
2361                         if (!anon_locked) {
2362                                 anon_array_enter(amp, an_idx, &cookie);
2363                                 anon_locked = 1;
2364                         }
2365                         ppa_szc = (uint_t)-1;
2366                         ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2367                             lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2368                             &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2369 
2370                         if (ierr != 0) {
2371                                 if (ierr > 0) {
2372                                         err = FC_MAKE_ERR(ierr);
2373                                         goto lpgs_err;
2374                                 }
2375                                 break;
2376                         }
2377                 }
2378                 if (lp_addr == e_sptaddr) {
2379                         break;
2380                 }
2381                 ASSERT(lp_addr < e_sptaddr);
2382 
2383                 /*
2384                  * ierr == -1 means we failed to allocate a large page,
2385                  * so do a size-down operation.
2386                  *
2387                  * ierr == -2 means some other process that privately shares
2388                  * pages with this process has allocated a larger page and we
2389                  * need to retry with larger pages. So do a size-up
2390                  * operation. This relies on the fact that large pages are
2391                  * never partially shared, i.e. if we share any constituent
2392                  * page of a large page with another process we must share the
2393                  * entire large page. Note this cannot happen for the SOFTLOCK
2394                  * case, unless the current address (lp_addr) is at the
2395                  * beginning of the next page size boundary, because the other
2396                  * process couldn't have relocated locked pages.
2397                  */
2398                 ASSERT(ierr == -1 || ierr == -2);
2399                 if (segvn_anypgsz) {
2400                         ASSERT(ierr == -2 || szc != 0);
2401                         ASSERT(ierr == -1 || szc < sptseg->s_szc);
2402                         szc = (ierr == -1) ? szc - 1 : szc + 1;
2403                 } else {
2404                         /*
2405                          * For faults and segvn_anypgsz == 0
2406                          * if an existing page is found with a szc other
2407                          * than 0 or seg->s_szc. This could be due
2408                          * to page relocations on behalf of DR or,
2409                          * more likely, large page creation. For this
2410                          * case simply re-size to the existing page's szc
2411                          * as returned by anon_map_getpages().
2412                          * if returned by anon_map_getpages().
2413                          */
2414                         if (ppa_szc == (uint_t)-1) {
2415                                 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2416                         } else {
2417                                 ASSERT(ppa_szc <= sptseg->s_szc);
2418                                 ASSERT(ierr == -2 || ppa_szc < szc);
2419                                 ASSERT(ierr == -1 || ppa_szc > szc);
2420                                 szc = ppa_szc;
2421                         }
2422                 }
2423                 pg_sz = page_get_pagesize(szc);
2424                 lp_npgs = btop(pg_sz);
2425                 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2426         }
2427         if (anon_locked) {
2428                 anon_array_exit(&cookie);
2429         }
2430         ANON_LOCK_EXIT(&amp->a_rwlock);
2431         return (0);
2432 
2433 lpgs_err:
2434         if (anon_locked) {
2435                 anon_array_exit(&cookie);
2436         }
2437         ANON_LOCK_EXIT(&amp->a_rwlock);
2438         for (j = 0; j < ppa_idx; j++)
2439                 page_unlock(ppa[j]);
2440         return (err);
2441 }
2442 
2443 /*
2444  * count the number of bytes in a set of spt pages that are currently not
2445  * locked
2446  */
2447 static rctl_qty_t
2448 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2449 {
2450         ulong_t i;
2451         rctl_qty_t unlocked = 0;
2452 
2453         for (i = 0; i < npages; i++) {
2454                 if (ppa[i]->p_lckcnt == 0)
2455                         unlocked += PAGESIZE;
2456         }
2457         return (unlocked);
2458 }
2459 
2460 extern  u_longlong_t randtick(void);
2461 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2462 #define NLCK    (NCPU_P2)
2463 /* Random number with a range [0, n-1], n must be power of two */
2464 #define RAND_P2(n)      \
2465         ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
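/*
 * For example, NLCK + RAND_P2(NLCK) yields a value in [NLCK, 2 * NLCK - 1],
 * so each thread reserves a slightly different number of locks and competing
 * threads stagger their updates of the global counters.
 */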
2466 
2467 int
2468 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2469     page_t **ppa, ulong_t *lockmap, size_t pos,
2470     rctl_qty_t *locked)
2471 {
2472         struct  shm_data *shmd = seg->s_data;
2473         struct  spt_data *sptd = shmd->shm_sptseg->s_data;
2474         ulong_t i;
2475         int     kernel;
2476         pgcnt_t nlck = 0;
2477         int     rv = 0;
2478         int     use_reserved = 1;
2479 
2480         /* return the number of bytes actually locked */
2481         *locked = 0;
2482 
2483         /*
2484          * To avoid contention on freemem_lock, availrmem and pages_locked
2485          * global counters are updated only every nlck locked pages instead of
2486          * every time.  Reserve nlck locks up front and deduct from this
2487          * reservation for each page that requires a lock.  When the reservation
2488          * is consumed, reserve again.  nlck is randomized, so the competing
2489          * threads do not fall into a cyclic lock contention pattern. When
2490          * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2491          * is used to lock pages.
2492          */
2493         for (i = 0; i < npages; anon_index++, pos++, i++) {
2494                 if (nlck == 0 && use_reserved == 1) {
2495                         nlck = NLCK + RAND_P2(NLCK);
2496                         /* if fewer loops left, decrease nlck */
2497                         nlck = MIN(nlck, npages - i);
2498                         /*
2499                          * Reserve nlck locks up front and deduct from this
2500                          * reservation for each page that requires a lock.  When
2501                          * the reservation is consumed, reserve again.
2502                          */
2503                         mutex_enter(&freemem_lock);
2504                         if ((availrmem - nlck) < pages_pp_maximum) {
2505                                 /* Do not do advance memory reserves */
2506                                 use_reserved = 0;
2507                         } else {
2508                                 availrmem       -= nlck;
2509                                 pages_locked    += nlck;
2510                         }
2511                         mutex_exit(&freemem_lock);
2512                 }
2513                 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2514                         if (sptd->spt_ppa_lckcnt[anon_index] <
2515                             (ushort_t)DISM_LOCK_MAX) {
2516                                 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2517                                     (ushort_t)DISM_LOCK_MAX) {
2518                                         cmn_err(CE_WARN,
2519                                             "DISM page lock limit "
2520                                             "reached on DISM offset 0x%lx\n",
2521                                             anon_index << PAGESHIFT);
2522                                 }
2523                                 kernel = (sptd->spt_ppa &&
2524                                     sptd->spt_ppa[anon_index]);
2525                                 if (!page_pp_lock(ppa[i], 0, kernel ||
2526                                     use_reserved)) {
2527                                         sptd->spt_ppa_lckcnt[anon_index]--;
2528                                         rv = EAGAIN;
2529                                         break;
2530                                 }
2531                                 /* if this is a newly locked page, count it */
2532                                 if (ppa[i]->p_lckcnt == 1) {
2533                                         if (kernel == 0 && use_reserved == 1)
2534                                                 nlck--;
2535                                         *locked += PAGESIZE;
2536                                 }
2537                                 shmd->shm_lckpgs++;
2538                                 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2539                                 if (lockmap != NULL)
2540                                         BT_SET(lockmap, pos);
2541                         }
2542                 }
2543         }
2544         /* Return unused lock reservation */
2545         if (nlck != 0 && use_reserved == 1) {
2546                 mutex_enter(&freemem_lock);
2547                 availrmem       += nlck;
2548                 pages_locked    -= nlck;
2549                 mutex_exit(&freemem_lock);
2550         }
2551 
2552         return (rv);
2553 }
2554 
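/*
 * Undo spt_lockpages() over the given range: drop the per-page lock counts
 * and report, via *unlocked, the number of bytes that actually became
 * unlocked.
 */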
2555 int
2556 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2557     rctl_qty_t *unlocked)
2558 {
2559         struct shm_data *shmd = seg->s_data;
2560         struct spt_data *sptd = shmd->shm_sptseg->s_data;
2561         struct anon_map *amp = sptd->spt_amp;
2562         struct anon     *ap;
2563         struct vnode    *vp;
2564         u_offset_t      off;
2565         struct page     *pp;
2566         int             kernel;
2567         anon_sync_obj_t cookie;
2568         ulong_t         i;
2569         pgcnt_t         nlck = 0;
2570         pgcnt_t         nlck_limit = NLCK;
2571 
2572         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2573         for (i = 0; i < npages; i++, anon_index++) {
2574                 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2575                         anon_array_enter(amp, anon_index, &cookie);
2576                         ap = anon_get_ptr(amp->ahp, anon_index);
2577                         ASSERT(ap);
2578 
2579                         swap_xlate(ap, &vp, &off);
2580                         anon_array_exit(&cookie);
2581                         pp = page_lookup(vp, off, SE_SHARED);
2582                         ASSERT(pp);
2583                         /*
2584                          * availrmem is decremented only for pages which are
2585                          * not in seg pcache; for pages in seg pcache,
2586                          * availrmem was decremented in _dismpagelock().
2587                          */
2588                         kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2589                         ASSERT(pp->p_lckcnt > 0);
2590 
2591                         /*
2592                          * Unlock the page but do not change availrmem; we
2593                          * update it ourselves every nlck loops.
2594                          */
2595                         page_pp_unlock(pp, 0, 1);
2596                         if (pp->p_lckcnt == 0) {
2597                                 if (kernel == 0)
2598                                         nlck++;
2599                                 *unlocked += PAGESIZE;
2600                         }
2601                         page_unlock(pp);
2602                         shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2603                         sptd->spt_ppa_lckcnt[anon_index]--;
2604                         shmd->shm_lckpgs--;
2605                 }
2606 
2607                 /*
2608                  * To reduce freemem_lock contention, do not update availrmem
2609                  * until at least NLCK pages have been unlocked.
2610                  * 1. No need to update if nlck is zero.
2611                  * 2. Always update on the last iteration.
2612                  */
2613                 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2614                         mutex_enter(&freemem_lock);
2615                         availrmem       += nlck;
2616                         pages_locked    -= nlck;
2617                         mutex_exit(&freemem_lock);
2618                         nlck = 0;
2619                         nlck_limit = NLCK + RAND_P2(NLCK);
2620                 }
2621         }
2622         ANON_LOCK_EXIT(&amp->a_rwlock);
2623 
2624         return (0);
2625 }
2626 
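/*
 * MC_LOCK/MC_UNLOCK handler; only DISM (SHM_PAGEABLE) segments are affected,
 * and the locked-memory resource control is charged before any pages are
 * locked.
 */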
2627 /*ARGSUSED*/
2628 static int
2629 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2630     int attr, int op, ulong_t *lockmap, size_t pos)
2631 {
2632         struct shm_data *shmd = seg->s_data;
2633         struct seg      *sptseg = shmd->shm_sptseg;
2634         struct spt_data *sptd = sptseg->s_data;
2635         struct kshmid   *sp = sptd->spt_amp->a_sp;
2636         pgcnt_t         npages, a_npages;
2637         page_t          **ppa;
2638         pgcnt_t         an_idx, a_an_idx, ppa_idx;
2639         caddr_t         spt_addr, a_addr;       /* spt and aligned address */
2640         size_t          a_len;                  /* aligned len */
2641         size_t          share_sz;
2642         ulong_t         i;
2643         int             sts = 0;
2644         rctl_qty_t      unlocked = 0;
2645         rctl_qty_t      locked = 0;
2646         struct proc     *p = curproc;
2647         kproject_t      *proj;
2648 
2649         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2650         ASSERT(sp != NULL);
2651 
2652         if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2653                 return (0);
2654         }
2655 
2656         addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2657         an_idx = seg_page(seg, addr);
2658         npages = btopr(len);
2659 
2660         if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2661                 return (ENOMEM);
2662         }
2663 
2664         /*
2665          * A shm's project never changes, so no lock needed.
2666          * The shm has a hold on the project, so it will not go away.
2667          * Since we have a mapping to shm within this zone, we know
2668          * that the zone will not go away.
2669          */
2670         proj = sp->shm_perm.ipc_proj;
2671 
2672         if (op == MC_LOCK) {
2673 
2674                 /*
2675                  * Align addr and the request size if they are not already
2676                  * aligned, so that we can always allocate large page(s);
2677                  * however, we lock only what was requested initially.
2678                  */
2679                 share_sz = page_get_pagesize(sptseg->s_szc);
2680                 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2681                 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2682                     share_sz);
2683                 a_npages = btop(a_len);
2684                 a_an_idx = seg_page(seg, a_addr);
2685                 spt_addr = sptseg->s_base + ptob(a_an_idx);
2686                 ppa_idx = an_idx - a_an_idx;
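                     /*
                      * Illustrative arithmetic (assumed values, not taken from
                      * the code above): with PAGESIZE = 0x1000, a 4 MB
                      * underlying page (share_sz = 0x400000) and a segment
                      * base aligned to share_sz, a request of addr = base +
                      * 0x5000 and len = 0x3000 gives a_addr = base, a_len =
                      * 0x400000, a_npages = 0x400, a_an_idx = 0 and ppa_idx =
                      * 5; pages for the whole aligned range are obtained
                      * below, but only the three requested pages are passed
                      * to spt_lockpages().
                      */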
2687 
2688                 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2689                     KM_NOSLEEP)) == NULL) {
2690                         return (ENOMEM);
2691                 }
2692 
2693                 /*
2694                  * Don't cache any new pages for IO and
2695                  * flush any cached pages.
2696                  */
2697                 mutex_enter(&sptd->spt_lock);
2698                 if (sptd->spt_ppa != NULL)
2699                         sptd->spt_flags |= DISM_PPA_CHANGED;
2700 
2701                 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2702                 if (sts != 0) {
2703                         mutex_exit(&sptd->spt_lock);
2704                         kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2705                         return (sts);
2706                 }
2707 
2708                 mutex_enter(&sp->shm_mlock);
2709                 /* enforce locked memory rctl */
2710                 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2711 
2712                 mutex_enter(&p->p_lock);
2713                 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2714                         mutex_exit(&p->p_lock);
2715                         sts = EAGAIN;
2716                 } else {
2717                         mutex_exit(&p->p_lock);
2718                         sts = spt_lockpages(seg, an_idx, npages,
2719                             &ppa[ppa_idx], lockmap, pos, &locked);
2720 
2721                         /*
2722                          * correct locked count if not all pages could be
2723                          * locked
2724                          */
2725                         if ((unlocked - locked) > 0) {
2726                                 rctl_decr_locked_mem(NULL, proj,
2727                                     (unlocked - locked), 0);
2728                         }
2729                 }
2730                 /*
2731                  * unlock pages
2732                  */
2733                 for (i = 0; i < a_npages; i++)
2734                         page_unlock(ppa[i]);
2735                 if (sptd->spt_ppa != NULL)
2736                         sptd->spt_flags |= DISM_PPA_CHANGED;
2737                 mutex_exit(&sp->shm_mlock);
2738                 mutex_exit(&sptd->spt_lock);
2739 
2740                 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2741 
2742         } else if (op == MC_UNLOCK) { /* unlock */
2743                 page_t          **ppa;
2744 
2745                 mutex_enter(&sptd->spt_lock);
2746                 if (shmd->shm_lckpgs == 0) {
2747                         mutex_exit(&sptd->spt_lock);
2748                         return (0);
2749                 }
2750                 /*
2751                  * Don't cache new IO pages.
2752                  */
2753                 if (sptd->spt_ppa != NULL)
2754                         sptd->spt_flags |= DISM_PPA_CHANGED;
2755 
2756                 mutex_enter(&sp->shm_mlock);
2757                 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2758                 if ((ppa = sptd->spt_ppa) != NULL)
2759                         sptd->spt_flags |= DISM_PPA_CHANGED;
2760                 mutex_exit(&sptd->spt_lock);
2761 
2762                 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2763                 mutex_exit(&sp->shm_mlock);
2764 
2765                 if (ppa != NULL)
2766                         seg_ppurge_wiredpp(ppa);
2767         }
2768         return (sts);
2769 }
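
     /*
      * Minimal user-level sketch of one way this entry point is reached
      * (illustrative only; error and privilege handling are elided, and the
      * intermediate kernel call chain is not shown): a DISM segment is
      * attached with SHM_PAGEABLE and then locked/unlocked with mlock(3C),
      * which arrives here as MC_LOCK/MC_UNLOCK.
      *
      *	#include <sys/ipc.h>
      *	#include <sys/shm.h>
      *	#include <sys/mman.h>
      *
      *	size_t sz = 64UL * 1024 * 1024;
      *	int id = shmget(IPC_PRIVATE, sz, IPC_CREAT | 0600);
      *	void *va = shmat(id, NULL, SHM_PAGEABLE);	-- DISM attach
      *	(void) mlock(va, sz);				-- op == MC_LOCK
      *	(void) munlock(va, sz);				-- op == MC_UNLOCK
      */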
2770 
2771 /*ARGSUSED*/
2772 int
2773 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2774 {
2775         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2776         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2777         spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2778 
2779         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2780 
2781         /*
2782          * ISM segment is always rw.
2783          */
2784         while (--pgno >= 0)
2785                 *protv++ = sptd->spt_prot;
2786         return (0);
2787 }
2788 
2789 /*ARGSUSED*/
2790 u_offset_t
2791 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2792 {
2793         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2794 
2795         /* Offset does not matter in ISM memory */
2796 
2797         return ((u_offset_t)0);
2798 }
2799 
2800 /* ARGSUSED */
2801 int
2802 segspt_shmgettype(struct seg *seg, caddr_t addr)
2803 {
2804         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2805         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2806 
2807         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2808 
2809         /*
2810          * The shared memory mapping is always MAP_SHARED; swap is reserved
2811          * only for DISM, so ISM segments are MAP_NORESERVE.
2812          */
2813         return (MAP_SHARED |
2814             ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2815 }
2816 
2817 /*ARGSUSED*/
2818 int
2819 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2820 {
2821         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2822         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2823 
2824         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2825 
2826         *vpp = sptd->spt_vp;
2827         return (0);
2828 }
2829 
2830 /*
2831  * We need to wait for pending I/O against a DISM segment to complete
2832  * before its pages can get kicked out of the seg_pcache.  120 seconds
2833  * should be more than enough time to wait.
2834  */
2835 static clock_t spt_pcache_wait = 120;
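
     /*
      * For reference (rough figures, not derived from this file): the wait in
      * segspt_shmadvise() below is expressed in clock ticks, so with the
      * common hz value of 100 the deadline works out to ddi_get_lbolt() +
      * 100 * 120 = 12000 ticks, i.e. roughly two minutes.
      */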
2836 
2837 /*ARGSUSED*/
2838 static int
2839 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2840 {
2841         struct shm_data *shmd = (struct shm_data *)seg->s_data;
2842         struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2843         struct anon_map *amp;
2844         pgcnt_t pg_idx;
2845         ushort_t gen;
2846         clock_t end_lbolt;
2847         int writer;
2848         page_t **ppa;
2849 
2850         ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));
2851 
2852         if (behav == MADV_FREE) {
2853                 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2854                         return (0);
2855 
2856                 amp = sptd->spt_amp;
2857                 pg_idx = seg_page(seg, addr);
2858 
2859                 mutex_enter(&sptd->spt_lock);
2860                 if ((ppa = sptd->spt_ppa) == NULL) {
2861                         mutex_exit(&sptd->spt_lock);
2862                         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2863                         anon_disclaim(amp, pg_idx, len);
2864                         ANON_LOCK_EXIT(&amp->a_rwlock);
2865                         return (0);
2866                 }
2867 
2868                 sptd->spt_flags |= DISM_PPA_CHANGED;
2869                 gen = sptd->spt_gen;
2870 
2871                 mutex_exit(&sptd->spt_lock);
2872 
2873                 /*
2874                  * Purge all DISM cached pages
2875                  */
2876                 seg_ppurge_wiredpp(ppa);
2877 
2878                 /*
2879                  * Drop the AS_LOCK so that other threads can grab it
2880                  * in the as_pageunlock path and hopefully get the segment
2881                  * kicked out of the seg_pcache.  We bump the shm_softlockcnt
2882                  * to keep this segment resident.
2883                  */
2884                 writer = AS_WRITE_HELD(seg->s_as);
2885                 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2886                 AS_LOCK_EXIT(seg->s_as);
2887 
2888                 mutex_enter(&sptd->spt_lock);
2889 
2890                 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2891 
2892                 /*
2893                  * Try to wait for pages to get kicked out of the seg_pcache.
2894                  */
2895                 while (sptd->spt_gen == gen &&
2896                     (sptd->spt_flags & DISM_PPA_CHANGED) &&
2897                     ddi_get_lbolt() < end_lbolt) {
2898                         if (!cv_timedwait_sig(&sptd->spt_cv,
2899                             &sptd->spt_lock, end_lbolt)) {
2900                                 break;
2901                         }
2902                 }
2903 
2904                 mutex_exit(&sptd->spt_lock);
2905 
2906                 /* Regrab the AS_LOCK and release our hold on the segment */
2907                 AS_LOCK_ENTER(seg->s_as, writer ? RW_WRITER : RW_READER);
2908                 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2909                 if (shmd->shm_softlockcnt <= 0) {
2910                         if (AS_ISUNMAPWAIT(seg->s_as)) {
2911                                 mutex_enter(&seg->s_as->a_contents);
2912                                 if (AS_ISUNMAPWAIT(seg->s_as)) {
2913                                         AS_CLRUNMAPWAIT(seg->s_as);
2914                                         cv_broadcast(&seg->s_as->a_cv);
2915                                 }
2916                                 mutex_exit(&seg->s_as->a_contents);
2917                         }
2918                 }
2919 
2920                 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2921                 anon_disclaim(amp, pg_idx, len);
2922                 ANON_LOCK_EXIT(&amp->a_rwlock);
2923         } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2924             behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2925                 int                     already_set;
2926                 ulong_t                 anon_index;
2927                 lgrp_mem_policy_t       policy;
2928                 caddr_t                 shm_addr;
2929                 size_t                  share_size;
2930                 size_t                  size;
2931                 struct seg              *sptseg = shmd->shm_sptseg;
2932                 caddr_t                 sptseg_addr;
2933 
2934                 /*
2935                  * Align address and length to page size of underlying segment
2936                  */
2937                 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2938                 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2939                 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2940                     share_size);
2941 
2942                 amp = shmd->shm_amp;
2943                 anon_index = seg_page(seg, shm_addr);
2944 
2945                 /*
2946                  * And now we may have to adjust size downward if we have
2947                  * exceeded the realsize of the segment or initial anon
2948                  * allocations.
2949                  */
2950                 sptseg_addr = sptseg->s_base + ptob(anon_index);
2951                 if ((sptseg_addr + size) >
2952                     (sptseg->s_base + sptd->spt_realsize))
2953                         size = (sptseg->s_base + sptd->spt_realsize) -
2954                             sptseg_addr;
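                     /*
                      * For example (illustrative numbers only): if spt_realsize
                      * is 10 MB, anon_index corresponds to an 8 MB offset into
                      * the underlying segment, and the aligned size came out to
                      * 4 MB, the adjustment above trims size to the remaining
                      * 2 MB.
                      */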
2955 
2956                 /*
2957                  * Set memory allocation policy for this segment
2958                  */
2959                 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2960                 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2961                     NULL, 0, len);
2962 
2963                 /*
2964                  * If this policy is already set and is not one that must
2965                  * be reapplied, don't bother reapplying it.
2966                  */
2967                 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2968                         return (0);
2969 
2970                 /*
2971                  * Mark any existing pages in the given range for
2972                  * migration, flushing the I/O page cache first and using
2973                  * the underlying segment to calculate the anon index and
2974                  * to obtain the anonmap and vnode pointer.
2975                  */
2976                 if (shmd->shm_softlockcnt > 0)
2977                         segspt_purge(seg);
2978 
2979                 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2980         }
2981 
2982         return (0);
2983 }
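
     /*
      * Minimal user-level sketch (illustrative only; error handling elided):
      * MADV_FREE and the MADV_ACCESS_* hints reach the advise op above through
      * madvise(3C), applied to a range previously attached with
      * shmat(id, NULL, SHM_PAGEABLE):
      *
      *	#include <sys/mman.h>
      *
      *	(void) madvise(va, sz, MADV_FREE);		-- discard page contents
      *	(void) madvise(va, sz, MADV_ACCESS_LWP);	-- lgroup placement hint
      */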
2984 
2985 /*ARGSUSED*/
2986 void
2987 segspt_shmdump(struct seg *seg)
2988 {
2989         /* no-op for ISM segment */
2990 }
2991 
2992 /*ARGSUSED*/
2993 static faultcode_t
2994 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2995 {
2996         return (ENOTSUP);
2997 }
2998 
2999 /*
3000  * Get a memory ID for an address in the given segment.
3001  */
3002 static int
3003 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3004 {
3005         struct shm_data *shmd = (struct shm_data *)seg->s_data;
3006         struct anon     *ap;
3007         size_t          anon_index;
3008         struct anon_map *amp = shmd->shm_amp;
3009         struct spt_data *sptd = shmd->shm_sptseg->s_data;
3010         struct seg      *sptseg = shmd->shm_sptseg;
3011         anon_sync_obj_t cookie;
3012 
3013         anon_index = seg_page(seg, addr);
3014 
3015         if (addr > (seg->s_base + sptd->spt_realsize)) {
3016                 return (EFAULT);
3017         }
3018 
3019         ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3020         anon_array_enter(amp, anon_index, &cookie);
3021         ap = anon_get_ptr(amp->ahp, anon_index);
3022         if (ap == NULL) {
3023                 struct page *pp;
3024                 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3025 
3026                 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3027                 if (pp == NULL) {
3028                         anon_array_exit(&cookie);
3029                         ANON_LOCK_EXIT(&amp->a_rwlock);
3030                         return (ENOMEM);
3031                 }
3032                 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3033                 page_unlock(pp);
3034         }
3035         anon_array_exit(&cookie);
3036         ANON_LOCK_EXIT(&amp->a_rwlock);
3037         memidp->val[0] = (uintptr_t)ap;
3038         memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3039         return (0);
3040 }
3041 
3042 /*
3043  * Get memory allocation policy info for specified address in given segment
3044  */
3045 static lgrp_mem_policy_info_t *
3046 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3047 {
3048         struct anon_map         *amp;
3049         ulong_t                 anon_index;
3050         lgrp_mem_policy_info_t  *policy_info;
3051         struct shm_data         *shm_data;
3052 
3053         ASSERT(seg != NULL);
3054 
3055         /*
3056          * Get anon_map from segshm.
3057          *
3058          * Assume that no lock needs to be held on the anon_map, since
3059          * it is protected by its reference count, which must be nonzero
3060          * for an existing segment.  We do, however, need to grab the
3061          * readers lock on the policy tree.
3062          */
3063         shm_data = (struct shm_data *)seg->s_data;
3064         if (shm_data == NULL)
3065                 return (NULL);
3066         amp = shm_data->shm_amp;
3067         ASSERT(amp->refcnt != 0);
3068 
3069         /*
3070          * Get policy info
3071          *
3072          * Assume starting anon index of 0
3073          */
3074         anon_index = seg_page(seg, addr);
3075         policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3076 
3077         return (policy_info);
3078 }
3079 
3080 /*ARGSUSED*/
3081 static int
3082 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3083 {
3084         return (0);
3085 }