use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function that
returns (hopefully) ENOTSUP, handle a NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
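For context, the idea is that the common segment-op dispatch treats a missing
setpagesize entry as "operation not supported", so individual drivers no longer
need a stub whose only job is to return ENOTSUP. A minimal sketch of that
shorthand follows (illustrative only; the wrapper name and the exact dispatch
site are assumptions, not the actual seg-layer change):

static int
segop_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	/* A NULL setpagesize op is shorthand for "return ENOTSUP". */
	if (seg->s_ops->setpagesize == NULL)
		return (ENOTSUP);

	return ((*seg->s_ops->setpagesize)(seg, addr, len, szc));
}

With that shorthand in place, seg_spt can drop its segspt_shmsetpgsz() stub and
the .setpagesize initializer, as the diff below shows.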
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
 67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 68 68 * If somebody wants even more memory for ISM (risking hanging
 69 69 * the system) they can patch the segspt_minfree to a smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 struct seg_ops segspt_ops = {
80 80 .unmap = segspt_unmap,
81 81 .free = segspt_free,
82 82 .getpolicy = segspt_getpolicy,
83 83 };
84 84
85 85 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
86 86 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
87 87 static void segspt_shmfree(struct seg *seg);
88 88 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
89 89 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
90 90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
91 91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
92 92 register size_t len, register uint_t prot);
93 93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
94 94 uint_t prot);
95 95 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
96 96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
97 97 register char *vec);
98 98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
99 99 int attr, uint_t flags);
100 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
101 101 int attr, int op, ulong_t *lockmap, size_t pos);
[ 101 lines elided ]
102 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
103 103 uint_t *protv);
104 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
105 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
106 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
107 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
108 108 uint_t behav);
109 109 static void segspt_shmdump(struct seg *seg);
110 110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
111 111 struct page ***, enum lock_type, enum seg_rw);
112 -static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
113 112 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
114 113 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
115 114
116 115 struct seg_ops segspt_shmops = {
117 116 .dup = segspt_shmdup,
118 117 .unmap = segspt_shmunmap,
119 118 .free = segspt_shmfree,
120 119 .fault = segspt_shmfault,
121 120 .faulta = segspt_shmfaulta,
122 121 .setprot = segspt_shmsetprot,
123 122 .checkprot = segspt_shmcheckprot,
124 123 .kluster = segspt_shmkluster,
[ 2 lines elided ]
125 124 .sync = segspt_shmsync,
126 125 .incore = segspt_shmincore,
127 126 .lockop = segspt_shmlockop,
128 127 .getprot = segspt_shmgetprot,
129 128 .getoffset = segspt_shmgetoffset,
130 129 .gettype = segspt_shmgettype,
131 130 .getvp = segspt_shmgetvp,
132 131 .advise = segspt_shmadvise,
133 132 .dump = segspt_shmdump,
134 133 .pagelock = segspt_shmpagelock,
135 - .setpagesize = segspt_shmsetpgsz,
136 134 .getmemid = segspt_shmgetmemid,
137 135 .getpolicy = segspt_shmgetpolicy,
138 136 };
139 137
140 138 static void segspt_purge(struct seg *seg);
141 139 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
142 140 enum seg_rw, int);
143 141 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
144 142 page_t **ppa);
145 143
146 144
147 145
148 146 /*ARGSUSED*/
149 147 int
150 148 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
151 149 uint_t prot, uint_t flags, uint_t share_szc)
152 150 {
153 151 int err;
154 152 struct as *newas;
155 153 struct segspt_crargs sptcargs;
156 154
157 155 #ifdef DEBUG
158 156 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
159 157 tnf_ulong, size, size );
160 158 #endif
161 159 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
162 160 segspt_minfree = availrmem/20; /* for the system */
163 161
164 162 if (!hat_supported(HAT_SHARED_PT, (void *)0))
165 163 return (EINVAL);
166 164
167 165 /*
168 166 * get a new as for this shared memory segment
169 167 */
170 168 newas = as_alloc();
171 169 newas->a_proc = NULL;
172 170 sptcargs.amp = amp;
173 171 sptcargs.prot = prot;
174 172 sptcargs.flags = flags;
175 173 sptcargs.szc = share_szc;
176 174 /*
177 175 * create a shared page table (spt) segment
178 176 */
179 177
180 178 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
181 179 as_free(newas);
182 180 return (err);
183 181 }
184 182 *sptseg = sptcargs.seg_spt;
185 183 return (0);
186 184 }
187 185
188 186 void
189 187 sptdestroy(struct as *as, struct anon_map *amp)
190 188 {
191 189
192 190 #ifdef DEBUG
193 191 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
194 192 #endif
195 193 (void) as_unmap(as, SEGSPTADDR, amp->size);
196 194 as_free(as);
197 195 }
198 196
199 197 /*
200 198 * called from seg_free().
201 199 * free (i.e., unlock, unmap, return to free list)
202 200 * all the pages in the given seg.
203 201 */
204 202 void
205 203 segspt_free(struct seg *seg)
206 204 {
207 205 struct spt_data *sptd = (struct spt_data *)seg->s_data;
208 206
209 207 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
210 208
211 209 if (sptd != NULL) {
212 210 if (sptd->spt_realsize)
213 211 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
214 212
215 213 if (sptd->spt_ppa_lckcnt)
216 214 kmem_free(sptd->spt_ppa_lckcnt,
217 215 sizeof (*sptd->spt_ppa_lckcnt)
218 216 * btopr(sptd->spt_amp->size));
219 217 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
220 218 cv_destroy(&sptd->spt_cv);
221 219 mutex_destroy(&sptd->spt_lock);
222 220 kmem_free(sptd, sizeof (*sptd));
223 221 }
224 222 }
225 223
226 224 /*ARGSUSED*/
227 225 static int
228 226 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
229 227 uint_t flags)
230 228 {
231 229 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
232 230
233 231 return (0);
234 232 }
235 233
236 234 /*ARGSUSED*/
237 235 static size_t
238 236 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
239 237 {
240 238 caddr_t eo_seg;
241 239 pgcnt_t npages;
242 240 struct shm_data *shmd = (struct shm_data *)seg->s_data;
243 241 struct seg *sptseg;
244 242 struct spt_data *sptd;
245 243
246 244 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
247 245 #ifdef lint
248 246 seg = seg;
249 247 #endif
250 248 sptseg = shmd->shm_sptseg;
251 249 sptd = sptseg->s_data;
252 250
253 251 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
254 252 eo_seg = addr + len;
255 253 while (addr < eo_seg) {
256 254 /* page exists, and it's locked. */
257 255 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
258 256 SEG_PAGE_ANON;
259 257 addr += PAGESIZE;
260 258 }
261 259 return (len);
262 260 } else {
263 261 struct anon_map *amp = shmd->shm_amp;
264 262 struct anon *ap;
265 263 page_t *pp;
266 264 pgcnt_t anon_index;
267 265 struct vnode *vp;
268 266 u_offset_t off;
269 267 ulong_t i;
270 268 int ret;
271 269 anon_sync_obj_t cookie;
272 270
273 271 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
274 272 anon_index = seg_page(seg, addr);
275 273 npages = btopr(len);
276 274 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
277 275 return (EINVAL);
278 276 }
 279 277 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
280 278 for (i = 0; i < npages; i++, anon_index++) {
281 279 ret = 0;
282 280 anon_array_enter(amp, anon_index, &cookie);
283 281 ap = anon_get_ptr(amp->ahp, anon_index);
284 282 if (ap != NULL) {
285 283 swap_xlate(ap, &vp, &off);
286 284 anon_array_exit(&cookie);
287 285 pp = page_lookup_nowait(vp, off, SE_SHARED);
288 286 if (pp != NULL) {
289 287 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
290 288 page_unlock(pp);
291 289 }
292 290 } else {
293 291 anon_array_exit(&cookie);
294 292 }
295 293 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
296 294 ret |= SEG_PAGE_LOCKED;
297 295 }
298 296 *vec++ = (char)ret;
299 297 }
 300 298 ANON_LOCK_EXIT(&amp->a_rwlock);
301 299 return (len);
302 300 }
303 301 }
304 302
305 303 static int
306 304 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
307 305 {
308 306 size_t share_size;
309 307
310 308 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
311 309
312 310 /*
313 311 * seg.s_size may have been rounded up to the largest page size
314 312 * in shmat().
 315 313 * XXX This should be cleaned up. sptdestroy should take a length
316 314 * argument which should be the same as sptcreate. Then
317 315 * this rounding would not be needed (or is done in shm.c)
318 316 * Only the check for full segment will be needed.
319 317 *
320 318 * XXX -- shouldn't raddr == 0 always? These tests don't seem
321 319 * to be useful at all.
322 320 */
323 321 share_size = page_get_pagesize(seg->s_szc);
324 322 ssize = P2ROUNDUP(ssize, share_size);
325 323
326 324 if (raddr == seg->s_base && ssize == seg->s_size) {
327 325 seg_free(seg);
328 326 return (0);
329 327 } else
330 328 return (EINVAL);
331 329 }
332 330
333 331 int
334 332 segspt_create(struct seg *seg, caddr_t argsp)
335 333 {
336 334 int err;
337 335 caddr_t addr = seg->s_base;
338 336 struct spt_data *sptd;
339 337 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
340 338 struct anon_map *amp = sptcargs->amp;
341 339 struct kshmid *sp = amp->a_sp;
342 340 struct cred *cred = CRED();
343 341 ulong_t i, j, anon_index = 0;
344 342 pgcnt_t npages = btopr(amp->size);
345 343 struct vnode *vp;
346 344 page_t **ppa;
347 345 uint_t hat_flags;
348 346 size_t pgsz;
349 347 pgcnt_t pgcnt;
350 348 caddr_t a;
351 349 pgcnt_t pidx;
352 350 size_t sz;
353 351 proc_t *procp = curproc;
354 352 rctl_qty_t lockedbytes = 0;
355 353 kproject_t *proj;
356 354
357 355 /*
358 356 * We are holding the a_lock on the underlying dummy as,
359 357 * so we can make calls to the HAT layer.
360 358 */
361 359 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
362 360 ASSERT(sp != NULL);
363 361
364 362 #ifdef DEBUG
365 363 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
366 364 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
367 365 #endif
368 366 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
369 367 if (err = anon_swap_adjust(npages))
370 368 return (err);
371 369 }
372 370 err = ENOMEM;
373 371
374 372 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
375 373 goto out1;
376 374
377 375 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
378 376 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
379 377 KM_NOSLEEP)) == NULL)
380 378 goto out2;
381 379 }
382 380
383 381 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
384 382
385 383 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
386 384 goto out3;
387 385
388 386 seg->s_ops = &segspt_ops;
389 387 sptd->spt_vp = vp;
390 388 sptd->spt_amp = amp;
391 389 sptd->spt_prot = sptcargs->prot;
392 390 sptd->spt_flags = sptcargs->flags;
393 391 seg->s_data = (caddr_t)sptd;
394 392 sptd->spt_ppa = NULL;
395 393 sptd->spt_ppa_lckcnt = NULL;
396 394 seg->s_szc = sptcargs->szc;
397 395 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
398 396 sptd->spt_gen = 0;
399 397
 400 398 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
401 399 if (seg->s_szc > amp->a_szc) {
402 400 amp->a_szc = seg->s_szc;
403 401 }
 404 402 ANON_LOCK_EXIT(&amp->a_rwlock);
405 403
406 404 /*
407 405 * Set policy to affect initial allocation of pages in
408 406 * anon_map_createpages()
409 407 */
410 408 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
411 409 NULL, 0, ptob(npages));
412 410
413 411 if (sptcargs->flags & SHM_PAGEABLE) {
414 412 size_t share_sz;
415 413 pgcnt_t new_npgs, more_pgs;
416 414 struct anon_hdr *nahp;
417 415 zone_t *zone;
418 416
419 417 share_sz = page_get_pagesize(seg->s_szc);
420 418 if (!IS_P2ALIGNED(amp->size, share_sz)) {
421 419 /*
422 420 * We are rounding up the size of the anon array
423 421 * on 4 M boundary because we always create 4 M
424 422 * of page(s) when locking, faulting pages and we
425 423 * don't have to check for all corner cases e.g.
426 424 * if there is enough space to allocate 4 M
427 425 * page.
428 426 */
429 427 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
430 428 more_pgs = new_npgs - npages;
431 429
432 430 /*
433 431 * The zone will never be NULL, as a fully created
434 432 * shm always has an owning zone.
435 433 */
436 434 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
437 435 ASSERT(zone != NULL);
438 436 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
439 437 err = ENOMEM;
440 438 goto out4;
441 439 }
442 440
443 441 nahp = anon_create(new_npgs, ANON_SLEEP);
 444 442 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
445 443 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
446 444 ANON_SLEEP);
447 445 anon_release(amp->ahp, npages);
448 446 amp->ahp = nahp;
449 447 ASSERT(amp->swresv == ptob(npages));
450 448 amp->swresv = amp->size = ptob(new_npgs);
 451 449 ANON_LOCK_EXIT(&amp->a_rwlock);
452 450 npages = new_npgs;
453 451 }
454 452
455 453 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
456 454 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
457 455 sptd->spt_pcachecnt = 0;
458 456 sptd->spt_realsize = ptob(npages);
459 457 sptcargs->seg_spt = seg;
460 458 return (0);
461 459 }
462 460
463 461 /*
464 462 * get array of pages for each anon slot in amp
465 463 */
466 464 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
467 465 seg, addr, S_CREATE, cred)) != 0)
468 466 goto out4;
469 467
470 468 mutex_enter(&sp->shm_mlock);
471 469
472 470 /* May be partially locked, so, count bytes to charge for locking */
473 471 for (i = 0; i < npages; i++)
474 472 if (ppa[i]->p_lckcnt == 0)
475 473 lockedbytes += PAGESIZE;
476 474
477 475 proj = sp->shm_perm.ipc_proj;
478 476
479 477 if (lockedbytes > 0) {
480 478 mutex_enter(&procp->p_lock);
481 479 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
482 480 mutex_exit(&procp->p_lock);
483 481 mutex_exit(&sp->shm_mlock);
484 482 for (i = 0; i < npages; i++)
485 483 page_unlock(ppa[i]);
486 484 err = ENOMEM;
487 485 goto out4;
488 486 }
489 487 mutex_exit(&procp->p_lock);
490 488 }
491 489
492 490 /*
493 491 * addr is initial address corresponding to the first page on ppa list
494 492 */
495 493 for (i = 0; i < npages; i++) {
496 494 /* attempt to lock all pages */
497 495 if (page_pp_lock(ppa[i], 0, 1) == 0) {
498 496 /*
499 497 * if unable to lock any page, unlock all
500 498 * of them and return error
501 499 */
502 500 for (j = 0; j < i; j++)
503 501 page_pp_unlock(ppa[j], 0, 1);
504 502 for (i = 0; i < npages; i++)
505 503 page_unlock(ppa[i]);
506 504 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
507 505 mutex_exit(&sp->shm_mlock);
508 506 err = ENOMEM;
509 507 goto out4;
510 508 }
511 509 }
512 510 mutex_exit(&sp->shm_mlock);
513 511
514 512 /*
515 513 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
516 514 * for the entire life of the segment. For example platforms
517 515 * that do not support Dynamic Reconfiguration.
518 516 */
519 517 hat_flags = HAT_LOAD_SHARE;
520 518 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
521 519 hat_flags |= HAT_LOAD_LOCK;
522 520
523 521 /*
 524 522 * Load translations one large page at a time
525 523 * to make sure we don't create mappings bigger than
526 524 * segment's size code in case underlying pages
527 525 * are shared with segvn's segment that uses bigger
528 526 * size code than we do.
529 527 */
530 528 pgsz = page_get_pagesize(seg->s_szc);
531 529 pgcnt = page_get_pagecnt(seg->s_szc);
532 530 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
533 531 sz = MIN(pgsz, ptob(npages - pidx));
534 532 hat_memload_array(seg->s_as->a_hat, a, sz,
535 533 &ppa[pidx], sptd->spt_prot, hat_flags);
536 534 }
537 535
538 536 /*
539 537 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
540 538 * we will leave the pages locked SE_SHARED for the life
541 539 * of the ISM segment. This will prevent any calls to
542 540 * hat_pageunload() on this ISM segment for those platforms.
543 541 */
544 542 if (!(hat_flags & HAT_LOAD_LOCK)) {
545 543 /*
546 544 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
547 545 * we no longer need to hold the SE_SHARED lock on the pages,
548 546 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
549 547 * SE_SHARED lock on the pages as necessary.
550 548 */
551 549 for (i = 0; i < npages; i++)
552 550 page_unlock(ppa[i]);
553 551 }
554 552 sptd->spt_pcachecnt = 0;
555 553 kmem_free(ppa, ((sizeof (page_t *)) * npages));
556 554 sptd->spt_realsize = ptob(npages);
557 555 atomic_add_long(&spt_used, npages);
558 556 sptcargs->seg_spt = seg;
559 557 return (0);
560 558
561 559 out4:
562 560 seg->s_data = NULL;
563 561 kmem_free(vp, sizeof (*vp));
564 562 cv_destroy(&sptd->spt_cv);
565 563 out3:
566 564 mutex_destroy(&sptd->spt_lock);
567 565 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
568 566 kmem_free(ppa, (sizeof (*ppa) * npages));
569 567 out2:
570 568 kmem_free(sptd, sizeof (*sptd));
571 569 out1:
572 570 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
573 571 anon_swap_restore(npages);
574 572 return (err);
575 573 }
576 574
577 575 /*ARGSUSED*/
578 576 void
579 577 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
580 578 {
581 579 struct page *pp;
582 580 struct spt_data *sptd = (struct spt_data *)seg->s_data;
583 581 pgcnt_t npages;
584 582 ulong_t anon_idx;
585 583 struct anon_map *amp;
586 584 struct anon *ap;
587 585 struct vnode *vp;
588 586 u_offset_t off;
589 587 uint_t hat_flags;
590 588 int root = 0;
591 589 pgcnt_t pgs, curnpgs = 0;
592 590 page_t *rootpp;
593 591 rctl_qty_t unlocked_bytes = 0;
594 592 kproject_t *proj;
595 593 kshmid_t *sp;
596 594
597 595 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
598 596
599 597 len = P2ROUNDUP(len, PAGESIZE);
600 598
601 599 npages = btop(len);
602 600
603 601 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
604 602 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
605 603 (sptd->spt_flags & SHM_PAGEABLE)) {
606 604 hat_flags = HAT_UNLOAD_UNMAP;
607 605 }
608 606
609 607 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
610 608
611 609 amp = sptd->spt_amp;
612 610 if (sptd->spt_flags & SHM_PAGEABLE)
613 611 npages = btop(amp->size);
614 612
615 613 ASSERT(amp != NULL);
616 614
617 615 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
618 616 sp = amp->a_sp;
619 617 proj = sp->shm_perm.ipc_proj;
620 618 mutex_enter(&sp->shm_mlock);
621 619 }
622 620 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
623 621 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
624 622 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
625 623 panic("segspt_free_pages: null app");
626 624 /*NOTREACHED*/
627 625 }
628 626 } else {
629 627 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
630 628 == NULL)
631 629 continue;
632 630 }
633 631 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
634 632 swap_xlate(ap, &vp, &off);
635 633
636 634 /*
637 635 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
638 636 * the pages won't be having SE_SHARED lock at this
639 637 * point.
640 638 *
641 639 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
642 640 * the pages are still held SE_SHARED locked from the
643 641 * original segspt_create()
644 642 *
645 643 * Our goal is to get SE_EXCL lock on each page, remove
646 644 * permanent lock on it and invalidate the page.
647 645 */
648 646 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
649 647 if (hat_flags == HAT_UNLOAD_UNMAP)
650 648 pp = page_lookup(vp, off, SE_EXCL);
651 649 else {
652 650 if ((pp = page_find(vp, off)) == NULL) {
653 651 panic("segspt_free_pages: "
654 652 "page not locked");
655 653 /*NOTREACHED*/
656 654 }
657 655 if (!page_tryupgrade(pp)) {
658 656 page_unlock(pp);
659 657 pp = page_lookup(vp, off, SE_EXCL);
660 658 }
661 659 }
662 660 if (pp == NULL) {
663 661 panic("segspt_free_pages: "
664 662 "page not in the system");
665 663 /*NOTREACHED*/
666 664 }
667 665 ASSERT(pp->p_lckcnt > 0);
668 666 page_pp_unlock(pp, 0, 1);
669 667 if (pp->p_lckcnt == 0)
670 668 unlocked_bytes += PAGESIZE;
671 669 } else {
672 670 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
673 671 continue;
674 672 }
675 673 /*
676 674 * It's logical to invalidate the pages here as in most cases
677 675 * these were created by segspt.
678 676 */
679 677 if (pp->p_szc != 0) {
680 678 if (root == 0) {
681 679 ASSERT(curnpgs == 0);
682 680 root = 1;
683 681 rootpp = pp;
684 682 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
685 683 ASSERT(pgs > 1);
686 684 ASSERT(IS_P2ALIGNED(pgs, pgs));
687 685 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
688 686 curnpgs--;
689 687 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
690 688 ASSERT(curnpgs == 1);
691 689 ASSERT(page_pptonum(pp) ==
692 690 page_pptonum(rootpp) + (pgs - 1));
693 691 page_destroy_pages(rootpp);
694 692 root = 0;
695 693 curnpgs = 0;
696 694 } else {
697 695 ASSERT(curnpgs > 1);
698 696 ASSERT(page_pptonum(pp) ==
699 697 page_pptonum(rootpp) + (pgs - curnpgs));
700 698 curnpgs--;
701 699 }
702 700 } else {
703 701 if (root != 0 || curnpgs != 0) {
704 702 panic("segspt_free_pages: bad large page");
705 703 /*NOTREACHED*/
706 704 }
707 705 /*
708 706 * Before destroying the pages, we need to take care
709 707 * of the rctl locked memory accounting. For that
710 708 * we need to calculte the unlocked_bytes.
 711 709 * we need to calculate the unlocked_bytes.
712 710 if (pp->p_lckcnt > 0)
713 711 unlocked_bytes += PAGESIZE;
714 712 /*LINTED: constant in conditional context */
715 713 VN_DISPOSE(pp, B_INVAL, 0, kcred);
716 714 }
717 715 }
718 716 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
719 717 if (unlocked_bytes > 0)
720 718 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
721 719 mutex_exit(&sp->shm_mlock);
722 720 }
723 721 if (root != 0 || curnpgs != 0) {
724 722 panic("segspt_free_pages: bad large page");
725 723 /*NOTREACHED*/
726 724 }
727 725
728 726 /*
729 727 * mark that pages have been released
730 728 */
731 729 sptd->spt_realsize = 0;
732 730
733 731 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
734 732 atomic_add_long(&spt_used, -npages);
735 733 anon_swap_restore(npages);
736 734 }
737 735 }
738 736
739 737 /*
740 738 * Get memory allocation policy info for specified address in given segment
741 739 */
742 740 static lgrp_mem_policy_info_t *
743 741 segspt_getpolicy(struct seg *seg, caddr_t addr)
744 742 {
745 743 struct anon_map *amp;
746 744 ulong_t anon_index;
747 745 lgrp_mem_policy_info_t *policy_info;
748 746 struct spt_data *spt_data;
749 747
750 748 ASSERT(seg != NULL);
751 749
752 750 /*
753 751 * Get anon_map from segspt
754 752 *
755 753 * Assume that no lock needs to be held on anon_map, since
756 754 * it should be protected by its reference count which must be
757 755 * nonzero for an existing segment
758 756 * Need to grab readers lock on policy tree though
759 757 */
760 758 spt_data = (struct spt_data *)seg->s_data;
761 759 if (spt_data == NULL)
762 760 return (NULL);
763 761 amp = spt_data->spt_amp;
764 762 ASSERT(amp->refcnt != 0);
765 763
766 764 /*
767 765 * Get policy info
768 766 *
769 767 * Assume starting anon index of 0
770 768 */
771 769 anon_index = seg_page(seg, addr);
772 770 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
773 771
774 772 return (policy_info);
775 773 }
776 774
777 775 /*
778 776 * DISM only.
779 777 * Return locked pages over a given range.
780 778 *
781 779 * We will cache all DISM locked pages and save the pplist for the
782 780 * entire segment in the ppa field of the underlying DISM segment structure.
783 781 * Later, during a call to segspt_reclaim() we will use this ppa array
784 782 * to page_unlock() all of the pages and then we will free this ppa list.
785 783 */
786 784 /*ARGSUSED*/
787 785 static int
788 786 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
789 787 struct page ***ppp, enum lock_type type, enum seg_rw rw)
790 788 {
791 789 struct shm_data *shmd = (struct shm_data *)seg->s_data;
792 790 struct seg *sptseg = shmd->shm_sptseg;
793 791 struct spt_data *sptd = sptseg->s_data;
794 792 pgcnt_t pg_idx, npages, tot_npages, npgs;
795 793 struct page **pplist, **pl, **ppa, *pp;
796 794 struct anon_map *amp;
797 795 spgcnt_t an_idx;
798 796 int ret = ENOTSUP;
799 797 uint_t pl_built = 0;
800 798 struct anon *ap;
801 799 struct vnode *vp;
802 800 u_offset_t off;
803 801 pgcnt_t claim_availrmem = 0;
804 802 uint_t szc;
805 803
806 804 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
807 805 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
808 806
809 807 /*
810 808 * We want to lock/unlock the entire ISM segment. Therefore,
 811 809 * we will be using the underlying sptseg and its base address
812 810 * and length for the caching arguments.
813 811 */
814 812 ASSERT(sptseg);
815 813 ASSERT(sptd);
816 814
817 815 pg_idx = seg_page(seg, addr);
818 816 npages = btopr(len);
819 817
820 818 /*
821 819 * check if the request is larger than number of pages covered
822 820 * by amp
823 821 */
824 822 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
825 823 *ppp = NULL;
826 824 return (ENOTSUP);
827 825 }
828 826
829 827 if (type == L_PAGEUNLOCK) {
830 828 ASSERT(sptd->spt_ppa != NULL);
831 829
832 830 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
833 831 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
834 832
835 833 /*
836 834 * If someone is blocked while unmapping, we purge
837 835 * segment page cache and thus reclaim pplist synchronously
838 836 * without waiting for seg_pasync_thread. This speeds up
839 837 * unmapping in cases where munmap(2) is called, while
840 838 * raw async i/o is still in progress or where a thread
841 839 * exits on data fault in a multithreaded application.
842 840 */
843 841 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
844 842 (AS_ISUNMAPWAIT(seg->s_as) &&
845 843 shmd->shm_softlockcnt > 0)) {
846 844 segspt_purge(seg);
847 845 }
848 846 return (0);
849 847 }
850 848
851 849 /* The L_PAGELOCK case ... */
852 850
853 851 if (sptd->spt_flags & DISM_PPA_CHANGED) {
854 852 segspt_purge(seg);
855 853 /*
 856 854 * for DISM ppa needs to be rebuilt since
857 855 * number of locked pages could be changed
858 856 */
859 857 *ppp = NULL;
860 858 return (ENOTSUP);
861 859 }
862 860
863 861 /*
864 862 * First try to find pages in segment page cache, without
865 863 * holding the segment lock.
866 864 */
867 865 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
868 866 S_WRITE, SEGP_FORCE_WIRED);
869 867 if (pplist != NULL) {
870 868 ASSERT(sptd->spt_ppa != NULL);
871 869 ASSERT(sptd->spt_ppa == pplist);
872 870 ppa = sptd->spt_ppa;
873 871 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
874 872 if (ppa[an_idx] == NULL) {
875 873 seg_pinactive(seg, NULL, seg->s_base,
876 874 sptd->spt_amp->size, ppa,
877 875 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
878 876 *ppp = NULL;
879 877 return (ENOTSUP);
880 878 }
881 879 if ((szc = ppa[an_idx]->p_szc) != 0) {
882 880 npgs = page_get_pagecnt(szc);
883 881 an_idx = P2ROUNDUP(an_idx + 1, npgs);
884 882 } else {
885 883 an_idx++;
886 884 }
887 885 }
888 886 /*
889 887 * Since we cache the entire DISM segment, we want to
890 888 * set ppp to point to the first slot that corresponds
891 889 * to the requested addr, i.e. pg_idx.
892 890 */
893 891 *ppp = &(sptd->spt_ppa[pg_idx]);
894 892 return (0);
895 893 }
896 894
897 895 mutex_enter(&sptd->spt_lock);
898 896 /*
899 897 * try to find pages in segment page cache with mutex
900 898 */
901 899 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
902 900 S_WRITE, SEGP_FORCE_WIRED);
903 901 if (pplist != NULL) {
904 902 ASSERT(sptd->spt_ppa != NULL);
905 903 ASSERT(sptd->spt_ppa == pplist);
906 904 ppa = sptd->spt_ppa;
907 905 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
908 906 if (ppa[an_idx] == NULL) {
909 907 mutex_exit(&sptd->spt_lock);
910 908 seg_pinactive(seg, NULL, seg->s_base,
911 909 sptd->spt_amp->size, ppa,
912 910 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
913 911 *ppp = NULL;
914 912 return (ENOTSUP);
915 913 }
916 914 if ((szc = ppa[an_idx]->p_szc) != 0) {
917 915 npgs = page_get_pagecnt(szc);
918 916 an_idx = P2ROUNDUP(an_idx + 1, npgs);
919 917 } else {
920 918 an_idx++;
921 919 }
922 920 }
923 921 /*
924 922 * Since we cache the entire DISM segment, we want to
925 923 * set ppp to point to the first slot that corresponds
926 924 * to the requested addr, i.e. pg_idx.
927 925 */
928 926 mutex_exit(&sptd->spt_lock);
929 927 *ppp = &(sptd->spt_ppa[pg_idx]);
930 928 return (0);
931 929 }
932 930 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
933 931 SEGP_FORCE_WIRED) == SEGP_FAIL) {
934 932 mutex_exit(&sptd->spt_lock);
935 933 *ppp = NULL;
936 934 return (ENOTSUP);
937 935 }
938 936
939 937 /*
940 938 * No need to worry about protections because DISM pages are always rw.
941 939 */
942 940 pl = pplist = NULL;
943 941 amp = sptd->spt_amp;
944 942
945 943 /*
946 944 * Do we need to build the ppa array?
947 945 */
948 946 if (sptd->spt_ppa == NULL) {
949 947 pgcnt_t lpg_cnt = 0;
950 948
951 949 pl_built = 1;
952 950 tot_npages = btopr(sptd->spt_amp->size);
953 951
954 952 ASSERT(sptd->spt_pcachecnt == 0);
955 953 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
956 954 pl = pplist;
957 955
 958 956 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
959 957 for (an_idx = 0; an_idx < tot_npages; ) {
960 958 ap = anon_get_ptr(amp->ahp, an_idx);
961 959 /*
962 960 * Cache only mlocked pages. For large pages
963 961 * if one (constituent) page is mlocked
964 962 * all pages for that large page
965 963 * are cached also. This is for quick
966 964 * lookups of ppa array;
967 965 */
968 966 if ((ap != NULL) && (lpg_cnt != 0 ||
969 967 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
970 968
971 969 swap_xlate(ap, &vp, &off);
972 970 pp = page_lookup(vp, off, SE_SHARED);
973 971 ASSERT(pp != NULL);
974 972 if (lpg_cnt == 0) {
975 973 lpg_cnt++;
976 974 /*
977 975 * For a small page, we are done --
978 976 * lpg_count is reset to 0 below.
979 977 *
980 978 * For a large page, we are guaranteed
981 979 * to find the anon structures of all
982 980 * constituent pages and a non-zero
983 981 * lpg_cnt ensures that we don't test
984 982 * for mlock for these. We are done
985 983 * when lpg_count reaches (npgs + 1).
986 984 * If we are not the first constituent
987 985 * page, restart at the first one.
988 986 */
989 987 npgs = page_get_pagecnt(pp->p_szc);
990 988 if (!IS_P2ALIGNED(an_idx, npgs)) {
991 989 an_idx = P2ALIGN(an_idx, npgs);
992 990 page_unlock(pp);
993 991 continue;
994 992 }
995 993 }
996 994 if (++lpg_cnt > npgs)
997 995 lpg_cnt = 0;
998 996
999 997 /*
1000 998 * availrmem is decremented only
1001 999 * for unlocked pages
1002 1000 */
1003 1001 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1004 1002 claim_availrmem++;
1005 1003 pplist[an_idx] = pp;
1006 1004 }
1007 1005 an_idx++;
1008 1006 }
 1009 1007 ANON_LOCK_EXIT(&amp->a_rwlock);
1010 1008
1011 1009 if (claim_availrmem) {
1012 1010 mutex_enter(&freemem_lock);
1013 1011 if (availrmem < tune.t_minarmem + claim_availrmem) {
1014 1012 mutex_exit(&freemem_lock);
1015 1013 ret = ENOTSUP;
1016 1014 claim_availrmem = 0;
1017 1015 goto insert_fail;
1018 1016 } else {
1019 1017 availrmem -= claim_availrmem;
1020 1018 }
1021 1019 mutex_exit(&freemem_lock);
1022 1020 }
1023 1021
1024 1022 sptd->spt_ppa = pl;
1025 1023 } else {
1026 1024 /*
1027 1025 * We already have a valid ppa[].
1028 1026 */
1029 1027 pl = sptd->spt_ppa;
1030 1028 }
1031 1029
1032 1030 ASSERT(pl != NULL);
1033 1031
1034 1032 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1035 1033 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1036 1034 segspt_reclaim);
1037 1035 if (ret == SEGP_FAIL) {
1038 1036 /*
1039 1037 * seg_pinsert failed. We return
1040 1038 * ENOTSUP, so that the as_pagelock() code will
1041 1039 * then try the slower F_SOFTLOCK path.
1042 1040 */
1043 1041 if (pl_built) {
1044 1042 /*
1045 1043 * No one else has referenced the ppa[].
1046 1044 * We created it and we need to destroy it.
1047 1045 */
1048 1046 sptd->spt_ppa = NULL;
1049 1047 }
1050 1048 ret = ENOTSUP;
1051 1049 goto insert_fail;
1052 1050 }
1053 1051
1054 1052 /*
1055 1053 * In either case, we increment softlockcnt on the 'real' segment.
1056 1054 */
1057 1055 sptd->spt_pcachecnt++;
1058 1056 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1059 1057
1060 1058 ppa = sptd->spt_ppa;
1061 1059 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1062 1060 if (ppa[an_idx] == NULL) {
1063 1061 mutex_exit(&sptd->spt_lock);
1064 1062 seg_pinactive(seg, NULL, seg->s_base,
1065 1063 sptd->spt_amp->size,
1066 1064 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1067 1065 *ppp = NULL;
1068 1066 return (ENOTSUP);
1069 1067 }
1070 1068 if ((szc = ppa[an_idx]->p_szc) != 0) {
1071 1069 npgs = page_get_pagecnt(szc);
1072 1070 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1073 1071 } else {
1074 1072 an_idx++;
1075 1073 }
1076 1074 }
1077 1075 /*
1078 1076 * We can now drop the sptd->spt_lock since the ppa[]
 1079 1077 * exists and we have incremented pcachecnt.
1080 1078 */
1081 1079 mutex_exit(&sptd->spt_lock);
1082 1080
1083 1081 /*
1084 1082 * Since we cache the entire segment, we want to
1085 1083 * set ppp to point to the first slot that corresponds
1086 1084 * to the requested addr, i.e. pg_idx.
1087 1085 */
1088 1086 *ppp = &(sptd->spt_ppa[pg_idx]);
1089 1087 return (0);
1090 1088
1091 1089 insert_fail:
1092 1090 /*
1093 1091 * We will only reach this code if we tried and failed.
1094 1092 *
1095 1093 * And we can drop the lock on the dummy seg, once we've failed
1096 1094 * to set up a new ppa[].
1097 1095 */
1098 1096 mutex_exit(&sptd->spt_lock);
1099 1097
1100 1098 if (pl_built) {
1101 1099 if (claim_availrmem) {
1102 1100 mutex_enter(&freemem_lock);
1103 1101 availrmem += claim_availrmem;
1104 1102 mutex_exit(&freemem_lock);
1105 1103 }
1106 1104
1107 1105 /*
1108 1106 * We created pl and we need to destroy it.
1109 1107 */
1110 1108 pplist = pl;
1111 1109 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1112 1110 if (pplist[an_idx] != NULL)
1113 1111 page_unlock(pplist[an_idx]);
1114 1112 }
1115 1113 kmem_free(pl, sizeof (page_t *) * tot_npages);
1116 1114 }
1117 1115
1118 1116 if (shmd->shm_softlockcnt <= 0) {
1119 1117 if (AS_ISUNMAPWAIT(seg->s_as)) {
1120 1118 mutex_enter(&seg->s_as->a_contents);
1121 1119 if (AS_ISUNMAPWAIT(seg->s_as)) {
1122 1120 AS_CLRUNMAPWAIT(seg->s_as);
1123 1121 cv_broadcast(&seg->s_as->a_cv);
1124 1122 }
1125 1123 mutex_exit(&seg->s_as->a_contents);
1126 1124 }
1127 1125 }
1128 1126 *ppp = NULL;
1129 1127 return (ret);
1130 1128 }
1131 1129
1132 1130
1133 1131
1134 1132 /*
1135 1133 * return locked pages over a given range.
1136 1134 *
1137 1135 * We will cache the entire ISM segment and save the pplist for the
1138 1136 * entire segment in the ppa field of the underlying ISM segment structure.
1139 1137 * Later, during a call to segspt_reclaim() we will use this ppa array
1140 1138 * to page_unlock() all of the pages and then we will free this ppa list.
1141 1139 */
1142 1140 /*ARGSUSED*/
1143 1141 static int
1144 1142 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1145 1143 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1146 1144 {
1147 1145 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1148 1146 struct seg *sptseg = shmd->shm_sptseg;
1149 1147 struct spt_data *sptd = sptseg->s_data;
1150 1148 pgcnt_t np, page_index, npages;
1151 1149 caddr_t a, spt_base;
1152 1150 struct page **pplist, **pl, *pp;
1153 1151 struct anon_map *amp;
1154 1152 ulong_t anon_index;
1155 1153 int ret = ENOTSUP;
1156 1154 uint_t pl_built = 0;
1157 1155 struct anon *ap;
1158 1156 struct vnode *vp;
1159 1157 u_offset_t off;
1160 1158
1161 1159 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1162 1160 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1163 1161
1164 1162
1165 1163 /*
1166 1164 * We want to lock/unlock the entire ISM segment. Therefore,
 1167 1165 * we will be using the underlying sptseg and its base address
1168 1166 * and length for the caching arguments.
1169 1167 */
1170 1168 ASSERT(sptseg);
1171 1169 ASSERT(sptd);
1172 1170
1173 1171 if (sptd->spt_flags & SHM_PAGEABLE) {
1174 1172 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1175 1173 }
1176 1174
1177 1175 page_index = seg_page(seg, addr);
1178 1176 npages = btopr(len);
1179 1177
1180 1178 /*
1181 1179 * check if the request is larger than number of pages covered
1182 1180 * by amp
1183 1181 */
1184 1182 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1185 1183 *ppp = NULL;
1186 1184 return (ENOTSUP);
1187 1185 }
1188 1186
1189 1187 if (type == L_PAGEUNLOCK) {
1190 1188
1191 1189 ASSERT(sptd->spt_ppa != NULL);
1192 1190
1193 1191 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1194 1192 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1195 1193
1196 1194 /*
1197 1195 * If someone is blocked while unmapping, we purge
1198 1196 * segment page cache and thus reclaim pplist synchronously
1199 1197 * without waiting for seg_pasync_thread. This speeds up
1200 1198 * unmapping in cases where munmap(2) is called, while
1201 1199 * raw async i/o is still in progress or where a thread
1202 1200 * exits on data fault in a multithreaded application.
1203 1201 */
1204 1202 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1205 1203 segspt_purge(seg);
1206 1204 }
1207 1205 return (0);
1208 1206 }
1209 1207
1210 1208 /* The L_PAGELOCK case... */
1211 1209
1212 1210 /*
1213 1211 * First try to find pages in segment page cache, without
1214 1212 * holding the segment lock.
1215 1213 */
1216 1214 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1217 1215 S_WRITE, SEGP_FORCE_WIRED);
1218 1216 if (pplist != NULL) {
1219 1217 ASSERT(sptd->spt_ppa == pplist);
1220 1218 ASSERT(sptd->spt_ppa[page_index]);
1221 1219 /*
1222 1220 * Since we cache the entire ISM segment, we want to
1223 1221 * set ppp to point to the first slot that corresponds
1224 1222 * to the requested addr, i.e. page_index.
1225 1223 */
1226 1224 *ppp = &(sptd->spt_ppa[page_index]);
1227 1225 return (0);
1228 1226 }
1229 1227
1230 1228 mutex_enter(&sptd->spt_lock);
1231 1229
1232 1230 /*
1233 1231 * try to find pages in segment page cache
1234 1232 */
1235 1233 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1236 1234 S_WRITE, SEGP_FORCE_WIRED);
1237 1235 if (pplist != NULL) {
1238 1236 ASSERT(sptd->spt_ppa == pplist);
1239 1237 /*
1240 1238 * Since we cache the entire segment, we want to
1241 1239 * set ppp to point to the first slot that corresponds
1242 1240 * to the requested addr, i.e. page_index.
1243 1241 */
1244 1242 mutex_exit(&sptd->spt_lock);
1245 1243 *ppp = &(sptd->spt_ppa[page_index]);
1246 1244 return (0);
1247 1245 }
1248 1246
1249 1247 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1250 1248 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1251 1249 mutex_exit(&sptd->spt_lock);
1252 1250 *ppp = NULL;
1253 1251 return (ENOTSUP);
1254 1252 }
1255 1253
1256 1254 /*
1257 1255 * No need to worry about protections because ISM pages
1258 1256 * are always rw.
1259 1257 */
1260 1258 pl = pplist = NULL;
1261 1259
1262 1260 /*
1263 1261 * Do we need to build the ppa array?
1264 1262 */
1265 1263 if (sptd->spt_ppa == NULL) {
1266 1264 ASSERT(sptd->spt_ppa == pplist);
1267 1265
1268 1266 spt_base = sptseg->s_base;
1269 1267 pl_built = 1;
1270 1268
1271 1269 /*
1272 1270 * availrmem is decremented once during anon_swap_adjust()
1273 1271 * and is incremented during the anon_unresv(), which is
1274 1272 * called from shm_rm_amp() when the segment is destroyed.
1275 1273 */
1276 1274 amp = sptd->spt_amp;
1277 1275 ASSERT(amp != NULL);
1278 1276
1279 1277 /* pcachecnt is protected by sptd->spt_lock */
1280 1278 ASSERT(sptd->spt_pcachecnt == 0);
1281 1279 pplist = kmem_zalloc(sizeof (page_t *)
1282 1280 * btopr(sptd->spt_amp->size), KM_SLEEP);
1283 1281 pl = pplist;
1284 1282
1285 1283 anon_index = seg_page(sptseg, spt_base);
1286 1284
 1287 1285 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1288 1286 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1289 1287 a += PAGESIZE, anon_index++, pplist++) {
1290 1288 ap = anon_get_ptr(amp->ahp, anon_index);
1291 1289 ASSERT(ap != NULL);
1292 1290 swap_xlate(ap, &vp, &off);
1293 1291 pp = page_lookup(vp, off, SE_SHARED);
1294 1292 ASSERT(pp != NULL);
1295 1293 *pplist = pp;
1296 1294 }
 1297 1295 ANON_LOCK_EXIT(&amp->a_rwlock);
1298 1296
1299 1297 if (a < (spt_base + sptd->spt_amp->size)) {
1300 1298 ret = ENOTSUP;
1301 1299 goto insert_fail;
1302 1300 }
1303 1301 sptd->spt_ppa = pl;
1304 1302 } else {
1305 1303 /*
1306 1304 * We already have a valid ppa[].
1307 1305 */
1308 1306 pl = sptd->spt_ppa;
1309 1307 }
1310 1308
1311 1309 ASSERT(pl != NULL);
1312 1310
1313 1311 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1314 1312 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1315 1313 segspt_reclaim);
1316 1314 if (ret == SEGP_FAIL) {
1317 1315 /*
1318 1316 * seg_pinsert failed. We return
1319 1317 * ENOTSUP, so that the as_pagelock() code will
1320 1318 * then try the slower F_SOFTLOCK path.
1321 1319 */
1322 1320 if (pl_built) {
1323 1321 /*
1324 1322 * No one else has referenced the ppa[].
1325 1323 * We created it and we need to destroy it.
1326 1324 */
1327 1325 sptd->spt_ppa = NULL;
1328 1326 }
1329 1327 ret = ENOTSUP;
1330 1328 goto insert_fail;
1331 1329 }
1332 1330
1333 1331 /*
1334 1332 * In either case, we increment softlockcnt on the 'real' segment.
1335 1333 */
1336 1334 sptd->spt_pcachecnt++;
1337 1335 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1338 1336
1339 1337 /*
1340 1338 * We can now drop the sptd->spt_lock since the ppa[]
 1341 1339 * exists and we have incremented pcachecnt.
1342 1340 */
1343 1341 mutex_exit(&sptd->spt_lock);
1344 1342
1345 1343 /*
1346 1344 * Since we cache the entire segment, we want to
1347 1345 * set ppp to point to the first slot that corresponds
1348 1346 * to the requested addr, i.e. page_index.
1349 1347 */
1350 1348 *ppp = &(sptd->spt_ppa[page_index]);
1351 1349 return (0);
1352 1350
1353 1351 insert_fail:
1354 1352 /*
1355 1353 * We will only reach this code if we tried and failed.
1356 1354 *
1357 1355 * And we can drop the lock on the dummy seg, once we've failed
1358 1356 * to set up a new ppa[].
1359 1357 */
1360 1358 mutex_exit(&sptd->spt_lock);
1361 1359
1362 1360 if (pl_built) {
1363 1361 /*
1364 1362 * We created pl and we need to destroy it.
1365 1363 */
1366 1364 pplist = pl;
1367 1365 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1368 1366 while (np) {
1369 1367 page_unlock(*pplist);
1370 1368 np--;
1371 1369 pplist++;
1372 1370 }
1373 1371 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1374 1372 }
1375 1373 if (shmd->shm_softlockcnt <= 0) {
1376 1374 if (AS_ISUNMAPWAIT(seg->s_as)) {
1377 1375 mutex_enter(&seg->s_as->a_contents);
1378 1376 if (AS_ISUNMAPWAIT(seg->s_as)) {
1379 1377 AS_CLRUNMAPWAIT(seg->s_as);
1380 1378 cv_broadcast(&seg->s_as->a_cv);
1381 1379 }
1382 1380 mutex_exit(&seg->s_as->a_contents);
1383 1381 }
1384 1382 }
1385 1383 *ppp = NULL;
1386 1384 return (ret);
1387 1385 }
1388 1386
1389 1387 /*
1390 1388 * purge any cached pages in the I/O page cache
1391 1389 */
1392 1390 static void
1393 1391 segspt_purge(struct seg *seg)
1394 1392 {
1395 1393 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1396 1394 }
1397 1395
1398 1396 static int
1399 1397 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1400 1398 enum seg_rw rw, int async)
1401 1399 {
1402 1400 struct seg *seg = (struct seg *)ptag;
1403 1401 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1404 1402 struct seg *sptseg;
1405 1403 struct spt_data *sptd;
1406 1404 pgcnt_t npages, i, free_availrmem = 0;
1407 1405 int done = 0;
1408 1406
1409 1407 #ifdef lint
1410 1408 addr = addr;
1411 1409 #endif
1412 1410 sptseg = shmd->shm_sptseg;
1413 1411 sptd = sptseg->s_data;
1414 1412 npages = (len >> PAGESHIFT);
1415 1413 ASSERT(npages);
1416 1414 ASSERT(sptd->spt_pcachecnt != 0);
1417 1415 ASSERT(sptd->spt_ppa == pplist);
1418 1416 ASSERT(npages == btopr(sptd->spt_amp->size));
1419 1417 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1420 1418
1421 1419 /*
1422 1420 * Acquire the lock on the dummy seg and destroy the
1423 1421 * ppa array IF this is the last pcachecnt.
1424 1422 */
1425 1423 mutex_enter(&sptd->spt_lock);
1426 1424 if (--sptd->spt_pcachecnt == 0) {
1427 1425 for (i = 0; i < npages; i++) {
1428 1426 if (pplist[i] == NULL) {
1429 1427 continue;
1430 1428 }
1431 1429 if (rw == S_WRITE) {
1432 1430 hat_setrefmod(pplist[i]);
1433 1431 } else {
1434 1432 hat_setref(pplist[i]);
1435 1433 }
1436 1434 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1437 1435 (sptd->spt_ppa_lckcnt[i] == 0))
1438 1436 free_availrmem++;
1439 1437 page_unlock(pplist[i]);
1440 1438 }
1441 1439 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1442 1440 mutex_enter(&freemem_lock);
1443 1441 availrmem += free_availrmem;
1444 1442 mutex_exit(&freemem_lock);
1445 1443 }
1446 1444 /*
 1447 1445 * Since we want to cache/uncache the entire ISM segment,
1448 1446 * we will track the pplist in a segspt specific field
1449 1447 * ppa, that is initialized at the time we add an entry to
1450 1448 * the cache.
1451 1449 */
1452 1450 ASSERT(sptd->spt_pcachecnt == 0);
1453 1451 kmem_free(pplist, sizeof (page_t *) * npages);
1454 1452 sptd->spt_ppa = NULL;
1455 1453 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1456 1454 sptd->spt_gen++;
1457 1455 cv_broadcast(&sptd->spt_cv);
1458 1456 done = 1;
1459 1457 }
1460 1458 mutex_exit(&sptd->spt_lock);
1461 1459
1462 1460 /*
1463 1461 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1464 1462 * may not hold AS lock (in this case async argument is not 0). This
1465 1463 * means if softlockcnt drops to 0 after the decrement below address
1466 1464 * space may get freed. We can't allow it since after softlock
 1467 1465 * decrement to 0 we still need to access as structure for possible
1468 1466 * wakeup of unmap waiters. To prevent the disappearance of as we take
1469 1467 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1470 1468 * this mutex as a barrier to make sure this routine completes before
1471 1469 * segment is freed.
1472 1470 *
1473 1471 * The second complication we have to deal with in async case is a
1474 1472 * possibility of missed wake up of unmap wait thread. When we don't
1475 1473 * hold as lock here we may take a_contents lock before unmap wait
1476 1474 * thread that was first to see softlockcnt was still not 0. As a
1477 1475 * result we'll fail to wake up an unmap wait thread. To avoid this
1478 1476 * race we set nounmapwait flag in as structure if we drop softlockcnt
1479 1477 * to 0 if async is not 0. unmapwait thread
1480 1478 * will not block if this flag is set.
1481 1479 */
1482 1480 if (async)
1483 1481 mutex_enter(&shmd->shm_segfree_syncmtx);
1484 1482
1485 1483 /*
1486 1484 * Now decrement softlockcnt.
1487 1485 */
1488 1486 ASSERT(shmd->shm_softlockcnt > 0);
1489 1487 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1490 1488
1491 1489 if (shmd->shm_softlockcnt <= 0) {
1492 1490 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1493 1491 mutex_enter(&seg->s_as->a_contents);
1494 1492 if (async)
1495 1493 AS_SETNOUNMAPWAIT(seg->s_as);
1496 1494 if (AS_ISUNMAPWAIT(seg->s_as)) {
1497 1495 AS_CLRUNMAPWAIT(seg->s_as);
1498 1496 cv_broadcast(&seg->s_as->a_cv);
1499 1497 }
1500 1498 mutex_exit(&seg->s_as->a_contents);
1501 1499 }
1502 1500 }
1503 1501
1504 1502 if (async)
1505 1503 mutex_exit(&shmd->shm_segfree_syncmtx);
1506 1504
1507 1505 return (done);
1508 1506 }
1509 1507
1510 1508 /*
1511 1509 * Do a F_SOFTUNLOCK call over the range requested.
1512 1510 * The range must have already been F_SOFTLOCK'ed.
1513 1511 *
1514 1512 * The calls to acquire and release the anon map lock mutex were
1515 1513 * removed in order to avoid a deadly embrace during a DR
 1516 1514 * memory delete operation. (Eg. DR blocks while waiting for an
1517 1515 * exclusive lock on a page that is being used for kaio; the
1518 1516 * thread that will complete the kaio and call segspt_softunlock
1519 1517 * blocks on the anon map lock; another thread holding the anon
1520 1518 * map lock blocks on another page lock via the segspt_shmfault
1521 1519 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1522 1520 *
1523 1521 * The appropriateness of the removal is based upon the following:
1524 1522 * 1. If we are holding a segment's reader lock and the page is held
1525 1523 * shared, then the corresponding element in anonmap which points to
1526 1524 * anon struct cannot change and there is no need to acquire the
1527 1525 * anonymous map lock.
1528 1526 * 2. Threads in segspt_softunlock have a reader lock on the segment
1529 1527 * and already have the shared page lock, so we are guaranteed that
1530 1528 * the anon map slot cannot change and therefore can call anon_get_ptr()
1531 1529 * without grabbing the anonymous map lock.
1532 1530 * 3. Threads that softlock a shared page break copy-on-write, even if
 1533 1531 * it's a read. Thus cow faults can be ignored with respect to soft
1534 1532 * unlocking, since the breaking of cow means that the anon slot(s) will
1535 1533 * not be shared.
1536 1534 */
1537 1535 static void
1538 1536 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1539 1537 size_t len, enum seg_rw rw)
1540 1538 {
1541 1539 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1542 1540 struct seg *sptseg;
1543 1541 struct spt_data *sptd;
1544 1542 page_t *pp;
1545 1543 caddr_t adr;
1546 1544 struct vnode *vp;
1547 1545 u_offset_t offset;
1548 1546 ulong_t anon_index;
1549 1547 struct anon_map *amp; /* XXX - for locknest */
1550 1548 struct anon *ap = NULL;
1551 1549 pgcnt_t npages;
1552 1550
1553 1551 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1554 1552
1555 1553 sptseg = shmd->shm_sptseg;
1556 1554 sptd = sptseg->s_data;
1557 1555
1558 1556 /*
1559 1557 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1560 1558 * and therefore their pages are SE_SHARED locked
1561 1559 * for the entire life of the segment.
1562 1560 */
1563 1561 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1564 1562 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1565 1563 goto softlock_decrement;
1566 1564 }
1567 1565
1568 1566 /*
1569 1567 * Any thread is free to do a page_find and
1570 1568 * page_unlock() on the pages within this seg.
1571 1569 *
1572 1570 * We are already holding the as->a_lock on the user's
1573 1571 * real segment, but we need to hold the a_lock on the
1574 1572 * underlying dummy as. This is mostly to satisfy the
1575 1573 * underlying HAT layer.
1576 1574 */
1577 1575 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1578 1576 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1579 1577 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1580 1578
1581 1579 amp = sptd->spt_amp;
1582 1580 ASSERT(amp != NULL);
1583 1581 anon_index = seg_page(sptseg, sptseg_addr);
1584 1582
1585 1583 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1586 1584 ap = anon_get_ptr(amp->ahp, anon_index++);
1587 1585 ASSERT(ap != NULL);
1588 1586 swap_xlate(ap, &vp, &offset);
1589 1587
1590 1588 /*
1591 1589 * Use page_find() instead of page_lookup() to
1592 1590 * find the page since we know that it has a
1593 1591 * "shared" lock.
1594 1592 */
1595 1593 pp = page_find(vp, offset);
1596 1594 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1597 1595 if (pp == NULL) {
1598 1596 panic("segspt_softunlock: "
1599 1597 "addr %p, ap %p, vp %p, off %llx",
1600 1598 (void *)adr, (void *)ap, (void *)vp, offset);
1601 1599 /*NOTREACHED*/
1602 1600 }
1603 1601
1604 1602 if (rw == S_WRITE) {
1605 1603 hat_setrefmod(pp);
1606 1604 } else if (rw != S_OTHER) {
1607 1605 hat_setref(pp);
1608 1606 }
1609 1607 page_unlock(pp);
1610 1608 }
1611 1609
1612 1610 softlock_decrement:
1613 1611 npages = btopr(len);
1614 1612 ASSERT(shmd->shm_softlockcnt >= npages);
1615 1613 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1616 1614 if (shmd->shm_softlockcnt == 0) {
1617 1615 /*
1618 1616 * All SOFTLOCKS are gone. Wakeup any waiting
1619 1617 * unmappers so they can try again to unmap.
1620 1618 * Check for waiters first without the mutex
1621 1619 * held so we don't always grab the mutex on
1622 1620 * softunlocks.
1623 1621 */
1624 1622 if (AS_ISUNMAPWAIT(seg->s_as)) {
1625 1623 mutex_enter(&seg->s_as->a_contents);
1626 1624 if (AS_ISUNMAPWAIT(seg->s_as)) {
1627 1625 AS_CLRUNMAPWAIT(seg->s_as);
1628 1626 cv_broadcast(&seg->s_as->a_cv);
1629 1627 }
1630 1628 mutex_exit(&seg->s_as->a_contents);
1631 1629 }
1632 1630 }
1633 1631 }
1634 1632
1635 1633 int
1636 1634 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1637 1635 {
1638 1636 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1639 1637 struct shm_data *shmd;
1640 1638 struct anon_map *shm_amp = shmd_arg->shm_amp;
1641 1639 struct spt_data *sptd;
1642 1640 int error = 0;
1643 1641
1644 1642 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1645 1643
1646 1644 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1647 1645 if (shmd == NULL)
1648 1646 return (ENOMEM);
1649 1647
1650 1648 shmd->shm_sptas = shmd_arg->shm_sptas;
1651 1649 shmd->shm_amp = shm_amp;
1652 1650 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1653 1651
1654 1652 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1655 1653 NULL, 0, seg->s_size);
1656 1654
1657 1655 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1658 1656
1659 1657 seg->s_data = (void *)shmd;
1660 1658 seg->s_ops = &segspt_shmops;
1661 1659 seg->s_szc = shmd->shm_sptseg->s_szc;
1662 1660 sptd = shmd->shm_sptseg->s_data;
1663 1661
1664 1662 if (sptd->spt_flags & SHM_PAGEABLE) {
1665 1663 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1666 1664 KM_NOSLEEP)) == NULL) {
1667 1665 seg->s_data = (void *)NULL;
1668 1666 kmem_free(shmd, (sizeof (*shmd)));
1669 1667 return (ENOMEM);
1670 1668 }
1671 1669 shmd->shm_lckpgs = 0;
1672 1670 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1673 1671 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1674 1672 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1675 1673 seg->s_size, seg->s_szc)) != 0) {
1676 1674 kmem_free(shmd->shm_vpage,
1677 1675 btopr(shm_amp->size));
1678 1676 }
1679 1677 }
1680 1678 } else {
1681 1679 error = hat_share(seg->s_as->a_hat, seg->s_base,
1682 1680 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1683 1681 seg->s_size, seg->s_szc);
1684 1682 }
1685 1683 if (error) {
1686 1684 seg->s_szc = 0;
1687 1685 seg->s_data = (void *)NULL;
1688 1686 kmem_free(shmd, (sizeof (*shmd)));
1689 1687 } else {
1690 1688 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1691 1689 shm_amp->refcnt++;
1692 1690 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1693 1691 }
1694 1692 return (error);
1695 1693 }
1696 1694
1697 1695 int
1698 1696 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1699 1697 {
1700 1698 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1701 1699 int reclaim = 1;
1702 1700
1703 1701 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1704 1702 retry:
1705 1703 if (shmd->shm_softlockcnt > 0) {
1706 1704 if (reclaim == 1) {
1707 1705 segspt_purge(seg);
1708 1706 reclaim = 0;
1709 1707 goto retry;
1710 1708 }
1711 1709 return (EAGAIN);
1712 1710 }
1713 1711
1714 1712 if (ssize != seg->s_size) {
1715 1713 #ifdef DEBUG
1716 1714 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1717 1715 ssize, seg->s_size);
1718 1716 #endif
1719 1717 return (EINVAL);
1720 1718 }
1721 1719
1722 1720 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1723 1721 NULL, 0);
1724 1722 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1725 1723
1726 1724 seg_free(seg);
1727 1725
1728 1726 return (0);
1729 1727 }
1730 1728
1731 1729 void
1732 1730 segspt_shmfree(struct seg *seg)
1733 1731 {
1734 1732 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1735 1733 struct anon_map *shm_amp = shmd->shm_amp;
1736 1734
1737 1735 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1738 1736
1739 1737 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1740 1738 MC_UNLOCK, NULL, 0);
1741 1739
1742 1740 /*
1743 1741 * Need to increment refcnt when attaching
1744 1742 * and decrement when detaching because of dup().
1745 1743 */
1746 1744 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1747 1745 shm_amp->refcnt--;
1748 1746 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1749 1747
1750 1748 if (shmd->shm_vpage) { /* only for DISM */
1751 1749 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1752 1750 shmd->shm_vpage = NULL;
1753 1751 }
1754 1752
1755 1753 /*
1756 1754 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1757 1755 * still working with this segment without holding as lock.
1758 1756 */
1759 1757 ASSERT(shmd->shm_softlockcnt == 0);
1760 1758 mutex_enter(&shmd->shm_segfree_syncmtx);
1761 1759 mutex_destroy(&shmd->shm_segfree_syncmtx);
1762 1760
1763 1761 kmem_free(shmd, sizeof (*shmd));
1764 1762 }
1765 1763
1766 1764 /*ARGSUSED*/
1767 1765 int
1768 1766 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1769 1767 {
1770 1768 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1771 1769
1772 1770 /*
1773 1771 * Shared page table is more than shared mapping.
1774 1772 * Individual process sharing page tables can't change prot
1775 1773 * because there is only one set of page tables.
1776 1774 * This will be allowed after private page table is
1777 1775 * supported.
1778 1776 */
1779 1777 /* need to return correct status error? */
1780 1778 return (0);
1781 1779 }
1782 1780
1783 1781
1784 1782 faultcode_t
1785 1783 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1786 1784 size_t len, enum fault_type type, enum seg_rw rw)
1787 1785 {
1788 1786 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1789 1787 struct seg *sptseg = shmd->shm_sptseg;
1790 1788 struct as *curspt = shmd->shm_sptas;
1791 1789 struct spt_data *sptd = sptseg->s_data;
1792 1790 pgcnt_t npages;
1793 1791 size_t size;
1794 1792 caddr_t segspt_addr, shm_addr;
1795 1793 page_t **ppa;
1796 1794 int i;
1797 1795 ulong_t an_idx = 0;
1798 1796 int err = 0;
1799 1797 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1800 1798 size_t pgsz;
1801 1799 pgcnt_t pgcnt;
1802 1800 caddr_t a;
1803 1801 pgcnt_t pidx;
1804 1802
1805 1803 #ifdef lint
1806 1804 hat = hat;
1807 1805 #endif
1808 1806 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1809 1807
1810 1808 /*
1811 1809 * Because of the way spt is implemented
1812 1810 * the realsize of the segment does not have to be
1813 1811 * equal to the segment size itself. The segment size is
1814 1812 * often in multiples of a page size larger than PAGESIZE.
1815 1813 * The realsize is rounded up to the nearest PAGESIZE
1816 1814 * based on what the user requested. This is a bit of
1817 1815 	 * ugliness that is historical but not easily fixed
1818 1816 * without re-designing the higher levels of ISM.
1819 1817 */
1820 1818 ASSERT(addr >= seg->s_base);
1821 1819 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1822 1820 return (FC_NOMAP);
1823 1821 /*
1824 1822 * For all of the following cases except F_PROT, we need to
1825 1823 * make any necessary adjustments to addr and len
1826 1824 * and get all of the necessary page_t's into an array called ppa[].
1827 1825 *
1828 1826 * The code in shmat() forces base addr and len of ISM segment
1829 1827 * to be aligned to largest page size supported. Therefore,
1830 1828 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1831 1829 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1832 1830 * in large pagesize chunks, or else we will screw up the HAT
1833 1831 * layer by calling hat_memload_array() with differing page sizes
1834 1832 * over a given virtual range.
1835 1833 */
1836 1834 pgsz = page_get_pagesize(sptseg->s_szc);
1837 1835 pgcnt = page_get_pagecnt(sptseg->s_szc);
1838 1836 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1839 1837 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1840 1838 npages = btopr(size);
1841 1839
1842 1840 /*
1843 1841 * Now we need to convert from addr in segshm to addr in segspt.
1844 1842 */
1845 1843 an_idx = seg_page(seg, shm_addr);
1846 1844 segspt_addr = sptseg->s_base + ptob(an_idx);
1847 1845
1848 1846 ASSERT((segspt_addr + ptob(npages)) <=
1849 1847 (sptseg->s_base + sptd->spt_realsize));
1850 1848 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1851 1849
1852 1850 switch (type) {
1853 1851
1854 1852 case F_SOFTLOCK:
1855 1853
1856 1854 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1857 1855 /*
1858 1856 * Fall through to the F_INVAL case to load up the hat layer
1859 1857 * entries with the HAT_LOAD_LOCK flag.
1860 1858 */
1861 1859 /* FALLTHRU */
1862 1860 case F_INVAL:
1863 1861
1864 1862 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1865 1863 return (FC_NOMAP);
1866 1864
1867 1865 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1868 1866
1869 1867 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1870 1868 if (err != 0) {
1871 1869 if (type == F_SOFTLOCK) {
1872 1870 atomic_add_long((ulong_t *)(
1873 1871 &(shmd->shm_softlockcnt)), -npages);
1874 1872 }
1875 1873 goto dism_err;
1876 1874 }
1877 1875 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1878 1876 a = segspt_addr;
1879 1877 pidx = 0;
1880 1878 if (type == F_SOFTLOCK) {
1881 1879
1882 1880 /*
1883 1881 * Load up the translation keeping it
1884 1882 * locked and don't unlock the page.
1885 1883 */
1886 1884 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1887 1885 hat_memload_array(sptseg->s_as->a_hat,
1888 1886 a, pgsz, &ppa[pidx], sptd->spt_prot,
1889 1887 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1890 1888 }
1891 1889 } else {
1892 1890 /*
1893 1891 * Migrate pages marked for migration
1894 1892 */
1895 1893 if (lgrp_optimizations())
1896 1894 page_migrate(seg, shm_addr, ppa, npages);
1897 1895
1898 1896 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1899 1897 hat_memload_array(sptseg->s_as->a_hat,
1900 1898 a, pgsz, &ppa[pidx],
1901 1899 sptd->spt_prot,
1902 1900 HAT_LOAD_SHARE);
1903 1901 }
1904 1902
1905 1903 /*
1906 1904 * And now drop the SE_SHARED lock(s).
1907 1905 */
1908 1906 if (dyn_ism_unmap) {
1909 1907 for (i = 0; i < npages; i++) {
1910 1908 page_unlock(ppa[i]);
1911 1909 }
1912 1910 }
1913 1911 }
1914 1912
1915 1913 if (!dyn_ism_unmap) {
1916 1914 if (hat_share(seg->s_as->a_hat, shm_addr,
1917 1915 curspt->a_hat, segspt_addr, ptob(npages),
1918 1916 seg->s_szc) != 0) {
1919 1917 panic("hat_share err in DISM fault");
1920 1918 /* NOTREACHED */
1921 1919 }
1922 1920 if (type == F_INVAL) {
1923 1921 for (i = 0; i < npages; i++) {
1924 1922 page_unlock(ppa[i]);
1925 1923 }
1926 1924 }
1927 1925 }
1928 1926 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1929 1927 dism_err:
1930 1928 kmem_free(ppa, npages * sizeof (page_t *));
1931 1929 return (err);
1932 1930
1933 1931 case F_SOFTUNLOCK:
1934 1932
1935 1933 /*
1936 1934 * This is a bit ugly, we pass in the real seg pointer,
1937 1935 * but the segspt_addr is the virtual address within the
1938 1936 * dummy seg.
1939 1937 */
1940 1938 segspt_softunlock(seg, segspt_addr, size, rw);
1941 1939 return (0);
1942 1940
1943 1941 case F_PROT:
1944 1942
1945 1943 /*
1946 1944 * This takes care of the unusual case where a user
1947 1945 * allocates a stack in shared memory and a register
1948 1946 * window overflow is written to that stack page before
1949 1947 * it is otherwise modified.
1950 1948 *
1951 1949 * We can get away with this because ISM segments are
1952 1950 * always rw. Other than this unusual case, there
1953 1951 * should be no instances of protection violations.
1954 1952 */
1955 1953 return (0);
1956 1954
1957 1955 default:
1958 1956 #ifdef DEBUG
1959 1957 panic("segspt_dismfault default type?");
1960 1958 #else
1961 1959 return (FC_NOMAP);
1962 1960 #endif
1963 1961 }
1964 1962 }
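
The fault handler above (and segspt_shmfault() below) normalizes the faulting range before touching the HAT: the address is aligned down and the length rounded up to the underlying segment's large-page size, so hat_memload_array() always sees whole large pages. A standalone sketch of that arithmetic, with the align/round helpers written out in the style of <sys/sysmacros.h> and purely example values (a 4 MB large page is assumed):

#include <stdio.h>
#include <stdint.h>

/* Power-of-two helpers, written out here for illustration only. */
#define P2ALIGN(x, align)	((x) & -(align))
#define P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uintptr_t addr = 0x2ff403000UL;		/* faulting address (example) */
	size_t len = 0x1800;			/* faulting length (example) */
	size_t pgsz = 4UL * 1024 * 1024;	/* assumed large-page size */

	uintptr_t shm_addr = P2ALIGN(addr, pgsz);
	size_t size = P2ROUNDUP(addr + len - shm_addr, pgsz);

	/* Prints shm_addr=0x2ff400000 size=0x400000: one whole large page. */
	printf("shm_addr=0x%lx size=0x%lx\n",
	    (unsigned long)shm_addr, (unsigned long)size);
	return (0);
}
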
1965 1963
1966 1964
1967 1965 faultcode_t
1968 1966 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1969 1967 size_t len, enum fault_type type, enum seg_rw rw)
1970 1968 {
1971 1969 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1972 1970 struct seg *sptseg = shmd->shm_sptseg;
1973 1971 struct as *curspt = shmd->shm_sptas;
1974 1972 struct spt_data *sptd = sptseg->s_data;
1975 1973 pgcnt_t npages;
1976 1974 size_t size;
1977 1975 caddr_t sptseg_addr, shm_addr;
1978 1976 page_t *pp, **ppa;
1979 1977 int i;
1980 1978 u_offset_t offset;
1981 1979 ulong_t anon_index = 0;
1982 1980 struct vnode *vp;
1983 1981 struct anon_map *amp; /* XXX - for locknest */
1984 1982 struct anon *ap = NULL;
1985 1983 size_t pgsz;
1986 1984 pgcnt_t pgcnt;
1987 1985 caddr_t a;
1988 1986 pgcnt_t pidx;
1989 1987 size_t sz;
1990 1988
1991 1989 #ifdef lint
1992 1990 hat = hat;
1993 1991 #endif
1994 1992
1995 1993 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1996 1994
1997 1995 if (sptd->spt_flags & SHM_PAGEABLE) {
1998 1996 return (segspt_dismfault(hat, seg, addr, len, type, rw));
1999 1997 }
2000 1998
2001 1999 /*
2002 2000 * Because of the way spt is implemented
2003 2001 * the realsize of the segment does not have to be
2004 2002 * equal to the segment size itself. The segment size is
2005 2003 * often in multiples of a page size larger than PAGESIZE.
2006 2004 * The realsize is rounded up to the nearest PAGESIZE
2007 2005 * based on what the user requested. This is a bit of
2008 2006 	 * ugliness that is historical but not easily fixed
2009 2007 * without re-designing the higher levels of ISM.
2010 2008 */
2011 2009 ASSERT(addr >= seg->s_base);
2012 2010 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2013 2011 return (FC_NOMAP);
2014 2012 /*
2015 2013 * For all of the following cases except F_PROT, we need to
2016 2014 * make any necessary adjustments to addr and len
2017 2015 * and get all of the necessary page_t's into an array called ppa[].
2018 2016 *
2019 2017 * The code in shmat() forces base addr and len of ISM segment
2020 2018 * to be aligned to largest page size supported. Therefore,
2021 2019 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2022 2020 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2023 2021 * in large pagesize chunks, or else we will screw up the HAT
2024 2022 * layer by calling hat_memload_array() with differing page sizes
2025 2023 * over a given virtual range.
2026 2024 */
2027 2025 pgsz = page_get_pagesize(sptseg->s_szc);
2028 2026 pgcnt = page_get_pagecnt(sptseg->s_szc);
2029 2027 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2030 2028 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2031 2029 npages = btopr(size);
2032 2030
2033 2031 /*
2034 2032 * Now we need to convert from addr in segshm to addr in segspt.
2035 2033 */
2036 2034 anon_index = seg_page(seg, shm_addr);
2037 2035 sptseg_addr = sptseg->s_base + ptob(anon_index);
2038 2036
2039 2037 /*
2040 2038 * And now we may have to adjust npages downward if we have
2041 2039 * exceeded the realsize of the segment or initial anon
2042 2040 * allocations.
2043 2041 */
2044 2042 if ((sptseg_addr + ptob(npages)) >
2045 2043 (sptseg->s_base + sptd->spt_realsize))
2046 2044 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2047 2045
2048 2046 npages = btopr(size);
2049 2047
2050 2048 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2051 2049 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2052 2050
2053 2051 switch (type) {
2054 2052
2055 2053 case F_SOFTLOCK:
2056 2054
2057 2055 /*
2058 2056 * availrmem is decremented once during anon_swap_adjust()
2059 2057 * and is incremented during the anon_unresv(), which is
2060 2058 * called from shm_rm_amp() when the segment is destroyed.
2061 2059 */
2062 2060 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2063 2061 /*
2064 2062 * Some platforms assume that ISM pages are SE_SHARED
2065 2063 * locked for the entire life of the segment.
2066 2064 */
2067 2065 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2068 2066 return (0);
2069 2067 /*
2070 2068 * Fall through to the F_INVAL case to load up the hat layer
2071 2069 * entries with the HAT_LOAD_LOCK flag.
2072 2070 */
2073 2071
2074 2072 /* FALLTHRU */
2075 2073 case F_INVAL:
2076 2074
2077 2075 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2078 2076 return (FC_NOMAP);
2079 2077
2080 2078 /*
2081 2079 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2082 2080 * may still rely on this call to hat_share(). That
2083 2081 * would imply that those hat's can fault on a
2084 2082 * HAT_LOAD_LOCK translation, which would seem
2085 2083 * contradictory.
2086 2084 */
2087 2085 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2088 2086 if (hat_share(seg->s_as->a_hat, seg->s_base,
2089 2087 curspt->a_hat, sptseg->s_base,
2090 2088 sptseg->s_size, sptseg->s_szc) != 0) {
2091 2089 panic("hat_share error in ISM fault");
2092 2090 /*NOTREACHED*/
2093 2091 }
2094 2092 return (0);
2095 2093 }
2096 2094 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2097 2095
2098 2096 /*
2099 2097 * I see no need to lock the real seg,
2100 2098 * here, because all of our work will be on the underlying
2101 2099 * dummy seg.
2102 2100 *
2103 2101 * sptseg_addr and npages now account for large pages.
2104 2102 */
2105 2103 amp = sptd->spt_amp;
2106 2104 ASSERT(amp != NULL);
2107 2105 anon_index = seg_page(sptseg, sptseg_addr);
2108 2106
2109 2107 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2110 2108 for (i = 0; i < npages; i++) {
2111 2109 ap = anon_get_ptr(amp->ahp, anon_index++);
2112 2110 ASSERT(ap != NULL);
2113 2111 swap_xlate(ap, &vp, &offset);
2114 2112 pp = page_lookup(vp, offset, SE_SHARED);
2115 2113 ASSERT(pp != NULL);
2116 2114 ppa[i] = pp;
2117 2115 }
2118 2116 		ANON_LOCK_EXIT(&amp->a_rwlock);
2119 2117 ASSERT(i == npages);
2120 2118
2121 2119 /*
2122 2120 * We are already holding the as->a_lock on the user's
2123 2121 * real segment, but we need to hold the a_lock on the
2124 2122 * underlying dummy as. This is mostly to satisfy the
2125 2123 * underlying HAT layer.
2126 2124 */
2127 2125 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2128 2126 a = sptseg_addr;
2129 2127 pidx = 0;
2130 2128 if (type == F_SOFTLOCK) {
2131 2129 /*
2132 2130 * Load up the translation keeping it
2133 2131 * locked and don't unlock the page.
2134 2132 */
2135 2133 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2136 2134 sz = MIN(pgsz, ptob(npages - pidx));
2137 2135 hat_memload_array(sptseg->s_as->a_hat, a,
2138 2136 sz, &ppa[pidx], sptd->spt_prot,
2139 2137 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2140 2138 }
2141 2139 } else {
2142 2140 /*
2143 2141 * Migrate pages marked for migration.
2144 2142 */
2145 2143 if (lgrp_optimizations())
2146 2144 page_migrate(seg, shm_addr, ppa, npages);
2147 2145
2148 2146 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2149 2147 sz = MIN(pgsz, ptob(npages - pidx));
2150 2148 hat_memload_array(sptseg->s_as->a_hat,
2151 2149 a, sz, &ppa[pidx],
2152 2150 sptd->spt_prot, HAT_LOAD_SHARE);
2153 2151 }
2154 2152
2155 2153 /*
2156 2154 * And now drop the SE_SHARED lock(s).
2157 2155 */
2158 2156 for (i = 0; i < npages; i++)
2159 2157 page_unlock(ppa[i]);
2160 2158 }
2161 2159 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2162 2160
2163 2161 kmem_free(ppa, sizeof (page_t *) * npages);
2164 2162 return (0);
2165 2163 case F_SOFTUNLOCK:
2166 2164
2167 2165 /*
2168 2166 * This is a bit ugly, we pass in the real seg pointer,
2169 2167 * but the sptseg_addr is the virtual address within the
2170 2168 * dummy seg.
2171 2169 */
2172 2170 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2173 2171 return (0);
2174 2172
2175 2173 case F_PROT:
2176 2174
2177 2175 /*
2178 2176 * This takes care of the unusual case where a user
2179 2177 * allocates a stack in shared memory and a register
2180 2178 * window overflow is written to that stack page before
2181 2179 * it is otherwise modified.
2182 2180 *
2183 2181 * We can get away with this because ISM segments are
2184 2182 * always rw. Other than this unusual case, there
2185 2183 * should be no instances of protection violations.
2186 2184 */
2187 2185 return (0);
2188 2186
2189 2187 default:
2190 2188 #ifdef DEBUG
2191 2189 cmn_err(CE_WARN, "segspt_shmfault default type?");
2192 2190 #endif
2193 2191 return (FC_NOMAP);
2194 2192 }
2195 2193 }
2196 2194
2197 2195 /*ARGSUSED*/
2198 2196 static faultcode_t
2199 2197 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2200 2198 {
2201 2199 return (0);
2202 2200 }
2203 2201
2204 2202 /*ARGSUSED*/
2205 2203 static int
2206 2204 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2207 2205 {
2208 2206 return (0);
2209 2207 }
2210 2208
2211 2209 /*
2212 2210 * duplicate the shared page tables
2213 2211 */
2214 2212 int
2215 2213 segspt_shmdup(struct seg *seg, struct seg *newseg)
2216 2214 {
2217 2215 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2218 2216 struct anon_map *amp = shmd->shm_amp;
2219 2217 struct shm_data *shmd_new;
2220 2218 struct seg *spt_seg = shmd->shm_sptseg;
2221 2219 struct spt_data *sptd = spt_seg->s_data;
2222 2220 int error = 0;
2223 2221
2224 2222 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2225 2223
2226 2224 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2227 2225 newseg->s_data = (void *)shmd_new;
2228 2226 shmd_new->shm_sptas = shmd->shm_sptas;
2229 2227 shmd_new->shm_amp = amp;
2230 2228 shmd_new->shm_sptseg = shmd->shm_sptseg;
2231 2229 newseg->s_ops = &segspt_shmops;
2232 2230 newseg->s_szc = seg->s_szc;
2233 2231 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2234 2232
2235 2233 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2236 2234 amp->refcnt++;
2237 2235 	ANON_LOCK_EXIT(&amp->a_rwlock);
2238 2236
2239 2237 if (sptd->spt_flags & SHM_PAGEABLE) {
2240 2238 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2241 2239 shmd_new->shm_lckpgs = 0;
2242 2240 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2243 2241 if ((error = hat_share(newseg->s_as->a_hat,
2244 2242 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2245 2243 seg->s_size, seg->s_szc)) != 0) {
2246 2244 kmem_free(shmd_new->shm_vpage,
2247 2245 btopr(amp->size));
2248 2246 }
2249 2247 }
2250 2248 return (error);
2251 2249 } else {
2252 2250 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2253 2251 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2254 2252 seg->s_szc));
2255 2253
2256 2254 }
2257 2255 }
2258 2256
2259 2257 /*ARGSUSED*/
2260 2258 int
2261 2259 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2262 2260 {
2263 2261 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2264 2262 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2265 2263
2266 2264 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2267 2265
2268 2266 /*
2269 2267 * ISM segment is always rw.
2270 2268 */
2271 2269 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2272 2270 }
2273 2271
2274 2272 /*
2275 2273 * Return an array of locked large pages, for empty slots allocate
2276 2274 * private zero-filled anon pages.
2277 2275 */
2278 2276 static int
2279 2277 spt_anon_getpages(
2280 2278 struct seg *sptseg,
2281 2279 caddr_t sptaddr,
2282 2280 size_t len,
2283 2281 page_t *ppa[])
2284 2282 {
2285 2283 struct spt_data *sptd = sptseg->s_data;
2286 2284 struct anon_map *amp = sptd->spt_amp;
2287 2285 enum seg_rw rw = sptd->spt_prot;
2288 2286 uint_t szc = sptseg->s_szc;
2289 2287 size_t pg_sz, share_sz = page_get_pagesize(szc);
2290 2288 pgcnt_t lp_npgs;
2291 2289 caddr_t lp_addr, e_sptaddr;
2292 2290 uint_t vpprot, ppa_szc = 0;
2293 2291 struct vpage *vpage = NULL;
2294 2292 ulong_t j, ppa_idx;
2295 2293 int err, ierr = 0;
2296 2294 pgcnt_t an_idx;
2297 2295 anon_sync_obj_t cookie;
2298 2296 int anon_locked = 0;
2299 2297 pgcnt_t amp_pgs;
2300 2298
2301 2299
2302 2300 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2303 2301 ASSERT(len != 0);
2304 2302
2305 2303 pg_sz = share_sz;
2306 2304 lp_npgs = btop(pg_sz);
2307 2305 lp_addr = sptaddr;
2308 2306 e_sptaddr = sptaddr + len;
2309 2307 an_idx = seg_page(sptseg, sptaddr);
2310 2308 ppa_idx = 0;
2311 2309
2312 2310 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2313 2311
2314 2312 amp_pgs = page_get_pagecnt(amp->a_szc);
2315 2313
2316 2314 /*CONSTCOND*/
2317 2315 while (1) {
2318 2316 for (; lp_addr < e_sptaddr;
2319 2317 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2320 2318
2321 2319 /*
2322 2320 * If we're currently locked, and we get to a new
2323 2321 * page, unlock our current anon chunk.
2324 2322 */
2325 2323 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2326 2324 anon_array_exit(&cookie);
2327 2325 anon_locked = 0;
2328 2326 }
2329 2327 if (!anon_locked) {
2330 2328 anon_array_enter(amp, an_idx, &cookie);
2331 2329 anon_locked = 1;
2332 2330 }
2333 2331 ppa_szc = (uint_t)-1;
2334 2332 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2335 2333 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2336 2334 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2337 2335
2338 2336 if (ierr != 0) {
2339 2337 if (ierr > 0) {
2340 2338 err = FC_MAKE_ERR(ierr);
2341 2339 goto lpgs_err;
2342 2340 }
2343 2341 break;
2344 2342 }
2345 2343 }
2346 2344 if (lp_addr == e_sptaddr) {
2347 2345 break;
2348 2346 }
2349 2347 ASSERT(lp_addr < e_sptaddr);
2350 2348
2351 2349 /*
2352 2350 * ierr == -1 means we failed to allocate a large page.
2353 2351 * so do a size down operation.
2354 2352 *
2355 2353 * ierr == -2 means some other process that privately shares
2356 2354 * pages with this process has allocated a larger page and we
2357 2355 * need to retry with larger pages. So do a size up
2358 2356 * operation. This relies on the fact that large pages are
2359 2357 * never partially shared i.e. if we share any constituent
2360 2358 * page of a large page with another process we must share the
2361 2359 * entire large page. Note this cannot happen for SOFTLOCK
2362 2360 * case, unless current address (lpaddr) is at the beginning
2363 2361 * of the next page size boundary because the other process
2364 2362 * couldn't have relocated locked pages.
2365 2363 */
2366 2364 ASSERT(ierr == -1 || ierr == -2);
2367 2365 if (segvn_anypgsz) {
2368 2366 ASSERT(ierr == -2 || szc != 0);
2369 2367 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2370 2368 szc = (ierr == -1) ? szc - 1 : szc + 1;
2371 2369 } else {
2372 2370 /*
2373 2371 * For faults and segvn_anypgsz == 0
2374 2372 * we need to be careful not to loop forever
2375 2373 * if existing page is found with szc other
2376 2374 * than 0 or seg->s_szc. This could be due
2377 2375 * to page relocations on behalf of DR or
2378 2376 * more likely large page creation. For this
2379 2377 * case simply re-size to existing page's szc
2380 2378 * if returned by anon_map_getpages().
2381 2379 */
2382 2380 if (ppa_szc == (uint_t)-1) {
2383 2381 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2384 2382 } else {
2385 2383 ASSERT(ppa_szc <= sptseg->s_szc);
2386 2384 ASSERT(ierr == -2 || ppa_szc < szc);
2387 2385 ASSERT(ierr == -1 || ppa_szc > szc);
2388 2386 szc = ppa_szc;
2389 2387 }
2390 2388 }
2391 2389 pg_sz = page_get_pagesize(szc);
2392 2390 lp_npgs = btop(pg_sz);
2393 2391 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2394 2392 }
2395 2393 if (anon_locked) {
2396 2394 anon_array_exit(&cookie);
2397 2395 }
2398 2396 	ANON_LOCK_EXIT(&amp->a_rwlock);
2399 2397 return (0);
2400 2398
2401 2399 lpgs_err:
2402 2400 if (anon_locked) {
2403 2401 anon_array_exit(&cookie);
2404 2402 }
2405 2403 	ANON_LOCK_EXIT(&amp->a_rwlock);
2406 2404 for (j = 0; j < ppa_idx; j++)
2407 2405 page_unlock(ppa[j]);
2408 2406 return (err);
2409 2407 }
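
When anon_map_getpages() cannot satisfy the current page-size code, the loop above walks a retry ladder: ierr == -1 steps the candidate size down, ierr == -2 (a sharer already built a larger page) steps it up, and with segvn_anypgsz clear it jumps straight to the size reported back in ppa_szc. A stripped-down, runnable sketch of that control flow, with the allocator replaced by a toy stand-in that only succeeds at size code 1:

#include <stdio.h>

/*
 * Toy stand-in for anon_map_getpages(): size codes above 1 fail and ask
 * for a size-down (-1); size 0 pretends a sharer holds a bigger page and
 * asks for a size-up (-2); size 1 succeeds.
 */
static int
try_getpages(unsigned szc)
{
	if (szc > 1)
		return (-1);
	if (szc < 1)
		return (-2);
	return (0);
}

int
main(void)
{
	unsigned szc = 3;	/* start at the segment's page-size code */
	int ierr;

	while ((ierr = try_getpages(szc)) != 0) {
		if (ierr > 0)
			return (1);	/* a real errno would be fatal here */
		szc = (ierr == -1) ? szc - 1 : szc + 1;
		printf("retrying with szc %u\n", szc);
	}
	printf("succeeded at szc %u\n", szc);
	return (0);
}
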
2410 2408
2411 2409 /*
2412 2410 * count the number of bytes in a set of spt pages that are currently not
2413 2411 * locked
2414 2412 */
2415 2413 static rctl_qty_t
2416 2414 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2417 2415 {
2418 2416 ulong_t i;
2419 2417 rctl_qty_t unlocked = 0;
2420 2418
2421 2419 for (i = 0; i < npages; i++) {
2422 2420 if (ppa[i]->p_lckcnt == 0)
2423 2421 unlocked += PAGESIZE;
2424 2422 }
2425 2423 return (unlocked);
2426 2424 }
2427 2425
2428 2426 extern u_longlong_t randtick(void);
2429 2427 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2430 2428 #define NLCK (NCPU_P2)
2431 2429 /* Random number with a range [0, n-1], n must be power of two */
2432 2430 #define RAND_P2(n) \
2433 2431 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2434 2432
2435 2433 int
2436 2434 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2437 2435 page_t **ppa, ulong_t *lockmap, size_t pos,
2438 2436 rctl_qty_t *locked)
2439 2437 {
2440 2438 struct shm_data *shmd = seg->s_data;
2441 2439 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2442 2440 ulong_t i;
2443 2441 int kernel;
2444 2442 pgcnt_t nlck = 0;
2445 2443 int rv = 0;
2446 2444 int use_reserved = 1;
2447 2445
2448 2446 /* return the number of bytes actually locked */
2449 2447 *locked = 0;
2450 2448
2451 2449 /*
2452 2450 * To avoid contention on freemem_lock, availrmem and pages_locked
2453 2451 * global counters are updated only every nlck locked pages instead of
2454 2452 * every time. Reserve nlck locks up front and deduct from this
2455 2453 * reservation for each page that requires a lock. When the reservation
2456 2454 * is consumed, reserve again. nlck is randomized, so the competing
2457 2455 * threads do not fall into a cyclic lock contention pattern. When
2458 2456 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2459 2457 * is used to lock pages.
2460 2458 */
2461 2459 for (i = 0; i < npages; anon_index++, pos++, i++) {
2462 2460 if (nlck == 0 && use_reserved == 1) {
2463 2461 nlck = NLCK + RAND_P2(NLCK);
2464 2462 /* if fewer loops left, decrease nlck */
2465 2463 nlck = MIN(nlck, npages - i);
2466 2464 /*
2467 2465 * Reserve nlck locks up front and deduct from this
2468 2466 * reservation for each page that requires a lock. When
2469 2467 * the reservation is consumed, reserve again.
2470 2468 */
2471 2469 mutex_enter(&freemem_lock);
2472 2470 if ((availrmem - nlck) < pages_pp_maximum) {
2473 2471 /* Do not do advance memory reserves */
2474 2472 use_reserved = 0;
2475 2473 } else {
2476 2474 availrmem -= nlck;
2477 2475 pages_locked += nlck;
2478 2476 }
2479 2477 mutex_exit(&freemem_lock);
2480 2478 }
2481 2479 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2482 2480 if (sptd->spt_ppa_lckcnt[anon_index] <
2483 2481 (ushort_t)DISM_LOCK_MAX) {
2484 2482 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2485 2483 (ushort_t)DISM_LOCK_MAX) {
2486 2484 cmn_err(CE_WARN,
2487 2485 "DISM page lock limit "
2488 2486 "reached on DISM offset 0x%lx\n",
2489 2487 anon_index << PAGESHIFT);
2490 2488 }
2491 2489 kernel = (sptd->spt_ppa &&
2492 2490 sptd->spt_ppa[anon_index]);
2493 2491 if (!page_pp_lock(ppa[i], 0, kernel ||
2494 2492 use_reserved)) {
2495 2493 sptd->spt_ppa_lckcnt[anon_index]--;
2496 2494 rv = EAGAIN;
2497 2495 break;
2498 2496 }
2499 2497 /* if this is a newly locked page, count it */
2500 2498 if (ppa[i]->p_lckcnt == 1) {
2501 2499 if (kernel == 0 && use_reserved == 1)
2502 2500 nlck--;
2503 2501 *locked += PAGESIZE;
2504 2502 }
2505 2503 shmd->shm_lckpgs++;
2506 2504 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2507 2505 if (lockmap != NULL)
2508 2506 BT_SET(lockmap, pos);
2509 2507 }
2510 2508 }
2511 2509 }
2512 2510 /* Return unused lock reservation */
2513 2511 if (nlck != 0 && use_reserved == 1) {
2514 2512 mutex_enter(&freemem_lock);
2515 2513 availrmem += nlck;
2516 2514 pages_locked -= nlck;
2517 2515 mutex_exit(&freemem_lock);
2518 2516 }
2519 2517
2520 2518 return (rv);
2521 2519 }
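
spt_lockpages() amortizes freemem_lock traffic: rather than adjusting availrmem and pages_locked once per page, it reserves a batch of NLCK to 2*NLCK-1 locks up front (the randomized size keeps competing threads from falling into lock-step), charges newly locked pages against that reservation, and returns whatever is left at the end. A user-space sketch of the same batching idea, with hypothetical names and a plain rand()-based batch size standing in for RAND_P2():

#include <pthread.h>
#include <stdlib.h>

#define NLCK	64			/* batch granularity (example value) */

static pthread_mutex_t freemem_lock = PTHREAD_MUTEX_INITIALIZER;
static long availrmem = 1L << 20;	/* contended global counter */

/* Charge npages page locks, taking the global lock only once per batch. */
static void
lock_pages_batched(long npages)
{
	long reserved = 0;

	for (long i = 0; i < npages; i++) {
		if (reserved == 0) {
			/* Pick a batch in [NLCK, 2*NLCK), clamped to the work left. */
			long batch = NLCK + (rand() & (NLCK - 1));
			if (batch > npages - i)
				batch = npages - i;
			pthread_mutex_lock(&freemem_lock);
			availrmem -= batch;
			pthread_mutex_unlock(&freemem_lock);
			reserved = batch;
		}
		/* ... lock page i here ... */
		reserved--;
	}
	if (reserved != 0) {		/* hand back the unused reservation */
		pthread_mutex_lock(&freemem_lock);
		availrmem += reserved;
		pthread_mutex_unlock(&freemem_lock);
	}
}
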
2522 2520
2523 2521 int
2524 2522 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2525 2523 rctl_qty_t *unlocked)
2526 2524 {
2527 2525 struct shm_data *shmd = seg->s_data;
2528 2526 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2529 2527 struct anon_map *amp = sptd->spt_amp;
2530 2528 struct anon *ap;
2531 2529 struct vnode *vp;
2532 2530 u_offset_t off;
2533 2531 struct page *pp;
2534 2532 int kernel;
2535 2533 anon_sync_obj_t cookie;
2536 2534 ulong_t i;
2537 2535 pgcnt_t nlck = 0;
2538 2536 pgcnt_t nlck_limit = NLCK;
2539 2537
2540 2538 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2541 2539 for (i = 0; i < npages; i++, anon_index++) {
2542 2540 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2543 2541 anon_array_enter(amp, anon_index, &cookie);
2544 2542 ap = anon_get_ptr(amp->ahp, anon_index);
2545 2543 ASSERT(ap);
2546 2544
2547 2545 swap_xlate(ap, &vp, &off);
2548 2546 anon_array_exit(&cookie);
2549 2547 pp = page_lookup(vp, off, SE_SHARED);
2550 2548 ASSERT(pp);
2551 2549 /*
2552 2550 * availrmem is decremented only for pages which are not
2553 2551 * in seg pcache, for pages in seg pcache availrmem was
2554 2552 * decremented in _dismpagelock()
2555 2553 */
2556 2554 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2557 2555 ASSERT(pp->p_lckcnt > 0);
2558 2556
2559 2557 /*
2560 2558 			 * unlock page but do not change availrmem, we do it
2561 2559 * ourselves every nlck loops.
2562 2560 */
2563 2561 page_pp_unlock(pp, 0, 1);
2564 2562 if (pp->p_lckcnt == 0) {
2565 2563 if (kernel == 0)
2566 2564 nlck++;
2567 2565 *unlocked += PAGESIZE;
2568 2566 }
2569 2567 page_unlock(pp);
2570 2568 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2571 2569 sptd->spt_ppa_lckcnt[anon_index]--;
2572 2570 shmd->shm_lckpgs--;
2573 2571 }
2574 2572
2575 2573 /*
2576 2574 * To reduce freemem_lock contention, do not update availrmem
2577 2575 * until at least NLCK pages have been unlocked.
2578 2576 * 1. No need to update if nlck is zero
2579 2577 * 2. Always update if the last iteration
2580 2578 */
2581 2579 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2582 2580 mutex_enter(&freemem_lock);
2583 2581 availrmem += nlck;
2584 2582 pages_locked -= nlck;
2585 2583 mutex_exit(&freemem_lock);
2586 2584 nlck = 0;
2587 2585 nlck_limit = NLCK + RAND_P2(NLCK);
2588 2586 }
2589 2587 }
2590 2588 	ANON_LOCK_EXIT(&amp->a_rwlock);
2591 2589
2592 2590 return (0);
2593 2591 }
2594 2592
2595 2593 /*ARGSUSED*/
2596 2594 static int
2597 2595 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2598 2596 int attr, int op, ulong_t *lockmap, size_t pos)
2599 2597 {
2600 2598 struct shm_data *shmd = seg->s_data;
2601 2599 struct seg *sptseg = shmd->shm_sptseg;
2602 2600 struct spt_data *sptd = sptseg->s_data;
2603 2601 struct kshmid *sp = sptd->spt_amp->a_sp;
2604 2602 pgcnt_t npages, a_npages;
2605 2603 page_t **ppa;
2606 2604 pgcnt_t an_idx, a_an_idx, ppa_idx;
2607 2605 caddr_t spt_addr, a_addr; /* spt and aligned address */
2608 2606 size_t a_len; /* aligned len */
2609 2607 size_t share_sz;
2610 2608 ulong_t i;
2611 2609 int sts = 0;
2612 2610 rctl_qty_t unlocked = 0;
2613 2611 rctl_qty_t locked = 0;
2614 2612 struct proc *p = curproc;
2615 2613 kproject_t *proj;
2616 2614
2617 2615 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2618 2616 ASSERT(sp != NULL);
2619 2617
2620 2618 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2621 2619 return (0);
2622 2620 }
2623 2621
2624 2622 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2625 2623 an_idx = seg_page(seg, addr);
2626 2624 npages = btopr(len);
2627 2625
2628 2626 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2629 2627 return (ENOMEM);
2630 2628 }
2631 2629
2632 2630 /*
2633 2631 * A shm's project never changes, so no lock needed.
2634 2632 * The shm has a hold on the project, so it will not go away.
2635 2633 * Since we have a mapping to shm within this zone, we know
2636 2634 * that the zone will not go away.
2637 2635 */
2638 2636 proj = sp->shm_perm.ipc_proj;
2639 2637
2640 2638 if (op == MC_LOCK) {
2641 2639
2642 2640 /*
2643 2641 * Need to align addr and size request if they are not
2644 2642 * aligned so we can always allocate large page(s) however
2645 2643 * we only lock what was requested in initial request.
2646 2644 */
2647 2645 share_sz = page_get_pagesize(sptseg->s_szc);
2648 2646 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2649 2647 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2650 2648 share_sz);
2651 2649 a_npages = btop(a_len);
2652 2650 a_an_idx = seg_page(seg, a_addr);
2653 2651 spt_addr = sptseg->s_base + ptob(a_an_idx);
2654 2652 ppa_idx = an_idx - a_an_idx;
2655 2653
2656 2654 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2657 2655 KM_NOSLEEP)) == NULL) {
2658 2656 return (ENOMEM);
2659 2657 }
2660 2658
2661 2659 /*
2662 2660 * Don't cache any new pages for IO and
2663 2661 * flush any cached pages.
2664 2662 */
2665 2663 mutex_enter(&sptd->spt_lock);
2666 2664 if (sptd->spt_ppa != NULL)
2667 2665 sptd->spt_flags |= DISM_PPA_CHANGED;
2668 2666
2669 2667 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2670 2668 if (sts != 0) {
2671 2669 mutex_exit(&sptd->spt_lock);
2672 2670 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2673 2671 return (sts);
2674 2672 }
2675 2673
2676 2674 mutex_enter(&sp->shm_mlock);
2677 2675 /* enforce locked memory rctl */
2678 2676 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2679 2677
2680 2678 mutex_enter(&p->p_lock);
2681 2679 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2682 2680 mutex_exit(&p->p_lock);
2683 2681 sts = EAGAIN;
2684 2682 } else {
2685 2683 mutex_exit(&p->p_lock);
2686 2684 sts = spt_lockpages(seg, an_idx, npages,
2687 2685 &ppa[ppa_idx], lockmap, pos, &locked);
2688 2686
2689 2687 /*
2690 2688 * correct locked count if not all pages could be
2691 2689 * locked
2692 2690 */
2693 2691 if ((unlocked - locked) > 0) {
2694 2692 rctl_decr_locked_mem(NULL, proj,
2695 2693 (unlocked - locked), 0);
2696 2694 }
2697 2695 }
2698 2696 /*
2699 2697 * unlock pages
2700 2698 */
2701 2699 for (i = 0; i < a_npages; i++)
2702 2700 page_unlock(ppa[i]);
2703 2701 if (sptd->spt_ppa != NULL)
2704 2702 sptd->spt_flags |= DISM_PPA_CHANGED;
2705 2703 mutex_exit(&sp->shm_mlock);
2706 2704 mutex_exit(&sptd->spt_lock);
2707 2705
2708 2706 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2709 2707
2710 2708 } else if (op == MC_UNLOCK) { /* unlock */
2711 2709 page_t **ppa;
2712 2710
2713 2711 mutex_enter(&sptd->spt_lock);
2714 2712 if (shmd->shm_lckpgs == 0) {
2715 2713 mutex_exit(&sptd->spt_lock);
2716 2714 return (0);
2717 2715 }
2718 2716 /*
2719 2717 * Don't cache new IO pages.
2720 2718 */
2721 2719 if (sptd->spt_ppa != NULL)
2722 2720 sptd->spt_flags |= DISM_PPA_CHANGED;
2723 2721
2724 2722 mutex_enter(&sp->shm_mlock);
2725 2723 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2726 2724 if ((ppa = sptd->spt_ppa) != NULL)
2727 2725 sptd->spt_flags |= DISM_PPA_CHANGED;
2728 2726 mutex_exit(&sptd->spt_lock);
2729 2727
2730 2728 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2731 2729 mutex_exit(&sp->shm_mlock);
2732 2730
2733 2731 if (ppa != NULL)
2734 2732 seg_ppurge_wiredpp(ppa);
2735 2733 }
2736 2734 return (sts);
2737 2735 }
2738 2736
2739 2737 /*ARGSUSED*/
2740 2738 int
2741 2739 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2742 2740 {
2743 2741 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2744 2742 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2745 2743 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2746 2744
2747 2745 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2748 2746
2749 2747 /*
2750 2748 * ISM segment is always rw.
2751 2749 */
2752 2750 while (--pgno >= 0)
2753 2751 *protv++ = sptd->spt_prot;
2754 2752 return (0);
2755 2753 }
2756 2754
2757 2755 /*ARGSUSED*/
2758 2756 u_offset_t
2759 2757 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2760 2758 {
2761 2759 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2762 2760
2763 2761 /* Offset does not matter in ISM memory */
2764 2762
2765 2763 return ((u_offset_t)0);
2766 2764 }
2767 2765
2768 2766 /* ARGSUSED */
2769 2767 int
2770 2768 segspt_shmgettype(struct seg *seg, caddr_t addr)
2771 2769 {
2772 2770 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2773 2771 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2774 2772
2775 2773 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2776 2774
2777 2775 /*
2778 2776 * The shared memory mapping is always MAP_SHARED, SWAP is only
2779 2777 * reserved for DISM
2780 2778 */
2781 2779 return (MAP_SHARED |
2782 2780 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2783 2781 }
2784 2782
2785 2783 /*ARGSUSED*/
2786 2784 int
2787 2785 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2788 2786 {
2789 2787 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2790 2788 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2791 2789
2792 2790 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2793 2791
2794 2792 *vpp = sptd->spt_vp;
2795 2793 return (0);
2796 2794 }
2797 2795
2798 2796 /*
2799 2797 * We need to wait for pending IO to complete to a DISM segment in order for
2800 2798 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2801 2799 * than enough time to wait.
2802 2800 */
2803 2801 static clock_t spt_pcache_wait = 120;
2804 2802
2805 2803 /*ARGSUSED*/
2806 2804 static int
2807 2805 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2808 2806 {
2809 2807 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2810 2808 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2811 2809 struct anon_map *amp;
2812 2810 pgcnt_t pg_idx;
2813 2811 ushort_t gen;
2814 2812 clock_t end_lbolt;
2815 2813 int writer;
2816 2814 page_t **ppa;
2817 2815
2818 2816 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2819 2817
2820 2818 if (behav == MADV_FREE) {
2821 2819 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2822 2820 return (0);
2823 2821
2824 2822 amp = sptd->spt_amp;
2825 2823 pg_idx = seg_page(seg, addr);
2826 2824
2827 2825 mutex_enter(&sptd->spt_lock);
2828 2826 if ((ppa = sptd->spt_ppa) == NULL) {
2829 2827 mutex_exit(&sptd->spt_lock);
2830 2828 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2831 2829 anon_disclaim(amp, pg_idx, len);
2832 2830 			ANON_LOCK_EXIT(&amp->a_rwlock);
2833 2831 return (0);
2834 2832 }
2835 2833
2836 2834 sptd->spt_flags |= DISM_PPA_CHANGED;
2837 2835 gen = sptd->spt_gen;
2838 2836
2839 2837 mutex_exit(&sptd->spt_lock);
2840 2838
2841 2839 /*
2842 2840 * Purge all DISM cached pages
2843 2841 */
2844 2842 seg_ppurge_wiredpp(ppa);
2845 2843
2846 2844 /*
2847 2845 * Drop the AS_LOCK so that other threads can grab it
2848 2846 * in the as_pageunlock path and hopefully get the segment
2849 2847 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2850 2848 * to keep this segment resident.
2851 2849 */
2852 2850 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2853 2851 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2854 2852 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2855 2853
2856 2854 mutex_enter(&sptd->spt_lock);
2857 2855
2858 2856 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2859 2857
2860 2858 /*
2861 2859 * Try to wait for pages to get kicked out of the seg_pcache.
2862 2860 */
2863 2861 while (sptd->spt_gen == gen &&
2864 2862 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2865 2863 ddi_get_lbolt() < end_lbolt) {
2866 2864 if (!cv_timedwait_sig(&sptd->spt_cv,
2867 2865 &sptd->spt_lock, end_lbolt)) {
2868 2866 break;
2869 2867 }
2870 2868 }
2871 2869
2872 2870 mutex_exit(&sptd->spt_lock);
2873 2871
2874 2872 /* Regrab the AS_LOCK and release our hold on the segment */
2875 2873 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2876 2874 writer ? RW_WRITER : RW_READER);
2877 2875 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2878 2876 if (shmd->shm_softlockcnt <= 0) {
2879 2877 if (AS_ISUNMAPWAIT(seg->s_as)) {
2880 2878 mutex_enter(&seg->s_as->a_contents);
2881 2879 if (AS_ISUNMAPWAIT(seg->s_as)) {
2882 2880 AS_CLRUNMAPWAIT(seg->s_as);
2883 2881 cv_broadcast(&seg->s_as->a_cv);
2884 2882 }
2885 2883 mutex_exit(&seg->s_as->a_contents);
2886 2884 }
2887 2885 }
2888 2886
2889 2887 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2890 2888 anon_disclaim(amp, pg_idx, len);
2891 2889 		ANON_LOCK_EXIT(&amp->a_rwlock);
2892 2890 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2893 2891 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2894 2892 int already_set;
2895 2893 ulong_t anon_index;
2896 2894 lgrp_mem_policy_t policy;
2897 2895 caddr_t shm_addr;
2898 2896 size_t share_size;
2899 2897 size_t size;
2900 2898 struct seg *sptseg = shmd->shm_sptseg;
2901 2899 caddr_t sptseg_addr;
2902 2900
2903 2901 /*
2904 2902 * Align address and length to page size of underlying segment
2905 2903 */
2906 2904 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2907 2905 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2908 2906 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2909 2907 share_size);
2910 2908
2911 2909 amp = shmd->shm_amp;
2912 2910 anon_index = seg_page(seg, shm_addr);
2913 2911
2914 2912 /*
2915 2913 * And now we may have to adjust size downward if we have
2916 2914 * exceeded the realsize of the segment or initial anon
2917 2915 * allocations.
2918 2916 */
2919 2917 sptseg_addr = sptseg->s_base + ptob(anon_index);
2920 2918 if ((sptseg_addr + size) >
2921 2919 (sptseg->s_base + sptd->spt_realsize))
2922 2920 size = (sptseg->s_base + sptd->spt_realsize) -
2923 2921 sptseg_addr;
2924 2922
2925 2923 /*
2926 2924 * Set memory allocation policy for this segment
2927 2925 */
2928 2926 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2929 2927 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2930 2928 NULL, 0, len);
2931 2929
2932 2930 /*
2933 2931 * If random memory allocation policy set already,
2934 2932 * don't bother reapplying it.
2935 2933 */
2936 2934 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2937 2935 return (0);
2938 2936
2939 2937 /*
2940 2938 * Mark any existing pages in the given range for
2941 2939 * migration, flushing the I/O page cache, and using
2942 2940 * underlying segment to calculate anon index and get
2943 2941 * anonmap and vnode pointer from
2944 2942 */
2945 2943 if (shmd->shm_softlockcnt > 0)
2946 2944 segspt_purge(seg);
2947 2945
2948 2946 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2949 2947 }
2950 2948
2951 2949 return (0);
2952 2950 }
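
In the MADV_FREE path above the caller purges the wired page lists and then blocks for up to spt_pcache_wait (120) seconds until either the generation count moves or DISM_PPA_CHANGED is cleared, re-evaluating the predicate on every wakeup so a stray signal cannot end the wait early. A compact user-space analogue of that bounded wait, using pthread_cond_timedwait() and hypothetical state names:

#include <pthread.h>
#include <time.h>

static pthread_mutex_t spt_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t spt_cv = PTHREAD_COND_INITIALIZER;
static unsigned spt_gen;	/* bumped when the cached page list turns over */
static int ppa_changed;		/* analogue of DISM_PPA_CHANGED */

/* Wait up to wait_secs for the purge to take effect; returns 0 on success. */
static int
wait_for_purge(unsigned gen_seen, int wait_secs)
{
	struct timespec deadline;
	int done;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += wait_secs;

	pthread_mutex_lock(&spt_lock);
	while (spt_gen == gen_seen && ppa_changed) {
		/* Re-check the predicate after every wakeup or timeout. */
		if (pthread_cond_timedwait(&spt_cv, &spt_lock, &deadline) != 0)
			break;		/* timed out: stop waiting */
	}
	done = (spt_gen != gen_seen || !ppa_changed);
	pthread_mutex_unlock(&spt_lock);
	return (done ? 0 : -1);
}
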
2953 2951
2954 2952 /*ARGSUSED*/
2955 2953 void
2956 2954 segspt_shmdump(struct seg *seg)
2957 2955 {
2958 2956 /* no-op for ISM segment */
2959 -}
2960 -
2961 -/*ARGSUSED*/
2962 -static faultcode_t
2963 -segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2964 -{
2965 - return (ENOTSUP);
2966 2957 }
2967 2958
2968 2959 /*
2969 2960 * get a memory ID for an addr in a given segment
2970 2961 */
2971 2962 static int
2972 2963 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2973 2964 {
2974 2965 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2975 2966 struct anon *ap;
2976 2967 size_t anon_index;
2977 2968 struct anon_map *amp = shmd->shm_amp;
2978 2969 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2979 2970 struct seg *sptseg = shmd->shm_sptseg;
2980 2971 anon_sync_obj_t cookie;
2981 2972
2982 2973 anon_index = seg_page(seg, addr);
2983 2974
2984 2975 if (addr > (seg->s_base + sptd->spt_realsize)) {
2985 2976 return (EFAULT);
2986 2977 }
2987 2978
2988 2979 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2989 2980 anon_array_enter(amp, anon_index, &cookie);
2990 2981 ap = anon_get_ptr(amp->ahp, anon_index);
2991 2982 if (ap == NULL) {
2992 2983 struct page *pp;
2993 2984 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2994 2985
2995 2986 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2996 2987 if (pp == NULL) {
2997 2988 anon_array_exit(&cookie);
2998 2989 			ANON_LOCK_EXIT(&amp->a_rwlock);
2999 2990 return (ENOMEM);
3000 2991 }
3001 2992 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3002 2993 page_unlock(pp);
3003 2994 }
3004 2995 anon_array_exit(&cookie);
3005 2996 	ANON_LOCK_EXIT(&amp->a_rwlock);
3006 2997 memidp->val[0] = (uintptr_t)ap;
3007 2998 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3008 2999 return (0);
3009 3000 }
3010 3001
3011 3002 /*
3012 3003 * Get memory allocation policy info for specified address in given segment
3013 3004 */
3014 3005 static lgrp_mem_policy_info_t *
3015 3006 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3016 3007 {
3017 3008 struct anon_map *amp;
3018 3009 ulong_t anon_index;
3019 3010 lgrp_mem_policy_info_t *policy_info;
3020 3011 struct shm_data *shm_data;
3021 3012
3022 3013 ASSERT(seg != NULL);
3023 3014
3024 3015 /*
3025 3016 * Get anon_map from segshm
3026 3017 *
3027 3018 * Assume that no lock needs to be held on anon_map, since
3028 3019 * it should be protected by its reference count which must be
3029 3020 * nonzero for an existing segment
3030 3021 * Need to grab readers lock on policy tree though
3031 3022 */
3032 3023 shm_data = (struct shm_data *)seg->s_data;
3033 3024 if (shm_data == NULL)
3034 3025 return (NULL);
3035 3026 amp = shm_data->shm_amp;
3036 3027 ASSERT(amp->refcnt != 0);
3037 3028
3038 3029 /*
3039 3030 * Get policy info
3040 3031 *
3041 3032 * Assume starting anon index of 0
3042 3033 */
3043 3034 anon_index = seg_page(seg, addr);
3044 3035 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3045 3036
3046 3037 return (policy_info);
3047 3038 }