seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
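The diff below drops the explicit ".inherit = seg_inherit_notsup" initializers from segspt_ops and segspt_shmops: the generic segop_inherit() dispatch already treats a NULL .inherit entry as "operation not supported", so a dedicated stub adds nothing. A minimal sketch of that dispatch pattern is shown here for context; the exact wrapper in vm/seg.c may differ in name and signature, so treat this as an illustration rather than the verbatim interface.

    int
    segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
    {
    	/*
    	 * A segment driver that does not implement inherit simply
    	 * leaves the ops-vector entry NULL; the wrapper rejects the
    	 * request instead of requiring a per-driver stub.
    	 */
    	if (seg->s_ops->inherit == NULL)
    		return (ENOTSUP);

    	return (seg->s_ops->inherit(seg, addr, len, op));
    }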
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to a smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
[ 72 lines elided ]
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 struct seg_ops segspt_ops = {
80 80 .unmap = segspt_unmap,
81 81 .free = segspt_free,
82 82 .getpolicy = segspt_getpolicy,
83 - .inherit = seg_inherit_notsup,
84 83 };
85 84
86 85 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
87 86 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
88 87 static void segspt_shmfree(struct seg *seg);
89 88 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
90 89 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
91 90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
92 91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
93 92 register size_t len, register uint_t prot);
94 93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
95 94 uint_t prot);
96 95 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
97 96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
98 97 register char *vec);
99 98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
100 99 int attr, uint_t flags);
101 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
102 101 int attr, int op, ulong_t *lockmap, size_t pos);
103 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
104 103 uint_t *protv);
105 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
106 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
107 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
108 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
109 108 uint_t behav);
110 109 static void segspt_shmdump(struct seg *seg);
111 110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
112 111 struct page ***, enum lock_type, enum seg_rw);
113 112 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
114 113 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
115 114 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
116 115 static int segspt_shmcapable(struct seg *, segcapability_t);
117 116
118 117 struct seg_ops segspt_shmops = {
119 118 .dup = segspt_shmdup,
120 119 .unmap = segspt_shmunmap,
121 120 .free = segspt_shmfree,
122 121 .fault = segspt_shmfault,
123 122 .faulta = segspt_shmfaulta,
124 123 .setprot = segspt_shmsetprot,
125 124 .checkprot = segspt_shmcheckprot,
126 125 .kluster = segspt_shmkluster,
127 126 .sync = segspt_shmsync,
128 127 .incore = segspt_shmincore,
129 128 .lockop = segspt_shmlockop,
130 129 .getprot = segspt_shmgetprot,
[ 37 lines elided ]
131 130 .getoffset = segspt_shmgetoffset,
132 131 .gettype = segspt_shmgettype,
133 132 .getvp = segspt_shmgetvp,
134 133 .advise = segspt_shmadvise,
135 134 .dump = segspt_shmdump,
136 135 .pagelock = segspt_shmpagelock,
137 136 .setpagesize = segspt_shmsetpgsz,
138 137 .getmemid = segspt_shmgetmemid,
139 138 .getpolicy = segspt_shmgetpolicy,
140 139 .capable = segspt_shmcapable,
141 - .inherit = seg_inherit_notsup,
142 140 };
143 141
144 142 static void segspt_purge(struct seg *seg);
145 143 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
146 144 enum seg_rw, int);
147 145 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
148 146 page_t **ppa);
149 147
150 148
151 149
152 150 /*ARGSUSED*/
153 151 int
154 152 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
155 153 uint_t prot, uint_t flags, uint_t share_szc)
156 154 {
157 155 int err;
158 156 struct as *newas;
159 157 struct segspt_crargs sptcargs;
160 158
161 159 #ifdef DEBUG
162 160 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
163 161 tnf_ulong, size, size );
164 162 #endif
165 163 if (segspt_minfree == 0) /* leave min 5% of availrmem */
166 164 segspt_minfree = availrmem/20; /* for the system */
167 165
168 166 if (!hat_supported(HAT_SHARED_PT, (void *)0))
169 167 return (EINVAL);
170 168
171 169 /*
172 170 * get a new as for this shared memory segment
173 171 */
174 172 newas = as_alloc();
175 173 newas->a_proc = NULL;
176 174 sptcargs.amp = amp;
177 175 sptcargs.prot = prot;
178 176 sptcargs.flags = flags;
179 177 sptcargs.szc = share_szc;
180 178 /*
181 179 * create a shared page table (spt) segment
182 180 */
183 181
184 182 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
185 183 as_free(newas);
186 184 return (err);
187 185 }
188 186 *sptseg = sptcargs.seg_spt;
189 187 return (0);
190 188 }
191 189
192 190 void
193 191 sptdestroy(struct as *as, struct anon_map *amp)
194 192 {
195 193
196 194 #ifdef DEBUG
197 195 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
198 196 #endif
199 197 (void) as_unmap(as, SEGSPTADDR, amp->size);
200 198 as_free(as);
201 199 }
202 200
203 201 /*
204 202 * called from seg_free().
205 203 * free (i.e., unlock, unmap, return to free list)
206 204 * all the pages in the given seg.
207 205 */
208 206 void
209 207 segspt_free(struct seg *seg)
210 208 {
211 209 struct spt_data *sptd = (struct spt_data *)seg->s_data;
212 210
213 211 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
214 212
215 213 if (sptd != NULL) {
216 214 if (sptd->spt_realsize)
217 215 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
218 216
219 217 if (sptd->spt_ppa_lckcnt)
220 218 kmem_free(sptd->spt_ppa_lckcnt,
221 219 sizeof (*sptd->spt_ppa_lckcnt)
222 220 * btopr(sptd->spt_amp->size));
223 221 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
224 222 cv_destroy(&sptd->spt_cv);
225 223 mutex_destroy(&sptd->spt_lock);
226 224 kmem_free(sptd, sizeof (*sptd));
227 225 }
228 226 }
229 227
230 228 /*ARGSUSED*/
231 229 static int
232 230 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
233 231 uint_t flags)
234 232 {
235 233 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
236 234
237 235 return (0);
238 236 }
239 237
240 238 /*ARGSUSED*/
241 239 static size_t
242 240 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
243 241 {
244 242 caddr_t eo_seg;
245 243 pgcnt_t npages;
246 244 struct shm_data *shmd = (struct shm_data *)seg->s_data;
247 245 struct seg *sptseg;
248 246 struct spt_data *sptd;
249 247
250 248 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
251 249 #ifdef lint
252 250 seg = seg;
253 251 #endif
254 252 sptseg = shmd->shm_sptseg;
255 253 sptd = sptseg->s_data;
256 254
257 255 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
258 256 eo_seg = addr + len;
259 257 while (addr < eo_seg) {
260 258 /* page exists, and it's locked. */
261 259 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
262 260 SEG_PAGE_ANON;
263 261 addr += PAGESIZE;
264 262 }
265 263 return (len);
266 264 } else {
267 265 struct anon_map *amp = shmd->shm_amp;
268 266 struct anon *ap;
269 267 page_t *pp;
270 268 pgcnt_t anon_index;
271 269 struct vnode *vp;
272 270 u_offset_t off;
273 271 ulong_t i;
274 272 int ret;
275 273 anon_sync_obj_t cookie;
276 274
277 275 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
278 276 anon_index = seg_page(seg, addr);
279 277 npages = btopr(len);
280 278 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
281 279 return (EINVAL);
282 280 }
283 281 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
284 282 for (i = 0; i < npages; i++, anon_index++) {
285 283 ret = 0;
286 284 anon_array_enter(amp, anon_index, &cookie);
287 285 ap = anon_get_ptr(amp->ahp, anon_index);
288 286 if (ap != NULL) {
289 287 swap_xlate(ap, &vp, &off);
290 288 anon_array_exit(&cookie);
291 289 pp = page_lookup_nowait(vp, off, SE_SHARED);
292 290 if (pp != NULL) {
293 291 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
294 292 page_unlock(pp);
295 293 }
296 294 } else {
297 295 anon_array_exit(&cookie);
298 296 }
299 297 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
300 298 ret |= SEG_PAGE_LOCKED;
301 299 }
302 300 *vec++ = (char)ret;
303 301 }
304 302 ANON_LOCK_EXIT(&amp->a_rwlock);
305 303 return (len);
306 304 }
307 305 }
308 306
309 307 static int
310 308 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
311 309 {
312 310 size_t share_size;
313 311
314 312 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
315 313
316 314 /*
317 315 * seg.s_size may have been rounded up to the largest page size
318 316 * in shmat().
319 317 * XXX This should be cleaned up. sptdestroy should take a length
320 318 * argument which should be the same as sptcreate. Then
321 319 * this rounding would not be needed (or is done in shm.c)
322 320 * Only the check for full segment will be needed.
323 321 *
324 322 * XXX -- shouldn't raddr == 0 always? These tests don't seem
325 323 * to be useful at all.
326 324 */
327 325 share_size = page_get_pagesize(seg->s_szc);
328 326 ssize = P2ROUNDUP(ssize, share_size);
329 327
330 328 if (raddr == seg->s_base && ssize == seg->s_size) {
331 329 seg_free(seg);
332 330 return (0);
333 331 } else
334 332 return (EINVAL);
335 333 }
336 334
337 335 int
338 336 segspt_create(struct seg *seg, caddr_t argsp)
339 337 {
340 338 int err;
341 339 caddr_t addr = seg->s_base;
342 340 struct spt_data *sptd;
343 341 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
344 342 struct anon_map *amp = sptcargs->amp;
345 343 struct kshmid *sp = amp->a_sp;
346 344 struct cred *cred = CRED();
347 345 ulong_t i, j, anon_index = 0;
348 346 pgcnt_t npages = btopr(amp->size);
349 347 struct vnode *vp;
350 348 page_t **ppa;
351 349 uint_t hat_flags;
352 350 size_t pgsz;
353 351 pgcnt_t pgcnt;
354 352 caddr_t a;
355 353 pgcnt_t pidx;
356 354 size_t sz;
357 355 proc_t *procp = curproc;
358 356 rctl_qty_t lockedbytes = 0;
359 357 kproject_t *proj;
360 358
361 359 /*
362 360 * We are holding the a_lock on the underlying dummy as,
363 361 * so we can make calls to the HAT layer.
364 362 */
365 363 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
366 364 ASSERT(sp != NULL);
367 365
368 366 #ifdef DEBUG
369 367 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
370 368 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
371 369 #endif
372 370 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
373 371 if (err = anon_swap_adjust(npages))
374 372 return (err);
375 373 }
376 374 err = ENOMEM;
377 375
378 376 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
379 377 goto out1;
380 378
381 379 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
382 380 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
383 381 KM_NOSLEEP)) == NULL)
384 382 goto out2;
385 383 }
386 384
387 385 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
388 386
389 387 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
390 388 goto out3;
391 389
392 390 seg->s_ops = &segspt_ops;
393 391 sptd->spt_vp = vp;
394 392 sptd->spt_amp = amp;
395 393 sptd->spt_prot = sptcargs->prot;
396 394 sptd->spt_flags = sptcargs->flags;
397 395 seg->s_data = (caddr_t)sptd;
398 396 sptd->spt_ppa = NULL;
399 397 sptd->spt_ppa_lckcnt = NULL;
400 398 seg->s_szc = sptcargs->szc;
401 399 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
402 400 sptd->spt_gen = 0;
403 401
404 402 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
405 403 if (seg->s_szc > amp->a_szc) {
406 404 amp->a_szc = seg->s_szc;
407 405 }
408 406 ANON_LOCK_EXIT(&amp->a_rwlock);
409 407
410 408 /*
411 409 * Set policy to affect initial allocation of pages in
412 410 * anon_map_createpages()
413 411 */
414 412 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
415 413 NULL, 0, ptob(npages));
416 414
417 415 if (sptcargs->flags & SHM_PAGEABLE) {
418 416 size_t share_sz;
419 417 pgcnt_t new_npgs, more_pgs;
420 418 struct anon_hdr *nahp;
421 419 zone_t *zone;
422 420
423 421 share_sz = page_get_pagesize(seg->s_szc);
424 422 if (!IS_P2ALIGNED(amp->size, share_sz)) {
425 423 /*
426 424 * We are rounding up the size of the anon array
427 425 * to a 4 M boundary because we always create 4 M
428 426 * of page(s) when locking and faulting pages, and we
429 427 * don't have to check for all corner cases, e.g.
430 428 * whether there is enough space to allocate a 4 M
431 429 * page.
432 430 */
433 431 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
434 432 more_pgs = new_npgs - npages;
435 433
436 434 /*
437 435 * The zone will never be NULL, as a fully created
438 436 * shm always has an owning zone.
439 437 */
440 438 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
441 439 ASSERT(zone != NULL);
442 440 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
443 441 err = ENOMEM;
444 442 goto out4;
445 443 }
446 444
447 445 nahp = anon_create(new_npgs, ANON_SLEEP);
448 446 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
449 447 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
450 448 ANON_SLEEP);
451 449 anon_release(amp->ahp, npages);
452 450 amp->ahp = nahp;
453 451 ASSERT(amp->swresv == ptob(npages));
454 452 amp->swresv = amp->size = ptob(new_npgs);
455 453 ANON_LOCK_EXIT(&amp->a_rwlock);
456 454 npages = new_npgs;
457 455 }
458 456
459 457 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
460 458 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
461 459 sptd->spt_pcachecnt = 0;
462 460 sptd->spt_realsize = ptob(npages);
463 461 sptcargs->seg_spt = seg;
464 462 return (0);
465 463 }
466 464
467 465 /*
468 466 * get array of pages for each anon slot in amp
469 467 */
470 468 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
471 469 seg, addr, S_CREATE, cred)) != 0)
472 470 goto out4;
473 471
474 472 mutex_enter(&sp->shm_mlock);
475 473
476 474 /* May be partially locked, so, count bytes to charge for locking */
477 475 for (i = 0; i < npages; i++)
478 476 if (ppa[i]->p_lckcnt == 0)
479 477 lockedbytes += PAGESIZE;
480 478
481 479 proj = sp->shm_perm.ipc_proj;
482 480
483 481 if (lockedbytes > 0) {
484 482 mutex_enter(&procp->p_lock);
485 483 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
486 484 mutex_exit(&procp->p_lock);
487 485 mutex_exit(&sp->shm_mlock);
488 486 for (i = 0; i < npages; i++)
489 487 page_unlock(ppa[i]);
490 488 err = ENOMEM;
491 489 goto out4;
492 490 }
493 491 mutex_exit(&procp->p_lock);
494 492 }
495 493
496 494 /*
497 495 * addr is initial address corresponding to the first page on ppa list
498 496 */
499 497 for (i = 0; i < npages; i++) {
500 498 /* attempt to lock all pages */
501 499 if (page_pp_lock(ppa[i], 0, 1) == 0) {
502 500 /*
503 501 * if unable to lock any page, unlock all
504 502 * of them and return error
505 503 */
506 504 for (j = 0; j < i; j++)
507 505 page_pp_unlock(ppa[j], 0, 1);
508 506 for (i = 0; i < npages; i++)
509 507 page_unlock(ppa[i]);
510 508 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
511 509 mutex_exit(&sp->shm_mlock);
512 510 err = ENOMEM;
513 511 goto out4;
514 512 }
515 513 }
516 514 mutex_exit(&sp->shm_mlock);
517 515
518 516 /*
519 517 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
520 518 * for the entire life of the segment. For example platforms
521 519 * that do not support Dynamic Reconfiguration.
522 520 */
523 521 hat_flags = HAT_LOAD_SHARE;
524 522 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
525 523 hat_flags |= HAT_LOAD_LOCK;
526 524
527 525 /*
528 526 * Load translations one large page at a time
529 527 * to make sure we don't create mappings bigger than
530 528 * segment's size code in case underlying pages
531 529 * are shared with segvn's segment that uses bigger
532 530 * size code than we do.
533 531 */
534 532 pgsz = page_get_pagesize(seg->s_szc);
535 533 pgcnt = page_get_pagecnt(seg->s_szc);
536 534 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
537 535 sz = MIN(pgsz, ptob(npages - pidx));
538 536 hat_memload_array(seg->s_as->a_hat, a, sz,
539 537 &ppa[pidx], sptd->spt_prot, hat_flags);
540 538 }
541 539
542 540 /*
543 541 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
544 542 * we will leave the pages locked SE_SHARED for the life
545 543 * of the ISM segment. This will prevent any calls to
546 544 * hat_pageunload() on this ISM segment for those platforms.
547 545 */
548 546 if (!(hat_flags & HAT_LOAD_LOCK)) {
549 547 /*
550 548 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
551 549 * we no longer need to hold the SE_SHARED lock on the pages,
552 550 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
553 551 * SE_SHARED lock on the pages as necessary.
554 552 */
555 553 for (i = 0; i < npages; i++)
556 554 page_unlock(ppa[i]);
557 555 }
558 556 sptd->spt_pcachecnt = 0;
559 557 kmem_free(ppa, ((sizeof (page_t *)) * npages));
560 558 sptd->spt_realsize = ptob(npages);
561 559 atomic_add_long(&spt_used, npages);
562 560 sptcargs->seg_spt = seg;
563 561 return (0);
564 562
565 563 out4:
566 564 seg->s_data = NULL;
567 565 kmem_free(vp, sizeof (*vp));
568 566 cv_destroy(&sptd->spt_cv);
569 567 out3:
570 568 mutex_destroy(&sptd->spt_lock);
571 569 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
572 570 kmem_free(ppa, (sizeof (*ppa) * npages));
573 571 out2:
574 572 kmem_free(sptd, sizeof (*sptd));
575 573 out1:
576 574 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
577 575 anon_swap_restore(npages);
578 576 return (err);
579 577 }
580 578
581 579 /*ARGSUSED*/
582 580 void
583 581 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
584 582 {
585 583 struct page *pp;
586 584 struct spt_data *sptd = (struct spt_data *)seg->s_data;
587 585 pgcnt_t npages;
588 586 ulong_t anon_idx;
589 587 struct anon_map *amp;
590 588 struct anon *ap;
591 589 struct vnode *vp;
592 590 u_offset_t off;
593 591 uint_t hat_flags;
594 592 int root = 0;
595 593 pgcnt_t pgs, curnpgs = 0;
596 594 page_t *rootpp;
597 595 rctl_qty_t unlocked_bytes = 0;
598 596 kproject_t *proj;
599 597 kshmid_t *sp;
600 598
601 599 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
602 600
603 601 len = P2ROUNDUP(len, PAGESIZE);
604 602
605 603 npages = btop(len);
606 604
607 605 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
608 606 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
609 607 (sptd->spt_flags & SHM_PAGEABLE)) {
610 608 hat_flags = HAT_UNLOAD_UNMAP;
611 609 }
612 610
613 611 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
614 612
615 613 amp = sptd->spt_amp;
616 614 if (sptd->spt_flags & SHM_PAGEABLE)
617 615 npages = btop(amp->size);
618 616
619 617 ASSERT(amp != NULL);
620 618
621 619 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
622 620 sp = amp->a_sp;
623 621 proj = sp->shm_perm.ipc_proj;
624 622 mutex_enter(&sp->shm_mlock);
625 623 }
626 624 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
627 625 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
628 626 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
629 627 panic("segspt_free_pages: null app");
630 628 /*NOTREACHED*/
631 629 }
632 630 } else {
633 631 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
634 632 == NULL)
635 633 continue;
636 634 }
637 635 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
638 636 swap_xlate(ap, &vp, &off);
639 637
640 638 /*
641 639 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
642 640 * the pages won't be having SE_SHARED lock at this
643 641 * point.
644 642 *
645 643 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
646 644 * the pages are still held SE_SHARED locked from the
647 645 * original segspt_create()
648 646 *
649 647 * Our goal is to get SE_EXCL lock on each page, remove
650 648 * permanent lock on it and invalidate the page.
651 649 */
652 650 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
653 651 if (hat_flags == HAT_UNLOAD_UNMAP)
654 652 pp = page_lookup(vp, off, SE_EXCL);
655 653 else {
656 654 if ((pp = page_find(vp, off)) == NULL) {
657 655 panic("segspt_free_pages: "
658 656 "page not locked");
659 657 /*NOTREACHED*/
660 658 }
661 659 if (!page_tryupgrade(pp)) {
662 660 page_unlock(pp);
663 661 pp = page_lookup(vp, off, SE_EXCL);
664 662 }
665 663 }
666 664 if (pp == NULL) {
667 665 panic("segspt_free_pages: "
668 666 "page not in the system");
669 667 /*NOTREACHED*/
670 668 }
671 669 ASSERT(pp->p_lckcnt > 0);
672 670 page_pp_unlock(pp, 0, 1);
673 671 if (pp->p_lckcnt == 0)
674 672 unlocked_bytes += PAGESIZE;
675 673 } else {
676 674 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
677 675 continue;
678 676 }
679 677 /*
680 678 * It's logical to invalidate the pages here as in most cases
681 679 * these were created by segspt.
682 680 */
683 681 if (pp->p_szc != 0) {
684 682 if (root == 0) {
685 683 ASSERT(curnpgs == 0);
686 684 root = 1;
687 685 rootpp = pp;
688 686 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
689 687 ASSERT(pgs > 1);
690 688 ASSERT(IS_P2ALIGNED(pgs, pgs));
691 689 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
692 690 curnpgs--;
693 691 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
694 692 ASSERT(curnpgs == 1);
695 693 ASSERT(page_pptonum(pp) ==
696 694 page_pptonum(rootpp) + (pgs - 1));
697 695 page_destroy_pages(rootpp);
698 696 root = 0;
699 697 curnpgs = 0;
700 698 } else {
701 699 ASSERT(curnpgs > 1);
702 700 ASSERT(page_pptonum(pp) ==
703 701 page_pptonum(rootpp) + (pgs - curnpgs));
704 702 curnpgs--;
705 703 }
706 704 } else {
707 705 if (root != 0 || curnpgs != 0) {
708 706 panic("segspt_free_pages: bad large page");
709 707 /*NOTREACHED*/
710 708 }
711 709 /*
712 710 * Before destroying the pages, we need to take care
713 711 * of the rctl locked memory accounting. For that
714 712 * we need to calculate the unlocked_bytes.
715 713 */
716 714 if (pp->p_lckcnt > 0)
717 715 unlocked_bytes += PAGESIZE;
718 716 /*LINTED: constant in conditional context */
719 717 VN_DISPOSE(pp, B_INVAL, 0, kcred);
720 718 }
721 719 }
722 720 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
723 721 if (unlocked_bytes > 0)
724 722 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
725 723 mutex_exit(&sp->shm_mlock);
726 724 }
727 725 if (root != 0 || curnpgs != 0) {
728 726 panic("segspt_free_pages: bad large page");
729 727 /*NOTREACHED*/
730 728 }
731 729
732 730 /*
733 731 * mark that pages have been released
734 732 */
735 733 sptd->spt_realsize = 0;
736 734
737 735 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
738 736 atomic_add_long(&spt_used, -npages);
739 737 anon_swap_restore(npages);
740 738 }
741 739 }
742 740
743 741 /*
744 742 * Get memory allocation policy info for specified address in given segment
745 743 */
746 744 static lgrp_mem_policy_info_t *
747 745 segspt_getpolicy(struct seg *seg, caddr_t addr)
748 746 {
749 747 struct anon_map *amp;
750 748 ulong_t anon_index;
751 749 lgrp_mem_policy_info_t *policy_info;
752 750 struct spt_data *spt_data;
753 751
754 752 ASSERT(seg != NULL);
755 753
756 754 /*
757 755 * Get anon_map from segspt
758 756 *
759 757 * Assume that no lock needs to be held on anon_map, since
760 758 * it should be protected by its reference count which must be
761 759 * nonzero for an existing segment
762 760 * Need to grab readers lock on policy tree though
763 761 */
764 762 spt_data = (struct spt_data *)seg->s_data;
765 763 if (spt_data == NULL)
766 764 return (NULL);
767 765 amp = spt_data->spt_amp;
768 766 ASSERT(amp->refcnt != 0);
769 767
770 768 /*
771 769 * Get policy info
772 770 *
773 771 * Assume starting anon index of 0
774 772 */
775 773 anon_index = seg_page(seg, addr);
776 774 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
777 775
778 776 return (policy_info);
779 777 }
780 778
781 779 /*
782 780 * DISM only.
783 781 * Return locked pages over a given range.
784 782 *
785 783 * We will cache all DISM locked pages and save the pplist for the
786 784 * entire segment in the ppa field of the underlying DISM segment structure.
787 785 * Later, during a call to segspt_reclaim() we will use this ppa array
788 786 * to page_unlock() all of the pages and then we will free this ppa list.
789 787 */
790 788 /*ARGSUSED*/
791 789 static int
792 790 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
793 791 struct page ***ppp, enum lock_type type, enum seg_rw rw)
794 792 {
795 793 struct shm_data *shmd = (struct shm_data *)seg->s_data;
796 794 struct seg *sptseg = shmd->shm_sptseg;
797 795 struct spt_data *sptd = sptseg->s_data;
798 796 pgcnt_t pg_idx, npages, tot_npages, npgs;
799 797 struct page **pplist, **pl, **ppa, *pp;
800 798 struct anon_map *amp;
801 799 spgcnt_t an_idx;
802 800 int ret = ENOTSUP;
803 801 uint_t pl_built = 0;
804 802 struct anon *ap;
805 803 struct vnode *vp;
806 804 u_offset_t off;
807 805 pgcnt_t claim_availrmem = 0;
808 806 uint_t szc;
809 807
810 808 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
811 809 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
812 810
813 811 /*
814 812 * We want to lock/unlock the entire ISM segment. Therefore,
815 813 * we will be using the underlying sptseg and its base address
816 814 * and length for the caching arguments.
817 815 */
818 816 ASSERT(sptseg);
819 817 ASSERT(sptd);
820 818
821 819 pg_idx = seg_page(seg, addr);
822 820 npages = btopr(len);
823 821
824 822 /*
825 823 * check if the request is larger than number of pages covered
826 824 * by amp
827 825 */
828 826 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
829 827 *ppp = NULL;
830 828 return (ENOTSUP);
831 829 }
832 830
833 831 if (type == L_PAGEUNLOCK) {
834 832 ASSERT(sptd->spt_ppa != NULL);
835 833
836 834 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
837 835 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
838 836
839 837 /*
840 838 * If someone is blocked while unmapping, we purge
841 839 * segment page cache and thus reclaim pplist synchronously
842 840 * without waiting for seg_pasync_thread. This speeds up
843 841 * unmapping in cases where munmap(2) is called, while
844 842 * raw async i/o is still in progress or where a thread
845 843 * exits on data fault in a multithreaded application.
846 844 */
847 845 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
848 846 (AS_ISUNMAPWAIT(seg->s_as) &&
849 847 shmd->shm_softlockcnt > 0)) {
850 848 segspt_purge(seg);
851 849 }
852 850 return (0);
853 851 }
854 852
855 853 /* The L_PAGELOCK case ... */
856 854
857 855 if (sptd->spt_flags & DISM_PPA_CHANGED) {
858 856 segspt_purge(seg);
859 857 /*
860 858 * for DISM the ppa needs to be rebuilt since
861 859 * the number of locked pages could have changed
862 860 */
863 861 *ppp = NULL;
864 862 return (ENOTSUP);
865 863 }
866 864
867 865 /*
868 866 * First try to find pages in segment page cache, without
869 867 * holding the segment lock.
870 868 */
871 869 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
872 870 S_WRITE, SEGP_FORCE_WIRED);
873 871 if (pplist != NULL) {
874 872 ASSERT(sptd->spt_ppa != NULL);
875 873 ASSERT(sptd->spt_ppa == pplist);
876 874 ppa = sptd->spt_ppa;
877 875 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
878 876 if (ppa[an_idx] == NULL) {
879 877 seg_pinactive(seg, NULL, seg->s_base,
880 878 sptd->spt_amp->size, ppa,
881 879 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
882 880 *ppp = NULL;
883 881 return (ENOTSUP);
884 882 }
885 883 if ((szc = ppa[an_idx]->p_szc) != 0) {
886 884 npgs = page_get_pagecnt(szc);
887 885 an_idx = P2ROUNDUP(an_idx + 1, npgs);
888 886 } else {
889 887 an_idx++;
890 888 }
891 889 }
892 890 /*
893 891 * Since we cache the entire DISM segment, we want to
894 892 * set ppp to point to the first slot that corresponds
895 893 * to the requested addr, i.e. pg_idx.
896 894 */
897 895 *ppp = &(sptd->spt_ppa[pg_idx]);
898 896 return (0);
899 897 }
900 898
901 899 mutex_enter(&sptd->spt_lock);
902 900 /*
903 901 * try to find pages in segment page cache with mutex
904 902 */
905 903 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
906 904 S_WRITE, SEGP_FORCE_WIRED);
907 905 if (pplist != NULL) {
908 906 ASSERT(sptd->spt_ppa != NULL);
909 907 ASSERT(sptd->spt_ppa == pplist);
910 908 ppa = sptd->spt_ppa;
911 909 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
912 910 if (ppa[an_idx] == NULL) {
913 911 mutex_exit(&sptd->spt_lock);
914 912 seg_pinactive(seg, NULL, seg->s_base,
915 913 sptd->spt_amp->size, ppa,
916 914 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
917 915 *ppp = NULL;
918 916 return (ENOTSUP);
919 917 }
920 918 if ((szc = ppa[an_idx]->p_szc) != 0) {
921 919 npgs = page_get_pagecnt(szc);
922 920 an_idx = P2ROUNDUP(an_idx + 1, npgs);
923 921 } else {
924 922 an_idx++;
925 923 }
926 924 }
927 925 /*
928 926 * Since we cache the entire DISM segment, we want to
929 927 * set ppp to point to the first slot that corresponds
930 928 * to the requested addr, i.e. pg_idx.
931 929 */
932 930 mutex_exit(&sptd->spt_lock);
933 931 *ppp = &(sptd->spt_ppa[pg_idx]);
934 932 return (0);
935 933 }
936 934 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
937 935 SEGP_FORCE_WIRED) == SEGP_FAIL) {
938 936 mutex_exit(&sptd->spt_lock);
939 937 *ppp = NULL;
940 938 return (ENOTSUP);
941 939 }
942 940
943 941 /*
944 942 * No need to worry about protections because DISM pages are always rw.
945 943 */
946 944 pl = pplist = NULL;
947 945 amp = sptd->spt_amp;
948 946
949 947 /*
950 948 * Do we need to build the ppa array?
951 949 */
952 950 if (sptd->spt_ppa == NULL) {
953 951 pgcnt_t lpg_cnt = 0;
954 952
955 953 pl_built = 1;
956 954 tot_npages = btopr(sptd->spt_amp->size);
957 955
958 956 ASSERT(sptd->spt_pcachecnt == 0);
959 957 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
960 958 pl = pplist;
961 959
962 960 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
963 961 for (an_idx = 0; an_idx < tot_npages; ) {
964 962 ap = anon_get_ptr(amp->ahp, an_idx);
965 963 /*
966 964 * Cache only mlocked pages. For large pages
967 965 * if one (constituent) page is mlocked
968 966 * all pages for that large page
969 967 * are cached also. This is for quick
970 968 * lookups of ppa array;
971 969 */
972 970 if ((ap != NULL) && (lpg_cnt != 0 ||
973 971 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
974 972
975 973 swap_xlate(ap, &vp, &off);
976 974 pp = page_lookup(vp, off, SE_SHARED);
977 975 ASSERT(pp != NULL);
978 976 if (lpg_cnt == 0) {
979 977 lpg_cnt++;
980 978 /*
981 979 * For a small page, we are done --
982 980 * lpg_count is reset to 0 below.
983 981 *
984 982 * For a large page, we are guaranteed
985 983 * to find the anon structures of all
986 984 * constituent pages and a non-zero
987 985 * lpg_cnt ensures that we don't test
988 986 * for mlock for these. We are done
989 987 * when lpg_count reaches (npgs + 1).
990 988 * If we are not the first constituent
991 989 * page, restart at the first one.
992 990 */
993 991 npgs = page_get_pagecnt(pp->p_szc);
994 992 if (!IS_P2ALIGNED(an_idx, npgs)) {
995 993 an_idx = P2ALIGN(an_idx, npgs);
996 994 page_unlock(pp);
997 995 continue;
998 996 }
999 997 }
1000 998 if (++lpg_cnt > npgs)
1001 999 lpg_cnt = 0;
1002 1000
1003 1001 /*
1004 1002 * availrmem is decremented only
1005 1003 * for unlocked pages
1006 1004 */
1007 1005 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1008 1006 claim_availrmem++;
1009 1007 pplist[an_idx] = pp;
1010 1008 }
1011 1009 an_idx++;
1012 1010 }
1013 1011 ANON_LOCK_EXIT(&amp->a_rwlock);
1014 1012
1015 1013 if (claim_availrmem) {
1016 1014 mutex_enter(&freemem_lock);
1017 1015 if (availrmem < tune.t_minarmem + claim_availrmem) {
1018 1016 mutex_exit(&freemem_lock);
1019 1017 ret = ENOTSUP;
1020 1018 claim_availrmem = 0;
1021 1019 goto insert_fail;
1022 1020 } else {
1023 1021 availrmem -= claim_availrmem;
1024 1022 }
1025 1023 mutex_exit(&freemem_lock);
1026 1024 }
1027 1025
1028 1026 sptd->spt_ppa = pl;
1029 1027 } else {
1030 1028 /*
1031 1029 * We already have a valid ppa[].
1032 1030 */
1033 1031 pl = sptd->spt_ppa;
1034 1032 }
1035 1033
1036 1034 ASSERT(pl != NULL);
1037 1035
1038 1036 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1039 1037 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1040 1038 segspt_reclaim);
1041 1039 if (ret == SEGP_FAIL) {
1042 1040 /*
1043 1041 * seg_pinsert failed. We return
1044 1042 * ENOTSUP, so that the as_pagelock() code will
1045 1043 * then try the slower F_SOFTLOCK path.
1046 1044 */
1047 1045 if (pl_built) {
1048 1046 /*
1049 1047 * No one else has referenced the ppa[].
1050 1048 * We created it and we need to destroy it.
1051 1049 */
1052 1050 sptd->spt_ppa = NULL;
1053 1051 }
1054 1052 ret = ENOTSUP;
1055 1053 goto insert_fail;
1056 1054 }
1057 1055
1058 1056 /*
1059 1057 * In either case, we increment softlockcnt on the 'real' segment.
1060 1058 */
1061 1059 sptd->spt_pcachecnt++;
1062 1060 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1063 1061
1064 1062 ppa = sptd->spt_ppa;
1065 1063 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1066 1064 if (ppa[an_idx] == NULL) {
1067 1065 mutex_exit(&sptd->spt_lock);
1068 1066 seg_pinactive(seg, NULL, seg->s_base,
1069 1067 sptd->spt_amp->size,
1070 1068 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1071 1069 *ppp = NULL;
1072 1070 return (ENOTSUP);
1073 1071 }
1074 1072 if ((szc = ppa[an_idx]->p_szc) != 0) {
1075 1073 npgs = page_get_pagecnt(szc);
1076 1074 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1077 1075 } else {
1078 1076 an_idx++;
1079 1077 }
1080 1078 }
1081 1079 /*
1082 1080 * We can now drop the sptd->spt_lock since the ppa[]
1083 1081 * exists and he have incremented pacachecnt.
1084 1082 */
1085 1083 mutex_exit(&sptd->spt_lock);
1086 1084
1087 1085 /*
1088 1086 * Since we cache the entire segment, we want to
1089 1087 * set ppp to point to the first slot that corresponds
1090 1088 * to the requested addr, i.e. pg_idx.
1091 1089 */
1092 1090 *ppp = &(sptd->spt_ppa[pg_idx]);
1093 1091 return (0);
1094 1092
1095 1093 insert_fail:
1096 1094 /*
1097 1095 * We will only reach this code if we tried and failed.
1098 1096 *
1099 1097 * And we can drop the lock on the dummy seg, once we've failed
1100 1098 * to set up a new ppa[].
1101 1099 */
1102 1100 mutex_exit(&sptd->spt_lock);
1103 1101
1104 1102 if (pl_built) {
1105 1103 if (claim_availrmem) {
1106 1104 mutex_enter(&freemem_lock);
1107 1105 availrmem += claim_availrmem;
1108 1106 mutex_exit(&freemem_lock);
1109 1107 }
1110 1108
1111 1109 /*
1112 1110 * We created pl and we need to destroy it.
1113 1111 */
1114 1112 pplist = pl;
1115 1113 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1116 1114 if (pplist[an_idx] != NULL)
1117 1115 page_unlock(pplist[an_idx]);
1118 1116 }
1119 1117 kmem_free(pl, sizeof (page_t *) * tot_npages);
1120 1118 }
1121 1119
1122 1120 if (shmd->shm_softlockcnt <= 0) {
1123 1121 if (AS_ISUNMAPWAIT(seg->s_as)) {
1124 1122 mutex_enter(&seg->s_as->a_contents);
1125 1123 if (AS_ISUNMAPWAIT(seg->s_as)) {
1126 1124 AS_CLRUNMAPWAIT(seg->s_as);
1127 1125 cv_broadcast(&seg->s_as->a_cv);
1128 1126 }
1129 1127 mutex_exit(&seg->s_as->a_contents);
1130 1128 }
1131 1129 }
1132 1130 *ppp = NULL;
1133 1131 return (ret);
1134 1132 }
1135 1133
1136 1134
1137 1135
1138 1136 /*
1139 1137 * return locked pages over a given range.
1140 1138 *
1141 1139 * We will cache the entire ISM segment and save the pplist for the
1142 1140 * entire segment in the ppa field of the underlying ISM segment structure.
1143 1141 * Later, during a call to segspt_reclaim() we will use this ppa array
1144 1142 * to page_unlock() all of the pages and then we will free this ppa list.
1145 1143 */
1146 1144 /*ARGSUSED*/
1147 1145 static int
1148 1146 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1149 1147 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1150 1148 {
1151 1149 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1152 1150 struct seg *sptseg = shmd->shm_sptseg;
1153 1151 struct spt_data *sptd = sptseg->s_data;
1154 1152 pgcnt_t np, page_index, npages;
1155 1153 caddr_t a, spt_base;
1156 1154 struct page **pplist, **pl, *pp;
1157 1155 struct anon_map *amp;
1158 1156 ulong_t anon_index;
1159 1157 int ret = ENOTSUP;
1160 1158 uint_t pl_built = 0;
1161 1159 struct anon *ap;
1162 1160 struct vnode *vp;
1163 1161 u_offset_t off;
1164 1162
1165 1163 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1166 1164 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1167 1165
1168 1166
1169 1167 /*
1170 1168 * We want to lock/unlock the entire ISM segment. Therefore,
1171 1169 * we will be using the underlying sptseg and its base address
1172 1170 * and length for the caching arguments.
1173 1171 */
1174 1172 ASSERT(sptseg);
1175 1173 ASSERT(sptd);
1176 1174
1177 1175 if (sptd->spt_flags & SHM_PAGEABLE) {
1178 1176 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1179 1177 }
1180 1178
1181 1179 page_index = seg_page(seg, addr);
1182 1180 npages = btopr(len);
1183 1181
1184 1182 /*
1185 1183 * check if the request is larger than number of pages covered
1186 1184 * by amp
1187 1185 */
1188 1186 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1189 1187 *ppp = NULL;
1190 1188 return (ENOTSUP);
1191 1189 }
1192 1190
1193 1191 if (type == L_PAGEUNLOCK) {
1194 1192
1195 1193 ASSERT(sptd->spt_ppa != NULL);
1196 1194
1197 1195 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1198 1196 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1199 1197
1200 1198 /*
1201 1199 * If someone is blocked while unmapping, we purge
1202 1200 * segment page cache and thus reclaim pplist synchronously
1203 1201 * without waiting for seg_pasync_thread. This speeds up
1204 1202 * unmapping in cases where munmap(2) is called, while
1205 1203 * raw async i/o is still in progress or where a thread
1206 1204 * exits on data fault in a multithreaded application.
1207 1205 */
1208 1206 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1209 1207 segspt_purge(seg);
1210 1208 }
1211 1209 return (0);
1212 1210 }
1213 1211
1214 1212 /* The L_PAGELOCK case... */
1215 1213
1216 1214 /*
1217 1215 * First try to find pages in segment page cache, without
1218 1216 * holding the segment lock.
1219 1217 */
1220 1218 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1221 1219 S_WRITE, SEGP_FORCE_WIRED);
1222 1220 if (pplist != NULL) {
1223 1221 ASSERT(sptd->spt_ppa == pplist);
1224 1222 ASSERT(sptd->spt_ppa[page_index]);
1225 1223 /*
1226 1224 * Since we cache the entire ISM segment, we want to
1227 1225 * set ppp to point to the first slot that corresponds
1228 1226 * to the requested addr, i.e. page_index.
1229 1227 */
1230 1228 *ppp = &(sptd->spt_ppa[page_index]);
1231 1229 return (0);
1232 1230 }
1233 1231
1234 1232 mutex_enter(&sptd->spt_lock);
1235 1233
1236 1234 /*
1237 1235 * try to find pages in segment page cache
1238 1236 */
1239 1237 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1240 1238 S_WRITE, SEGP_FORCE_WIRED);
1241 1239 if (pplist != NULL) {
1242 1240 ASSERT(sptd->spt_ppa == pplist);
1243 1241 /*
1244 1242 * Since we cache the entire segment, we want to
1245 1243 * set ppp to point to the first slot that corresponds
1246 1244 * to the requested addr, i.e. page_index.
1247 1245 */
1248 1246 mutex_exit(&sptd->spt_lock);
1249 1247 *ppp = &(sptd->spt_ppa[page_index]);
1250 1248 return (0);
1251 1249 }
1252 1250
1253 1251 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1254 1252 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1255 1253 mutex_exit(&sptd->spt_lock);
1256 1254 *ppp = NULL;
1257 1255 return (ENOTSUP);
1258 1256 }
1259 1257
1260 1258 /*
1261 1259 * No need to worry about protections because ISM pages
1262 1260 * are always rw.
1263 1261 */
1264 1262 pl = pplist = NULL;
1265 1263
1266 1264 /*
1267 1265 * Do we need to build the ppa array?
1268 1266 */
1269 1267 if (sptd->spt_ppa == NULL) {
1270 1268 ASSERT(sptd->spt_ppa == pplist);
1271 1269
1272 1270 spt_base = sptseg->s_base;
1273 1271 pl_built = 1;
1274 1272
1275 1273 /*
1276 1274 * availrmem is decremented once during anon_swap_adjust()
1277 1275 * and is incremented during the anon_unresv(), which is
1278 1276 * called from shm_rm_amp() when the segment is destroyed.
1279 1277 */
1280 1278 amp = sptd->spt_amp;
1281 1279 ASSERT(amp != NULL);
1282 1280
1283 1281 /* pcachecnt is protected by sptd->spt_lock */
1284 1282 ASSERT(sptd->spt_pcachecnt == 0);
1285 1283 pplist = kmem_zalloc(sizeof (page_t *)
1286 1284 * btopr(sptd->spt_amp->size), KM_SLEEP);
1287 1285 pl = pplist;
1288 1286
1289 1287 anon_index = seg_page(sptseg, spt_base);
1290 1288
1291 1289 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1292 1290 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1293 1291 a += PAGESIZE, anon_index++, pplist++) {
1294 1292 ap = anon_get_ptr(amp->ahp, anon_index);
1295 1293 ASSERT(ap != NULL);
1296 1294 swap_xlate(ap, &vp, &off);
1297 1295 pp = page_lookup(vp, off, SE_SHARED);
1298 1296 ASSERT(pp != NULL);
1299 1297 *pplist = pp;
1300 1298 }
1301 1299 ANON_LOCK_EXIT(&amp->a_rwlock);
1302 1300
1303 1301 if (a < (spt_base + sptd->spt_amp->size)) {
1304 1302 ret = ENOTSUP;
1305 1303 goto insert_fail;
1306 1304 }
1307 1305 sptd->spt_ppa = pl;
1308 1306 } else {
1309 1307 /*
1310 1308 * We already have a valid ppa[].
1311 1309 */
1312 1310 pl = sptd->spt_ppa;
1313 1311 }
1314 1312
1315 1313 ASSERT(pl != NULL);
1316 1314
1317 1315 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1318 1316 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1319 1317 segspt_reclaim);
1320 1318 if (ret == SEGP_FAIL) {
1321 1319 /*
1322 1320 * seg_pinsert failed. We return
1323 1321 * ENOTSUP, so that the as_pagelock() code will
1324 1322 * then try the slower F_SOFTLOCK path.
1325 1323 */
1326 1324 if (pl_built) {
1327 1325 /*
1328 1326 * No one else has referenced the ppa[].
1329 1327 * We created it and we need to destroy it.
1330 1328 */
1331 1329 sptd->spt_ppa = NULL;
1332 1330 }
1333 1331 ret = ENOTSUP;
1334 1332 goto insert_fail;
1335 1333 }
1336 1334
1337 1335 /*
1338 1336 * In either case, we increment softlockcnt on the 'real' segment.
1339 1337 */
1340 1338 sptd->spt_pcachecnt++;
1341 1339 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1342 1340
1343 1341 /*
1344 1342 * We can now drop the sptd->spt_lock since the ppa[]
1345 1343 * exists and we have incremented pcachecnt.
1346 1344 */
1347 1345 mutex_exit(&sptd->spt_lock);
1348 1346
1349 1347 /*
1350 1348 * Since we cache the entire segment, we want to
1351 1349 * set ppp to point to the first slot that corresponds
1352 1350 * to the requested addr, i.e. page_index.
1353 1351 */
1354 1352 *ppp = &(sptd->spt_ppa[page_index]);
1355 1353 return (0);
1356 1354
1357 1355 insert_fail:
1358 1356 /*
1359 1357 * We will only reach this code if we tried and failed.
1360 1358 *
1361 1359 * And we can drop the lock on the dummy seg, once we've failed
1362 1360 * to set up a new ppa[].
1363 1361 */
1364 1362 mutex_exit(&sptd->spt_lock);
1365 1363
1366 1364 if (pl_built) {
1367 1365 /*
1368 1366 * We created pl and we need to destroy it.
1369 1367 */
1370 1368 pplist = pl;
1371 1369 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1372 1370 while (np) {
1373 1371 page_unlock(*pplist);
1374 1372 np--;
1375 1373 pplist++;
1376 1374 }
1377 1375 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1378 1376 }
1379 1377 if (shmd->shm_softlockcnt <= 0) {
1380 1378 if (AS_ISUNMAPWAIT(seg->s_as)) {
1381 1379 mutex_enter(&seg->s_as->a_contents);
1382 1380 if (AS_ISUNMAPWAIT(seg->s_as)) {
1383 1381 AS_CLRUNMAPWAIT(seg->s_as);
1384 1382 cv_broadcast(&seg->s_as->a_cv);
1385 1383 }
1386 1384 mutex_exit(&seg->s_as->a_contents);
1387 1385 }
1388 1386 }
1389 1387 *ppp = NULL;
1390 1388 return (ret);
1391 1389 }
1392 1390
1393 1391 /*
1394 1392 * purge any cached pages in the I/O page cache
1395 1393 */
1396 1394 static void
1397 1395 segspt_purge(struct seg *seg)
1398 1396 {
1399 1397 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1400 1398 }
1401 1399
1402 1400 static int
1403 1401 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1404 1402 enum seg_rw rw, int async)
1405 1403 {
1406 1404 struct seg *seg = (struct seg *)ptag;
1407 1405 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1408 1406 struct seg *sptseg;
1409 1407 struct spt_data *sptd;
1410 1408 pgcnt_t npages, i, free_availrmem = 0;
1411 1409 int done = 0;
1412 1410
1413 1411 #ifdef lint
1414 1412 addr = addr;
1415 1413 #endif
1416 1414 sptseg = shmd->shm_sptseg;
1417 1415 sptd = sptseg->s_data;
1418 1416 npages = (len >> PAGESHIFT);
1419 1417 ASSERT(npages);
1420 1418 ASSERT(sptd->spt_pcachecnt != 0);
1421 1419 ASSERT(sptd->spt_ppa == pplist);
1422 1420 ASSERT(npages == btopr(sptd->spt_amp->size));
1423 1421 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1424 1422
1425 1423 /*
1426 1424 * Acquire the lock on the dummy seg and destroy the
1427 1425 * ppa array IF this is the last pcachecnt.
1428 1426 */
1429 1427 mutex_enter(&sptd->spt_lock);
1430 1428 if (--sptd->spt_pcachecnt == 0) {
1431 1429 for (i = 0; i < npages; i++) {
1432 1430 if (pplist[i] == NULL) {
1433 1431 continue;
1434 1432 }
1435 1433 if (rw == S_WRITE) {
1436 1434 hat_setrefmod(pplist[i]);
1437 1435 } else {
1438 1436 hat_setref(pplist[i]);
1439 1437 }
1440 1438 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1441 1439 (sptd->spt_ppa_lckcnt[i] == 0))
1442 1440 free_availrmem++;
1443 1441 page_unlock(pplist[i]);
1444 1442 }
1445 1443 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1446 1444 mutex_enter(&freemem_lock);
1447 1445 availrmem += free_availrmem;
1448 1446 mutex_exit(&freemem_lock);
1449 1447 }
1450 1448 /*
1451 1449 * Since we want to cache/uncache the entire ISM segment,
1452 1450 * we will track the pplist in a segspt specific field
1453 1451 * ppa, that is initialized at the time we add an entry to
1454 1452 * the cache.
1455 1453 */
1456 1454 ASSERT(sptd->spt_pcachecnt == 0);
1457 1455 kmem_free(pplist, sizeof (page_t *) * npages);
1458 1456 sptd->spt_ppa = NULL;
1459 1457 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1460 1458 sptd->spt_gen++;
1461 1459 cv_broadcast(&sptd->spt_cv);
1462 1460 done = 1;
1463 1461 }
1464 1462 mutex_exit(&sptd->spt_lock);
1465 1463
1466 1464 /*
1467 1465 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1468 1466 * may not hold AS lock (in this case async argument is not 0). This
1469 1467 * means if softlockcnt drops to 0 after the decrement below address
1470 1468 * space may get freed. We can't allow it since after softlock
1471 1469 * derement to 0 we still need to access as structure for possible
1472 1470 * wakeup of unmap waiters. To prevent the disappearance of as we take
1473 1471 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1474 1472 * this mutex as a barrier to make sure this routine completes before
1475 1473 * segment is freed.
1476 1474 *
1477 1475 * The second complication we have to deal with in async case is a
1478 1476 * possibility of missed wake up of unmap wait thread. When we don't
1479 1477 * hold as lock here we may take a_contents lock before unmap wait
1480 1478 * thread that was first to see softlockcnt was still not 0. As a
1481 1479 * result we'll fail to wake up an unmap wait thread. To avoid this
1482 1480 * race we set nounmapwait flag in as structure if we drop softlockcnt
1483 1481 * to 0 if async is not 0. unmapwait thread
1484 1482 * will not block if this flag is set.
1485 1483 */
1486 1484 if (async)
1487 1485 mutex_enter(&shmd->shm_segfree_syncmtx);
1488 1486
1489 1487 /*
1490 1488 * Now decrement softlockcnt.
1491 1489 */
1492 1490 ASSERT(shmd->shm_softlockcnt > 0);
1493 1491 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1494 1492
1495 1493 if (shmd->shm_softlockcnt <= 0) {
1496 1494 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1497 1495 mutex_enter(&seg->s_as->a_contents);
1498 1496 if (async)
1499 1497 AS_SETNOUNMAPWAIT(seg->s_as);
1500 1498 if (AS_ISUNMAPWAIT(seg->s_as)) {
1501 1499 AS_CLRUNMAPWAIT(seg->s_as);
1502 1500 cv_broadcast(&seg->s_as->a_cv);
1503 1501 }
1504 1502 mutex_exit(&seg->s_as->a_contents);
1505 1503 }
1506 1504 }
1507 1505
1508 1506 if (async)
1509 1507 mutex_exit(&shmd->shm_segfree_syncmtx);
1510 1508
1511 1509 return (done);
1512 1510 }
1513 1511
1514 1512 /*
1515 1513 * Do a F_SOFTUNLOCK call over the range requested.
1516 1514 * The range must have already been F_SOFTLOCK'ed.
1517 1515 *
1518 1516 * The calls to acquire and release the anon map lock mutex were
1519 1517 * removed in order to avoid a deadly embrace during a DR
1520 1518 * memory delete operation. (Eg. DR blocks while waiting for an
1521 1519 * exclusive lock on a page that is being used for kaio; the
1522 1520 * thread that will complete the kaio and call segspt_softunlock
1523 1521 * blocks on the anon map lock; another thread holding the anon
1524 1522 * map lock blocks on another page lock via the segspt_shmfault
1525 1523 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1526 1524 *
1527 1525 * The appropriateness of the removal is based upon the following:
1528 1526 * 1. If we are holding a segment's reader lock and the page is held
1529 1527 * shared, then the corresponding element in anonmap which points to
1530 1528 * anon struct cannot change and there is no need to acquire the
1531 1529 * anonymous map lock.
1532 1530 * 2. Threads in segspt_softunlock have a reader lock on the segment
1533 1531 * and already have the shared page lock, so we are guaranteed that
1534 1532 * the anon map slot cannot change and therefore can call anon_get_ptr()
1535 1533 * without grabbing the anonymous map lock.
1536 1534 * 3. Threads that softlock a shared page break copy-on-write, even if
1537 1535 * its a read. Thus cow faults can be ignored with respect to soft
1538 1536 * unlocking, since the breaking of cow means that the anon slot(s) will
1539 1537 * not be shared.
1540 1538 */
1541 1539 static void
1542 1540 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1543 1541 size_t len, enum seg_rw rw)
1544 1542 {
1545 1543 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1546 1544 struct seg *sptseg;
1547 1545 struct spt_data *sptd;
1548 1546 page_t *pp;
1549 1547 caddr_t adr;
1550 1548 struct vnode *vp;
1551 1549 u_offset_t offset;
1552 1550 ulong_t anon_index;
1553 1551 struct anon_map *amp; /* XXX - for locknest */
1554 1552 struct anon *ap = NULL;
1555 1553 pgcnt_t npages;
1556 1554
1557 1555 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1558 1556
1559 1557 sptseg = shmd->shm_sptseg;
1560 1558 sptd = sptseg->s_data;
1561 1559
1562 1560 /*
1563 1561 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1564 1562 * and therefore their pages are SE_SHARED locked
1565 1563 * for the entire life of the segment.
1566 1564 */
1567 1565 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1568 1566 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1569 1567 goto softlock_decrement;
1570 1568 }
1571 1569
1572 1570 /*
1573 1571 * Any thread is free to do a page_find and
1574 1572 * page_unlock() on the pages within this seg.
1575 1573 *
1576 1574 * We are already holding the as->a_lock on the user's
1577 1575 * real segment, but we need to hold the a_lock on the
1578 1576 * underlying dummy as. This is mostly to satisfy the
1579 1577 * underlying HAT layer.
1580 1578 */
1581 1579 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1582 1580 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1583 1581 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1584 1582
1585 1583 amp = sptd->spt_amp;
1586 1584 ASSERT(amp != NULL);
1587 1585 anon_index = seg_page(sptseg, sptseg_addr);
1588 1586
1589 1587 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1590 1588 ap = anon_get_ptr(amp->ahp, anon_index++);
1591 1589 ASSERT(ap != NULL);
1592 1590 swap_xlate(ap, &vp, &offset);
1593 1591
1594 1592 /*
1595 1593 * Use page_find() instead of page_lookup() to
1596 1594 * find the page since we know that it has a
1597 1595 * "shared" lock.
1598 1596 */
1599 1597 pp = page_find(vp, offset);
1600 1598 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1601 1599 if (pp == NULL) {
1602 1600 panic("segspt_softunlock: "
1603 1601 "addr %p, ap %p, vp %p, off %llx",
1604 1602 (void *)adr, (void *)ap, (void *)vp, offset);
1605 1603 /*NOTREACHED*/
1606 1604 }
1607 1605
1608 1606 if (rw == S_WRITE) {
1609 1607 hat_setrefmod(pp);
1610 1608 } else if (rw != S_OTHER) {
1611 1609 hat_setref(pp);
1612 1610 }
1613 1611 page_unlock(pp);
1614 1612 }
1615 1613
1616 1614 softlock_decrement:
1617 1615 npages = btopr(len);
1618 1616 ASSERT(shmd->shm_softlockcnt >= npages);
1619 1617 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1620 1618 if (shmd->shm_softlockcnt == 0) {
1621 1619 /*
1622 1620 * All SOFTLOCKS are gone. Wakeup any waiting
1623 1621 * unmappers so they can try again to unmap.
1624 1622 * Check for waiters first without the mutex
1625 1623 * held so we don't always grab the mutex on
1626 1624 * softunlocks.
1627 1625 */
1628 1626 if (AS_ISUNMAPWAIT(seg->s_as)) {
1629 1627 mutex_enter(&seg->s_as->a_contents);
1630 1628 if (AS_ISUNMAPWAIT(seg->s_as)) {
1631 1629 AS_CLRUNMAPWAIT(seg->s_as);
1632 1630 cv_broadcast(&seg->s_as->a_cv);
1633 1631 }
1634 1632 mutex_exit(&seg->s_as->a_contents);
1635 1633 }
1636 1634 }
1637 1635 }
1638 1636
1639 1637 int
1640 1638 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1641 1639 {
1642 1640 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1643 1641 struct shm_data *shmd;
1644 1642 struct anon_map *shm_amp = shmd_arg->shm_amp;
1645 1643 struct spt_data *sptd;
1646 1644 int error = 0;
1647 1645
1648 1646 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1649 1647
1650 1648 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1651 1649 if (shmd == NULL)
1652 1650 return (ENOMEM);
1653 1651
1654 1652 shmd->shm_sptas = shmd_arg->shm_sptas;
1655 1653 shmd->shm_amp = shm_amp;
1656 1654 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1657 1655
1658 1656 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1659 1657 NULL, 0, seg->s_size);
1660 1658
1661 1659 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1662 1660
1663 1661 seg->s_data = (void *)shmd;
1664 1662 seg->s_ops = &segspt_shmops;
1665 1663 seg->s_szc = shmd->shm_sptseg->s_szc;
1666 1664 sptd = shmd->shm_sptseg->s_data;
1667 1665
1668 1666 if (sptd->spt_flags & SHM_PAGEABLE) {
1669 1667 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1670 1668 KM_NOSLEEP)) == NULL) {
1671 1669 seg->s_data = (void *)NULL;
1672 1670 kmem_free(shmd, (sizeof (*shmd)));
1673 1671 return (ENOMEM);
1674 1672 }
1675 1673 shmd->shm_lckpgs = 0;
1676 1674 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1677 1675 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1678 1676 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1679 1677 seg->s_size, seg->s_szc)) != 0) {
1680 1678 kmem_free(shmd->shm_vpage,
1681 1679 btopr(shm_amp->size));
1682 1680 }
1683 1681 }
1684 1682 } else {
1685 1683 error = hat_share(seg->s_as->a_hat, seg->s_base,
1686 1684 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1687 1685 seg->s_size, seg->s_szc);
1688 1686 }
1689 1687 if (error) {
1690 1688 seg->s_szc = 0;
1691 1689 seg->s_data = (void *)NULL;
1692 1690 kmem_free(shmd, (sizeof (*shmd)));
1693 1691 } else {
1694 1692 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1695 1693 shm_amp->refcnt++;
1696 1694 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1697 1695 }
1698 1696 return (error);
1699 1697 }
1700 1698
1701 1699 int
1702 1700 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1703 1701 {
1704 1702 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1705 1703 int reclaim = 1;
1706 1704
1707 1705 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1708 1706 retry:
1709 1707 if (shmd->shm_softlockcnt > 0) {
1710 1708 if (reclaim == 1) {
1711 1709 segspt_purge(seg);
1712 1710 reclaim = 0;
1713 1711 goto retry;
1714 1712 }
1715 1713 return (EAGAIN);
1716 1714 }
1717 1715
1718 1716 if (ssize != seg->s_size) {
1719 1717 #ifdef DEBUG
1720 1718 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1721 1719 ssize, seg->s_size);
1722 1720 #endif
1723 1721 return (EINVAL);
1724 1722 }
1725 1723
1726 1724 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1727 1725 NULL, 0);
1728 1726 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1729 1727
1730 1728 seg_free(seg);
1731 1729
1732 1730 return (0);
1733 1731 }
1734 1732
1735 1733 void
1736 1734 segspt_shmfree(struct seg *seg)
1737 1735 {
1738 1736 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1739 1737 struct anon_map *shm_amp = shmd->shm_amp;
1740 1738
1741 1739 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1742 1740
1743 1741 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1744 1742 MC_UNLOCK, NULL, 0);
1745 1743
1746 1744 /*
1747 1745 * Need to increment refcnt when attaching
1748 1746 * and decrement when detaching because of dup().
1749 1747 */
1750 1748 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1751 1749 shm_amp->refcnt--;
1752 1750 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1753 1751
1754 1752 if (shmd->shm_vpage) { /* only for DISM */
1755 1753 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1756 1754 shmd->shm_vpage = NULL;
1757 1755 }
1758 1756
1759 1757 /*
1760 1758 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1761 1759 * still working with this segment without holding as lock.
1762 1760 */
1763 1761 ASSERT(shmd->shm_softlockcnt == 0);
1764 1762 mutex_enter(&shmd->shm_segfree_syncmtx);
1765 1763 mutex_destroy(&shmd->shm_segfree_syncmtx);
1766 1764
1767 1765 kmem_free(shmd, sizeof (*shmd));
1768 1766 }
1769 1767
1770 1768 /*ARGSUSED*/
1771 1769 int
1772 1770 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1773 1771 {
1774 1772 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1775 1773
1776 1774 /*
1777 1775 * Shared page table is more than shared mapping.
1778 1776 	 * An individual process sharing the page tables can't change prot
1779 1777 * because there is only one set of page tables.
1780 1778 * This will be allowed after private page table is
1781 1779 * supported.
1782 1780 */
1783 1781 /* need to return correct status error? */
1784 1782 return (0);
1785 1783 }
1786 1784
1787 1785
1788 1786 faultcode_t
1789 1787 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1790 1788 size_t len, enum fault_type type, enum seg_rw rw)
1791 1789 {
1792 1790 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1793 1791 struct seg *sptseg = shmd->shm_sptseg;
1794 1792 struct as *curspt = shmd->shm_sptas;
1795 1793 struct spt_data *sptd = sptseg->s_data;
1796 1794 pgcnt_t npages;
1797 1795 size_t size;
1798 1796 caddr_t segspt_addr, shm_addr;
1799 1797 page_t **ppa;
1800 1798 int i;
1801 1799 ulong_t an_idx = 0;
1802 1800 int err = 0;
1803 1801 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1804 1802 size_t pgsz;
1805 1803 pgcnt_t pgcnt;
1806 1804 caddr_t a;
1807 1805 pgcnt_t pidx;
1808 1806
1809 1807 #ifdef lint
1810 1808 hat = hat;
1811 1809 #endif
1812 1810 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1813 1811
1814 1812 /*
1815 1813 * Because of the way spt is implemented
1816 1814 * the realsize of the segment does not have to be
1817 1815 * equal to the segment size itself. The segment size is
1818 1816 * often in multiples of a page size larger than PAGESIZE.
1819 1817 * The realsize is rounded up to the nearest PAGESIZE
1820 1818 * based on what the user requested. This is a bit of
1821 1819 	 * ugliness that is historical but not easily fixed
1822 1820 * without re-designing the higher levels of ISM.
1823 1821 */
1824 1822 ASSERT(addr >= seg->s_base);
1825 1823 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1826 1824 return (FC_NOMAP);
1827 1825 /*
1828 1826 * For all of the following cases except F_PROT, we need to
1829 1827 * make any necessary adjustments to addr and len
1830 1828 * and get all of the necessary page_t's into an array called ppa[].
1831 1829 *
1832 1830 * The code in shmat() forces base addr and len of ISM segment
1833 1831 * to be aligned to largest page size supported. Therefore,
1834 1832 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1835 1833 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1836 1834 * in large pagesize chunks, or else we will screw up the HAT
1837 1835 * layer by calling hat_memload_array() with differing page sizes
1838 1836 * over a given virtual range.
1839 1837 */
1840 1838 pgsz = page_get_pagesize(sptseg->s_szc);
1841 1839 pgcnt = page_get_pagecnt(sptseg->s_szc);
1842 1840 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1843 1841 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1844 1842 npages = btopr(size);
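	/*
	 * Editor's illustration (not part of the original source; the page
	 * sizes below are assumed, not taken from the file): with an 8K
	 * base page and a 4M SPT page size, pgsz == 4M and pgcnt == 512.
	 * A fault at addr == seg->s_base + 5M with len == 8K then gives
	 *
	 *	shm_addr = P2ALIGN(addr, 4M)      = seg->s_base + 4M
	 *	size     = P2ROUNDUP(1M + 8K, 4M) = 4M
	 *	npages   = btopr(4M)              = 512
	 *
	 * so the fault is widened to exactly one large page, and every
	 * hat_memload_array() call below sees a uniformly sized chunk.
	 */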
1845 1843
1846 1844 /*
1847 1845 * Now we need to convert from addr in segshm to addr in segspt.
1848 1846 */
1849 1847 an_idx = seg_page(seg, shm_addr);
1850 1848 segspt_addr = sptseg->s_base + ptob(an_idx);
1851 1849
1852 1850 ASSERT((segspt_addr + ptob(npages)) <=
1853 1851 (sptseg->s_base + sptd->spt_realsize));
1854 1852 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1855 1853
1856 1854 switch (type) {
1857 1855
1858 1856 case F_SOFTLOCK:
1859 1857
1860 1858 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1861 1859 /*
1862 1860 * Fall through to the F_INVAL case to load up the hat layer
1863 1861 * entries with the HAT_LOAD_LOCK flag.
1864 1862 */
1865 1863 /* FALLTHRU */
1866 1864 case F_INVAL:
1867 1865
1868 1866 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1869 1867 return (FC_NOMAP);
1870 1868
1871 1869 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1872 1870
1873 1871 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1874 1872 if (err != 0) {
1875 1873 if (type == F_SOFTLOCK) {
1876 1874 atomic_add_long((ulong_t *)(
1877 1875 &(shmd->shm_softlockcnt)), -npages);
1878 1876 }
1879 1877 goto dism_err;
1880 1878 }
1881 1879 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1882 1880 a = segspt_addr;
1883 1881 pidx = 0;
1884 1882 if (type == F_SOFTLOCK) {
1885 1883
1886 1884 /*
1887 1885 * Load up the translation keeping it
1888 1886 * locked and don't unlock the page.
1889 1887 */
1890 1888 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1891 1889 hat_memload_array(sptseg->s_as->a_hat,
1892 1890 a, pgsz, &ppa[pidx], sptd->spt_prot,
1893 1891 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1894 1892 }
1895 1893 } else {
1896 1894 /*
1897 1895 * Migrate pages marked for migration
1898 1896 */
1899 1897 if (lgrp_optimizations())
1900 1898 page_migrate(seg, shm_addr, ppa, npages);
1901 1899
1902 1900 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1903 1901 hat_memload_array(sptseg->s_as->a_hat,
1904 1902 a, pgsz, &ppa[pidx],
1905 1903 sptd->spt_prot,
1906 1904 HAT_LOAD_SHARE);
1907 1905 }
1908 1906
1909 1907 /*
1910 1908 * And now drop the SE_SHARED lock(s).
1911 1909 */
1912 1910 if (dyn_ism_unmap) {
1913 1911 for (i = 0; i < npages; i++) {
1914 1912 page_unlock(ppa[i]);
1915 1913 }
1916 1914 }
1917 1915 }
1918 1916
1919 1917 if (!dyn_ism_unmap) {
1920 1918 if (hat_share(seg->s_as->a_hat, shm_addr,
1921 1919 curspt->a_hat, segspt_addr, ptob(npages),
1922 1920 seg->s_szc) != 0) {
1923 1921 panic("hat_share err in DISM fault");
1924 1922 /* NOTREACHED */
1925 1923 }
1926 1924 if (type == F_INVAL) {
1927 1925 for (i = 0; i < npages; i++) {
1928 1926 page_unlock(ppa[i]);
1929 1927 }
1930 1928 }
1931 1929 }
1932 1930 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1933 1931 dism_err:
1934 1932 kmem_free(ppa, npages * sizeof (page_t *));
1935 1933 return (err);
1936 1934
1937 1935 case F_SOFTUNLOCK:
1938 1936
1939 1937 /*
1940 1938 * This is a bit ugly, we pass in the real seg pointer,
1941 1939 * but the segspt_addr is the virtual address within the
1942 1940 * dummy seg.
1943 1941 */
1944 1942 segspt_softunlock(seg, segspt_addr, size, rw);
1945 1943 return (0);
1946 1944
1947 1945 case F_PROT:
1948 1946
1949 1947 /*
1950 1948 * This takes care of the unusual case where a user
1951 1949 * allocates a stack in shared memory and a register
1952 1950 * window overflow is written to that stack page before
1953 1951 * it is otherwise modified.
1954 1952 *
1955 1953 * We can get away with this because ISM segments are
1956 1954 * always rw. Other than this unusual case, there
1957 1955 * should be no instances of protection violations.
1958 1956 */
1959 1957 return (0);
1960 1958
1961 1959 default:
1962 1960 #ifdef DEBUG
1963 1961 panic("segspt_dismfault default type?");
1964 1962 #else
1965 1963 return (FC_NOMAP);
1966 1964 #endif
1967 1965 }
1968 1966 }
1969 1967
1970 1968
1971 1969 faultcode_t
1972 1970 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1973 1971 size_t len, enum fault_type type, enum seg_rw rw)
1974 1972 {
1975 1973 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1976 1974 struct seg *sptseg = shmd->shm_sptseg;
1977 1975 struct as *curspt = shmd->shm_sptas;
1978 1976 struct spt_data *sptd = sptseg->s_data;
1979 1977 pgcnt_t npages;
1980 1978 size_t size;
1981 1979 caddr_t sptseg_addr, shm_addr;
1982 1980 page_t *pp, **ppa;
1983 1981 int i;
1984 1982 u_offset_t offset;
1985 1983 ulong_t anon_index = 0;
1986 1984 struct vnode *vp;
1987 1985 struct anon_map *amp; /* XXX - for locknest */
1988 1986 struct anon *ap = NULL;
1989 1987 size_t pgsz;
1990 1988 pgcnt_t pgcnt;
1991 1989 caddr_t a;
1992 1990 pgcnt_t pidx;
1993 1991 size_t sz;
1994 1992
1995 1993 #ifdef lint
1996 1994 hat = hat;
1997 1995 #endif
1998 1996
1999 1997 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2000 1998
2001 1999 if (sptd->spt_flags & SHM_PAGEABLE) {
2002 2000 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2003 2001 }
2004 2002
2005 2003 /*
2006 2004 * Because of the way spt is implemented
2007 2005 * the realsize of the segment does not have to be
2008 2006 * equal to the segment size itself. The segment size is
2009 2007 * often in multiples of a page size larger than PAGESIZE.
2010 2008 * The realsize is rounded up to the nearest PAGESIZE
2011 2009 * based on what the user requested. This is a bit of
2012 2010 	 * ugliness that is historical but not easily fixed
2013 2011 * without re-designing the higher levels of ISM.
2014 2012 */
2015 2013 ASSERT(addr >= seg->s_base);
2016 2014 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2017 2015 return (FC_NOMAP);
2018 2016 /*
2019 2017 * For all of the following cases except F_PROT, we need to
2020 2018 * make any necessary adjustments to addr and len
2021 2019 * and get all of the necessary page_t's into an array called ppa[].
2022 2020 *
2023 2021 * The code in shmat() forces base addr and len of ISM segment
2024 2022 * to be aligned to largest page size supported. Therefore,
2025 2023 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2026 2024 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2027 2025 * in large pagesize chunks, or else we will screw up the HAT
2028 2026 * layer by calling hat_memload_array() with differing page sizes
2029 2027 * over a given virtual range.
2030 2028 */
2031 2029 pgsz = page_get_pagesize(sptseg->s_szc);
2032 2030 pgcnt = page_get_pagecnt(sptseg->s_szc);
2033 2031 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2034 2032 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2035 2033 npages = btopr(size);
2036 2034
2037 2035 /*
2038 2036 * Now we need to convert from addr in segshm to addr in segspt.
2039 2037 */
2040 2038 anon_index = seg_page(seg, shm_addr);
2041 2039 sptseg_addr = sptseg->s_base + ptob(anon_index);
2042 2040
2043 2041 /*
2044 2042 * And now we may have to adjust npages downward if we have
2045 2043 * exceeded the realsize of the segment or initial anon
2046 2044 * allocations.
2047 2045 */
2048 2046 if ((sptseg_addr + ptob(npages)) >
2049 2047 (sptseg->s_base + sptd->spt_realsize))
2050 2048 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2051 2049
2052 2050 npages = btopr(size);
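	/*
	 * Editor's illustration (not part of the original source; sizes
	 * assumed): continuing the 8K base page / 4M large page example,
	 * suppose spt_realsize == 6M and the aligned request starts at
	 * sptseg_addr == sptseg->s_base + 4M with size == 4M.  That would
	 * end at s_base + 8M, past realsize, so the clamp above shrinks it
	 * to size == 2M and npages == 256; only base pages that actually
	 * back the segment are processed.
	 */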
2053 2051
2054 2052 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2055 2053 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2056 2054
2057 2055 switch (type) {
2058 2056
2059 2057 case F_SOFTLOCK:
2060 2058
2061 2059 /*
2062 2060 * availrmem is decremented once during anon_swap_adjust()
2063 2061 * and is incremented during the anon_unresv(), which is
2064 2062 * called from shm_rm_amp() when the segment is destroyed.
2065 2063 */
2066 2064 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2067 2065 /*
2068 2066 * Some platforms assume that ISM pages are SE_SHARED
2069 2067 * locked for the entire life of the segment.
2070 2068 */
2071 2069 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2072 2070 return (0);
2073 2071 /*
2074 2072 * Fall through to the F_INVAL case to load up the hat layer
2075 2073 * entries with the HAT_LOAD_LOCK flag.
2076 2074 */
2077 2075
2078 2076 /* FALLTHRU */
2079 2077 case F_INVAL:
2080 2078
2081 2079 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2082 2080 return (FC_NOMAP);
2083 2081
2084 2082 /*
2085 2083 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2086 2084 * may still rely on this call to hat_share(). That
2087 2085 	 * would imply that those hats can fault on a
2088 2086 * HAT_LOAD_LOCK translation, which would seem
2089 2087 * contradictory.
2090 2088 */
2091 2089 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2092 2090 if (hat_share(seg->s_as->a_hat, seg->s_base,
2093 2091 curspt->a_hat, sptseg->s_base,
2094 2092 sptseg->s_size, sptseg->s_szc) != 0) {
2095 2093 panic("hat_share error in ISM fault");
2096 2094 /*NOTREACHED*/
2097 2095 }
2098 2096 return (0);
2099 2097 }
2100 2098 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2101 2099
2102 2100 /*
2103 2101 	 * I see no need to lock the real seg here,
2104 2102 	 * because all of our work will be on the underlying
2105 2103 * dummy seg.
2106 2104 *
2107 2105 * sptseg_addr and npages now account for large pages.
2108 2106 */
2109 2107 amp = sptd->spt_amp;
2110 2108 ASSERT(amp != NULL);
2111 2109 anon_index = seg_page(sptseg, sptseg_addr);
2112 2110
2113 2111 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2114 2112 for (i = 0; i < npages; i++) {
2115 2113 ap = anon_get_ptr(amp->ahp, anon_index++);
2116 2114 ASSERT(ap != NULL);
2117 2115 swap_xlate(ap, &vp, &offset);
2118 2116 pp = page_lookup(vp, offset, SE_SHARED);
2119 2117 ASSERT(pp != NULL);
2120 2118 ppa[i] = pp;
2121 2119 }
2122 2120 		ANON_LOCK_EXIT(&amp->a_rwlock);
2123 2121 ASSERT(i == npages);
2124 2122
2125 2123 /*
2126 2124 * We are already holding the as->a_lock on the user's
2127 2125 * real segment, but we need to hold the a_lock on the
2128 2126 * underlying dummy as. This is mostly to satisfy the
2129 2127 * underlying HAT layer.
2130 2128 */
2131 2129 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2132 2130 a = sptseg_addr;
2133 2131 pidx = 0;
2134 2132 if (type == F_SOFTLOCK) {
2135 2133 /*
2136 2134 * Load up the translation keeping it
2137 2135 * locked and don't unlock the page.
2138 2136 */
2139 2137 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2140 2138 sz = MIN(pgsz, ptob(npages - pidx));
2141 2139 hat_memload_array(sptseg->s_as->a_hat, a,
2142 2140 sz, &ppa[pidx], sptd->spt_prot,
2143 2141 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2144 2142 }
2145 2143 } else {
2146 2144 /*
2147 2145 * Migrate pages marked for migration.
2148 2146 */
2149 2147 if (lgrp_optimizations())
2150 2148 page_migrate(seg, shm_addr, ppa, npages);
2151 2149
2152 2150 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2153 2151 sz = MIN(pgsz, ptob(npages - pidx));
2154 2152 hat_memload_array(sptseg->s_as->a_hat,
2155 2153 a, sz, &ppa[pidx],
2156 2154 sptd->spt_prot, HAT_LOAD_SHARE);
2157 2155 }
2158 2156
2159 2157 /*
2160 2158 * And now drop the SE_SHARED lock(s).
2161 2159 */
2162 2160 for (i = 0; i < npages; i++)
2163 2161 page_unlock(ppa[i]);
2164 2162 }
2165 2163 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2166 2164
2167 2165 kmem_free(ppa, sizeof (page_t *) * npages);
2168 2166 return (0);
2169 2167 case F_SOFTUNLOCK:
2170 2168
2171 2169 /*
2172 2170 * This is a bit ugly, we pass in the real seg pointer,
2173 2171 * but the sptseg_addr is the virtual address within the
2174 2172 * dummy seg.
2175 2173 */
2176 2174 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2177 2175 return (0);
2178 2176
2179 2177 case F_PROT:
2180 2178
2181 2179 /*
2182 2180 * This takes care of the unusual case where a user
2183 2181 * allocates a stack in shared memory and a register
2184 2182 * window overflow is written to that stack page before
2185 2183 * it is otherwise modified.
2186 2184 *
2187 2185 * We can get away with this because ISM segments are
2188 2186 * always rw. Other than this unusual case, there
2189 2187 * should be no instances of protection violations.
2190 2188 */
2191 2189 return (0);
2192 2190
2193 2191 default:
2194 2192 #ifdef DEBUG
2195 2193 cmn_err(CE_WARN, "segspt_shmfault default type?");
2196 2194 #endif
2197 2195 return (FC_NOMAP);
2198 2196 }
2199 2197 }
2200 2198
2201 2199 /*ARGSUSED*/
2202 2200 static faultcode_t
2203 2201 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2204 2202 {
2205 2203 return (0);
2206 2204 }
2207 2205
2208 2206 /*ARGSUSED*/
2209 2207 static int
2210 2208 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2211 2209 {
2212 2210 return (0);
2213 2211 }
2214 2212
2215 2213 /*
2216 2214 * duplicate the shared page tables
2217 2215 */
2218 2216 int
2219 2217 segspt_shmdup(struct seg *seg, struct seg *newseg)
2220 2218 {
2221 2219 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2222 2220 struct anon_map *amp = shmd->shm_amp;
2223 2221 struct shm_data *shmd_new;
2224 2222 struct seg *spt_seg = shmd->shm_sptseg;
2225 2223 struct spt_data *sptd = spt_seg->s_data;
2226 2224 int error = 0;
2227 2225
2228 2226 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2229 2227
2230 2228 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2231 2229 newseg->s_data = (void *)shmd_new;
2232 2230 shmd_new->shm_sptas = shmd->shm_sptas;
2233 2231 shmd_new->shm_amp = amp;
2234 2232 shmd_new->shm_sptseg = shmd->shm_sptseg;
2235 2233 newseg->s_ops = &segspt_shmops;
2236 2234 newseg->s_szc = seg->s_szc;
2237 2235 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2238 2236
2239 2237 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2240 2238 	amp->refcnt++;
2241 2239 	ANON_LOCK_EXIT(&amp->a_rwlock);
2242 2240
2243 2241 if (sptd->spt_flags & SHM_PAGEABLE) {
2244 2242 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2245 2243 shmd_new->shm_lckpgs = 0;
2246 2244 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2247 2245 if ((error = hat_share(newseg->s_as->a_hat,
2248 2246 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2249 2247 seg->s_size, seg->s_szc)) != 0) {
2250 2248 kmem_free(shmd_new->shm_vpage,
2251 2249 btopr(amp->size));
2252 2250 }
2253 2251 }
2254 2252 return (error);
2255 2253 } else {
2256 2254 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2257 2255 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2258 2256 seg->s_szc));
2259 2257
2260 2258 }
2261 2259 }
2262 2260
2263 2261 /*ARGSUSED*/
2264 2262 int
2265 2263 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2266 2264 {
2267 2265 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2268 2266 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2269 2267
2270 2268 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2271 2269
2272 2270 /*
2273 2271 * ISM segment is always rw.
2274 2272 */
2275 2273 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2276 2274 }
2277 2275
2278 2276 /*
2279 2277 * Return an array of locked large pages, for empty slots allocate
2280 2278 * private zero-filled anon pages.
2281 2279 */
2282 2280 static int
2283 2281 spt_anon_getpages(
2284 2282 struct seg *sptseg,
2285 2283 caddr_t sptaddr,
2286 2284 size_t len,
2287 2285 page_t *ppa[])
2288 2286 {
2289 2287 struct spt_data *sptd = sptseg->s_data;
2290 2288 struct anon_map *amp = sptd->spt_amp;
2291 2289 enum seg_rw rw = sptd->spt_prot;
2292 2290 uint_t szc = sptseg->s_szc;
2293 2291 size_t pg_sz, share_sz = page_get_pagesize(szc);
2294 2292 pgcnt_t lp_npgs;
2295 2293 caddr_t lp_addr, e_sptaddr;
2296 2294 uint_t vpprot, ppa_szc = 0;
2297 2295 struct vpage *vpage = NULL;
2298 2296 ulong_t j, ppa_idx;
2299 2297 int err, ierr = 0;
2300 2298 pgcnt_t an_idx;
2301 2299 anon_sync_obj_t cookie;
2302 2300 int anon_locked = 0;
2303 2301 pgcnt_t amp_pgs;
2304 2302
2305 2303
2306 2304 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2307 2305 ASSERT(len != 0);
2308 2306
2309 2307 pg_sz = share_sz;
2310 2308 lp_npgs = btop(pg_sz);
2311 2309 lp_addr = sptaddr;
2312 2310 e_sptaddr = sptaddr + len;
2313 2311 an_idx = seg_page(sptseg, sptaddr);
2314 2312 ppa_idx = 0;
2315 2313
2316 2314 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2317 2315
2318 2316 amp_pgs = page_get_pagecnt(amp->a_szc);
2319 2317
2320 2318 /*CONSTCOND*/
2321 2319 while (1) {
2322 2320 for (; lp_addr < e_sptaddr;
2323 2321 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2324 2322
2325 2323 /*
2326 2324 * If we're currently locked, and we get to a new
2327 2325 * page, unlock our current anon chunk.
2328 2326 */
2329 2327 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2330 2328 anon_array_exit(&cookie);
2331 2329 anon_locked = 0;
2332 2330 }
2333 2331 if (!anon_locked) {
2334 2332 anon_array_enter(amp, an_idx, &cookie);
2335 2333 anon_locked = 1;
2336 2334 }
2337 2335 ppa_szc = (uint_t)-1;
2338 2336 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2339 2337 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2340 2338 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2341 2339
2342 2340 if (ierr != 0) {
2343 2341 if (ierr > 0) {
2344 2342 err = FC_MAKE_ERR(ierr);
2345 2343 goto lpgs_err;
2346 2344 }
2347 2345 break;
2348 2346 }
2349 2347 }
2350 2348 if (lp_addr == e_sptaddr) {
2351 2349 break;
2352 2350 }
2353 2351 ASSERT(lp_addr < e_sptaddr);
2354 2352
2355 2353 /*
2356 2354 * ierr == -1 means we failed to allocate a large page.
2357 2355 * so do a size down operation.
2358 2356 *
2359 2357 * ierr == -2 means some other process that privately shares
2360 2358 * pages with this process has allocated a larger page and we
2361 2359 * need to retry with larger pages. So do a size up
2362 2360 * operation. This relies on the fact that large pages are
2363 2361 * never partially shared i.e. if we share any constituent
2364 2362 * page of a large page with another process we must share the
2365 2363 * entire large page. Note this cannot happen for SOFTLOCK
2366 2364 * case, unless current address (lpaddr) is at the beginning
2367 2365 * of the next page size boundary because the other process
2368 2366 * couldn't have relocated locked pages.
2369 2367 */
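		/*
		 * Editor's illustration (not part of the original source;
		 * the szc values are assumed): starting at szc == 3 with
		 * segvn_anypgsz set, repeated ierr == -1 returns walk the
		 * size down 3 -> 2 -> 1 -> 0, shrinking pg_sz and lp_npgs
		 * each pass, while an ierr == -2 from a sharer that already
		 * built a bigger page walks szc back up.  With
		 * segvn_anypgsz clear, the code instead jumps straight to
		 * the existing page's ppa_szc (or to 0 or sptseg->s_szc),
		 * so the loop cannot ping-pong forever.
		 */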
2370 2368 ASSERT(ierr == -1 || ierr == -2);
2371 2369 if (segvn_anypgsz) {
2372 2370 ASSERT(ierr == -2 || szc != 0);
2373 2371 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2374 2372 szc = (ierr == -1) ? szc - 1 : szc + 1;
2375 2373 } else {
2376 2374 /*
2377 2375 * For faults and segvn_anypgsz == 0
2378 2376 * we need to be careful not to loop forever
2379 2377 * if existing page is found with szc other
2380 2378 * than 0 or seg->s_szc. This could be due
2381 2379 * to page relocations on behalf of DR or
2382 2380 * more likely large page creation. For this
2383 2381 * case simply re-size to existing page's szc
2384 2382 * if returned by anon_map_getpages().
2385 2383 */
2386 2384 if (ppa_szc == (uint_t)-1) {
2387 2385 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2388 2386 } else {
2389 2387 ASSERT(ppa_szc <= sptseg->s_szc);
2390 2388 ASSERT(ierr == -2 || ppa_szc < szc);
2391 2389 ASSERT(ierr == -1 || ppa_szc > szc);
2392 2390 szc = ppa_szc;
2393 2391 }
2394 2392 }
2395 2393 pg_sz = page_get_pagesize(szc);
2396 2394 lp_npgs = btop(pg_sz);
2397 2395 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2398 2396 }
2399 2397 if (anon_locked) {
2400 2398 anon_array_exit(&cookie);
2401 2399 }
2402 2400 	ANON_LOCK_EXIT(&amp->a_rwlock);
2403 2401 return (0);
2404 2402
2405 2403 lpgs_err:
2406 2404 if (anon_locked) {
2407 2405 anon_array_exit(&cookie);
2408 2406 }
2409 2407 	ANON_LOCK_EXIT(&amp->a_rwlock);
2410 2408 for (j = 0; j < ppa_idx; j++)
2411 2409 page_unlock(ppa[j]);
2412 2410 return (err);
2413 2411 }
2414 2412
2415 2413 /*
2416 2414 * count the number of bytes in a set of spt pages that are currently not
2417 2415 * locked
2418 2416 */
2419 2417 static rctl_qty_t
2420 2418 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2421 2419 {
2422 2420 ulong_t i;
2423 2421 rctl_qty_t unlocked = 0;
2424 2422
2425 2423 for (i = 0; i < npages; i++) {
2426 2424 if (ppa[i]->p_lckcnt == 0)
2427 2425 unlocked += PAGESIZE;
2428 2426 }
2429 2427 return (unlocked);
2430 2428 }
2431 2429
2432 2430 extern u_longlong_t randtick(void);
2433 2431 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2434 2432 #define NLCK (NCPU_P2)
2435 2433 /* Random number with a range [0, n-1], n must be power of two */
2436 2434 #define RAND_P2(n) \
2437 2435 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
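/*
 * Editor's note (illustrative, not part of the original source; the
 * NCPU_P2 value is assumed): on a machine where NCPU_P2 == 8, NLCK is 8
 * and RAND_P2(8) picks a value in [0, 7], so the lock-ahead reservation
 * nlck = NLCK + RAND_P2(NLCK) used below lands in [8, 15].  freemem_lock
 * is then taken roughly once per 8 to 15 pages instead of once per page,
 * and because each thread draws a different random cadence, competing
 * lockers do not collide on every iteration.
 */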
2438 2436
2439 2437 int
2440 2438 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2441 2439 page_t **ppa, ulong_t *lockmap, size_t pos,
2442 2440 rctl_qty_t *locked)
2443 2441 {
2444 2442 struct shm_data *shmd = seg->s_data;
2445 2443 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2446 2444 ulong_t i;
2447 2445 int kernel;
2448 2446 pgcnt_t nlck = 0;
2449 2447 int rv = 0;
2450 2448 int use_reserved = 1;
2451 2449
2452 2450 /* return the number of bytes actually locked */
2453 2451 *locked = 0;
2454 2452
2455 2453 /*
2456 2454 * To avoid contention on freemem_lock, availrmem and pages_locked
2457 2455 * global counters are updated only every nlck locked pages instead of
2458 2456 * every time. Reserve nlck locks up front and deduct from this
2459 2457 * reservation for each page that requires a lock. When the reservation
2460 2458 * is consumed, reserve again. nlck is randomized, so the competing
2461 2459 * threads do not fall into a cyclic lock contention pattern. When
2462 2460 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2463 2461 * is used to lock pages.
2464 2462 */
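	/*
	 * Editor's note (illustrative, not part of the original source;
	 * the numbers are assumed): if a pass reserves nlck == 12,
	 * availrmem is debited by 12 and pages_locked credited by 12 in a
	 * single freemem_lock round trip.  Each page that becomes newly
	 * locked (p_lckcnt reaches 1) and is not held in the seg pcache
	 * then decrements nlck, and freemem_lock is only taken again once
	 * the reservation is exhausted.  If availrmem - nlck would fall
	 * below pages_pp_maximum, the advance reserve is skipped and
	 * page_pp_lock() charges each page individually instead.
	 */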
2465 2463 for (i = 0; i < npages; anon_index++, pos++, i++) {
2466 2464 if (nlck == 0 && use_reserved == 1) {
2467 2465 nlck = NLCK + RAND_P2(NLCK);
2468 2466 /* if fewer loops left, decrease nlck */
2469 2467 nlck = MIN(nlck, npages - i);
2470 2468 /*
2471 2469 * Reserve nlck locks up front and deduct from this
2472 2470 * reservation for each page that requires a lock. When
2473 2471 * the reservation is consumed, reserve again.
2474 2472 */
2475 2473 mutex_enter(&freemem_lock);
2476 2474 if ((availrmem - nlck) < pages_pp_maximum) {
2477 2475 /* Do not do advance memory reserves */
2478 2476 use_reserved = 0;
2479 2477 } else {
2480 2478 availrmem -= nlck;
2481 2479 pages_locked += nlck;
2482 2480 }
2483 2481 mutex_exit(&freemem_lock);
2484 2482 }
2485 2483 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2486 2484 if (sptd->spt_ppa_lckcnt[anon_index] <
2487 2485 (ushort_t)DISM_LOCK_MAX) {
2488 2486 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2489 2487 (ushort_t)DISM_LOCK_MAX) {
2490 2488 cmn_err(CE_WARN,
2491 2489 "DISM page lock limit "
2492 2490 "reached on DISM offset 0x%lx\n",
2493 2491 anon_index << PAGESHIFT);
2494 2492 }
2495 2493 kernel = (sptd->spt_ppa &&
2496 2494 sptd->spt_ppa[anon_index]);
2497 2495 if (!page_pp_lock(ppa[i], 0, kernel ||
2498 2496 use_reserved)) {
2499 2497 sptd->spt_ppa_lckcnt[anon_index]--;
2500 2498 rv = EAGAIN;
2501 2499 break;
2502 2500 }
2503 2501 /* if this is a newly locked page, count it */
2504 2502 if (ppa[i]->p_lckcnt == 1) {
2505 2503 if (kernel == 0 && use_reserved == 1)
2506 2504 nlck--;
2507 2505 *locked += PAGESIZE;
2508 2506 }
2509 2507 shmd->shm_lckpgs++;
2510 2508 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2511 2509 if (lockmap != NULL)
2512 2510 BT_SET(lockmap, pos);
2513 2511 }
2514 2512 }
2515 2513 }
2516 2514 /* Return unused lock reservation */
2517 2515 if (nlck != 0 && use_reserved == 1) {
2518 2516 mutex_enter(&freemem_lock);
2519 2517 availrmem += nlck;
2520 2518 pages_locked -= nlck;
2521 2519 mutex_exit(&freemem_lock);
2522 2520 }
2523 2521
2524 2522 return (rv);
2525 2523 }
2526 2524
2527 2525 int
2528 2526 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2529 2527 rctl_qty_t *unlocked)
2530 2528 {
2531 2529 struct shm_data *shmd = seg->s_data;
2532 2530 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2533 2531 struct anon_map *amp = sptd->spt_amp;
2534 2532 struct anon *ap;
2535 2533 struct vnode *vp;
2536 2534 u_offset_t off;
2537 2535 struct page *pp;
2538 2536 int kernel;
2539 2537 anon_sync_obj_t cookie;
2540 2538 ulong_t i;
2541 2539 pgcnt_t nlck = 0;
2542 2540 pgcnt_t nlck_limit = NLCK;
2543 2541
2544 2542 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2545 2543 for (i = 0; i < npages; i++, anon_index++) {
2546 2544 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2547 2545 anon_array_enter(amp, anon_index, &cookie);
2548 2546 ap = anon_get_ptr(amp->ahp, anon_index);
2549 2547 ASSERT(ap);
2550 2548
2551 2549 swap_xlate(ap, &vp, &off);
2552 2550 anon_array_exit(&cookie);
2553 2551 pp = page_lookup(vp, off, SE_SHARED);
2554 2552 ASSERT(pp);
2555 2553 /*
2556 2554 * availrmem is decremented only for pages which are not
2557 2555 			 * in seg pcache; for pages in seg pcache, availrmem was
2558 2556 * decremented in _dismpagelock()
2559 2557 */
2560 2558 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2561 2559 ASSERT(pp->p_lckcnt > 0);
2562 2560
2563 2561 /*
2564 2562 			 * unlock page but do not change availrmem; we do it
2565 2563 * ourselves every nlck loops.
2566 2564 */
2567 2565 page_pp_unlock(pp, 0, 1);
2568 2566 if (pp->p_lckcnt == 0) {
2569 2567 if (kernel == 0)
2570 2568 nlck++;
2571 2569 *unlocked += PAGESIZE;
2572 2570 }
2573 2571 page_unlock(pp);
2574 2572 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2575 2573 sptd->spt_ppa_lckcnt[anon_index]--;
2576 2574 shmd->shm_lckpgs--;
2577 2575 }
2578 2576
2579 2577 /*
2580 2578 * To reduce freemem_lock contention, do not update availrmem
2581 2579 * until at least NLCK pages have been unlocked.
2582 2580 * 1. No need to update if nlck is zero
2583 2581 * 2. Always update if the last iteration
2584 2582 */
2585 2583 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2586 2584 mutex_enter(&freemem_lock);
2587 2585 availrmem += nlck;
2588 2586 pages_locked -= nlck;
2589 2587 mutex_exit(&freemem_lock);
2590 2588 nlck = 0;
2591 2589 nlck_limit = NLCK + RAND_P2(NLCK);
2592 2590 }
2593 2591 }
2594 2592 	ANON_LOCK_EXIT(&amp->a_rwlock);
2595 2593
2596 2594 return (0);
2597 2595 }
2598 2596
2599 2597 /*ARGSUSED*/
2600 2598 static int
2601 2599 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2602 2600 int attr, int op, ulong_t *lockmap, size_t pos)
2603 2601 {
2604 2602 struct shm_data *shmd = seg->s_data;
2605 2603 struct seg *sptseg = shmd->shm_sptseg;
2606 2604 struct spt_data *sptd = sptseg->s_data;
2607 2605 struct kshmid *sp = sptd->spt_amp->a_sp;
2608 2606 pgcnt_t npages, a_npages;
2609 2607 page_t **ppa;
2610 2608 pgcnt_t an_idx, a_an_idx, ppa_idx;
2611 2609 caddr_t spt_addr, a_addr; /* spt and aligned address */
2612 2610 size_t a_len; /* aligned len */
2613 2611 size_t share_sz;
2614 2612 ulong_t i;
2615 2613 int sts = 0;
2616 2614 rctl_qty_t unlocked = 0;
2617 2615 rctl_qty_t locked = 0;
2618 2616 struct proc *p = curproc;
2619 2617 kproject_t *proj;
2620 2618
2621 2619 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2622 2620 ASSERT(sp != NULL);
2623 2621
2624 2622 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2625 2623 return (0);
2626 2624 }
2627 2625
2628 2626 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2629 2627 an_idx = seg_page(seg, addr);
2630 2628 npages = btopr(len);
2631 2629
2632 2630 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2633 2631 return (ENOMEM);
2634 2632 }
2635 2633
2636 2634 /*
2637 2635 * A shm's project never changes, so no lock needed.
2638 2636 * The shm has a hold on the project, so it will not go away.
2639 2637 * Since we have a mapping to shm within this zone, we know
2640 2638 * that the zone will not go away.
2641 2639 */
2642 2640 proj = sp->shm_perm.ipc_proj;
2643 2641
2644 2642 if (op == MC_LOCK) {
2645 2643
2646 2644 /*
2647 2645 * Need to align addr and size request if they are not
2648 2646 * aligned so we can always allocate large page(s) however
2649 2647 * we only lock what was requested in initial request.
2650 2648 */
2651 2649 share_sz = page_get_pagesize(sptseg->s_szc);
2652 2650 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2653 2651 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2654 2652 share_sz);
2655 2653 a_npages = btop(a_len);
2656 2654 a_an_idx = seg_page(seg, a_addr);
2657 2655 spt_addr = sptseg->s_base + ptob(a_an_idx);
2658 2656 ppa_idx = an_idx - a_an_idx;
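		/*
		 * Editor's illustration (not part of the original source;
		 * page sizes assumed): with an 8K base page and
		 * share_sz == 4M, locking len == 8K at
		 * addr == seg->s_base + 4M + 16K gives a_addr == s_base + 4M,
		 * a_len == 4M, a_npages == 512, an_idx == 514,
		 * a_an_idx == 512 and ppa_idx == 2.  The whole large page is
		 * brought in by spt_anon_getpages() below, but only
		 * &ppa[2] with npages == 1 is handed to spt_lockpages().
		 */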
2659 2657
2660 2658 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2661 2659 KM_NOSLEEP)) == NULL) {
2662 2660 return (ENOMEM);
2663 2661 }
2664 2662
2665 2663 /*
2666 2664 * Don't cache any new pages for IO and
2667 2665 * flush any cached pages.
2668 2666 */
2669 2667 mutex_enter(&sptd->spt_lock);
2670 2668 if (sptd->spt_ppa != NULL)
2671 2669 sptd->spt_flags |= DISM_PPA_CHANGED;
2672 2670
2673 2671 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2674 2672 if (sts != 0) {
2675 2673 mutex_exit(&sptd->spt_lock);
2676 2674 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2677 2675 return (sts);
2678 2676 }
2679 2677
2680 2678 mutex_enter(&sp->shm_mlock);
2681 2679 /* enforce locked memory rctl */
2682 2680 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2683 2681
2684 2682 mutex_enter(&p->p_lock);
2685 2683 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2686 2684 mutex_exit(&p->p_lock);
2687 2685 sts = EAGAIN;
2688 2686 } else {
2689 2687 mutex_exit(&p->p_lock);
2690 2688 sts = spt_lockpages(seg, an_idx, npages,
2691 2689 &ppa[ppa_idx], lockmap, pos, &locked);
2692 2690
2693 2691 /*
2694 2692 * correct locked count if not all pages could be
2695 2693 * locked
2696 2694 */
2697 2695 if ((unlocked - locked) > 0) {
2698 2696 rctl_decr_locked_mem(NULL, proj,
2699 2697 (unlocked - locked), 0);
2700 2698 }
2701 2699 }
2702 2700 /*
2703 2701 * unlock pages
2704 2702 */
2705 2703 for (i = 0; i < a_npages; i++)
2706 2704 page_unlock(ppa[i]);
2707 2705 if (sptd->spt_ppa != NULL)
2708 2706 sptd->spt_flags |= DISM_PPA_CHANGED;
2709 2707 mutex_exit(&sp->shm_mlock);
2710 2708 mutex_exit(&sptd->spt_lock);
2711 2709
2712 2710 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2713 2711
2714 2712 } else if (op == MC_UNLOCK) { /* unlock */
2715 2713 page_t **ppa;
2716 2714
2717 2715 mutex_enter(&sptd->spt_lock);
2718 2716 if (shmd->shm_lckpgs == 0) {
2719 2717 mutex_exit(&sptd->spt_lock);
2720 2718 return (0);
2721 2719 }
2722 2720 /*
2723 2721 * Don't cache new IO pages.
2724 2722 */
2725 2723 if (sptd->spt_ppa != NULL)
2726 2724 sptd->spt_flags |= DISM_PPA_CHANGED;
2727 2725
2728 2726 mutex_enter(&sp->shm_mlock);
2729 2727 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2730 2728 if ((ppa = sptd->spt_ppa) != NULL)
2731 2729 sptd->spt_flags |= DISM_PPA_CHANGED;
2732 2730 mutex_exit(&sptd->spt_lock);
2733 2731
2734 2732 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2735 2733 mutex_exit(&sp->shm_mlock);
2736 2734
2737 2735 if (ppa != NULL)
2738 2736 seg_ppurge_wiredpp(ppa);
2739 2737 }
2740 2738 return (sts);
2741 2739 }
2742 2740
2743 2741 /*ARGSUSED*/
2744 2742 int
2745 2743 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2746 2744 {
2747 2745 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2748 2746 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2749 2747 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2750 2748
2751 2749 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2752 2750
2753 2751 /*
2754 2752 * ISM segment is always rw.
2755 2753 */
2756 2754 while (--pgno >= 0)
2757 2755 *protv++ = sptd->spt_prot;
2758 2756 return (0);
2759 2757 }
2760 2758
2761 2759 /*ARGSUSED*/
2762 2760 u_offset_t
2763 2761 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2764 2762 {
2765 2763 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2766 2764
2767 2765 /* Offset does not matter in ISM memory */
2768 2766
2769 2767 return ((u_offset_t)0);
2770 2768 }
2771 2769
2772 2770 /* ARGSUSED */
2773 2771 int
2774 2772 segspt_shmgettype(struct seg *seg, caddr_t addr)
2775 2773 {
2776 2774 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2777 2775 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2778 2776
2779 2777 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2780 2778
2781 2779 /*
2782 2780 * The shared memory mapping is always MAP_SHARED, SWAP is only
2783 2781 * reserved for DISM
2784 2782 */
2785 2783 return (MAP_SHARED |
2786 2784 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2787 2785 }
2788 2786
2789 2787 /*ARGSUSED*/
2790 2788 int
2791 2789 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2792 2790 {
2793 2791 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2794 2792 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2795 2793
2796 2794 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2797 2795
2798 2796 *vpp = sptd->spt_vp;
2799 2797 return (0);
2800 2798 }
2801 2799
2802 2800 /*
2803 2801 * We need to wait for pending IO to complete to a DISM segment in order for
2804 2802 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2805 2803 * than enough time to wait.
2806 2804 */
2807 2805 static clock_t spt_pcache_wait = 120;
2808 2806
2809 2807 /*ARGSUSED*/
2810 2808 static int
2811 2809 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2812 2810 {
2813 2811 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2814 2812 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2815 2813 struct anon_map *amp;
2816 2814 pgcnt_t pg_idx;
2817 2815 ushort_t gen;
2818 2816 clock_t end_lbolt;
2819 2817 int writer;
2820 2818 page_t **ppa;
2821 2819
2822 2820 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2823 2821
2824 2822 if (behav == MADV_FREE) {
2825 2823 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2826 2824 return (0);
2827 2825
2828 2826 amp = sptd->spt_amp;
2829 2827 pg_idx = seg_page(seg, addr);
2830 2828
2831 2829 mutex_enter(&sptd->spt_lock);
2832 2830 if ((ppa = sptd->spt_ppa) == NULL) {
2833 2831 mutex_exit(&sptd->spt_lock);
2834 2832 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2835 2833 			anon_disclaim(amp, pg_idx, len);
2836 2834 			ANON_LOCK_EXIT(&amp->a_rwlock);
2837 2835 return (0);
2838 2836 }
2839 2837
2840 2838 sptd->spt_flags |= DISM_PPA_CHANGED;
2841 2839 gen = sptd->spt_gen;
2842 2840
2843 2841 mutex_exit(&sptd->spt_lock);
2844 2842
2845 2843 /*
2846 2844 * Purge all DISM cached pages
2847 2845 */
2848 2846 seg_ppurge_wiredpp(ppa);
2849 2847
2850 2848 /*
2851 2849 * Drop the AS_LOCK so that other threads can grab it
2852 2850 * in the as_pageunlock path and hopefully get the segment
2853 2851 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2854 2852 * to keep this segment resident.
2855 2853 */
2856 2854 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2857 2855 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2858 2856 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2859 2857
2860 2858 mutex_enter(&sptd->spt_lock);
2861 2859
2862 2860 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2863 2861
2864 2862 /*
2865 2863 * Try to wait for pages to get kicked out of the seg_pcache.
2866 2864 */
2867 2865 while (sptd->spt_gen == gen &&
2868 2866 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2869 2867 ddi_get_lbolt() < end_lbolt) {
2870 2868 if (!cv_timedwait_sig(&sptd->spt_cv,
2871 2869 &sptd->spt_lock, end_lbolt)) {
2872 2870 break;
2873 2871 }
2874 2872 }
2875 2873
2876 2874 mutex_exit(&sptd->spt_lock);
2877 2875
2878 2876 /* Regrab the AS_LOCK and release our hold on the segment */
2879 2877 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2880 2878 writer ? RW_WRITER : RW_READER);
2881 2879 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2882 2880 if (shmd->shm_softlockcnt <= 0) {
2883 2881 if (AS_ISUNMAPWAIT(seg->s_as)) {
2884 2882 mutex_enter(&seg->s_as->a_contents);
2885 2883 if (AS_ISUNMAPWAIT(seg->s_as)) {
2886 2884 AS_CLRUNMAPWAIT(seg->s_as);
2887 2885 cv_broadcast(&seg->s_as->a_cv);
2888 2886 }
2889 2887 mutex_exit(&seg->s_as->a_contents);
2890 2888 }
2891 2889 }
2892 2890
2893 2891 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2894 2892 		anon_disclaim(amp, pg_idx, len);
2895 2893 		ANON_LOCK_EXIT(&amp->a_rwlock);
2896 2894 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2897 2895 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2898 2896 int already_set;
2899 2897 ulong_t anon_index;
2900 2898 lgrp_mem_policy_t policy;
2901 2899 caddr_t shm_addr;
2902 2900 size_t share_size;
2903 2901 size_t size;
2904 2902 struct seg *sptseg = shmd->shm_sptseg;
2905 2903 caddr_t sptseg_addr;
2906 2904
2907 2905 /*
2908 2906 * Align address and length to page size of underlying segment
2909 2907 */
2910 2908 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2911 2909 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2912 2910 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2913 2911 share_size);
2914 2912
2915 2913 amp = shmd->shm_amp;
2916 2914 anon_index = seg_page(seg, shm_addr);
2917 2915
2918 2916 /*
2919 2917 * And now we may have to adjust size downward if we have
2920 2918 * exceeded the realsize of the segment or initial anon
2921 2919 * allocations.
2922 2920 */
2923 2921 sptseg_addr = sptseg->s_base + ptob(anon_index);
2924 2922 if ((sptseg_addr + size) >
2925 2923 (sptseg->s_base + sptd->spt_realsize))
2926 2924 size = (sptseg->s_base + sptd->spt_realsize) -
2927 2925 sptseg_addr;
2928 2926
2929 2927 /*
2930 2928 * Set memory allocation policy for this segment
2931 2929 */
2932 2930 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2933 2931 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2934 2932 NULL, 0, len);
2935 2933
2936 2934 /*
2937 2935 * If random memory allocation policy set already,
2938 2936 * don't bother reapplying it.
2939 2937 */
2940 2938 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2941 2939 return (0);
2942 2940
2943 2941 /*
2944 2942 * Mark any existing pages in the given range for
2945 2943 		 * migration, flushing the I/O page cache, and using the
2946 2944 		 * underlying segment to calculate the anon index and to get
2947 2945 		 * the anonmap and vnode pointer from.
2948 2946 */
2949 2947 if (shmd->shm_softlockcnt > 0)
2950 2948 segspt_purge(seg);
2951 2949
2952 2950 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2953 2951 }
2954 2952
2955 2953 return (0);
2956 2954 }
2957 2955
2958 2956 /*ARGSUSED*/
2959 2957 void
2960 2958 segspt_shmdump(struct seg *seg)
2961 2959 {
2962 2960 /* no-op for ISM segment */
2963 2961 }
2964 2962
2965 2963 /*ARGSUSED*/
2966 2964 static faultcode_t
2967 2965 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2968 2966 {
2969 2967 return (ENOTSUP);
2970 2968 }
2971 2969
2972 2970 /*
2973 2971 * get a memory ID for an addr in a given segment
2974 2972 */
2975 2973 static int
2976 2974 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2977 2975 {
2978 2976 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2979 2977 struct anon *ap;
2980 2978 size_t anon_index;
2981 2979 struct anon_map *amp = shmd->shm_amp;
2982 2980 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2983 2981 struct seg *sptseg = shmd->shm_sptseg;
2984 2982 anon_sync_obj_t cookie;
2985 2983
2986 2984 anon_index = seg_page(seg, addr);
2987 2985
2988 2986 if (addr > (seg->s_base + sptd->spt_realsize)) {
2989 2987 return (EFAULT);
2990 2988 }
2991 2989
2992 2990 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2993 2991 anon_array_enter(amp, anon_index, &cookie);
2994 2992 ap = anon_get_ptr(amp->ahp, anon_index);
2995 2993 if (ap == NULL) {
2996 2994 struct page *pp;
2997 2995 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2998 2996
2999 2997 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3000 2998 if (pp == NULL) {
3001 2999 anon_array_exit(&cookie);
3002 3000 			ANON_LOCK_EXIT(&amp->a_rwlock);
3003 3001 return (ENOMEM);
3004 3002 }
3005 3003 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3006 3004 page_unlock(pp);
3007 3005 }
3008 3006 anon_array_exit(&cookie);
3009 3007 	ANON_LOCK_EXIT(&amp->a_rwlock);
3010 3008 memidp->val[0] = (uintptr_t)ap;
3011 3009 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3012 3010 return (0);
3013 3011 }
3014 3012
3015 3013 /*
3016 3014 * Get memory allocation policy info for specified address in given segment
3017 3015 */
3018 3016 static lgrp_mem_policy_info_t *
3019 3017 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3020 3018 {
3021 3019 struct anon_map *amp;
3022 3020 ulong_t anon_index;
3023 3021 lgrp_mem_policy_info_t *policy_info;
3024 3022 struct shm_data *shm_data;
3025 3023
3026 3024 ASSERT(seg != NULL);
3027 3025
3028 3026 /*
3029 3027 * Get anon_map from segshm
3030 3028 *
3031 3029 * Assume that no lock needs to be held on anon_map, since
3032 3030 * it should be protected by its reference count which must be
3033 3031 * nonzero for an existing segment
3034 3032 * Need to grab readers lock on policy tree though
3035 3033 */
3036 3034 shm_data = (struct shm_data *)seg->s_data;
3037 3035 if (shm_data == NULL)
3038 3036 return (NULL);
3039 3037 amp = shm_data->shm_amp;
3040 3038 ASSERT(amp->refcnt != 0);
3041 3039
3042 3040 /*
3043 3041 * Get policy info
3044 3042 *
3045 3043 * Assume starting anon index of 0
3046 3044 */
3047 3045 anon_index = seg_page(seg, addr);
3048 3046 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3049 3047
3050 3048 return (policy_info);
3051 3049 }
3052 3050
3053 3051 /*ARGSUSED*/
3054 3052 static int
3055 3053 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3056 3054 {
3057 3055 return (0);
3058 3056 }