use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle NULL dump segop function pointer as a no-op shorthand.
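
For context, the caller side of this change can be pictured as a small dispatch helper. The sketch below is illustrative only; the helper name segop_dump is an assumption and does not appear in this webrev. It shows how a NULL .dump pointer in a driver's seg_ops can be treated as a no-op:

/*
 * Minimal sketch, not part of this webrev: dispatch the dump segop,
 * treating a NULL function pointer as "nothing to dump" so drivers
 * such as seg_spt no longer need a do-nothing stub.
 */
static void
segop_dump(struct seg *seg)
{
	if (seg->s_ops->dump == NULL)
		return;		/* NULL dump segop is a no-op shorthand */

	seg->s_ops->dump(seg);
}

With a caller like that in place, segspt_shmdump() and the .dump entry in segspt_shmops can simply be deleted, which is what the diff below does.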
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static const struct seg_ops segspt_ops = {
80 80 .unmap = segspt_unmap,
81 81 .free = segspt_free,
82 82 .getpolicy = segspt_getpolicy,
83 83 };
84 84
85 85 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
86 86 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
87 87 static void segspt_shmfree(struct seg *seg);
88 88 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
89 89 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
90 90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
91 91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
92 92 register size_t len, register uint_t prot);
93 93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
94 94 uint_t prot);
95 95 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
96 96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
97 97 register char *vec);
98 98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
99 99 int attr, uint_t flags);
100 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
101 101 int attr, int op, ulong_t *lockmap, size_t pos);
102 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
103 103 uint_t *protv);
104 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
105 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
106 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
107 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
108 108 uint_t behav);
109 -static void segspt_shmdump(struct seg *seg);
110 109 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
111 110 struct page ***, enum lock_type, enum seg_rw);
112 111 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
113 112 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
114 113
115 114 const struct seg_ops segspt_shmops = {
116 115 .dup = segspt_shmdup,
117 116 .unmap = segspt_shmunmap,
118 117 .free = segspt_shmfree,
119 118 .fault = segspt_shmfault,
120 119 .faulta = segspt_shmfaulta,
121 120 .setprot = segspt_shmsetprot,
122 121 .checkprot = segspt_shmcheckprot,
123 122 .kluster = segspt_shmkluster,
124 123 .sync = segspt_shmsync,
125 124 .incore = segspt_shmincore,
126 125 .lockop = segspt_shmlockop,
127 126 .getprot = segspt_shmgetprot,
128 127 .getoffset = segspt_shmgetoffset,
129 128 .gettype = segspt_shmgettype,
130 129 .getvp = segspt_shmgetvp,
131 130 .advise = segspt_shmadvise,
132 - .dump = segspt_shmdump,
133 131 .pagelock = segspt_shmpagelock,
134 132 .getmemid = segspt_shmgetmemid,
135 133 .getpolicy = segspt_shmgetpolicy,
136 134 };
137 135
138 136 static void segspt_purge(struct seg *seg);
139 137 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
140 138 enum seg_rw, int);
141 139 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
142 140 page_t **ppa);
143 141
144 142
145 143
146 144 /*ARGSUSED*/
147 145 int
148 146 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
149 147 uint_t prot, uint_t flags, uint_t share_szc)
150 148 {
151 149 int err;
152 150 struct as *newas;
153 151 struct segspt_crargs sptcargs;
154 152
155 153 #ifdef DEBUG
156 154 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
157 155 tnf_ulong, size, size );
158 156 #endif
159 157 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
160 158 segspt_minfree = availrmem/20; /* the system */
161 159
162 160 if (!hat_supported(HAT_SHARED_PT, (void *)0))
163 161 return (EINVAL);
164 162
165 163 /*
166 164 * get a new as for this shared memory segment
167 165 */
168 166 newas = as_alloc();
169 167 newas->a_proc = NULL;
170 168 sptcargs.amp = amp;
171 169 sptcargs.prot = prot;
172 170 sptcargs.flags = flags;
173 171 sptcargs.szc = share_szc;
174 172 /*
175 173 * create a shared page table (spt) segment
176 174 */
177 175
178 176 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
179 177 as_free(newas);
180 178 return (err);
181 179 }
182 180 *sptseg = sptcargs.seg_spt;
183 181 return (0);
184 182 }
185 183
186 184 void
187 185 sptdestroy(struct as *as, struct anon_map *amp)
188 186 {
189 187
190 188 #ifdef DEBUG
191 189 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
192 190 #endif
193 191 (void) as_unmap(as, SEGSPTADDR, amp->size);
194 192 as_free(as);
195 193 }
196 194
197 195 /*
198 196 * called from seg_free().
199 197 * free (i.e., unlock, unmap, return to free list)
200 198 * all the pages in the given seg.
201 199 */
202 200 void
203 201 segspt_free(struct seg *seg)
204 202 {
205 203 struct spt_data *sptd = (struct spt_data *)seg->s_data;
206 204
207 205 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
208 206
209 207 if (sptd != NULL) {
210 208 if (sptd->spt_realsize)
211 209 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
212 210
213 211 if (sptd->spt_ppa_lckcnt)
214 212 kmem_free(sptd->spt_ppa_lckcnt,
215 213 sizeof (*sptd->spt_ppa_lckcnt)
216 214 * btopr(sptd->spt_amp->size));
217 215 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
218 216 cv_destroy(&sptd->spt_cv);
219 217 mutex_destroy(&sptd->spt_lock);
220 218 kmem_free(sptd, sizeof (*sptd));
221 219 }
222 220 }
223 221
224 222 /*ARGSUSED*/
225 223 static int
226 224 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
227 225 uint_t flags)
228 226 {
229 227 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
230 228
231 229 return (0);
232 230 }
233 231
234 232 /*ARGSUSED*/
235 233 static size_t
236 234 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
237 235 {
238 236 caddr_t eo_seg;
239 237 pgcnt_t npages;
240 238 struct shm_data *shmd = (struct shm_data *)seg->s_data;
241 239 struct seg *sptseg;
242 240 struct spt_data *sptd;
243 241
244 242 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
245 243 #ifdef lint
246 244 seg = seg;
247 245 #endif
248 246 sptseg = shmd->shm_sptseg;
249 247 sptd = sptseg->s_data;
250 248
251 249 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
252 250 eo_seg = addr + len;
253 251 while (addr < eo_seg) {
254 252 /* page exists, and it's locked. */
255 253 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
256 254 SEG_PAGE_ANON;
257 255 addr += PAGESIZE;
258 256 }
259 257 return (len);
260 258 } else {
261 259 struct anon_map *amp = shmd->shm_amp;
262 260 struct anon *ap;
263 261 page_t *pp;
264 262 pgcnt_t anon_index;
265 263 struct vnode *vp;
266 264 u_offset_t off;
267 265 ulong_t i;
268 266 int ret;
269 267 anon_sync_obj_t cookie;
270 268
271 269 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
272 270 anon_index = seg_page(seg, addr);
273 271 npages = btopr(len);
274 272 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
275 273 return (EINVAL);
276 274 }
277 275 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
278 276 for (i = 0; i < npages; i++, anon_index++) {
279 277 ret = 0;
280 278 anon_array_enter(amp, anon_index, &cookie);
281 279 ap = anon_get_ptr(amp->ahp, anon_index);
282 280 if (ap != NULL) {
283 281 swap_xlate(ap, &vp, &off);
284 282 anon_array_exit(&cookie);
285 283 pp = page_lookup_nowait(vp, off, SE_SHARED);
286 284 if (pp != NULL) {
287 285 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
288 286 page_unlock(pp);
289 287 }
290 288 } else {
291 289 anon_array_exit(&cookie);
292 290 }
293 291 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
294 292 ret |= SEG_PAGE_LOCKED;
295 293 }
296 294 *vec++ = (char)ret;
297 295 }
298 296 ANON_LOCK_EXIT(&->a_rwlock);
299 297 return (len);
300 298 }
301 299 }
302 300
303 301 static int
304 302 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
305 303 {
306 304 size_t share_size;
307 305
308 306 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
309 307
310 308 /*
311 309 * seg.s_size may have been rounded up to the largest page size
312 310 * in shmat().
313 311 * XXX This should be cleaned up. sptdestroy should take a length
314 312 * argument which should be the same as sptcreate. Then
315 313 * this rounding would not be needed (or is done in shm.c)
316 314 * Only the check for full segment will be needed.
317 315 *
318 316 * XXX -- shouldn't raddr == 0 always? These tests don't seem
319 317 * to be useful at all.
320 318 */
321 319 share_size = page_get_pagesize(seg->s_szc);
322 320 ssize = P2ROUNDUP(ssize, share_size);
323 321
324 322 if (raddr == seg->s_base && ssize == seg->s_size) {
325 323 seg_free(seg);
326 324 return (0);
327 325 } else
328 326 return (EINVAL);
329 327 }
330 328
331 329 int
332 330 segspt_create(struct seg *seg, caddr_t argsp)
333 331 {
334 332 int err;
335 333 caddr_t addr = seg->s_base;
336 334 struct spt_data *sptd;
337 335 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
338 336 struct anon_map *amp = sptcargs->amp;
339 337 struct kshmid *sp = amp->a_sp;
340 338 struct cred *cred = CRED();
341 339 ulong_t i, j, anon_index = 0;
342 340 pgcnt_t npages = btopr(amp->size);
343 341 struct vnode *vp;
344 342 page_t **ppa;
345 343 uint_t hat_flags;
346 344 size_t pgsz;
347 345 pgcnt_t pgcnt;
348 346 caddr_t a;
349 347 pgcnt_t pidx;
350 348 size_t sz;
351 349 proc_t *procp = curproc;
352 350 rctl_qty_t lockedbytes = 0;
353 351 kproject_t *proj;
354 352
355 353 /*
356 354 * We are holding the a_lock on the underlying dummy as,
357 355 * so we can make calls to the HAT layer.
358 356 */
359 357 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
360 358 ASSERT(sp != NULL);
361 359
362 360 #ifdef DEBUG
363 361 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
364 362 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
365 363 #endif
366 364 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
367 365 if (err = anon_swap_adjust(npages))
368 366 return (err);
369 367 }
370 368 err = ENOMEM;
371 369
372 370 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
373 371 goto out1;
374 372
375 373 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
376 374 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
377 375 KM_NOSLEEP)) == NULL)
378 376 goto out2;
379 377 }
380 378
381 379 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
382 380
383 381 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
384 382 goto out3;
385 383
386 384 seg->s_ops = &segspt_ops;
387 385 sptd->spt_vp = vp;
388 386 sptd->spt_amp = amp;
389 387 sptd->spt_prot = sptcargs->prot;
390 388 sptd->spt_flags = sptcargs->flags;
391 389 seg->s_data = (caddr_t)sptd;
392 390 sptd->spt_ppa = NULL;
393 391 sptd->spt_ppa_lckcnt = NULL;
394 392 seg->s_szc = sptcargs->szc;
395 393 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
396 394 sptd->spt_gen = 0;
397 395
398 396 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
399 397 if (seg->s_szc > amp->a_szc) {
400 398 amp->a_szc = seg->s_szc;
401 399 }
402 400 ANON_LOCK_EXIT(&->a_rwlock);
403 401
404 402 /*
405 403 * Set policy to affect initial allocation of pages in
406 404 * anon_map_createpages()
407 405 */
408 406 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
409 407 NULL, 0, ptob(npages));
410 408
411 409 if (sptcargs->flags & SHM_PAGEABLE) {
412 410 size_t share_sz;
413 411 pgcnt_t new_npgs, more_pgs;
414 412 struct anon_hdr *nahp;
415 413 zone_t *zone;
416 414
417 415 share_sz = page_get_pagesize(seg->s_szc);
418 416 if (!IS_P2ALIGNED(amp->size, share_sz)) {
419 417 /*
420 418 * We are rounding up the size of the anon array
421 419 * on 4 M boundary because we always create 4 M
422 420 * of page(s) when locking, faulting pages and we
423 421 * don't have to check for all corner cases e.g.
424 422 * if there is enough space to allocate 4 M
425 423 * page.
426 424 */
427 425 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
428 426 more_pgs = new_npgs - npages;
429 427
430 428 /*
431 429 * The zone will never be NULL, as a fully created
432 430 * shm always has an owning zone.
433 431 */
434 432 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
435 433 ASSERT(zone != NULL);
436 434 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
437 435 err = ENOMEM;
438 436 goto out4;
439 437 }
440 438
441 439 nahp = anon_create(new_npgs, ANON_SLEEP);
442 440 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
443 441 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
444 442 ANON_SLEEP);
445 443 anon_release(amp->ahp, npages);
446 444 amp->ahp = nahp;
447 445 ASSERT(amp->swresv == ptob(npages));
448 446 amp->swresv = amp->size = ptob(new_npgs);
449 447 ANON_LOCK_EXIT(&->a_rwlock);
450 448 npages = new_npgs;
451 449 }
452 450
453 451 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
454 452 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
455 453 sptd->spt_pcachecnt = 0;
456 454 sptd->spt_realsize = ptob(npages);
457 455 sptcargs->seg_spt = seg;
458 456 return (0);
459 457 }
460 458
461 459 /*
462 460 * get array of pages for each anon slot in amp
463 461 */
464 462 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
465 463 seg, addr, S_CREATE, cred)) != 0)
466 464 goto out4;
467 465
468 466 mutex_enter(&sp->shm_mlock);
469 467
470 468 /* May be partially locked, so, count bytes to charge for locking */
471 469 for (i = 0; i < npages; i++)
472 470 if (ppa[i]->p_lckcnt == 0)
473 471 lockedbytes += PAGESIZE;
474 472
475 473 proj = sp->shm_perm.ipc_proj;
476 474
477 475 if (lockedbytes > 0) {
478 476 mutex_enter(&procp->p_lock);
479 477 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
480 478 mutex_exit(&procp->p_lock);
481 479 mutex_exit(&sp->shm_mlock);
482 480 for (i = 0; i < npages; i++)
483 481 page_unlock(ppa[i]);
484 482 err = ENOMEM;
485 483 goto out4;
486 484 }
487 485 mutex_exit(&procp->p_lock);
488 486 }
489 487
490 488 /*
491 489 * addr is initial address corresponding to the first page on ppa list
492 490 */
493 491 for (i = 0; i < npages; i++) {
494 492 /* attempt to lock all pages */
495 493 if (page_pp_lock(ppa[i], 0, 1) == 0) {
496 494 /*
497 495 * if unable to lock any page, unlock all
498 496 * of them and return error
499 497 */
500 498 for (j = 0; j < i; j++)
501 499 page_pp_unlock(ppa[j], 0, 1);
502 500 for (i = 0; i < npages; i++)
503 501 page_unlock(ppa[i]);
504 502 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
505 503 mutex_exit(&sp->shm_mlock);
506 504 err = ENOMEM;
507 505 goto out4;
508 506 }
509 507 }
510 508 mutex_exit(&sp->shm_mlock);
511 509
512 510 /*
513 511 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
514 512 * for the entire life of the segment. For example platforms
515 513 * that do not support Dynamic Reconfiguration.
516 514 */
517 515 hat_flags = HAT_LOAD_SHARE;
518 516 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
519 517 hat_flags |= HAT_LOAD_LOCK;
520 518
521 519 /*
522 520 * Load translations one large page at a time
523 521 * to make sure we don't create mappings bigger than
524 522 * segment's size code in case underlying pages
525 523 * are shared with segvn's segment that uses bigger
526 524 * size code than we do.
527 525 */
528 526 pgsz = page_get_pagesize(seg->s_szc);
529 527 pgcnt = page_get_pagecnt(seg->s_szc);
530 528 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
531 529 sz = MIN(pgsz, ptob(npages - pidx));
532 530 hat_memload_array(seg->s_as->a_hat, a, sz,
533 531 &ppa[pidx], sptd->spt_prot, hat_flags);
534 532 }
535 533
536 534 /*
537 535 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
538 536 * we will leave the pages locked SE_SHARED for the life
539 537 * of the ISM segment. This will prevent any calls to
540 538 * hat_pageunload() on this ISM segment for those platforms.
541 539 */
542 540 if (!(hat_flags & HAT_LOAD_LOCK)) {
543 541 /*
544 542 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
545 543 * we no longer need to hold the SE_SHARED lock on the pages,
546 544 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
547 545 * SE_SHARED lock on the pages as necessary.
548 546 */
549 547 for (i = 0; i < npages; i++)
550 548 page_unlock(ppa[i]);
551 549 }
552 550 sptd->spt_pcachecnt = 0;
553 551 kmem_free(ppa, ((sizeof (page_t *)) * npages));
554 552 sptd->spt_realsize = ptob(npages);
555 553 atomic_add_long(&spt_used, npages);
556 554 sptcargs->seg_spt = seg;
557 555 return (0);
558 556
559 557 out4:
560 558 seg->s_data = NULL;
561 559 kmem_free(vp, sizeof (*vp));
562 560 cv_destroy(&sptd->spt_cv);
563 561 out3:
564 562 mutex_destroy(&sptd->spt_lock);
565 563 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
566 564 kmem_free(ppa, (sizeof (*ppa) * npages));
567 565 out2:
568 566 kmem_free(sptd, sizeof (*sptd));
569 567 out1:
570 568 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
571 569 anon_swap_restore(npages);
572 570 return (err);
573 571 }
574 572
575 573 /*ARGSUSED*/
576 574 void
577 575 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
578 576 {
579 577 struct page *pp;
580 578 struct spt_data *sptd = (struct spt_data *)seg->s_data;
581 579 pgcnt_t npages;
582 580 ulong_t anon_idx;
583 581 struct anon_map *amp;
584 582 struct anon *ap;
585 583 struct vnode *vp;
586 584 u_offset_t off;
587 585 uint_t hat_flags;
588 586 int root = 0;
589 587 pgcnt_t pgs, curnpgs = 0;
590 588 page_t *rootpp;
591 589 rctl_qty_t unlocked_bytes = 0;
592 590 kproject_t *proj;
593 591 kshmid_t *sp;
594 592
595 593 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
596 594
597 595 len = P2ROUNDUP(len, PAGESIZE);
598 596
599 597 npages = btop(len);
600 598
601 599 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
602 600 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
603 601 (sptd->spt_flags & SHM_PAGEABLE)) {
604 602 hat_flags = HAT_UNLOAD_UNMAP;
605 603 }
606 604
607 605 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
608 606
609 607 amp = sptd->spt_amp;
610 608 if (sptd->spt_flags & SHM_PAGEABLE)
611 609 npages = btop(amp->size);
612 610
613 611 ASSERT(amp != NULL);
614 612
615 613 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
616 614 sp = amp->a_sp;
617 615 proj = sp->shm_perm.ipc_proj;
618 616 mutex_enter(&sp->shm_mlock);
619 617 }
620 618 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
621 619 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
622 620 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
623 621 panic("segspt_free_pages: null app");
624 622 /*NOTREACHED*/
625 623 }
626 624 } else {
627 625 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
628 626 == NULL)
629 627 continue;
630 628 }
631 629 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
632 630 swap_xlate(ap, &vp, &off);
633 631
634 632 /*
635 633 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
636 634 * the pages won't be having SE_SHARED lock at this
637 635 * point.
638 636 *
639 637 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
640 638 * the pages are still held SE_SHARED locked from the
641 639 * original segspt_create()
642 640 *
643 641 * Our goal is to get SE_EXCL lock on each page, remove
644 642 * permanent lock on it and invalidate the page.
645 643 */
646 644 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
647 645 if (hat_flags == HAT_UNLOAD_UNMAP)
648 646 pp = page_lookup(vp, off, SE_EXCL);
649 647 else {
650 648 if ((pp = page_find(vp, off)) == NULL) {
651 649 panic("segspt_free_pages: "
652 650 "page not locked");
653 651 /*NOTREACHED*/
654 652 }
655 653 if (!page_tryupgrade(pp)) {
656 654 page_unlock(pp);
657 655 pp = page_lookup(vp, off, SE_EXCL);
658 656 }
659 657 }
660 658 if (pp == NULL) {
661 659 panic("segspt_free_pages: "
662 660 "page not in the system");
663 661 /*NOTREACHED*/
664 662 }
665 663 ASSERT(pp->p_lckcnt > 0);
666 664 page_pp_unlock(pp, 0, 1);
667 665 if (pp->p_lckcnt == 0)
668 666 unlocked_bytes += PAGESIZE;
669 667 } else {
670 668 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
671 669 continue;
672 670 }
673 671 /*
674 672 * It's logical to invalidate the pages here as in most cases
675 673 * these were created by segspt.
676 674 */
677 675 if (pp->p_szc != 0) {
678 676 if (root == 0) {
679 677 ASSERT(curnpgs == 0);
680 678 root = 1;
681 679 rootpp = pp;
682 680 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
683 681 ASSERT(pgs > 1);
684 682 ASSERT(IS_P2ALIGNED(pgs, pgs));
685 683 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
686 684 curnpgs--;
687 685 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
688 686 ASSERT(curnpgs == 1);
689 687 ASSERT(page_pptonum(pp) ==
690 688 page_pptonum(rootpp) + (pgs - 1));
691 689 page_destroy_pages(rootpp);
692 690 root = 0;
693 691 curnpgs = 0;
694 692 } else {
695 693 ASSERT(curnpgs > 1);
696 694 ASSERT(page_pptonum(pp) ==
697 695 page_pptonum(rootpp) + (pgs - curnpgs));
698 696 curnpgs--;
699 697 }
700 698 } else {
701 699 if (root != 0 || curnpgs != 0) {
702 700 panic("segspt_free_pages: bad large page");
703 701 /*NOTREACHED*/
704 702 }
705 703 /*
706 704 * Before destroying the pages, we need to take care
707 705 * of the rctl locked memory accounting. For that
708 706 * we need to calculate the unlocked_bytes.
709 707 */
710 708 if (pp->p_lckcnt > 0)
711 709 unlocked_bytes += PAGESIZE;
712 710 /*LINTED: constant in conditional context */
713 711 VN_DISPOSE(pp, B_INVAL, 0, kcred);
714 712 }
715 713 }
716 714 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
717 715 if (unlocked_bytes > 0)
718 716 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
719 717 mutex_exit(&sp->shm_mlock);
720 718 }
721 719 if (root != 0 || curnpgs != 0) {
722 720 panic("segspt_free_pages: bad large page");
723 721 /*NOTREACHED*/
724 722 }
725 723
726 724 /*
727 725 * mark that pages have been released
728 726 */
729 727 sptd->spt_realsize = 0;
730 728
731 729 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
732 730 atomic_add_long(&spt_used, -npages);
733 731 anon_swap_restore(npages);
734 732 }
735 733 }
736 734
737 735 /*
738 736 * Get memory allocation policy info for specified address in given segment
739 737 */
740 738 static lgrp_mem_policy_info_t *
741 739 segspt_getpolicy(struct seg *seg, caddr_t addr)
742 740 {
743 741 struct anon_map *amp;
744 742 ulong_t anon_index;
745 743 lgrp_mem_policy_info_t *policy_info;
746 744 struct spt_data *spt_data;
747 745
748 746 ASSERT(seg != NULL);
749 747
750 748 /*
751 749 * Get anon_map from segspt
752 750 *
753 751 * Assume that no lock needs to be held on anon_map, since
754 752 * it should be protected by its reference count which must be
755 753 * nonzero for an existing segment
756 754 * Need to grab readers lock on policy tree though
757 755 */
758 756 spt_data = (struct spt_data *)seg->s_data;
759 757 if (spt_data == NULL)
760 758 return (NULL);
761 759 amp = spt_data->spt_amp;
762 760 ASSERT(amp->refcnt != 0);
763 761
764 762 /*
765 763 * Get policy info
766 764 *
767 765 * Assume starting anon index of 0
768 766 */
769 767 anon_index = seg_page(seg, addr);
770 768 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
771 769
772 770 return (policy_info);
773 771 }
774 772
775 773 /*
776 774 * DISM only.
777 775 * Return locked pages over a given range.
778 776 *
779 777 * We will cache all DISM locked pages and save the pplist for the
780 778 * entire segment in the ppa field of the underlying DISM segment structure.
781 779 * Later, during a call to segspt_reclaim() we will use this ppa array
782 780 * to page_unlock() all of the pages and then we will free this ppa list.
783 781 */
784 782 /*ARGSUSED*/
785 783 static int
786 784 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
787 785 struct page ***ppp, enum lock_type type, enum seg_rw rw)
788 786 {
789 787 struct shm_data *shmd = (struct shm_data *)seg->s_data;
790 788 struct seg *sptseg = shmd->shm_sptseg;
791 789 struct spt_data *sptd = sptseg->s_data;
792 790 pgcnt_t pg_idx, npages, tot_npages, npgs;
793 791 struct page **pplist, **pl, **ppa, *pp;
794 792 struct anon_map *amp;
795 793 spgcnt_t an_idx;
796 794 int ret = ENOTSUP;
797 795 uint_t pl_built = 0;
798 796 struct anon *ap;
799 797 struct vnode *vp;
800 798 u_offset_t off;
801 799 pgcnt_t claim_availrmem = 0;
802 800 uint_t szc;
803 801
804 802 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
805 803 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
806 804
807 805 /*
808 806 * We want to lock/unlock the entire ISM segment. Therefore,
809 807 * we will be using the underlying sptseg and its base address
810 808 * and length for the caching arguments.
811 809 */
812 810 ASSERT(sptseg);
813 811 ASSERT(sptd);
814 812
815 813 pg_idx = seg_page(seg, addr);
816 814 npages = btopr(len);
817 815
818 816 /*
819 817 * check if the request is larger than number of pages covered
820 818 * by amp
821 819 */
822 820 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
823 821 *ppp = NULL;
824 822 return (ENOTSUP);
825 823 }
826 824
827 825 if (type == L_PAGEUNLOCK) {
828 826 ASSERT(sptd->spt_ppa != NULL);
829 827
830 828 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
831 829 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
832 830
833 831 /*
834 832 * If someone is blocked while unmapping, we purge
835 833 * segment page cache and thus reclaim pplist synchronously
836 834 * without waiting for seg_pasync_thread. This speeds up
837 835 * unmapping in cases where munmap(2) is called, while
838 836 * raw async i/o is still in progress or where a thread
839 837 * exits on data fault in a multithreaded application.
840 838 */
841 839 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
842 840 (AS_ISUNMAPWAIT(seg->s_as) &&
843 841 shmd->shm_softlockcnt > 0)) {
844 842 segspt_purge(seg);
845 843 }
846 844 return (0);
847 845 }
848 846
849 847 /* The L_PAGELOCK case ... */
850 848
851 849 if (sptd->spt_flags & DISM_PPA_CHANGED) {
852 850 segspt_purge(seg);
853 851 /*
854 852 * for DISM the ppa needs to be rebuilt since the
855 853 * number of locked pages could have changed
856 854 */
857 855 *ppp = NULL;
858 856 return (ENOTSUP);
859 857 }
860 858
861 859 /*
862 860 * First try to find pages in segment page cache, without
863 861 * holding the segment lock.
864 862 */
865 863 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
866 864 S_WRITE, SEGP_FORCE_WIRED);
867 865 if (pplist != NULL) {
868 866 ASSERT(sptd->spt_ppa != NULL);
869 867 ASSERT(sptd->spt_ppa == pplist);
870 868 ppa = sptd->spt_ppa;
871 869 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
872 870 if (ppa[an_idx] == NULL) {
873 871 seg_pinactive(seg, NULL, seg->s_base,
874 872 sptd->spt_amp->size, ppa,
875 873 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
876 874 *ppp = NULL;
877 875 return (ENOTSUP);
878 876 }
879 877 if ((szc = ppa[an_idx]->p_szc) != 0) {
880 878 npgs = page_get_pagecnt(szc);
881 879 an_idx = P2ROUNDUP(an_idx + 1, npgs);
882 880 } else {
883 881 an_idx++;
884 882 }
885 883 }
886 884 /*
887 885 * Since we cache the entire DISM segment, we want to
888 886 * set ppp to point to the first slot that corresponds
889 887 * to the requested addr, i.e. pg_idx.
890 888 */
891 889 *ppp = &(sptd->spt_ppa[pg_idx]);
892 890 return (0);
893 891 }
894 892
895 893 mutex_enter(&sptd->spt_lock);
896 894 /*
897 895 * try to find pages in segment page cache with mutex
898 896 */
899 897 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
900 898 S_WRITE, SEGP_FORCE_WIRED);
901 899 if (pplist != NULL) {
902 900 ASSERT(sptd->spt_ppa != NULL);
903 901 ASSERT(sptd->spt_ppa == pplist);
904 902 ppa = sptd->spt_ppa;
905 903 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
906 904 if (ppa[an_idx] == NULL) {
907 905 mutex_exit(&sptd->spt_lock);
908 906 seg_pinactive(seg, NULL, seg->s_base,
909 907 sptd->spt_amp->size, ppa,
910 908 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
911 909 *ppp = NULL;
912 910 return (ENOTSUP);
913 911 }
914 912 if ((szc = ppa[an_idx]->p_szc) != 0) {
915 913 npgs = page_get_pagecnt(szc);
916 914 an_idx = P2ROUNDUP(an_idx + 1, npgs);
917 915 } else {
918 916 an_idx++;
919 917 }
920 918 }
921 919 /*
922 920 * Since we cache the entire DISM segment, we want to
923 921 * set ppp to point to the first slot that corresponds
924 922 * to the requested addr, i.e. pg_idx.
925 923 */
926 924 mutex_exit(&sptd->spt_lock);
927 925 *ppp = &(sptd->spt_ppa[pg_idx]);
928 926 return (0);
929 927 }
930 928 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
931 929 SEGP_FORCE_WIRED) == SEGP_FAIL) {
932 930 mutex_exit(&sptd->spt_lock);
933 931 *ppp = NULL;
934 932 return (ENOTSUP);
935 933 }
936 934
937 935 /*
938 936 * No need to worry about protections because DISM pages are always rw.
939 937 */
940 938 pl = pplist = NULL;
941 939 amp = sptd->spt_amp;
942 940
943 941 /*
944 942 * Do we need to build the ppa array?
945 943 */
946 944 if (sptd->spt_ppa == NULL) {
947 945 pgcnt_t lpg_cnt = 0;
948 946
949 947 pl_built = 1;
950 948 tot_npages = btopr(sptd->spt_amp->size);
951 949
952 950 ASSERT(sptd->spt_pcachecnt == 0);
953 951 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
954 952 pl = pplist;
955 953
956 954 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
957 955 for (an_idx = 0; an_idx < tot_npages; ) {
958 956 ap = anon_get_ptr(amp->ahp, an_idx);
959 957 /*
960 958 * Cache only mlocked pages. For large pages
961 959 * if one (constituent) page is mlocked
962 960 * all pages for that large page
963 961 * are cached also. This is for quick
964 962 * lookups of ppa array;
965 963 */
966 964 if ((ap != NULL) && (lpg_cnt != 0 ||
967 965 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
968 966
969 967 swap_xlate(ap, &vp, &off);
970 968 pp = page_lookup(vp, off, SE_SHARED);
971 969 ASSERT(pp != NULL);
972 970 if (lpg_cnt == 0) {
973 971 lpg_cnt++;
974 972 /*
975 973 * For a small page, we are done --
976 974 * lpg_count is reset to 0 below.
977 975 *
978 976 * For a large page, we are guaranteed
979 977 * to find the anon structures of all
980 978 * constituent pages and a non-zero
981 979 * lpg_cnt ensures that we don't test
982 980 * for mlock for these. We are done
983 981 * when lpg_count reaches (npgs + 1).
984 982 * If we are not the first constituent
985 983 * page, restart at the first one.
986 984 */
987 985 npgs = page_get_pagecnt(pp->p_szc);
988 986 if (!IS_P2ALIGNED(an_idx, npgs)) {
989 987 an_idx = P2ALIGN(an_idx, npgs);
990 988 page_unlock(pp);
991 989 continue;
992 990 }
993 991 }
994 992 if (++lpg_cnt > npgs)
995 993 lpg_cnt = 0;
996 994
997 995 /*
998 996 * availrmem is decremented only
999 997 * for unlocked pages
1000 998 */
1001 999 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1002 1000 claim_availrmem++;
1003 1001 pplist[an_idx] = pp;
1004 1002 }
1005 1003 an_idx++;
1006 1004 }
1007 1005 ANON_LOCK_EXIT(&->a_rwlock);
1008 1006
1009 1007 if (claim_availrmem) {
1010 1008 mutex_enter(&freemem_lock);
1011 1009 if (availrmem < tune.t_minarmem + claim_availrmem) {
1012 1010 mutex_exit(&freemem_lock);
1013 1011 ret = ENOTSUP;
1014 1012 claim_availrmem = 0;
1015 1013 goto insert_fail;
1016 1014 } else {
1017 1015 availrmem -= claim_availrmem;
1018 1016 }
1019 1017 mutex_exit(&freemem_lock);
1020 1018 }
1021 1019
1022 1020 sptd->spt_ppa = pl;
1023 1021 } else {
1024 1022 /*
1025 1023 * We already have a valid ppa[].
1026 1024 */
1027 1025 pl = sptd->spt_ppa;
1028 1026 }
1029 1027
1030 1028 ASSERT(pl != NULL);
1031 1029
1032 1030 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1033 1031 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1034 1032 segspt_reclaim);
1035 1033 if (ret == SEGP_FAIL) {
1036 1034 /*
1037 1035 * seg_pinsert failed. We return
1038 1036 * ENOTSUP, so that the as_pagelock() code will
1039 1037 * then try the slower F_SOFTLOCK path.
1040 1038 */
1041 1039 if (pl_built) {
1042 1040 /*
1043 1041 * No one else has referenced the ppa[].
1044 1042 * We created it and we need to destroy it.
1045 1043 */
1046 1044 sptd->spt_ppa = NULL;
1047 1045 }
1048 1046 ret = ENOTSUP;
1049 1047 goto insert_fail;
1050 1048 }
1051 1049
1052 1050 /*
1053 1051 * In either case, we increment softlockcnt on the 'real' segment.
1054 1052 */
1055 1053 sptd->spt_pcachecnt++;
1056 1054 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1057 1055
1058 1056 ppa = sptd->spt_ppa;
1059 1057 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1060 1058 if (ppa[an_idx] == NULL) {
1061 1059 mutex_exit(&sptd->spt_lock);
1062 1060 seg_pinactive(seg, NULL, seg->s_base,
1063 1061 sptd->spt_amp->size,
1064 1062 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1065 1063 *ppp = NULL;
1066 1064 return (ENOTSUP);
1067 1065 }
1068 1066 if ((szc = ppa[an_idx]->p_szc) != 0) {
1069 1067 npgs = page_get_pagecnt(szc);
1070 1068 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1071 1069 } else {
1072 1070 an_idx++;
1073 1071 }
1074 1072 }
1075 1073 /*
1076 1074 * We can now drop the sptd->spt_lock since the ppa[]
1077 1075 * exists and we have incremented pcachecnt.
1078 1076 */
1079 1077 mutex_exit(&sptd->spt_lock);
1080 1078
1081 1079 /*
1082 1080 * Since we cache the entire segment, we want to
1083 1081 * set ppp to point to the first slot that corresponds
1084 1082 * to the requested addr, i.e. pg_idx.
1085 1083 */
1086 1084 *ppp = &(sptd->spt_ppa[pg_idx]);
1087 1085 return (0);
1088 1086
1089 1087 insert_fail:
1090 1088 /*
1091 1089 * We will only reach this code if we tried and failed.
1092 1090 *
1093 1091 * And we can drop the lock on the dummy seg, once we've failed
1094 1092 * to set up a new ppa[].
1095 1093 */
1096 1094 mutex_exit(&sptd->spt_lock);
1097 1095
1098 1096 if (pl_built) {
1099 1097 if (claim_availrmem) {
1100 1098 mutex_enter(&freemem_lock);
1101 1099 availrmem += claim_availrmem;
1102 1100 mutex_exit(&freemem_lock);
1103 1101 }
1104 1102
1105 1103 /*
1106 1104 * We created pl and we need to destroy it.
1107 1105 */
1108 1106 pplist = pl;
1109 1107 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1110 1108 if (pplist[an_idx] != NULL)
1111 1109 page_unlock(pplist[an_idx]);
1112 1110 }
1113 1111 kmem_free(pl, sizeof (page_t *) * tot_npages);
1114 1112 }
1115 1113
1116 1114 if (shmd->shm_softlockcnt <= 0) {
1117 1115 if (AS_ISUNMAPWAIT(seg->s_as)) {
1118 1116 mutex_enter(&seg->s_as->a_contents);
1119 1117 if (AS_ISUNMAPWAIT(seg->s_as)) {
1120 1118 AS_CLRUNMAPWAIT(seg->s_as);
1121 1119 cv_broadcast(&seg->s_as->a_cv);
1122 1120 }
1123 1121 mutex_exit(&seg->s_as->a_contents);
1124 1122 }
1125 1123 }
1126 1124 *ppp = NULL;
1127 1125 return (ret);
1128 1126 }
1129 1127
1130 1128
1131 1129
1132 1130 /*
1133 1131 * return locked pages over a given range.
1134 1132 *
1135 1133 * We will cache the entire ISM segment and save the pplist for the
1136 1134 * entire segment in the ppa field of the underlying ISM segment structure.
1137 1135 * Later, during a call to segspt_reclaim() we will use this ppa array
1138 1136 * to page_unlock() all of the pages and then we will free this ppa list.
1139 1137 */
1140 1138 /*ARGSUSED*/
1141 1139 static int
1142 1140 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1143 1141 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1144 1142 {
1145 1143 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1146 1144 struct seg *sptseg = shmd->shm_sptseg;
1147 1145 struct spt_data *sptd = sptseg->s_data;
1148 1146 pgcnt_t np, page_index, npages;
1149 1147 caddr_t a, spt_base;
1150 1148 struct page **pplist, **pl, *pp;
1151 1149 struct anon_map *amp;
1152 1150 ulong_t anon_index;
1153 1151 int ret = ENOTSUP;
1154 1152 uint_t pl_built = 0;
1155 1153 struct anon *ap;
1156 1154 struct vnode *vp;
1157 1155 u_offset_t off;
1158 1156
1159 1157 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1160 1158 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1161 1159
1162 1160
1163 1161 /*
1164 1162 * We want to lock/unlock the entire ISM segment. Therefore,
1165 1163 * we will be using the underlying sptseg and its base address
1166 1164 * and length for the caching arguments.
1167 1165 */
1168 1166 ASSERT(sptseg);
1169 1167 ASSERT(sptd);
1170 1168
1171 1169 if (sptd->spt_flags & SHM_PAGEABLE) {
1172 1170 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1173 1171 }
1174 1172
1175 1173 page_index = seg_page(seg, addr);
1176 1174 npages = btopr(len);
1177 1175
1178 1176 /*
1179 1177 * check if the request is larger than number of pages covered
1180 1178 * by amp
1181 1179 */
1182 1180 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1183 1181 *ppp = NULL;
1184 1182 return (ENOTSUP);
1185 1183 }
1186 1184
1187 1185 if (type == L_PAGEUNLOCK) {
1188 1186
1189 1187 ASSERT(sptd->spt_ppa != NULL);
1190 1188
1191 1189 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1192 1190 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1193 1191
1194 1192 /*
1195 1193 * If someone is blocked while unmapping, we purge
1196 1194 * segment page cache and thus reclaim pplist synchronously
1197 1195 * without waiting for seg_pasync_thread. This speeds up
1198 1196 * unmapping in cases where munmap(2) is called, while
1199 1197 * raw async i/o is still in progress or where a thread
1200 1198 * exits on data fault in a multithreaded application.
1201 1199 */
1202 1200 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1203 1201 segspt_purge(seg);
1204 1202 }
1205 1203 return (0);
1206 1204 }
1207 1205
1208 1206 /* The L_PAGELOCK case... */
1209 1207
1210 1208 /*
1211 1209 * First try to find pages in segment page cache, without
1212 1210 * holding the segment lock.
1213 1211 */
1214 1212 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1215 1213 S_WRITE, SEGP_FORCE_WIRED);
1216 1214 if (pplist != NULL) {
1217 1215 ASSERT(sptd->spt_ppa == pplist);
1218 1216 ASSERT(sptd->spt_ppa[page_index]);
1219 1217 /*
1220 1218 * Since we cache the entire ISM segment, we want to
1221 1219 * set ppp to point to the first slot that corresponds
1222 1220 * to the requested addr, i.e. page_index.
1223 1221 */
1224 1222 *ppp = &(sptd->spt_ppa[page_index]);
1225 1223 return (0);
1226 1224 }
1227 1225
1228 1226 mutex_enter(&sptd->spt_lock);
1229 1227
1230 1228 /*
1231 1229 * try to find pages in segment page cache
1232 1230 */
1233 1231 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1234 1232 S_WRITE, SEGP_FORCE_WIRED);
1235 1233 if (pplist != NULL) {
1236 1234 ASSERT(sptd->spt_ppa == pplist);
1237 1235 /*
1238 1236 * Since we cache the entire segment, we want to
1239 1237 * set ppp to point to the first slot that corresponds
1240 1238 * to the requested addr, i.e. page_index.
1241 1239 */
1242 1240 mutex_exit(&sptd->spt_lock);
1243 1241 *ppp = &(sptd->spt_ppa[page_index]);
1244 1242 return (0);
1245 1243 }
1246 1244
1247 1245 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1248 1246 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1249 1247 mutex_exit(&sptd->spt_lock);
1250 1248 *ppp = NULL;
1251 1249 return (ENOTSUP);
1252 1250 }
1253 1251
1254 1252 /*
1255 1253 * No need to worry about protections because ISM pages
1256 1254 * are always rw.
1257 1255 */
1258 1256 pl = pplist = NULL;
1259 1257
1260 1258 /*
1261 1259 * Do we need to build the ppa array?
1262 1260 */
1263 1261 if (sptd->spt_ppa == NULL) {
1264 1262 ASSERT(sptd->spt_ppa == pplist);
1265 1263
1266 1264 spt_base = sptseg->s_base;
1267 1265 pl_built = 1;
1268 1266
1269 1267 /*
1270 1268 * availrmem is decremented once during anon_swap_adjust()
1271 1269 * and is incremented during the anon_unresv(), which is
1272 1270 * called from shm_rm_amp() when the segment is destroyed.
1273 1271 */
1274 1272 amp = sptd->spt_amp;
1275 1273 ASSERT(amp != NULL);
1276 1274
1277 1275 /* pcachecnt is protected by sptd->spt_lock */
1278 1276 ASSERT(sptd->spt_pcachecnt == 0);
1279 1277 pplist = kmem_zalloc(sizeof (page_t *)
1280 1278 * btopr(sptd->spt_amp->size), KM_SLEEP);
1281 1279 pl = pplist;
1282 1280
1283 1281 anon_index = seg_page(sptseg, spt_base);
1284 1282
1285 1283 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
1286 1284 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1287 1285 a += PAGESIZE, anon_index++, pplist++) {
1288 1286 ap = anon_get_ptr(amp->ahp, anon_index);
1289 1287 ASSERT(ap != NULL);
1290 1288 swap_xlate(ap, &vp, &off);
1291 1289 pp = page_lookup(vp, off, SE_SHARED);
1292 1290 ASSERT(pp != NULL);
1293 1291 *pplist = pp;
1294 1292 }
1295 1293 ANON_LOCK_EXIT(&->a_rwlock);
1296 1294
1297 1295 if (a < (spt_base + sptd->spt_amp->size)) {
1298 1296 ret = ENOTSUP;
1299 1297 goto insert_fail;
1300 1298 }
1301 1299 sptd->spt_ppa = pl;
1302 1300 } else {
1303 1301 /*
1304 1302 * We already have a valid ppa[].
1305 1303 */
1306 1304 pl = sptd->spt_ppa;
1307 1305 }
1308 1306
1309 1307 ASSERT(pl != NULL);
1310 1308
1311 1309 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1312 1310 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1313 1311 segspt_reclaim);
1314 1312 if (ret == SEGP_FAIL) {
1315 1313 /*
1316 1314 * seg_pinsert failed. We return
1317 1315 * ENOTSUP, so that the as_pagelock() code will
1318 1316 * then try the slower F_SOFTLOCK path.
1319 1317 */
1320 1318 if (pl_built) {
1321 1319 /*
1322 1320 * No one else has referenced the ppa[].
1323 1321 * We created it and we need to destroy it.
1324 1322 */
1325 1323 sptd->spt_ppa = NULL;
1326 1324 }
1327 1325 ret = ENOTSUP;
1328 1326 goto insert_fail;
1329 1327 }
1330 1328
1331 1329 /*
1332 1330 * In either case, we increment softlockcnt on the 'real' segment.
1333 1331 */
1334 1332 sptd->spt_pcachecnt++;
1335 1333 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1336 1334
1337 1335 /*
1338 1336 * We can now drop the sptd->spt_lock since the ppa[]
1339 1337 * exists and we have incremented pcachecnt.
1340 1338 */
1341 1339 mutex_exit(&sptd->spt_lock);
1342 1340
1343 1341 /*
1344 1342 * Since we cache the entire segment, we want to
1345 1343 * set ppp to point to the first slot that corresponds
1346 1344 * to the requested addr, i.e. page_index.
1347 1345 */
1348 1346 *ppp = &(sptd->spt_ppa[page_index]);
1349 1347 return (0);
1350 1348
1351 1349 insert_fail:
1352 1350 /*
1353 1351 * We will only reach this code if we tried and failed.
1354 1352 *
1355 1353 * And we can drop the lock on the dummy seg, once we've failed
1356 1354 * to set up a new ppa[].
1357 1355 */
1358 1356 mutex_exit(&sptd->spt_lock);
1359 1357
1360 1358 if (pl_built) {
1361 1359 /*
1362 1360 * We created pl and we need to destroy it.
1363 1361 */
1364 1362 pplist = pl;
1365 1363 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1366 1364 while (np) {
1367 1365 page_unlock(*pplist);
1368 1366 np--;
1369 1367 pplist++;
1370 1368 }
1371 1369 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1372 1370 }
1373 1371 if (shmd->shm_softlockcnt <= 0) {
1374 1372 if (AS_ISUNMAPWAIT(seg->s_as)) {
1375 1373 mutex_enter(&seg->s_as->a_contents);
1376 1374 if (AS_ISUNMAPWAIT(seg->s_as)) {
1377 1375 AS_CLRUNMAPWAIT(seg->s_as);
1378 1376 cv_broadcast(&seg->s_as->a_cv);
1379 1377 }
1380 1378 mutex_exit(&seg->s_as->a_contents);
1381 1379 }
1382 1380 }
1383 1381 *ppp = NULL;
1384 1382 return (ret);
1385 1383 }
1386 1384
1387 1385 /*
1388 1386 * purge any cached pages in the I/O page cache
1389 1387 */
1390 1388 static void
1391 1389 segspt_purge(struct seg *seg)
1392 1390 {
1393 1391 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1394 1392 }
1395 1393
1396 1394 static int
1397 1395 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1398 1396 enum seg_rw rw, int async)
1399 1397 {
1400 1398 struct seg *seg = (struct seg *)ptag;
1401 1399 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1402 1400 struct seg *sptseg;
1403 1401 struct spt_data *sptd;
1404 1402 pgcnt_t npages, i, free_availrmem = 0;
1405 1403 int done = 0;
1406 1404
1407 1405 #ifdef lint
1408 1406 addr = addr;
1409 1407 #endif
1410 1408 sptseg = shmd->shm_sptseg;
1411 1409 sptd = sptseg->s_data;
1412 1410 npages = (len >> PAGESHIFT);
1413 1411 ASSERT(npages);
1414 1412 ASSERT(sptd->spt_pcachecnt != 0);
1415 1413 ASSERT(sptd->spt_ppa == pplist);
1416 1414 ASSERT(npages == btopr(sptd->spt_amp->size));
1417 1415 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1418 1416
1419 1417 /*
1420 1418 * Acquire the lock on the dummy seg and destroy the
1421 1419 * ppa array IF this is the last pcachecnt.
1422 1420 */
1423 1421 mutex_enter(&sptd->spt_lock);
1424 1422 if (--sptd->spt_pcachecnt == 0) {
1425 1423 for (i = 0; i < npages; i++) {
1426 1424 if (pplist[i] == NULL) {
1427 1425 continue;
1428 1426 }
1429 1427 if (rw == S_WRITE) {
1430 1428 hat_setrefmod(pplist[i]);
1431 1429 } else {
1432 1430 hat_setref(pplist[i]);
1433 1431 }
1434 1432 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1435 1433 (sptd->spt_ppa_lckcnt[i] == 0))
1436 1434 free_availrmem++;
1437 1435 page_unlock(pplist[i]);
1438 1436 }
1439 1437 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1440 1438 mutex_enter(&freemem_lock);
1441 1439 availrmem += free_availrmem;
1442 1440 mutex_exit(&freemem_lock);
1443 1441 }
1444 1442 /*
1445 1443 * Since we want to cache/uncache the entire ISM segment,
1446 1444 * we will track the pplist in a segspt specific field
1447 1445 * ppa, that is initialized at the time we add an entry to
1448 1446 * the cache.
1449 1447 */
1450 1448 ASSERT(sptd->spt_pcachecnt == 0);
1451 1449 kmem_free(pplist, sizeof (page_t *) * npages);
1452 1450 sptd->spt_ppa = NULL;
1453 1451 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1454 1452 sptd->spt_gen++;
1455 1453 cv_broadcast(&sptd->spt_cv);
1456 1454 done = 1;
1457 1455 }
1458 1456 mutex_exit(&sptd->spt_lock);
1459 1457
1460 1458 /*
1461 1459 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1462 1460 * may not hold AS lock (in this case async argument is not 0). This
1463 1461 * means if softlockcnt drops to 0 after the decrement below address
1464 1462 * space may get freed. We can't allow it since after softlock
1465 1463 * decrement to 0 we still need to access as structure for possible
1466 1464 * wakeup of unmap waiters. To prevent the disappearance of as we take
1467 1465 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1468 1466 * this mutex as a barrier to make sure this routine completes before
1469 1467 * segment is freed.
1470 1468 *
1471 1469 * The second complication we have to deal with in async case is a
1472 1470 * possibility of missed wake up of unmap wait thread. When we don't
1473 1471 * hold as lock here we may take a_contents lock before unmap wait
1474 1472 * thread that was first to see softlockcnt was still not 0. As a
1475 1473 * result we'll fail to wake up an unmap wait thread. To avoid this
1476 1474 * race we set nounmapwait flag in as structure if we drop softlockcnt
1477 1475 * to 0 if async is not 0. unmapwait thread
1478 1476 * will not block if this flag is set.
1479 1477 */
1480 1478 if (async)
1481 1479 mutex_enter(&shmd->shm_segfree_syncmtx);
1482 1480
1483 1481 /*
1484 1482 * Now decrement softlockcnt.
1485 1483 */
1486 1484 ASSERT(shmd->shm_softlockcnt > 0);
1487 1485 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1488 1486
1489 1487 if (shmd->shm_softlockcnt <= 0) {
1490 1488 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1491 1489 mutex_enter(&seg->s_as->a_contents);
1492 1490 if (async)
1493 1491 AS_SETNOUNMAPWAIT(seg->s_as);
1494 1492 if (AS_ISUNMAPWAIT(seg->s_as)) {
1495 1493 AS_CLRUNMAPWAIT(seg->s_as);
1496 1494 cv_broadcast(&seg->s_as->a_cv);
1497 1495 }
1498 1496 mutex_exit(&seg->s_as->a_contents);
1499 1497 }
1500 1498 }
1501 1499
1502 1500 if (async)
1503 1501 mutex_exit(&shmd->shm_segfree_syncmtx);
1504 1502
1505 1503 return (done);
1506 1504 }
1507 1505
1508 1506 /*
1509 1507 * Do a F_SOFTUNLOCK call over the range requested.
1510 1508 * The range must have already been F_SOFTLOCK'ed.
1511 1509 *
1512 1510 * The calls to acquire and release the anon map lock mutex were
1513 1511 * removed in order to avoid a deadly embrace during a DR
1514 1512 * memory delete operation. (Eg. DR blocks while waiting for an
1515 1513 * exclusive lock on a page that is being used for kaio; the
1516 1514 * thread that will complete the kaio and call segspt_softunlock
1517 1515 * blocks on the anon map lock; another thread holding the anon
1518 1516 * map lock blocks on another page lock via the segspt_shmfault
1519 1517 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1520 1518 *
1521 1519 * The appropriateness of the removal is based upon the following:
1522 1520 * 1. If we are holding a segment's reader lock and the page is held
1523 1521 * shared, then the corresponding element in anonmap which points to
1524 1522 * anon struct cannot change and there is no need to acquire the
1525 1523 * anonymous map lock.
1526 1524 * 2. Threads in segspt_softunlock have a reader lock on the segment
1527 1525 * and already have the shared page lock, so we are guaranteed that
1528 1526 * the anon map slot cannot change and therefore can call anon_get_ptr()
1529 1527 * without grabbing the anonymous map lock.
1530 1528 * 3. Threads that softlock a shared page break copy-on-write, even if
1531 1529 * its a read. Thus cow faults can be ignored with respect to soft
1532 1530 * unlocking, since the breaking of cow means that the anon slot(s) will
1533 1531 * not be shared.
1534 1532 */
1535 1533 static void
1536 1534 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1537 1535 size_t len, enum seg_rw rw)
1538 1536 {
1539 1537 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1540 1538 struct seg *sptseg;
1541 1539 struct spt_data *sptd;
1542 1540 page_t *pp;
1543 1541 caddr_t adr;
1544 1542 struct vnode *vp;
1545 1543 u_offset_t offset;
1546 1544 ulong_t anon_index;
1547 1545 struct anon_map *amp; /* XXX - for locknest */
1548 1546 struct anon *ap = NULL;
1549 1547 pgcnt_t npages;
1550 1548
1551 1549 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1552 1550
1553 1551 sptseg = shmd->shm_sptseg;
1554 1552 sptd = sptseg->s_data;
1555 1553
1556 1554 /*
1557 1555 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1558 1556 * and therefore their pages are SE_SHARED locked
1559 1557 * for the entire life of the segment.
1560 1558 */
1561 1559 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1562 1560 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1563 1561 goto softlock_decrement;
1564 1562 }
1565 1563
1566 1564 /*
1567 1565 * Any thread is free to do a page_find and
1568 1566 * page_unlock() on the pages within this seg.
1569 1567 *
1570 1568 * We are already holding the as->a_lock on the user's
1571 1569 * real segment, but we need to hold the a_lock on the
1572 1570 * underlying dummy as. This is mostly to satisfy the
1573 1571 * underlying HAT layer.
1574 1572 */
1575 1573 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1576 1574 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1577 1575 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1578 1576
1579 1577 amp = sptd->spt_amp;
1580 1578 ASSERT(amp != NULL);
1581 1579 anon_index = seg_page(sptseg, sptseg_addr);
1582 1580
1583 1581 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1584 1582 ap = anon_get_ptr(amp->ahp, anon_index++);
1585 1583 ASSERT(ap != NULL);
1586 1584 swap_xlate(ap, &vp, &offset);
1587 1585
1588 1586 /*
1589 1587 * Use page_find() instead of page_lookup() to
1590 1588 * find the page since we know that it has a
1591 1589 * "shared" lock.
1592 1590 */
1593 1591 pp = page_find(vp, offset);
1594 1592 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1595 1593 if (pp == NULL) {
1596 1594 panic("segspt_softunlock: "
1597 1595 "addr %p, ap %p, vp %p, off %llx",
1598 1596 (void *)adr, (void *)ap, (void *)vp, offset);
1599 1597 /*NOTREACHED*/
1600 1598 }
1601 1599
1602 1600 if (rw == S_WRITE) {
1603 1601 hat_setrefmod(pp);
1604 1602 } else if (rw != S_OTHER) {
1605 1603 hat_setref(pp);
1606 1604 }
1607 1605 page_unlock(pp);
1608 1606 }
1609 1607
1610 1608 softlock_decrement:
1611 1609 npages = btopr(len);
1612 1610 ASSERT(shmd->shm_softlockcnt >= npages);
1613 1611 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1614 1612 if (shmd->shm_softlockcnt == 0) {
1615 1613 /*
1616 1614 * All SOFTLOCKS are gone. Wakeup any waiting
1617 1615 * unmappers so they can try again to unmap.
1618 1616 * Check for waiters first without the mutex
1619 1617 * held so we don't always grab the mutex on
1620 1618 * softunlocks.
1621 1619 */
1622 1620 if (AS_ISUNMAPWAIT(seg->s_as)) {
1623 1621 mutex_enter(&seg->s_as->a_contents);
1624 1622 if (AS_ISUNMAPWAIT(seg->s_as)) {
1625 1623 AS_CLRUNMAPWAIT(seg->s_as);
1626 1624 cv_broadcast(&seg->s_as->a_cv);
1627 1625 }
1628 1626 mutex_exit(&seg->s_as->a_contents);
1629 1627 }
1630 1628 }
1631 1629 }
1632 1630
1633 1631 int
1634 1632 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1635 1633 {
1636 1634 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1637 1635 struct shm_data *shmd;
1638 1636 struct anon_map *shm_amp = shmd_arg->shm_amp;
1639 1637 struct spt_data *sptd;
1640 1638 int error = 0;
1641 1639
1642 1640 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1643 1641
1644 1642 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1645 1643 if (shmd == NULL)
1646 1644 return (ENOMEM);
1647 1645
1648 1646 shmd->shm_sptas = shmd_arg->shm_sptas;
1649 1647 shmd->shm_amp = shm_amp;
1650 1648 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1651 1649
1652 1650 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1653 1651 NULL, 0, seg->s_size);
1654 1652
1655 1653 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1656 1654
1657 1655 seg->s_data = (void *)shmd;
1658 1656 seg->s_ops = &segspt_shmops;
1659 1657 seg->s_szc = shmd->shm_sptseg->s_szc;
1660 1658 sptd = shmd->shm_sptseg->s_data;
1661 1659
1662 1660 if (sptd->spt_flags & SHM_PAGEABLE) {
1663 1661 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1664 1662 KM_NOSLEEP)) == NULL) {
1665 1663 seg->s_data = (void *)NULL;
1666 1664 kmem_free(shmd, (sizeof (*shmd)));
1667 1665 return (ENOMEM);
1668 1666 }
1669 1667 shmd->shm_lckpgs = 0;
1670 1668 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1671 1669 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1672 1670 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1673 1671 seg->s_size, seg->s_szc)) != 0) {
1674 1672 kmem_free(shmd->shm_vpage,
1675 1673 btopr(shm_amp->size));
1676 1674 }
1677 1675 }
1678 1676 } else {
1679 1677 error = hat_share(seg->s_as->a_hat, seg->s_base,
1680 1678 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1681 1679 seg->s_size, seg->s_szc);
1682 1680 }
1683 1681 if (error) {
1684 1682 seg->s_szc = 0;
1685 1683 seg->s_data = (void *)NULL;
1686 1684 kmem_free(shmd, (sizeof (*shmd)));
1687 1685 } else {
1688 1686 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1689 1687 shm_amp->refcnt++;
1690 1688 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1691 1689 }
1692 1690 return (error);
1693 1691 }
1694 1692
1695 1693 int
1696 1694 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1697 1695 {
1698 1696 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1699 1697 int reclaim = 1;
1700 1698
1701 1699 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1702 1700 retry:
1703 1701 if (shmd->shm_softlockcnt > 0) {
1704 1702 if (reclaim == 1) {
1705 1703 segspt_purge(seg);
1706 1704 reclaim = 0;
1707 1705 goto retry;
1708 1706 }
1709 1707 return (EAGAIN);
1710 1708 }
1711 1709
1712 1710 if (ssize != seg->s_size) {
1713 1711 #ifdef DEBUG
1714 1712 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1715 1713 ssize, seg->s_size);
1716 1714 #endif
1717 1715 return (EINVAL);
1718 1716 }
1719 1717
1720 1718 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1721 1719 NULL, 0);
1722 1720 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1723 1721
1724 1722 seg_free(seg);
1725 1723
1726 1724 return (0);
1727 1725 }
1728 1726
1729 1727 void
1730 1728 segspt_shmfree(struct seg *seg)
1731 1729 {
1732 1730 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1733 1731 struct anon_map *shm_amp = shmd->shm_amp;
1734 1732
1735 1733 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1736 1734
1737 1735 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1738 1736 MC_UNLOCK, NULL, 0);
1739 1737
1740 1738 /*
1741 1739 * Need to increment refcnt when attaching
1742 1740 * and decrement when detaching because of dup().
1743 1741 */
1744 1742 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1745 1743 shm_amp->refcnt--;
1746 1744 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1747 1745
1748 1746 if (shmd->shm_vpage) { /* only for DISM */
1749 1747 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1750 1748 shmd->shm_vpage = NULL;
1751 1749 }
1752 1750
1753 1751 /*
1754 1752 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1755 1753 * still working with this segment without holding as lock.
1756 1754 */
1757 1755 ASSERT(shmd->shm_softlockcnt == 0);
1758 1756 mutex_enter(&shmd->shm_segfree_syncmtx);
1759 1757 mutex_destroy(&shmd->shm_segfree_syncmtx);
1760 1758
1761 1759 kmem_free(shmd, sizeof (*shmd));
1762 1760 }
1763 1761
1764 1762 /*ARGSUSED*/
1765 1763 int
1766 1764 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1767 1765 {
1768 1766 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1769 1767
1770 1768 /*
1771 1769 * Shared page table is more than shared mapping.
1772 1770 * Individual process sharing page tables can't change prot
1773 1771 * because there is only one set of page tables.
1774 1772 * This will be allowed after private page table is
1775 1773 * supported.
1776 1774 */
1777 1775 /* need to return correct status error? */
1778 1776 return (0);
1779 1777 }
1780 1778
1781 1779
1782 1780 faultcode_t
1783 1781 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1784 1782 size_t len, enum fault_type type, enum seg_rw rw)
1785 1783 {
1786 1784 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1787 1785 struct seg *sptseg = shmd->shm_sptseg;
1788 1786 struct as *curspt = shmd->shm_sptas;
1789 1787 struct spt_data *sptd = sptseg->s_data;
1790 1788 pgcnt_t npages;
1791 1789 size_t size;
1792 1790 caddr_t segspt_addr, shm_addr;
1793 1791 page_t **ppa;
1794 1792 int i;
1795 1793 ulong_t an_idx = 0;
1796 1794 int err = 0;
1797 1795 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1798 1796 size_t pgsz;
1799 1797 pgcnt_t pgcnt;
1800 1798 caddr_t a;
1801 1799 pgcnt_t pidx;
1802 1800
1803 1801 #ifdef lint
1804 1802 hat = hat;
1805 1803 #endif
1806 1804 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1807 1805
1808 1806 /*
1809 1807 * Because of the way spt is implemented
1810 1808 * the realsize of the segment does not have to be
1811 1809 * equal to the segment size itself. The segment size is
1812 1810 * often in multiples of a page size larger than PAGESIZE.
1813 1811 * The realsize is rounded up to the nearest PAGESIZE
1814 1812 * based on what the user requested. This is a bit of
1815 1813 * ugliness that is historical but not easily fixed
1816 1814 * without re-designing the higher levels of ISM.
1817 1815 */
1818 1816 ASSERT(addr >= seg->s_base);
1819 1817 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1820 1818 return (FC_NOMAP);
1821 1819 /*
1822 1820 * For all of the following cases except F_PROT, we need to
1823 1821 * make any necessary adjustments to addr and len
1824 1822 * and get all of the necessary page_t's into an array called ppa[].
1825 1823 *
1826 1824 * The code in shmat() forces base addr and len of ISM segment
1827 1825 * to be aligned to largest page size supported. Therefore,
1828 1826 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1829 1827 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1830 1828 * in large pagesize chunks, or else we will screw up the HAT
1831 1829 * layer by calling hat_memload_array() with differing page sizes
1832 1830 * over a given virtual range.
1833 1831 */
1834 1832 pgsz = page_get_pagesize(sptseg->s_szc);
1835 1833 pgcnt = page_get_pagecnt(sptseg->s_szc);
1836 1834 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1837 1835 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1838 1836 npages = btopr(size);
1839 1837
1840 1838 /*
1841 1839 * Now we need to convert from addr in segshm to addr in segspt.
1842 1840 */
1843 1841 an_idx = seg_page(seg, shm_addr);
1844 1842 segspt_addr = sptseg->s_base + ptob(an_idx);
1845 1843
1846 1844 ASSERT((segspt_addr + ptob(npages)) <=
1847 1845 (sptseg->s_base + sptd->spt_realsize));
1848 1846 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1849 1847
1850 1848 switch (type) {
1851 1849
1852 1850 case F_SOFTLOCK:
1853 1851
1854 1852 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1855 1853 /*
1856 1854 * Fall through to the F_INVAL case to load up the hat layer
1857 1855 * entries with the HAT_LOAD_LOCK flag.
1858 1856 */
1859 1857 /* FALLTHRU */
1860 1858 case F_INVAL:
1861 1859
1862 1860 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1863 1861 return (FC_NOMAP);
1864 1862
1865 1863 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1866 1864
1867 1865 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1868 1866 if (err != 0) {
1869 1867 if (type == F_SOFTLOCK) {
1870 1868 atomic_add_long((ulong_t *)(
1871 1869 &(shmd->shm_softlockcnt)), -npages);
1872 1870 }
1873 1871 goto dism_err;
1874 1872 }
1875 1873 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1876 1874 a = segspt_addr;
1877 1875 pidx = 0;
1878 1876 if (type == F_SOFTLOCK) {
1879 1877
1880 1878 /*
1881 1879 * Load up the translation keeping it
1882 1880 * locked and don't unlock the page.
1883 1881 */
1884 1882 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1885 1883 hat_memload_array(sptseg->s_as->a_hat,
1886 1884 a, pgsz, &ppa[pidx], sptd->spt_prot,
1887 1885 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1888 1886 }
1889 1887 } else {
1890 1888 /*
1891 1889 * Migrate pages marked for migration
1892 1890 */
1893 1891 if (lgrp_optimizations())
1894 1892 page_migrate(seg, shm_addr, ppa, npages);
1895 1893
1896 1894 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1897 1895 hat_memload_array(sptseg->s_as->a_hat,
1898 1896 a, pgsz, &ppa[pidx],
1899 1897 sptd->spt_prot,
1900 1898 HAT_LOAD_SHARE);
1901 1899 }
1902 1900
1903 1901 /*
1904 1902 * And now drop the SE_SHARED lock(s).
1905 1903 */
1906 1904 if (dyn_ism_unmap) {
1907 1905 for (i = 0; i < npages; i++) {
1908 1906 page_unlock(ppa[i]);
1909 1907 }
1910 1908 }
1911 1909 }
1912 1910
1913 1911 if (!dyn_ism_unmap) {
1914 1912 if (hat_share(seg->s_as->a_hat, shm_addr,
1915 1913 curspt->a_hat, segspt_addr, ptob(npages),
1916 1914 seg->s_szc) != 0) {
1917 1915 panic("hat_share err in DISM fault");
1918 1916 /* NOTREACHED */
1919 1917 }
1920 1918 if (type == F_INVAL) {
1921 1919 for (i = 0; i < npages; i++) {
1922 1920 page_unlock(ppa[i]);
1923 1921 }
1924 1922 }
1925 1923 }
1926 1924 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1927 1925 dism_err:
1928 1926 kmem_free(ppa, npages * sizeof (page_t *));
1929 1927 return (err);
1930 1928
1931 1929 case F_SOFTUNLOCK:
1932 1930
1933 1931 /*
1934 1932 * This is a bit ugly, we pass in the real seg pointer,
1935 1933 * but the segspt_addr is the virtual address within the
1936 1934 * dummy seg.
1937 1935 */
1938 1936 segspt_softunlock(seg, segspt_addr, size, rw);
1939 1937 return (0);
1940 1938
1941 1939 case F_PROT:
1942 1940
1943 1941 /*
1944 1942 * This takes care of the unusual case where a user
1945 1943 * allocates a stack in shared memory and a register
1946 1944 * window overflow is written to that stack page before
1947 1945 * it is otherwise modified.
1948 1946 *
1949 1947 * We can get away with this because ISM segments are
1950 1948 * always rw. Other than this unusual case, there
1951 1949 * should be no instances of protection violations.
1952 1950 */
1953 1951 return (0);
1954 1952
1955 1953 default:
1956 1954 #ifdef DEBUG
1957 1955 panic("segspt_dismfault default type?");
1958 1956 #else
1959 1957 return (FC_NOMAP);
1960 1958 #endif
1961 1959 }
1962 1960 }
1963 1961
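Editor's note: both fault handlers above align the faulting address down and round the length up to the ISM large-page size so that hat_memload_array() always sees whole large-page chunks. A stand-alone sketch of that arithmetic with made-up values; ALIGN_DOWN/ROUND_UP only mirror the effect of P2ALIGN()/P2ROUNDUP() for power-of-two sizes, and the 4 MB page size is just an example:

#include <stdint.h>
#include <stdio.h>

/* Power-of-two align-down / round-up, equivalent in effect to
 * P2ALIGN() and P2ROUNDUP() for power-of-two alignments. */
#define ALIGN_DOWN(x, a)        ((x) & ~((uintptr_t)(a) - 1))
#define ROUND_UP(x, a)          (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void)
{
        uintptr_t addr = 0x40123000;    /* faulting address (example) */
        size_t len = 0x2000;            /* fault length (example) */
        size_t pgsz = 4 * 1024 * 1024;  /* example large-page size */

        uintptr_t shm_addr = ALIGN_DOWN(addr, pgsz);
        size_t size = ROUND_UP(addr + len - shm_addr, pgsz);

        /* Prints shm_addr=0x40000000 size=0x400000: the whole 4 MB
         * chunk containing [addr, addr + len) is handled at once. */
        printf("shm_addr=0x%lx size=0x%zx\n", (unsigned long)shm_addr, size);
        return (0);
}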
1964 1962
1965 1963 faultcode_t
1966 1964 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1967 1965 size_t len, enum fault_type type, enum seg_rw rw)
1968 1966 {
1969 1967 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1970 1968 struct seg *sptseg = shmd->shm_sptseg;
1971 1969 struct as *curspt = shmd->shm_sptas;
1972 1970 struct spt_data *sptd = sptseg->s_data;
1973 1971 pgcnt_t npages;
1974 1972 size_t size;
1975 1973 caddr_t sptseg_addr, shm_addr;
1976 1974 page_t *pp, **ppa;
1977 1975 int i;
1978 1976 u_offset_t offset;
1979 1977 ulong_t anon_index = 0;
1980 1978 struct vnode *vp;
1981 1979 struct anon_map *amp; /* XXX - for locknest */
1982 1980 struct anon *ap = NULL;
1983 1981 size_t pgsz;
1984 1982 pgcnt_t pgcnt;
1985 1983 caddr_t a;
1986 1984 pgcnt_t pidx;
1987 1985 size_t sz;
1988 1986
1989 1987 #ifdef lint
1990 1988 hat = hat;
1991 1989 #endif
1992 1990
1993 1991 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1994 1992
1995 1993 if (sptd->spt_flags & SHM_PAGEABLE) {
1996 1994 return (segspt_dismfault(hat, seg, addr, len, type, rw));
1997 1995 }
1998 1996
1999 1997 /*
2000 1998 * Because of the way spt is implemented
2001 1999 * the realsize of the segment does not have to be
2002 2000 * equal to the segment size itself. The segment size is
2003 2001 * often in multiples of a page size larger than PAGESIZE.
2004 2002 * The realsize is rounded up to the nearest PAGESIZE
2005 2003 * based on what the user requested. This is a bit of
2006 2004 * ugliness that is historical but not easily fixed
2007 2005 * without re-designing the higher levels of ISM.
2008 2006 */
2009 2007 ASSERT(addr >= seg->s_base);
2010 2008 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2011 2009 return (FC_NOMAP);
2012 2010 /*
2013 2011 * For all of the following cases except F_PROT, we need to
2014 2012 * make any necessary adjustments to addr and len
2015 2013 * and get all of the necessary page_t's into an array called ppa[].
2016 2014 *
2017 2015 * The code in shmat() forces base addr and len of ISM segment
2018 2016 * to be aligned to largest page size supported. Therefore,
2019 2017 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2020 2018 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2021 2019 * in large pagesize chunks, or else we will screw up the HAT
2022 2020 * layer by calling hat_memload_array() with differing page sizes
2023 2021 * over a given virtual range.
2024 2022 */
2025 2023 pgsz = page_get_pagesize(sptseg->s_szc);
2026 2024 pgcnt = page_get_pagecnt(sptseg->s_szc);
2027 2025 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2028 2026 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2029 2027 npages = btopr(size);
2030 2028
2031 2029 /*
2032 2030 * Now we need to convert from addr in segshm to addr in segspt.
2033 2031 */
2034 2032 anon_index = seg_page(seg, shm_addr);
2035 2033 sptseg_addr = sptseg->s_base + ptob(anon_index);
2036 2034
2037 2035 /*
2038 2036 * And now we may have to adjust npages downward if we have
2039 2037 * exceeded the realsize of the segment or initial anon
2040 2038 * allocations.
2041 2039 */
2042 2040 if ((sptseg_addr + ptob(npages)) >
2043 2041 (sptseg->s_base + sptd->spt_realsize))
2044 2042 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2045 2043
2046 2044 npages = btopr(size);
2047 2045
2048 2046 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2049 2047 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2050 2048
2051 2049 switch (type) {
2052 2050
2053 2051 case F_SOFTLOCK:
2054 2052
2055 2053 /*
2056 2054 * availrmem is decremented once during anon_swap_adjust()
2057 2055 * and is incremented during the anon_unresv(), which is
2058 2056 * called from shm_rm_amp() when the segment is destroyed.
2059 2057 */
2060 2058 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2061 2059 /*
2062 2060 * Some platforms assume that ISM pages are SE_SHARED
2063 2061 * locked for the entire life of the segment.
2064 2062 */
2065 2063 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2066 2064 return (0);
2067 2065 /*
2068 2066 * Fall through to the F_INVAL case to load up the hat layer
2069 2067 * entries with the HAT_LOAD_LOCK flag.
2070 2068 */
2071 2069
2072 2070 /* FALLTHRU */
2073 2071 case F_INVAL:
2074 2072
2075 2073 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2076 2074 return (FC_NOMAP);
2077 2075
2078 2076 /*
2079 2077 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2080 2078 * may still rely on this call to hat_share(). That
2081 2079 * would imply that those hats can fault on a
2082 2080 * HAT_LOAD_LOCK translation, which would seem
2083 2081 * contradictory.
2084 2082 */
2085 2083 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2086 2084 if (hat_share(seg->s_as->a_hat, seg->s_base,
2087 2085 curspt->a_hat, sptseg->s_base,
2088 2086 sptseg->s_size, sptseg->s_szc) != 0) {
2089 2087 panic("hat_share error in ISM fault");
2090 2088 /*NOTREACHED*/
2091 2089 }
2092 2090 return (0);
2093 2091 }
2094 2092 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2095 2093
2096 2094 /*
2097 2095 * I see no need to lock the real seg,
2098 2096 * here, because all of our work will be on the underlying
2099 2097 * dummy seg.
2100 2098 *
2101 2099 * sptseg_addr and npages now account for large pages.
2102 2100 */
2103 2101 amp = sptd->spt_amp;
2104 2102 ASSERT(amp != NULL);
2105 2103 anon_index = seg_page(sptseg, sptseg_addr);
2106 2104
2107 2105 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2108 2106 for (i = 0; i < npages; i++) {
2109 2107 ap = anon_get_ptr(amp->ahp, anon_index++);
2110 2108 ASSERT(ap != NULL);
2111 2109 swap_xlate(ap, &vp, &offset);
2112 2110 pp = page_lookup(vp, offset, SE_SHARED);
2113 2111 ASSERT(pp != NULL);
2114 2112 ppa[i] = pp;
2115 2113 }
2116 2114 ANON_LOCK_EXIT(&amp->a_rwlock);
2117 2115 ASSERT(i == npages);
2118 2116
2119 2117 /*
2120 2118 * We are already holding the as->a_lock on the user's
2121 2119 * real segment, but we need to hold the a_lock on the
2122 2120 * underlying dummy as. This is mostly to satisfy the
2123 2121 * underlying HAT layer.
2124 2122 */
2125 2123 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2126 2124 a = sptseg_addr;
2127 2125 pidx = 0;
2128 2126 if (type == F_SOFTLOCK) {
2129 2127 /*
2130 2128 * Load up the translation keeping it
2131 2129 * locked and don't unlock the page.
2132 2130 */
2133 2131 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2134 2132 sz = MIN(pgsz, ptob(npages - pidx));
2135 2133 hat_memload_array(sptseg->s_as->a_hat, a,
2136 2134 sz, &ppa[pidx], sptd->spt_prot,
2137 2135 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2138 2136 }
2139 2137 } else {
2140 2138 /*
2141 2139 * Migrate pages marked for migration.
2142 2140 */
2143 2141 if (lgrp_optimizations())
2144 2142 page_migrate(seg, shm_addr, ppa, npages);
2145 2143
2146 2144 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2147 2145 sz = MIN(pgsz, ptob(npages - pidx));
2148 2146 hat_memload_array(sptseg->s_as->a_hat,
2149 2147 a, sz, &ppa[pidx],
2150 2148 sptd->spt_prot, HAT_LOAD_SHARE);
2151 2149 }
2152 2150
2153 2151 /*
2154 2152 * And now drop the SE_SHARED lock(s).
2155 2153 */
2156 2154 for (i = 0; i < npages; i++)
2157 2155 page_unlock(ppa[i]);
2158 2156 }
2159 2157 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2160 2158
2161 2159 kmem_free(ppa, sizeof (page_t *) * npages);
2162 2160 return (0);
2163 2161 case F_SOFTUNLOCK:
2164 2162
2165 2163 /*
2166 2164 * This is a bit ugly, we pass in the real seg pointer,
2167 2165 * but the sptseg_addr is the virtual address within the
2168 2166 * dummy seg.
2169 2167 */
2170 2168 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2171 2169 return (0);
2172 2170
2173 2171 case F_PROT:
2174 2172
2175 2173 /*
2176 2174 * This takes care of the unusual case where a user
2177 2175 * allocates a stack in shared memory and a register
2178 2176 * window overflow is written to that stack page before
2179 2177 * it is otherwise modified.
2180 2178 *
2181 2179 * We can get away with this because ISM segments are
2182 2180 * always rw. Other than this unusual case, there
2183 2181 * should be no instances of protection violations.
2184 2182 */
2185 2183 return (0);
2186 2184
2187 2185 default:
2188 2186 #ifdef DEBUG
2189 2187 cmn_err(CE_WARN, "segspt_shmfault default type?");
2190 2188 #endif
2191 2189 return (FC_NOMAP);
2192 2190 }
2193 2191 }
2194 2192
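Editor's note: a small sketch of the tail clipping used above: once size has been clamped to spt_realsize, the last pass of the memload loop covers only the remaining pages via MIN(pgsz, ptob(npages - pidx)). The values below are illustrative (4 KB PAGESIZE, 4 MB large page), not taken from the kernel:

#include <stddef.h>
#include <stdio.h>

#define PAGESIZE        4096
#define MIN(a, b)       ((a) < (b) ? (a) : (b))

int
main(void)
{
        size_t pgsz = 4 * 1024 * 1024;          /* example large-page size */
        size_t pgcnt = pgsz / PAGESIZE;         /* 1024 small pages per chunk */
        size_t npages = 1536;                   /* page count after clipping */

        /* Mirror the loop structure: full chunks first, a short tail last. */
        for (size_t pidx = 0; pidx < npages; pidx += pgcnt) {
                size_t sz = MIN(pgsz, (npages - pidx) * PAGESIZE);
                printf("load %zu bytes at page index %zu\n", sz, pidx);
        }
        return (0);
}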
2195 2193 /*ARGSUSED*/
2196 2194 static faultcode_t
2197 2195 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2198 2196 {
2199 2197 return (0);
2200 2198 }
2201 2199
2202 2200 /*ARGSUSED*/
2203 2201 static int
2204 2202 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2205 2203 {
2206 2204 return (0);
2207 2205 }
2208 2206
2209 2207 /*
2210 2208 * duplicate the shared page tables
2211 2209 */
2212 2210 int
2213 2211 segspt_shmdup(struct seg *seg, struct seg *newseg)
2214 2212 {
2215 2213 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2216 2214 struct anon_map *amp = shmd->shm_amp;
2217 2215 struct shm_data *shmd_new;
2218 2216 struct seg *spt_seg = shmd->shm_sptseg;
2219 2217 struct spt_data *sptd = spt_seg->s_data;
2220 2218 int error = 0;
2221 2219
2222 2220 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2223 2221
2224 2222 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2225 2223 newseg->s_data = (void *)shmd_new;
2226 2224 shmd_new->shm_sptas = shmd->shm_sptas;
2227 2225 shmd_new->shm_amp = amp;
2228 2226 shmd_new->shm_sptseg = shmd->shm_sptseg;
2229 2227 newseg->s_ops = &segspt_shmops;
2230 2228 newseg->s_szc = seg->s_szc;
2231 2229 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2232 2230
2233 2231 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2234 2232 amp->refcnt++;
2235 2233 ANON_LOCK_EXIT(&amp->a_rwlock);
2236 2234
2237 2235 if (sptd->spt_flags & SHM_PAGEABLE) {
2238 2236 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2239 2237 shmd_new->shm_lckpgs = 0;
2240 2238 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2241 2239 if ((error = hat_share(newseg->s_as->a_hat,
2242 2240 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2243 2241 seg->s_size, seg->s_szc)) != 0) {
2244 2242 kmem_free(shmd_new->shm_vpage,
2245 2243 btopr(amp->size));
2246 2244 }
2247 2245 }
2248 2246 return (error);
2249 2247 } else {
2250 2248 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2251 2249 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2252 2250 seg->s_szc));
2253 2251
2254 2252 }
2255 2253 }
2256 2254
2257 2255 /*ARGSUSED*/
2258 2256 int
2259 2257 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2260 2258 {
2261 2259 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2262 2260 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2263 2261
2264 2262 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2265 2263
2266 2264 /*
2267 2265 * ISM segment is always rw.
2268 2266 */
2269 2267 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2270 2268 }
2271 2269
2272 2270 /*
2273 2271 * Return an array of locked large pages, for empty slots allocate
2274 2272 * private zero-filled anon pages.
2275 2273 */
2276 2274 static int
2277 2275 spt_anon_getpages(
2278 2276 struct seg *sptseg,
2279 2277 caddr_t sptaddr,
2280 2278 size_t len,
2281 2279 page_t *ppa[])
2282 2280 {
2283 2281 struct spt_data *sptd = sptseg->s_data;
2284 2282 struct anon_map *amp = sptd->spt_amp;
2285 2283 enum seg_rw rw = sptd->spt_prot;
2286 2284 uint_t szc = sptseg->s_szc;
2287 2285 size_t pg_sz, share_sz = page_get_pagesize(szc);
2288 2286 pgcnt_t lp_npgs;
2289 2287 caddr_t lp_addr, e_sptaddr;
2290 2288 uint_t vpprot, ppa_szc = 0;
2291 2289 struct vpage *vpage = NULL;
2292 2290 ulong_t j, ppa_idx;
2293 2291 int err, ierr = 0;
2294 2292 pgcnt_t an_idx;
2295 2293 anon_sync_obj_t cookie;
2296 2294 int anon_locked = 0;
2297 2295 pgcnt_t amp_pgs;
2298 2296
2299 2297
2300 2298 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2301 2299 ASSERT(len != 0);
2302 2300
2303 2301 pg_sz = share_sz;
2304 2302 lp_npgs = btop(pg_sz);
2305 2303 lp_addr = sptaddr;
2306 2304 e_sptaddr = sptaddr + len;
2307 2305 an_idx = seg_page(sptseg, sptaddr);
2308 2306 ppa_idx = 0;
2309 2307
2310 2308 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2311 2309
2312 2310 amp_pgs = page_get_pagecnt(amp->a_szc);
2313 2311
2314 2312 /*CONSTCOND*/
2315 2313 while (1) {
2316 2314 for (; lp_addr < e_sptaddr;
2317 2315 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2318 2316
2319 2317 /*
2320 2318 * If we're currently locked, and we get to a new
2321 2319 * page, unlock our current anon chunk.
2322 2320 */
2323 2321 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2324 2322 anon_array_exit(&cookie);
2325 2323 anon_locked = 0;
2326 2324 }
2327 2325 if (!anon_locked) {
2328 2326 anon_array_enter(amp, an_idx, &cookie);
2329 2327 anon_locked = 1;
2330 2328 }
2331 2329 ppa_szc = (uint_t)-1;
2332 2330 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2333 2331 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2334 2332 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2335 2333
2336 2334 if (ierr != 0) {
2337 2335 if (ierr > 0) {
2338 2336 err = FC_MAKE_ERR(ierr);
2339 2337 goto lpgs_err;
2340 2338 }
2341 2339 break;
2342 2340 }
2343 2341 }
2344 2342 if (lp_addr == e_sptaddr) {
2345 2343 break;
2346 2344 }
2347 2345 ASSERT(lp_addr < e_sptaddr);
2348 2346
2349 2347 /*
2350 2348 * ierr == -1 means we failed to allocate a large page.
2351 2349 * so do a size down operation.
2352 2350 *
2353 2351 * ierr == -2 means some other process that privately shares
2354 2352 * pages with this process has allocated a larger page and we
2355 2353 * need to retry with larger pages. So do a size up
2356 2354 * operation. This relies on the fact that large pages are
2357 2355 * never partially shared i.e. if we share any constituent
2358 2356 * page of a large page with another process we must share the
2359 2357 * entire large page. Note this cannot happen for SOFTLOCK
2360 2358 * case, unless current address (lpaddr) is at the beginning
2361 2359 * of the next page size boundary because the other process
2362 2360 * couldn't have relocated locked pages.
2363 2361 */
2364 2362 ASSERT(ierr == -1 || ierr == -2);
2365 2363 if (segvn_anypgsz) {
2366 2364 ASSERT(ierr == -2 || szc != 0);
2367 2365 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2368 2366 szc = (ierr == -1) ? szc - 1 : szc + 1;
2369 2367 } else {
2370 2368 /*
2371 2369 * For faults and segvn_anypgsz == 0
2372 2370 * we need to be careful not to loop forever
2373 2371 * if existing page is found with szc other
2374 2372 * than 0 or seg->s_szc. This could be due
2375 2373 * to page relocations on behalf of DR or
2376 2374 * more likely large page creation. For this
2377 2375 * case simply re-size to existing page's szc
2378 2376 * if returned by anon_map_getpages().
2379 2377 */
2380 2378 if (ppa_szc == (uint_t)-1) {
2381 2379 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2382 2380 } else {
2383 2381 ASSERT(ppa_szc <= sptseg->s_szc);
2384 2382 ASSERT(ierr == -2 || ppa_szc < szc);
2385 2383 ASSERT(ierr == -1 || ppa_szc > szc);
2386 2384 szc = ppa_szc;
2387 2385 }
2388 2386 }
2389 2387 pg_sz = page_get_pagesize(szc);
2390 2388 lp_npgs = btop(pg_sz);
2391 2389 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2392 2390 }
2393 2391 if (anon_locked) {
2394 2392 anon_array_exit(&cookie);
2395 2393 }
2396 2394 ANON_LOCK_EXIT(&amp->a_rwlock);
2397 2395 return (0);
2398 2396
2399 2397 lpgs_err:
2400 2398 if (anon_locked) {
2401 2399 anon_array_exit(&cookie);
2402 2400 }
2403 2401 ANON_LOCK_EXIT(&amp->a_rwlock);
2404 2402 for (j = 0; j < ppa_idx; j++)
2405 2403 page_unlock(ppa[j]);
2406 2404 return (err);
2407 2405 }
2408 2406
2409 2407 /*
2410 2408 * count the number of bytes in a set of spt pages that are currently not
2411 2409 * locked
2412 2410 */
2413 2411 static rctl_qty_t
2414 2412 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2415 2413 {
2416 2414 ulong_t i;
2417 2415 rctl_qty_t unlocked = 0;
2418 2416
2419 2417 for (i = 0; i < npages; i++) {
2420 2418 if (ppa[i]->p_lckcnt == 0)
2421 2419 unlocked += PAGESIZE;
2422 2420 }
2423 2421 return (unlocked);
2424 2422 }
2425 2423
2426 2424 extern u_longlong_t randtick(void);
2427 2425 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2428 2426 #define NLCK (NCPU_P2)
2429 2427 /* Random number with a range [0, n-1], n must be power of two */
2430 2428 #define RAND_P2(n) \
2431 2429 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2432 2430
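Editor's note: RAND_P2() works only because its argument is a power of two, so masking with (n - 1) keeps the result in [0, n-1]; spt_lockpages() below then draws nlck from [NLCK, 2*NLCK - 1]. A tiny sketch of the same masking, using ordinary libc time sources as a stand-in for curthread/randtick():

#include <stdio.h>
#include <time.h>

/*
 * Same idea as RAND_P2(): mix cheap per-call entropy and mask with (n - 1).
 * The mask keeps the result in [0, n-1] only when n is a power of two.
 */
static unsigned long
rand_p2(unsigned long n)
{
        return (((unsigned long)clock() ^ (unsigned long)time(NULL)) & (n - 1));
}

int
main(void)
{
        unsigned long nlck_base = 8;    /* stand-in for NLCK */

        /* The batch size drawn below lands in [nlck_base, 2 * nlck_base - 1]. */
        unsigned long nlck = nlck_base + rand_p2(nlck_base);
        printf("nlck = %lu\n", nlck);
        return (0);
}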
2433 2431 int
2434 2432 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2435 2433 page_t **ppa, ulong_t *lockmap, size_t pos,
2436 2434 rctl_qty_t *locked)
2437 2435 {
2438 2436 struct shm_data *shmd = seg->s_data;
2439 2437 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2440 2438 ulong_t i;
2441 2439 int kernel;
2442 2440 pgcnt_t nlck = 0;
2443 2441 int rv = 0;
2444 2442 int use_reserved = 1;
2445 2443
2446 2444 /* return the number of bytes actually locked */
2447 2445 *locked = 0;
2448 2446
2449 2447 /*
2450 2448 * To avoid contention on freemem_lock, availrmem and pages_locked
2451 2449 * global counters are updated only every nlck locked pages instead of
2452 2450 * every time. Reserve nlck locks up front and deduct from this
2453 2451 * reservation for each page that requires a lock. When the reservation
2454 2452 * is consumed, reserve again. nlck is randomized, so the competing
2455 2453 * threads do not fall into a cyclic lock contention pattern. When
2456 2454 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2457 2455 * is used to lock pages.
2458 2456 */
2459 2457 for (i = 0; i < npages; anon_index++, pos++, i++) {
2460 2458 if (nlck == 0 && use_reserved == 1) {
2461 2459 nlck = NLCK + RAND_P2(NLCK);
2462 2460 /* if fewer loops left, decrease nlck */
2463 2461 nlck = MIN(nlck, npages - i);
2464 2462 /*
2465 2463 * Reserve nlck locks up front and deduct from this
2466 2464 * reservation for each page that requires a lock. When
2467 2465 * the reservation is consumed, reserve again.
2468 2466 */
2469 2467 mutex_enter(&freemem_lock);
2470 2468 if ((availrmem - nlck) < pages_pp_maximum) {
2471 2469 /* Do not do advance memory reserves */
2472 2470 use_reserved = 0;
2473 2471 } else {
2474 2472 availrmem -= nlck;
2475 2473 pages_locked += nlck;
2476 2474 }
2477 2475 mutex_exit(&freemem_lock);
2478 2476 }
2479 2477 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2480 2478 if (sptd->spt_ppa_lckcnt[anon_index] <
2481 2479 (ushort_t)DISM_LOCK_MAX) {
2482 2480 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2483 2481 (ushort_t)DISM_LOCK_MAX) {
2484 2482 cmn_err(CE_WARN,
2485 2483 "DISM page lock limit "
2486 2484 "reached on DISM offset 0x%lx\n",
2487 2485 anon_index << PAGESHIFT);
2488 2486 }
2489 2487 kernel = (sptd->spt_ppa &&
2490 2488 sptd->spt_ppa[anon_index]);
2491 2489 if (!page_pp_lock(ppa[i], 0, kernel ||
2492 2490 use_reserved)) {
2493 2491 sptd->spt_ppa_lckcnt[anon_index]--;
2494 2492 rv = EAGAIN;
2495 2493 break;
2496 2494 }
2497 2495 /* if this is a newly locked page, count it */
2498 2496 if (ppa[i]->p_lckcnt == 1) {
2499 2497 if (kernel == 0 && use_reserved == 1)
2500 2498 nlck--;
2501 2499 *locked += PAGESIZE;
2502 2500 }
2503 2501 shmd->shm_lckpgs++;
2504 2502 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2505 2503 if (lockmap != NULL)
2506 2504 BT_SET(lockmap, pos);
2507 2505 }
2508 2506 }
2509 2507 }
2510 2508 /* Return unused lock reservation */
2511 2509 if (nlck != 0 && use_reserved == 1) {
2512 2510 mutex_enter(&freemem_lock);
2513 2511 availrmem += nlck;
2514 2512 pages_locked -= nlck;
2515 2513 mutex_exit(&freemem_lock);
2516 2514 }
2517 2515
2518 2516 return (rv);
2519 2517 }
2520 2518
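Editor's note: as a rough worked example of the batching above, if NLCK were 8 each reservation would fall in [8, 15] (about 11.5 on average), so locking 1,000 pages would take freemem_lock on the order of 90 times, plus one final adjustment to return any unused reservation, rather than once per page.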
2521 2519 int
2522 2520 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2523 2521 rctl_qty_t *unlocked)
2524 2522 {
2525 2523 struct shm_data *shmd = seg->s_data;
2526 2524 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2527 2525 struct anon_map *amp = sptd->spt_amp;
2528 2526 struct anon *ap;
2529 2527 struct vnode *vp;
2530 2528 u_offset_t off;
2531 2529 struct page *pp;
2532 2530 int kernel;
2533 2531 anon_sync_obj_t cookie;
2534 2532 ulong_t i;
2535 2533 pgcnt_t nlck = 0;
2536 2534 pgcnt_t nlck_limit = NLCK;
2537 2535
2538 2536 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2539 2537 for (i = 0; i < npages; i++, anon_index++) {
2540 2538 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2541 2539 anon_array_enter(amp, anon_index, &cookie);
2542 2540 ap = anon_get_ptr(amp->ahp, anon_index);
2543 2541 ASSERT(ap);
2544 2542
2545 2543 swap_xlate(ap, &vp, &off);
2546 2544 anon_array_exit(&cookie);
2547 2545 pp = page_lookup(vp, off, SE_SHARED);
2548 2546 ASSERT(pp);
2549 2547 /*
2550 2548 * availrmem is decremented only for pages which are not
2551 2549 * in seg pcache, for pages in seg pcache availrmem was
2552 2550 * decremented in _dismpagelock()
2553 2551 */
2554 2552 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2555 2553 ASSERT(pp->p_lckcnt > 0);
2556 2554
2557 2555 /*
2558 2556 * unlock the page but do not change availrmem; we do it
2559 2557 * ourselves every nlck loops.
2560 2558 */
2561 2559 page_pp_unlock(pp, 0, 1);
2562 2560 if (pp->p_lckcnt == 0) {
2563 2561 if (kernel == 0)
2564 2562 nlck++;
2565 2563 *unlocked += PAGESIZE;
2566 2564 }
2567 2565 page_unlock(pp);
2568 2566 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2569 2567 sptd->spt_ppa_lckcnt[anon_index]--;
2570 2568 shmd->shm_lckpgs--;
2571 2569 }
2572 2570
2573 2571 /*
2574 2572 * To reduce freemem_lock contention, do not update availrmem
2575 2573 * until at least NLCK pages have been unlocked.
2576 2574 * 1. No need to update if nlck is zero
2577 2575 * 2. Always update on the last iteration
2578 2576 */
2579 2577 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2580 2578 mutex_enter(&freemem_lock);
2581 2579 availrmem += nlck;
2582 2580 pages_locked -= nlck;
2583 2581 mutex_exit(&freemem_lock);
2584 2582 nlck = 0;
2585 2583 nlck_limit = NLCK + RAND_P2(NLCK);
2586 2584 }
2587 2585 }
2588 2586 ANON_LOCK_EXIT(&amp->a_rwlock);
2589 2587
2590 2588 return (0);
2591 2589 }
2592 2590
2593 2591 /*ARGSUSED*/
2594 2592 static int
2595 2593 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2596 2594 int attr, int op, ulong_t *lockmap, size_t pos)
2597 2595 {
2598 2596 struct shm_data *shmd = seg->s_data;
2599 2597 struct seg *sptseg = shmd->shm_sptseg;
2600 2598 struct spt_data *sptd = sptseg->s_data;
2601 2599 struct kshmid *sp = sptd->spt_amp->a_sp;
2602 2600 pgcnt_t npages, a_npages;
2603 2601 page_t **ppa;
2604 2602 pgcnt_t an_idx, a_an_idx, ppa_idx;
2605 2603 caddr_t spt_addr, a_addr; /* spt and aligned address */
2606 2604 size_t a_len; /* aligned len */
2607 2605 size_t share_sz;
2608 2606 ulong_t i;
2609 2607 int sts = 0;
2610 2608 rctl_qty_t unlocked = 0;
2611 2609 rctl_qty_t locked = 0;
2612 2610 struct proc *p = curproc;
2613 2611 kproject_t *proj;
2614 2612
2615 2613 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2616 2614 ASSERT(sp != NULL);
2617 2615
2618 2616 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2619 2617 return (0);
2620 2618 }
2621 2619
2622 2620 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2623 2621 an_idx = seg_page(seg, addr);
2624 2622 npages = btopr(len);
2625 2623
2626 2624 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2627 2625 return (ENOMEM);
2628 2626 }
2629 2627
2630 2628 /*
2631 2629 * A shm's project never changes, so no lock needed.
2632 2630 * The shm has a hold on the project, so it will not go away.
2633 2631 * Since we have a mapping to shm within this zone, we know
2634 2632 * that the zone will not go away.
2635 2633 */
2636 2634 proj = sp->shm_perm.ipc_proj;
2637 2635
2638 2636 if (op == MC_LOCK) {
2639 2637
2640 2638 /*
2641 2639 * Need to align the addr and size request if they are not
2642 2640 * aligned, so we can always allocate large page(s); however,
2643 2641 * we only lock what was requested in the initial request.
2644 2642 */
2645 2643 share_sz = page_get_pagesize(sptseg->s_szc);
2646 2644 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2647 2645 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2648 2646 share_sz);
2649 2647 a_npages = btop(a_len);
2650 2648 a_an_idx = seg_page(seg, a_addr);
2651 2649 spt_addr = sptseg->s_base + ptob(a_an_idx);
2652 2650 ppa_idx = an_idx - a_an_idx;
2653 2651
2654 2652 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2655 2653 KM_NOSLEEP)) == NULL) {
2656 2654 return (ENOMEM);
2657 2655 }
2658 2656
2659 2657 /*
2660 2658 * Don't cache any new pages for IO and
2661 2659 * flush any cached pages.
2662 2660 */
2663 2661 mutex_enter(&sptd->spt_lock);
2664 2662 if (sptd->spt_ppa != NULL)
2665 2663 sptd->spt_flags |= DISM_PPA_CHANGED;
2666 2664
2667 2665 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2668 2666 if (sts != 0) {
2669 2667 mutex_exit(&sptd->spt_lock);
2670 2668 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2671 2669 return (sts);
2672 2670 }
2673 2671
2674 2672 mutex_enter(&sp->shm_mlock);
2675 2673 /* enforce locked memory rctl */
2676 2674 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2677 2675
2678 2676 mutex_enter(&p->p_lock);
2679 2677 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2680 2678 mutex_exit(&p->p_lock);
2681 2679 sts = EAGAIN;
2682 2680 } else {
2683 2681 mutex_exit(&p->p_lock);
2684 2682 sts = spt_lockpages(seg, an_idx, npages,
2685 2683 &ppa[ppa_idx], lockmap, pos, &locked);
2686 2684
2687 2685 /*
2688 2686 * correct locked count if not all pages could be
2689 2687 * locked
2690 2688 */
2691 2689 if ((unlocked - locked) > 0) {
2692 2690 rctl_decr_locked_mem(NULL, proj,
2693 2691 (unlocked - locked), 0);
2694 2692 }
2695 2693 }
2696 2694 /*
2697 2695 * unlock pages
2698 2696 */
2699 2697 for (i = 0; i < a_npages; i++)
2700 2698 page_unlock(ppa[i]);
2701 2699 if (sptd->spt_ppa != NULL)
2702 2700 sptd->spt_flags |= DISM_PPA_CHANGED;
2703 2701 mutex_exit(&sp->shm_mlock);
2704 2702 mutex_exit(&sptd->spt_lock);
2705 2703
2706 2704 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2707 2705
2708 2706 } else if (op == MC_UNLOCK) { /* unlock */
2709 2707 page_t **ppa;
2710 2708
2711 2709 mutex_enter(&sptd->spt_lock);
2712 2710 if (shmd->shm_lckpgs == 0) {
2713 2711 mutex_exit(&sptd->spt_lock);
2714 2712 return (0);
2715 2713 }
2716 2714 /*
2717 2715 * Don't cache new IO pages.
2718 2716 */
2719 2717 if (sptd->spt_ppa != NULL)
2720 2718 sptd->spt_flags |= DISM_PPA_CHANGED;
2721 2719
2722 2720 mutex_enter(&sp->shm_mlock);
2723 2721 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2724 2722 if ((ppa = sptd->spt_ppa) != NULL)
2725 2723 sptd->spt_flags |= DISM_PPA_CHANGED;
2726 2724 mutex_exit(&sptd->spt_lock);
2727 2725
2728 2726 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2729 2727 mutex_exit(&sp->shm_mlock);
2730 2728
2731 2729 if (ppa != NULL)
2732 2730 seg_ppurge_wiredpp(ppa);
2733 2731 }
2734 2732 return (sts);
2735 2733 }
2736 2734
2737 2735 /*ARGSUSED*/
2738 2736 int
2739 2737 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2740 2738 {
2741 2739 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2742 2740 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2743 2741 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2744 2742
2745 2743 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2746 2744
2747 2745 /*
2748 2746 * ISM segment is always rw.
2749 2747 */
2750 2748 while (--pgno >= 0)
2751 2749 *protv++ = sptd->spt_prot;
2752 2750 return (0);
2753 2751 }
2754 2752
2755 2753 /*ARGSUSED*/
2756 2754 u_offset_t
2757 2755 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2758 2756 {
2759 2757 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2760 2758
2761 2759 /* Offset does not matter in ISM memory */
2762 2760
2763 2761 return ((u_offset_t)0);
2764 2762 }
2765 2763
2766 2764 /* ARGSUSED */
2767 2765 int
2768 2766 segspt_shmgettype(struct seg *seg, caddr_t addr)
2769 2767 {
2770 2768 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2771 2769 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2772 2770
2773 2771 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2774 2772
2775 2773 /*
2776 2774 * The shared memory mapping is always MAP_SHARED, SWAP is only
2777 2775 * reserved for DISM
2778 2776 */
2779 2777 return (MAP_SHARED |
2780 2778 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2781 2779 }
2782 2780
2783 2781 /*ARGSUSED*/
2784 2782 int
2785 2783 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2786 2784 {
2787 2785 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2788 2786 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2789 2787
2790 2788 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2791 2789
2792 2790 *vpp = sptd->spt_vp;
2793 2791 return (0);
2794 2792 }
2795 2793
2796 2794 /*
2797 2795 * We need to wait for pending IO to complete to a DISM segment in order for
2798 2796 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2799 2797 * than enough time to wait.
2800 2798 */
2801 2799 static clock_t spt_pcache_wait = 120;
2802 2800
2803 2801 /*ARGSUSED*/
2804 2802 static int
2805 2803 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2806 2804 {
2807 2805 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2808 2806 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2809 2807 struct anon_map *amp;
2810 2808 pgcnt_t pg_idx;
2811 2809 ushort_t gen;
2812 2810 clock_t end_lbolt;
2813 2811 int writer;
2814 2812 page_t **ppa;
2815 2813
2816 2814 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2817 2815
2818 2816 if (behav == MADV_FREE) {
2819 2817 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2820 2818 return (0);
2821 2819
2822 2820 amp = sptd->spt_amp;
2823 2821 pg_idx = seg_page(seg, addr);
2824 2822
2825 2823 mutex_enter(&sptd->spt_lock);
2826 2824 if ((ppa = sptd->spt_ppa) == NULL) {
2827 2825 mutex_exit(&sptd->spt_lock);
2828 2826 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2829 2827 anon_disclaim(amp, pg_idx, len);
2830 2828 ANON_LOCK_EXIT(&amp->a_rwlock);
2831 2829 return (0);
2832 2830 }
2833 2831
2834 2832 sptd->spt_flags |= DISM_PPA_CHANGED;
2835 2833 gen = sptd->spt_gen;
2836 2834
2837 2835 mutex_exit(&sptd->spt_lock);
2838 2836
2839 2837 /*
2840 2838 * Purge all DISM cached pages
2841 2839 */
2842 2840 seg_ppurge_wiredpp(ppa);
2843 2841
2844 2842 /*
2845 2843 * Drop the AS_LOCK so that other threads can grab it
2846 2844 * in the as_pageunlock path and hopefully get the segment
2847 2845 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2848 2846 * to keep this segment resident.
2849 2847 */
2850 2848 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2851 2849 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2852 2850 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2853 2851
2854 2852 mutex_enter(&sptd->spt_lock);
2855 2853
2856 2854 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2857 2855
2858 2856 /*
2859 2857 * Try to wait for pages to get kicked out of the seg_pcache.
2860 2858 */
2861 2859 while (sptd->spt_gen == gen &&
2862 2860 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2863 2861 ddi_get_lbolt() < end_lbolt) {
2864 2862 if (!cv_timedwait_sig(&sptd->spt_cv,
2865 2863 &sptd->spt_lock, end_lbolt)) {
2866 2864 break;
2867 2865 }
2868 2866 }
2869 2867
2870 2868 mutex_exit(&sptd->spt_lock);
2871 2869
2872 2870 /* Regrab the AS_LOCK and release our hold on the segment */
2873 2871 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2874 2872 writer ? RW_WRITER : RW_READER);
2875 2873 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2876 2874 if (shmd->shm_softlockcnt <= 0) {
2877 2875 if (AS_ISUNMAPWAIT(seg->s_as)) {
2878 2876 mutex_enter(&seg->s_as->a_contents);
2879 2877 if (AS_ISUNMAPWAIT(seg->s_as)) {
2880 2878 AS_CLRUNMAPWAIT(seg->s_as);
2881 2879 cv_broadcast(&seg->s_as->a_cv);
2882 2880 }
2883 2881 mutex_exit(&seg->s_as->a_contents);
2884 2882 }
2885 2883 }
2886 2884
2887 2885 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2888 2886 anon_disclaim(amp, pg_idx, len);
2889 2887 ANON_LOCK_EXIT(&amp->a_rwlock);
2890 2888 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2891 2889 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2892 2890 int already_set;
2893 2891 ulong_t anon_index;
2894 2892 lgrp_mem_policy_t policy;
2895 2893 caddr_t shm_addr;
2896 2894 size_t share_size;
2897 2895 size_t size;
2898 2896 struct seg *sptseg = shmd->shm_sptseg;
2899 2897 caddr_t sptseg_addr;
2900 2898
2901 2899 /*
2902 2900 * Align address and length to page size of underlying segment
2903 2901 */
2904 2902 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2905 2903 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2906 2904 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2907 2905 share_size);
2908 2906
2909 2907 amp = shmd->shm_amp;
2910 2908 anon_index = seg_page(seg, shm_addr);
2911 2909
2912 2910 /*
2913 2911 * And now we may have to adjust size downward if we have
2914 2912 * exceeded the realsize of the segment or initial anon
2915 2913 * allocations.
2916 2914 */
2917 2915 sptseg_addr = sptseg->s_base + ptob(anon_index);
2918 2916 if ((sptseg_addr + size) >
2919 2917 (sptseg->s_base + sptd->spt_realsize))
2920 2918 size = (sptseg->s_base + sptd->spt_realsize) -
2921 2919 sptseg_addr;
2922 2920
2923 2921 /*
2924 2922 * Set memory allocation policy for this segment
2925 2923 */
2926 2924 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2927 2925 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2928 2926 NULL, 0, len);
2929 2927
2930 2928 /*
2931 2929 * If random memory allocation policy set already,
2932 2930 * don't bother reapplying it.
2933 2931 */
2934 2932 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2935 2933 return (0);
2936 2934
2937 2935 /*
2938 2936 * Mark any existing pages in the given range for
2939 2937 * migration, flushing the I/O page cache, and using
2940 2938 * underlying segment to calculate anon index and get
2941 2939 * anonmap and vnode pointer from
2942 2940 */
2943 2941 if (shmd->shm_softlockcnt > 0)
2944 2942 segspt_purge(seg);
2945 2943
2946 2944 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2947 2945 }
2948 2946
2949 2947 return (0);
2950 -}
2951 -
2952 -/*ARGSUSED*/
2953 -void
2954 -segspt_shmdump(struct seg *seg)
2955 -{
2956 - /* no-op for ISM segment */
2957 2948 }
2958 2949
2959 2950 /*
2960 2951 * get a memory ID for an addr in a given segment
2961 2952 */
2962 2953 static int
2963 2954 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2964 2955 {
2965 2956 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2966 2957 struct anon *ap;
2967 2958 size_t anon_index;
2968 2959 struct anon_map *amp = shmd->shm_amp;
2969 2960 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2970 2961 struct seg *sptseg = shmd->shm_sptseg;
2971 2962 anon_sync_obj_t cookie;
2972 2963
2973 2964 anon_index = seg_page(seg, addr);
2974 2965
2975 2966 if (addr > (seg->s_base + sptd->spt_realsize)) {
2976 2967 return (EFAULT);
2977 2968 }
2978 2969
2979 2970 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2980 2971 anon_array_enter(amp, anon_index, &cookie);
2981 2972 ap = anon_get_ptr(amp->ahp, anon_index);
2982 2973 if (ap == NULL) {
2983 2974 struct page *pp;
2984 2975 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2985 2976
2986 2977 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2987 2978 if (pp == NULL) {
2988 2979 anon_array_exit(&cookie);
2989 2980 ANON_LOCK_EXIT(&amp->a_rwlock);
2990 2981 return (ENOMEM);
2991 2982 }
2992 2983 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
2993 2984 page_unlock(pp);
2994 2985 }
2995 2986 anon_array_exit(&cookie);
2996 2987 ANON_LOCK_EXIT(&amp->a_rwlock);
2997 2988 memidp->val[0] = (uintptr_t)ap;
2998 2989 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
2999 2990 return (0);
3000 2991 }
3001 2992
3002 2993 /*
3003 2994 * Get memory allocation policy info for specified address in given segment
3004 2995 */
3005 2996 static lgrp_mem_policy_info_t *
3006 2997 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3007 2998 {
3008 2999 struct anon_map *amp;
3009 3000 ulong_t anon_index;
3010 3001 lgrp_mem_policy_info_t *policy_info;
3011 3002 struct shm_data *shm_data;
3012 3003
3013 3004 ASSERT(seg != NULL);
3014 3005
3015 3006 /*
3016 3007 * Get anon_map from segshm
3017 3008 *
3018 3009 * Assume that no lock needs to be held on anon_map, since
3019 3010 * it should be protected by its reference count which must be
3020 3011 * nonzero for an existing segment
3021 3012 * Need to grab readers lock on policy tree though
3022 3013 */
3023 3014 shm_data = (struct shm_data *)seg->s_data;
3024 3015 if (shm_data == NULL)
3025 3016 return (NULL);
3026 3017 amp = shm_data->shm_amp;
3027 3018 ASSERT(amp->refcnt != 0);
3028 3019
3029 3020 /*
3030 3021 * Get policy info
3031 3022 *
3032 3023 * Assume starting anon index of 0
3033 3024 */
3034 3025 anon_index = seg_page(seg, addr);
3035 3026 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3036 3027
3037 3028 return (policy_info);
3038 3029 }