use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as a "no capabilities
supported" shorthand.
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem.
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 struct seg_ops segspt_ops = {
80 80 .unmap = segspt_unmap,
81 81 .free = segspt_free,
82 82 .getpolicy = segspt_getpolicy,
83 83 };
84 84
85 85 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
86 86 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
87 87 static void segspt_shmfree(struct seg *seg);
88 88 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
89 89 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
90 90 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
91 91 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
92 92 register size_t len, register uint_t prot);
93 93 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
94 94 uint_t prot);
95 95 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
96 96 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
97 97 register char *vec);
98 98 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
99 99 int attr, uint_t flags);
100 100 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
101 101 int attr, int op, ulong_t *lockmap, size_t pos);
102 102 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
103 103 uint_t *protv);
104 104 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
105 105 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
106 106 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
107 107 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
108 108 uint_t behav);
109 109 static void segspt_shmdump(struct seg *seg);
110 110 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
111 111 struct page ***, enum lock_type, enum seg_rw);
112 112 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
113 113 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
114 114 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
115 -static int segspt_shmcapable(struct seg *, segcapability_t);
116 115
117 116 struct seg_ops segspt_shmops = {
118 117 .dup = segspt_shmdup,
119 118 .unmap = segspt_shmunmap,
120 119 .free = segspt_shmfree,
121 120 .fault = segspt_shmfault,
122 121 .faulta = segspt_shmfaulta,
123 122 .setprot = segspt_shmsetprot,
124 123 .checkprot = segspt_shmcheckprot,
125 124 .kluster = segspt_shmkluster,
126 125 .sync = segspt_shmsync,
127 126 .incore = segspt_shmincore,
128 127 .lockop = segspt_shmlockop,
129 128 .getprot = segspt_shmgetprot,
130 129 .getoffset = segspt_shmgetoffset,
131 130 .gettype = segspt_shmgettype,
132 131 .getvp = segspt_shmgetvp,
133 132 .advise = segspt_shmadvise,
134 133 .dump = segspt_shmdump,
135 134 .pagelock = segspt_shmpagelock,
136 135 .setpagesize = segspt_shmsetpgsz,
137 136 .getmemid = segspt_shmgetmemid,
138 137 .getpolicy = segspt_shmgetpolicy,
139 - .capable = segspt_shmcapable,
140 138 };
141 139
142 140 static void segspt_purge(struct seg *seg);
143 141 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
144 142 enum seg_rw, int);
145 143 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
146 144 page_t **ppa);
147 145
148 146
149 147
150 148 /*ARGSUSED*/
151 149 int
152 150 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
153 151 uint_t prot, uint_t flags, uint_t share_szc)
154 152 {
155 153 int err;
156 154 struct as *newas;
157 155 struct segspt_crargs sptcargs;
158 156
159 157 #ifdef DEBUG
160 158 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
161 159 tnf_ulong, size, size );
162 160 #endif
163 161 if (segspt_minfree == 0) /* leave min 5% of availrmem for */
164 162 segspt_minfree = availrmem/20; /* for the system */
165 163
166 164 if (!hat_supported(HAT_SHARED_PT, (void *)0))
167 165 return (EINVAL);
168 166
169 167 /*
170 168 * get a new as for this shared memory segment
171 169 */
172 170 newas = as_alloc();
173 171 newas->a_proc = NULL;
174 172 sptcargs.amp = amp;
175 173 sptcargs.prot = prot;
176 174 sptcargs.flags = flags;
177 175 sptcargs.szc = share_szc;
178 176 /*
179 177 * create a shared page table (spt) segment
180 178 */
181 179
182 180 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
183 181 as_free(newas);
184 182 return (err);
185 183 }
186 184 *sptseg = sptcargs.seg_spt;
187 185 return (0);
188 186 }
189 187
190 188 void
191 189 sptdestroy(struct as *as, struct anon_map *amp)
192 190 {
193 191
194 192 #ifdef DEBUG
195 193 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
196 194 #endif
197 195 (void) as_unmap(as, SEGSPTADDR, amp->size);
198 196 as_free(as);
199 197 }
200 198
201 199 /*
202 200 * called from seg_free().
203 201 * free (i.e., unlock, unmap, return to free list)
204 202 * all the pages in the given seg.
205 203 */
206 204 void
207 205 segspt_free(struct seg *seg)
208 206 {
209 207 struct spt_data *sptd = (struct spt_data *)seg->s_data;
210 208
211 209 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
212 210
213 211 if (sptd != NULL) {
214 212 if (sptd->spt_realsize)
215 213 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
216 214
217 215 if (sptd->spt_ppa_lckcnt)
218 216 kmem_free(sptd->spt_ppa_lckcnt,
219 217 sizeof (*sptd->spt_ppa_lckcnt)
220 218 * btopr(sptd->spt_amp->size));
221 219 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
222 220 cv_destroy(&sptd->spt_cv);
223 221 mutex_destroy(&sptd->spt_lock);
224 222 kmem_free(sptd, sizeof (*sptd));
225 223 }
226 224 }
227 225
228 226 /*ARGSUSED*/
229 227 static int
230 228 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
231 229 uint_t flags)
232 230 {
233 231 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
234 232
235 233 return (0);
236 234 }
237 235
238 236 /*ARGSUSED*/
239 237 static size_t
240 238 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
241 239 {
242 240 caddr_t eo_seg;
243 241 pgcnt_t npages;
244 242 struct shm_data *shmd = (struct shm_data *)seg->s_data;
245 243 struct seg *sptseg;
246 244 struct spt_data *sptd;
247 245
248 246 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
249 247 #ifdef lint
250 248 seg = seg;
251 249 #endif
252 250 sptseg = shmd->shm_sptseg;
253 251 sptd = sptseg->s_data;
254 252
255 253 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
256 254 eo_seg = addr + len;
257 255 while (addr < eo_seg) {
258 256 /* page exists, and it's locked. */
259 257 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
260 258 SEG_PAGE_ANON;
261 259 addr += PAGESIZE;
262 260 }
263 261 return (len);
264 262 } else {
265 263 struct anon_map *amp = shmd->shm_amp;
266 264 struct anon *ap;
267 265 page_t *pp;
268 266 pgcnt_t anon_index;
269 267 struct vnode *vp;
270 268 u_offset_t off;
271 269 ulong_t i;
272 270 int ret;
273 271 anon_sync_obj_t cookie;
274 272
275 273 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
276 274 anon_index = seg_page(seg, addr);
277 275 npages = btopr(len);
278 276 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
279 277 return (EINVAL);
280 278 }
281 279 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
282 280 for (i = 0; i < npages; i++, anon_index++) {
283 281 ret = 0;
284 282 anon_array_enter(amp, anon_index, &cookie);
285 283 ap = anon_get_ptr(amp->ahp, anon_index);
286 284 if (ap != NULL) {
287 285 swap_xlate(ap, &vp, &off);
288 286 anon_array_exit(&cookie);
289 287 pp = page_lookup_nowait(vp, off, SE_SHARED);
290 288 if (pp != NULL) {
291 289 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
292 290 page_unlock(pp);
293 291 }
294 292 } else {
295 293 anon_array_exit(&cookie);
296 294 }
297 295 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
298 296 ret |= SEG_PAGE_LOCKED;
299 297 }
300 298 *vec++ = (char)ret;
301 299 }
302 300 ANON_LOCK_EXIT(&amp->a_rwlock);
303 301 return (len);
304 302 }
305 303 }
306 304
307 305 static int
308 306 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
309 307 {
310 308 size_t share_size;
311 309
312 310 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
313 311
314 312 /*
315 313 * seg.s_size may have been rounded up to the largest page size
316 314 * in shmat().
317 315 * XXX This should be cleanedup. sptdestroy should take a length
318 316 * argument which should be the same as sptcreate. Then
319 317 * this rounding would not be needed (or is done in shm.c)
320 318 * Only the check for full segment will be needed.
321 319 *
322 320 * XXX -- shouldn't raddr == 0 always? These tests don't seem
323 321 * to be useful at all.
324 322 */
325 323 share_size = page_get_pagesize(seg->s_szc);
326 324 ssize = P2ROUNDUP(ssize, share_size);
327 325
328 326 if (raddr == seg->s_base && ssize == seg->s_size) {
329 327 seg_free(seg);
330 328 return (0);
331 329 } else
332 330 return (EINVAL);
333 331 }
334 332
335 333 int
336 334 segspt_create(struct seg *seg, caddr_t argsp)
337 335 {
338 336 int err;
339 337 caddr_t addr = seg->s_base;
340 338 struct spt_data *sptd;
341 339 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
342 340 struct anon_map *amp = sptcargs->amp;
343 341 struct kshmid *sp = amp->a_sp;
344 342 struct cred *cred = CRED();
345 343 ulong_t i, j, anon_index = 0;
346 344 pgcnt_t npages = btopr(amp->size);
347 345 struct vnode *vp;
348 346 page_t **ppa;
349 347 uint_t hat_flags;
350 348 size_t pgsz;
351 349 pgcnt_t pgcnt;
352 350 caddr_t a;
353 351 pgcnt_t pidx;
354 352 size_t sz;
355 353 proc_t *procp = curproc;
356 354 rctl_qty_t lockedbytes = 0;
357 355 kproject_t *proj;
358 356
359 357 /*
360 358 * We are holding the a_lock on the underlying dummy as,
361 359 * so we can make calls to the HAT layer.
362 360 */
363 361 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
364 362 ASSERT(sp != NULL);
365 363
366 364 #ifdef DEBUG
367 365 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
368 366 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
369 367 #endif
370 368 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
371 369 if (err = anon_swap_adjust(npages))
372 370 return (err);
373 371 }
374 372 err = ENOMEM;
375 373
376 374 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
377 375 goto out1;
378 376
379 377 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
380 378 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
381 379 KM_NOSLEEP)) == NULL)
382 380 goto out2;
383 381 }
384 382
385 383 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
386 384
387 385 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
388 386 goto out3;
389 387
390 388 seg->s_ops = &segspt_ops;
391 389 sptd->spt_vp = vp;
392 390 sptd->spt_amp = amp;
393 391 sptd->spt_prot = sptcargs->prot;
394 392 sptd->spt_flags = sptcargs->flags;
395 393 seg->s_data = (caddr_t)sptd;
396 394 sptd->spt_ppa = NULL;
397 395 sptd->spt_ppa_lckcnt = NULL;
398 396 seg->s_szc = sptcargs->szc;
399 397 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
400 398 sptd->spt_gen = 0;
401 399
402 400 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
403 401 if (seg->s_szc > amp->a_szc) {
404 402 amp->a_szc = seg->s_szc;
405 403 }
406 404 ANON_LOCK_EXIT(&amp->a_rwlock);
407 405
408 406 /*
409 407 * Set policy to affect initial allocation of pages in
410 408 * anon_map_createpages()
411 409 */
412 410 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
413 411 NULL, 0, ptob(npages));
414 412
415 413 if (sptcargs->flags & SHM_PAGEABLE) {
416 414 size_t share_sz;
417 415 pgcnt_t new_npgs, more_pgs;
418 416 struct anon_hdr *nahp;
419 417 zone_t *zone;
420 418
421 419 share_sz = page_get_pagesize(seg->s_szc);
422 420 if (!IS_P2ALIGNED(amp->size, share_sz)) {
423 421 /*
424 422 * We are rounding up the size of the anon array
425 423 * on 4 M boundary because we always create 4 M
426 424 * of page(s) when locking, faulting pages and we
427 425 * don't have to check for all corner cases e.g.
428 426 * if there is enough space to allocate 4 M
429 427 * page.
430 428 */
431 429 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
432 430 more_pgs = new_npgs - npages;
433 431
434 432 /*
435 433 * The zone will never be NULL, as a fully created
436 434 * shm always has an owning zone.
437 435 */
438 436 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
439 437 ASSERT(zone != NULL);
440 438 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
441 439 err = ENOMEM;
442 440 goto out4;
443 441 }
444 442
445 443 nahp = anon_create(new_npgs, ANON_SLEEP);
446 444 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
447 445 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
448 446 ANON_SLEEP);
449 447 anon_release(amp->ahp, npages);
450 448 amp->ahp = nahp;
451 449 ASSERT(amp->swresv == ptob(npages));
452 450 amp->swresv = amp->size = ptob(new_npgs);
453 451 ANON_LOCK_EXIT(&amp->a_rwlock);
454 452 npages = new_npgs;
455 453 }
456 454
457 455 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
458 456 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
459 457 sptd->spt_pcachecnt = 0;
460 458 sptd->spt_realsize = ptob(npages);
461 459 sptcargs->seg_spt = seg;
462 460 return (0);
463 461 }
464 462
465 463 /*
466 464 * get array of pages for each anon slot in amp
467 465 */
468 466 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
469 467 seg, addr, S_CREATE, cred)) != 0)
470 468 goto out4;
471 469
472 470 mutex_enter(&sp->shm_mlock);
473 471
474 472 /* May be partially locked, so, count bytes to charge for locking */
475 473 for (i = 0; i < npages; i++)
476 474 if (ppa[i]->p_lckcnt == 0)
477 475 lockedbytes += PAGESIZE;
478 476
479 477 proj = sp->shm_perm.ipc_proj;
480 478
481 479 if (lockedbytes > 0) {
482 480 mutex_enter(&procp->p_lock);
483 481 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
484 482 mutex_exit(&procp->p_lock);
485 483 mutex_exit(&sp->shm_mlock);
486 484 for (i = 0; i < npages; i++)
487 485 page_unlock(ppa[i]);
488 486 err = ENOMEM;
489 487 goto out4;
490 488 }
491 489 mutex_exit(&procp->p_lock);
492 490 }
493 491
494 492 /*
495 493 * addr is initial address corresponding to the first page on ppa list
496 494 */
497 495 for (i = 0; i < npages; i++) {
498 496 /* attempt to lock all pages */
499 497 if (page_pp_lock(ppa[i], 0, 1) == 0) {
500 498 /*
501 499 * if unable to lock any page, unlock all
502 500 * of them and return error
503 501 */
504 502 for (j = 0; j < i; j++)
505 503 page_pp_unlock(ppa[j], 0, 1);
506 504 for (i = 0; i < npages; i++)
507 505 page_unlock(ppa[i]);
508 506 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
509 507 mutex_exit(&sp->shm_mlock);
510 508 err = ENOMEM;
511 509 goto out4;
512 510 }
513 511 }
514 512 mutex_exit(&sp->shm_mlock);
515 513
516 514 /*
517 515 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
518 516 * for the entire life of the segment. For example platforms
519 517 * that do not support Dynamic Reconfiguration.
520 518 */
521 519 hat_flags = HAT_LOAD_SHARE;
522 520 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
523 521 hat_flags |= HAT_LOAD_LOCK;
524 522
525 523 /*
526 524 * Load translations one lare page at a time
527 525 * to make sure we don't create mappings bigger than
528 526 * segment's size code in case underlying pages
529 527 * are shared with segvn's segment that uses bigger
530 528 * size code than we do.
531 529 */
532 530 pgsz = page_get_pagesize(seg->s_szc);
533 531 pgcnt = page_get_pagecnt(seg->s_szc);
534 532 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
535 533 sz = MIN(pgsz, ptob(npages - pidx));
536 534 hat_memload_array(seg->s_as->a_hat, a, sz,
537 535 &ppa[pidx], sptd->spt_prot, hat_flags);
538 536 }
539 537
540 538 /*
541 539 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
542 540 * we will leave the pages locked SE_SHARED for the life
543 541 * of the ISM segment. This will prevent any calls to
544 542 * hat_pageunload() on this ISM segment for those platforms.
545 543 */
546 544 if (!(hat_flags & HAT_LOAD_LOCK)) {
547 545 /*
548 546 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
549 547 * we no longer need to hold the SE_SHARED lock on the pages,
550 548 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
551 549 * SE_SHARED lock on the pages as necessary.
552 550 */
553 551 for (i = 0; i < npages; i++)
554 552 page_unlock(ppa[i]);
555 553 }
556 554 sptd->spt_pcachecnt = 0;
557 555 kmem_free(ppa, ((sizeof (page_t *)) * npages));
558 556 sptd->spt_realsize = ptob(npages);
559 557 atomic_add_long(&spt_used, npages);
560 558 sptcargs->seg_spt = seg;
561 559 return (0);
562 560
563 561 out4:
564 562 seg->s_data = NULL;
565 563 kmem_free(vp, sizeof (*vp));
566 564 cv_destroy(&sptd->spt_cv);
567 565 out3:
568 566 mutex_destroy(&sptd->spt_lock);
569 567 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
570 568 kmem_free(ppa, (sizeof (*ppa) * npages));
571 569 out2:
572 570 kmem_free(sptd, sizeof (*sptd));
573 571 out1:
574 572 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
575 573 anon_swap_restore(npages);
576 574 return (err);
577 575 }
578 576
579 577 /*ARGSUSED*/
580 578 void
581 579 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
582 580 {
583 581 struct page *pp;
584 582 struct spt_data *sptd = (struct spt_data *)seg->s_data;
585 583 pgcnt_t npages;
586 584 ulong_t anon_idx;
587 585 struct anon_map *amp;
588 586 struct anon *ap;
589 587 struct vnode *vp;
590 588 u_offset_t off;
591 589 uint_t hat_flags;
592 590 int root = 0;
593 591 pgcnt_t pgs, curnpgs = 0;
594 592 page_t *rootpp;
595 593 rctl_qty_t unlocked_bytes = 0;
596 594 kproject_t *proj;
597 595 kshmid_t *sp;
598 596
599 597 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
600 598
601 599 len = P2ROUNDUP(len, PAGESIZE);
602 600
603 601 npages = btop(len);
604 602
605 603 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
606 604 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
607 605 (sptd->spt_flags & SHM_PAGEABLE)) {
608 606 hat_flags = HAT_UNLOAD_UNMAP;
609 607 }
610 608
611 609 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
612 610
613 611 amp = sptd->spt_amp;
614 612 if (sptd->spt_flags & SHM_PAGEABLE)
615 613 npages = btop(amp->size);
616 614
617 615 ASSERT(amp != NULL);
618 616
619 617 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
620 618 sp = amp->a_sp;
621 619 proj = sp->shm_perm.ipc_proj;
622 620 mutex_enter(&sp->shm_mlock);
623 621 }
624 622 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
625 623 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
626 624 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
627 625 panic("segspt_free_pages: null app");
628 626 /*NOTREACHED*/
629 627 }
630 628 } else {
631 629 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
632 630 == NULL)
633 631 continue;
634 632 }
635 633 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
636 634 swap_xlate(ap, &vp, &off);
637 635
638 636 /*
639 637 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
640 638 * the pages won't be having SE_SHARED lock at this
641 639 * point.
642 640 *
643 641 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
644 642 * the pages are still held SE_SHARED locked from the
645 643 * original segspt_create()
646 644 *
647 645 * Our goal is to get SE_EXCL lock on each page, remove
648 646 * permanent lock on it and invalidate the page.
649 647 */
650 648 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
651 649 if (hat_flags == HAT_UNLOAD_UNMAP)
652 650 pp = page_lookup(vp, off, SE_EXCL);
653 651 else {
654 652 if ((pp = page_find(vp, off)) == NULL) {
655 653 panic("segspt_free_pages: "
656 654 "page not locked");
657 655 /*NOTREACHED*/
658 656 }
659 657 if (!page_tryupgrade(pp)) {
660 658 page_unlock(pp);
661 659 pp = page_lookup(vp, off, SE_EXCL);
662 660 }
663 661 }
664 662 if (pp == NULL) {
665 663 panic("segspt_free_pages: "
666 664 "page not in the system");
667 665 /*NOTREACHED*/
668 666 }
669 667 ASSERT(pp->p_lckcnt > 0);
670 668 page_pp_unlock(pp, 0, 1);
671 669 if (pp->p_lckcnt == 0)
672 670 unlocked_bytes += PAGESIZE;
673 671 } else {
674 672 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
675 673 continue;
676 674 }
677 675 /*
678 676 * It's logical to invalidate the pages here as in most cases
679 677 * these were created by segspt.
680 678 */
681 679 if (pp->p_szc != 0) {
682 680 if (root == 0) {
683 681 ASSERT(curnpgs == 0);
684 682 root = 1;
685 683 rootpp = pp;
686 684 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
687 685 ASSERT(pgs > 1);
688 686 ASSERT(IS_P2ALIGNED(pgs, pgs));
689 687 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
690 688 curnpgs--;
691 689 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
692 690 ASSERT(curnpgs == 1);
693 691 ASSERT(page_pptonum(pp) ==
694 692 page_pptonum(rootpp) + (pgs - 1));
695 693 page_destroy_pages(rootpp);
696 694 root = 0;
697 695 curnpgs = 0;
698 696 } else {
699 697 ASSERT(curnpgs > 1);
700 698 ASSERT(page_pptonum(pp) ==
701 699 page_pptonum(rootpp) + (pgs - curnpgs));
702 700 curnpgs--;
703 701 }
704 702 } else {
705 703 if (root != 0 || curnpgs != 0) {
706 704 panic("segspt_free_pages: bad large page");
707 705 /*NOTREACHED*/
708 706 }
709 707 /*
710 708 * Before destroying the pages, we need to take care
711 709 * of the rctl locked memory accounting. For that
712 710 * we need to calculte the unlocked_bytes.
713 711 */
714 712 if (pp->p_lckcnt > 0)
715 713 unlocked_bytes += PAGESIZE;
716 714 /*LINTED: constant in conditional context */
717 715 VN_DISPOSE(pp, B_INVAL, 0, kcred);
718 716 }
719 717 }
720 718 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
721 719 if (unlocked_bytes > 0)
722 720 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
723 721 mutex_exit(&sp->shm_mlock);
724 722 }
725 723 if (root != 0 || curnpgs != 0) {
726 724 panic("segspt_free_pages: bad large page");
727 725 /*NOTREACHED*/
728 726 }
729 727
730 728 /*
731 729 * mark that pages have been released
732 730 */
733 731 sptd->spt_realsize = 0;
734 732
735 733 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
736 734 atomic_add_long(&spt_used, -npages);
737 735 anon_swap_restore(npages);
738 736 }
739 737 }
740 738
741 739 /*
742 740 * Get memory allocation policy info for specified address in given segment
743 741 */
744 742 static lgrp_mem_policy_info_t *
745 743 segspt_getpolicy(struct seg *seg, caddr_t addr)
746 744 {
747 745 struct anon_map *amp;
748 746 ulong_t anon_index;
749 747 lgrp_mem_policy_info_t *policy_info;
750 748 struct spt_data *spt_data;
751 749
752 750 ASSERT(seg != NULL);
753 751
754 752 /*
755 753 * Get anon_map from segspt
756 754 *
757 755 * Assume that no lock needs to be held on anon_map, since
758 756 * it should be protected by its reference count which must be
759 757 * nonzero for an existing segment
760 758 * Need to grab readers lock on policy tree though
761 759 */
762 760 spt_data = (struct spt_data *)seg->s_data;
763 761 if (spt_data == NULL)
764 762 return (NULL);
765 763 amp = spt_data->spt_amp;
766 764 ASSERT(amp->refcnt != 0);
767 765
768 766 /*
769 767 * Get policy info
770 768 *
771 769 * Assume starting anon index of 0
772 770 */
773 771 anon_index = seg_page(seg, addr);
774 772 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
775 773
776 774 return (policy_info);
777 775 }
778 776
779 777 /*
780 778 * DISM only.
781 779 * Return locked pages over a given range.
782 780 *
783 781 * We will cache all DISM locked pages and save the pplist for the
784 782 * entire segment in the ppa field of the underlying DISM segment structure.
785 783 * Later, during a call to segspt_reclaim() we will use this ppa array
786 784 * to page_unlock() all of the pages and then we will free this ppa list.
787 785 */
788 786 /*ARGSUSED*/
789 787 static int
790 788 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
791 789 struct page ***ppp, enum lock_type type, enum seg_rw rw)
792 790 {
793 791 struct shm_data *shmd = (struct shm_data *)seg->s_data;
794 792 struct seg *sptseg = shmd->shm_sptseg;
795 793 struct spt_data *sptd = sptseg->s_data;
796 794 pgcnt_t pg_idx, npages, tot_npages, npgs;
797 795 struct page **pplist, **pl, **ppa, *pp;
798 796 struct anon_map *amp;
799 797 spgcnt_t an_idx;
800 798 int ret = ENOTSUP;
801 799 uint_t pl_built = 0;
802 800 struct anon *ap;
803 801 struct vnode *vp;
804 802 u_offset_t off;
805 803 pgcnt_t claim_availrmem = 0;
806 804 uint_t szc;
807 805
808 806 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
809 807 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
810 808
811 809 /*
812 810 * We want to lock/unlock the entire ISM segment. Therefore,
813 811 * we will be using the underlying sptseg and it's base address
814 812 * and length for the caching arguments.
815 813 */
816 814 ASSERT(sptseg);
817 815 ASSERT(sptd);
818 816
819 817 pg_idx = seg_page(seg, addr);
820 818 npages = btopr(len);
821 819
822 820 /*
823 821 * check if the request is larger than number of pages covered
824 822 * by amp
825 823 */
826 824 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
827 825 *ppp = NULL;
828 826 return (ENOTSUP);
829 827 }
830 828
831 829 if (type == L_PAGEUNLOCK) {
832 830 ASSERT(sptd->spt_ppa != NULL);
833 831
834 832 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
835 833 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
836 834
837 835 /*
838 836 * If someone is blocked while unmapping, we purge
839 837 * segment page cache and thus reclaim pplist synchronously
840 838 * without waiting for seg_pasync_thread. This speeds up
841 839 * unmapping in cases where munmap(2) is called, while
842 840 * raw async i/o is still in progress or where a thread
843 841 * exits on data fault in a multithreaded application.
844 842 */
845 843 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
846 844 (AS_ISUNMAPWAIT(seg->s_as) &&
847 845 shmd->shm_softlockcnt > 0)) {
848 846 segspt_purge(seg);
849 847 }
850 848 return (0);
851 849 }
852 850
853 851 /* The L_PAGELOCK case ... */
854 852
855 853 if (sptd->spt_flags & DISM_PPA_CHANGED) {
856 854 segspt_purge(seg);
857 855 /*
858 856 * for DISM ppa needs to be rebuild since
859 857 * number of locked pages could be changed
860 858 */
861 859 *ppp = NULL;
862 860 return (ENOTSUP);
863 861 }
864 862
865 863 /*
866 864 * First try to find pages in segment page cache, without
867 865 * holding the segment lock.
868 866 */
869 867 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
870 868 S_WRITE, SEGP_FORCE_WIRED);
871 869 if (pplist != NULL) {
872 870 ASSERT(sptd->spt_ppa != NULL);
873 871 ASSERT(sptd->spt_ppa == pplist);
874 872 ppa = sptd->spt_ppa;
875 873 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
876 874 if (ppa[an_idx] == NULL) {
877 875 seg_pinactive(seg, NULL, seg->s_base,
878 876 sptd->spt_amp->size, ppa,
879 877 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
880 878 *ppp = NULL;
881 879 return (ENOTSUP);
882 880 }
883 881 if ((szc = ppa[an_idx]->p_szc) != 0) {
884 882 npgs = page_get_pagecnt(szc);
885 883 an_idx = P2ROUNDUP(an_idx + 1, npgs);
886 884 } else {
887 885 an_idx++;
888 886 }
889 887 }
890 888 /*
891 889 * Since we cache the entire DISM segment, we want to
892 890 * set ppp to point to the first slot that corresponds
893 891 * to the requested addr, i.e. pg_idx.
894 892 */
895 893 *ppp = &(sptd->spt_ppa[pg_idx]);
896 894 return (0);
897 895 }
898 896
899 897 mutex_enter(&sptd->spt_lock);
900 898 /*
901 899 * try to find pages in segment page cache with mutex
902 900 */
903 901 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
904 902 S_WRITE, SEGP_FORCE_WIRED);
905 903 if (pplist != NULL) {
906 904 ASSERT(sptd->spt_ppa != NULL);
907 905 ASSERT(sptd->spt_ppa == pplist);
908 906 ppa = sptd->spt_ppa;
909 907 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
910 908 if (ppa[an_idx] == NULL) {
911 909 mutex_exit(&sptd->spt_lock);
912 910 seg_pinactive(seg, NULL, seg->s_base,
913 911 sptd->spt_amp->size, ppa,
914 912 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
915 913 *ppp = NULL;
916 914 return (ENOTSUP);
917 915 }
918 916 if ((szc = ppa[an_idx]->p_szc) != 0) {
919 917 npgs = page_get_pagecnt(szc);
920 918 an_idx = P2ROUNDUP(an_idx + 1, npgs);
921 919 } else {
922 920 an_idx++;
923 921 }
924 922 }
925 923 /*
926 924 * Since we cache the entire DISM segment, we want to
927 925 * set ppp to point to the first slot that corresponds
928 926 * to the requested addr, i.e. pg_idx.
929 927 */
930 928 mutex_exit(&sptd->spt_lock);
931 929 *ppp = &(sptd->spt_ppa[pg_idx]);
932 930 return (0);
933 931 }
934 932 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
935 933 SEGP_FORCE_WIRED) == SEGP_FAIL) {
936 934 mutex_exit(&sptd->spt_lock);
937 935 *ppp = NULL;
938 936 return (ENOTSUP);
939 937 }
940 938
941 939 /*
942 940 * No need to worry about protections because DISM pages are always rw.
943 941 */
944 942 pl = pplist = NULL;
945 943 amp = sptd->spt_amp;
946 944
947 945 /*
948 946 * Do we need to build the ppa array?
949 947 */
950 948 if (sptd->spt_ppa == NULL) {
951 949 pgcnt_t lpg_cnt = 0;
952 950
953 951 pl_built = 1;
954 952 tot_npages = btopr(sptd->spt_amp->size);
955 953
956 954 ASSERT(sptd->spt_pcachecnt == 0);
957 955 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
958 956 pl = pplist;
959 957
960 958 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
961 959 for (an_idx = 0; an_idx < tot_npages; ) {
962 960 ap = anon_get_ptr(amp->ahp, an_idx);
963 961 /*
964 962 * Cache only mlocked pages. For large pages
965 963 * if one (constituent) page is mlocked
966 964 * all pages for that large page
967 965 * are cached also. This is for quick
968 966 * lookups of ppa array;
969 967 */
970 968 if ((ap != NULL) && (lpg_cnt != 0 ||
971 969 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
972 970
973 971 swap_xlate(ap, &vp, &off);
974 972 pp = page_lookup(vp, off, SE_SHARED);
975 973 ASSERT(pp != NULL);
976 974 if (lpg_cnt == 0) {
977 975 lpg_cnt++;
978 976 /*
979 977 * For a small page, we are done --
980 978 * lpg_count is reset to 0 below.
981 979 *
982 980 * For a large page, we are guaranteed
983 981 * to find the anon structures of all
984 982 * constituent pages and a non-zero
985 983 * lpg_cnt ensures that we don't test
986 984 * for mlock for these. We are done
987 985 * when lpg_count reaches (npgs + 1).
988 986 * If we are not the first constituent
989 987 * page, restart at the first one.
990 988 */
991 989 npgs = page_get_pagecnt(pp->p_szc);
992 990 if (!IS_P2ALIGNED(an_idx, npgs)) {
993 991 an_idx = P2ALIGN(an_idx, npgs);
994 992 page_unlock(pp);
995 993 continue;
996 994 }
997 995 }
998 996 if (++lpg_cnt > npgs)
999 997 lpg_cnt = 0;
1000 998
1001 999 /*
1002 1000 * availrmem is decremented only
1003 1001 * for unlocked pages
1004 1002 */
1005 1003 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1006 1004 claim_availrmem++;
1007 1005 pplist[an_idx] = pp;
1008 1006 }
1009 1007 an_idx++;
1010 1008 }
1011 1009 ANON_LOCK_EXIT(&amp->a_rwlock);
1012 1010
1013 1011 if (claim_availrmem) {
1014 1012 mutex_enter(&freemem_lock);
1015 1013 if (availrmem < tune.t_minarmem + claim_availrmem) {
1016 1014 mutex_exit(&freemem_lock);
1017 1015 ret = ENOTSUP;
1018 1016 claim_availrmem = 0;
1019 1017 goto insert_fail;
1020 1018 } else {
1021 1019 availrmem -= claim_availrmem;
1022 1020 }
1023 1021 mutex_exit(&freemem_lock);
1024 1022 }
1025 1023
1026 1024 sptd->spt_ppa = pl;
1027 1025 } else {
1028 1026 /*
1029 1027 * We already have a valid ppa[].
1030 1028 */
1031 1029 pl = sptd->spt_ppa;
1032 1030 }
1033 1031
1034 1032 ASSERT(pl != NULL);
1035 1033
1036 1034 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1037 1035 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1038 1036 segspt_reclaim);
1039 1037 if (ret == SEGP_FAIL) {
1040 1038 /*
1041 1039 * seg_pinsert failed. We return
1042 1040 * ENOTSUP, so that the as_pagelock() code will
1043 1041 * then try the slower F_SOFTLOCK path.
1044 1042 */
1045 1043 if (pl_built) {
1046 1044 /*
1047 1045 * No one else has referenced the ppa[].
1048 1046 * We created it and we need to destroy it.
1049 1047 */
1050 1048 sptd->spt_ppa = NULL;
1051 1049 }
1052 1050 ret = ENOTSUP;
1053 1051 goto insert_fail;
1054 1052 }
1055 1053
1056 1054 /*
1057 1055 * In either case, we increment softlockcnt on the 'real' segment.
1058 1056 */
1059 1057 sptd->spt_pcachecnt++;
1060 1058 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1061 1059
1062 1060 ppa = sptd->spt_ppa;
1063 1061 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1064 1062 if (ppa[an_idx] == NULL) {
1065 1063 mutex_exit(&sptd->spt_lock);
1066 1064 seg_pinactive(seg, NULL, seg->s_base,
1067 1065 sptd->spt_amp->size,
1068 1066 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1069 1067 *ppp = NULL;
1070 1068 return (ENOTSUP);
1071 1069 }
1072 1070 if ((szc = ppa[an_idx]->p_szc) != 0) {
1073 1071 npgs = page_get_pagecnt(szc);
1074 1072 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1075 1073 } else {
1076 1074 an_idx++;
1077 1075 }
1078 1076 }
1079 1077 /*
1080 1078 * We can now drop the sptd->spt_lock since the ppa[]
1081 1079 * exists and he have incremented pacachecnt.
1082 1080 */
1083 1081 mutex_exit(&sptd->spt_lock);
1084 1082
1085 1083 /*
1086 1084 * Since we cache the entire segment, we want to
1087 1085 * set ppp to point to the first slot that corresponds
1088 1086 * to the requested addr, i.e. pg_idx.
1089 1087 */
1090 1088 *ppp = &(sptd->spt_ppa[pg_idx]);
1091 1089 return (0);
1092 1090
1093 1091 insert_fail:
1094 1092 /*
1095 1093 * We will only reach this code if we tried and failed.
1096 1094 *
1097 1095 * And we can drop the lock on the dummy seg, once we've failed
1098 1096 * to set up a new ppa[].
1099 1097 */
1100 1098 mutex_exit(&sptd->spt_lock);
1101 1099
1102 1100 if (pl_built) {
1103 1101 if (claim_availrmem) {
1104 1102 mutex_enter(&freemem_lock);
1105 1103 availrmem += claim_availrmem;
1106 1104 mutex_exit(&freemem_lock);
1107 1105 }
1108 1106
1109 1107 /*
1110 1108 * We created pl and we need to destroy it.
1111 1109 */
1112 1110 pplist = pl;
1113 1111 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1114 1112 if (pplist[an_idx] != NULL)
1115 1113 page_unlock(pplist[an_idx]);
1116 1114 }
1117 1115 kmem_free(pl, sizeof (page_t *) * tot_npages);
1118 1116 }
1119 1117
1120 1118 if (shmd->shm_softlockcnt <= 0) {
1121 1119 if (AS_ISUNMAPWAIT(seg->s_as)) {
1122 1120 mutex_enter(&seg->s_as->a_contents);
1123 1121 if (AS_ISUNMAPWAIT(seg->s_as)) {
1124 1122 AS_CLRUNMAPWAIT(seg->s_as);
1125 1123 cv_broadcast(&seg->s_as->a_cv);
1126 1124 }
1127 1125 mutex_exit(&seg->s_as->a_contents);
1128 1126 }
1129 1127 }
1130 1128 *ppp = NULL;
1131 1129 return (ret);
1132 1130 }
1133 1131
1134 1132
1135 1133
1136 1134 /*
1137 1135 * return locked pages over a given range.
1138 1136 *
1139 1137 * We will cache the entire ISM segment and save the pplist for the
1140 1138 * entire segment in the ppa field of the underlying ISM segment structure.
1141 1139 * Later, during a call to segspt_reclaim() we will use this ppa array
1142 1140 * to page_unlock() all of the pages and then we will free this ppa list.
1143 1141 */
1144 1142 /*ARGSUSED*/
1145 1143 static int
1146 1144 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1147 1145 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1148 1146 {
1149 1147 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1150 1148 struct seg *sptseg = shmd->shm_sptseg;
1151 1149 struct spt_data *sptd = sptseg->s_data;
1152 1150 pgcnt_t np, page_index, npages;
1153 1151 caddr_t a, spt_base;
1154 1152 struct page **pplist, **pl, *pp;
1155 1153 struct anon_map *amp;
1156 1154 ulong_t anon_index;
1157 1155 int ret = ENOTSUP;
1158 1156 uint_t pl_built = 0;
1159 1157 struct anon *ap;
1160 1158 struct vnode *vp;
1161 1159 u_offset_t off;
1162 1160
1163 1161 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1164 1162 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1165 1163
1166 1164
1167 1165 /*
1168 1166 * We want to lock/unlock the entire ISM segment. Therefore,
1169 1167 * we will be using the underlying sptseg and it's base address
1170 1168 * and length for the caching arguments.
1171 1169 */
1172 1170 ASSERT(sptseg);
1173 1171 ASSERT(sptd);
1174 1172
1175 1173 if (sptd->spt_flags & SHM_PAGEABLE) {
1176 1174 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1177 1175 }
1178 1176
1179 1177 page_index = seg_page(seg, addr);
1180 1178 npages = btopr(len);
1181 1179
1182 1180 /*
1183 1181 * check if the request is larger than number of pages covered
1184 1182 * by amp
1185 1183 */
1186 1184 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1187 1185 *ppp = NULL;
1188 1186 return (ENOTSUP);
1189 1187 }
1190 1188
1191 1189 if (type == L_PAGEUNLOCK) {
1192 1190
1193 1191 ASSERT(sptd->spt_ppa != NULL);
1194 1192
1195 1193 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1196 1194 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1197 1195
1198 1196 /*
1199 1197 * If someone is blocked while unmapping, we purge
1200 1198 * segment page cache and thus reclaim pplist synchronously
1201 1199 * without waiting for seg_pasync_thread. This speeds up
1202 1200 * unmapping in cases where munmap(2) is called, while
1203 1201 * raw async i/o is still in progress or where a thread
1204 1202 * exits on data fault in a multithreaded application.
1205 1203 */
1206 1204 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1207 1205 segspt_purge(seg);
1208 1206 }
1209 1207 return (0);
1210 1208 }
1211 1209
1212 1210 /* The L_PAGELOCK case... */
1213 1211
1214 1212 /*
1215 1213 * First try to find pages in segment page cache, without
1216 1214 * holding the segment lock.
1217 1215 */
1218 1216 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1219 1217 S_WRITE, SEGP_FORCE_WIRED);
1220 1218 if (pplist != NULL) {
1221 1219 ASSERT(sptd->spt_ppa == pplist);
1222 1220 ASSERT(sptd->spt_ppa[page_index]);
1223 1221 /*
1224 1222 * Since we cache the entire ISM segment, we want to
1225 1223 * set ppp to point to the first slot that corresponds
1226 1224 * to the requested addr, i.e. page_index.
1227 1225 */
1228 1226 *ppp = &(sptd->spt_ppa[page_index]);
1229 1227 return (0);
1230 1228 }
1231 1229
1232 1230 mutex_enter(&sptd->spt_lock);
1233 1231
1234 1232 /*
1235 1233 * try to find pages in segment page cache
1236 1234 */
1237 1235 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1238 1236 S_WRITE, SEGP_FORCE_WIRED);
1239 1237 if (pplist != NULL) {
1240 1238 ASSERT(sptd->spt_ppa == pplist);
1241 1239 /*
1242 1240 * Since we cache the entire segment, we want to
1243 1241 * set ppp to point to the first slot that corresponds
1244 1242 * to the requested addr, i.e. page_index.
1245 1243 */
1246 1244 mutex_exit(&sptd->spt_lock);
1247 1245 *ppp = &(sptd->spt_ppa[page_index]);
1248 1246 return (0);
1249 1247 }
1250 1248
1251 1249 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1252 1250 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1253 1251 mutex_exit(&sptd->spt_lock);
1254 1252 *ppp = NULL;
1255 1253 return (ENOTSUP);
1256 1254 }
1257 1255
1258 1256 /*
1259 1257 * No need to worry about protections because ISM pages
1260 1258 * are always rw.
1261 1259 */
1262 1260 pl = pplist = NULL;
1263 1261
1264 1262 /*
1265 1263 * Do we need to build the ppa array?
1266 1264 */
1267 1265 if (sptd->spt_ppa == NULL) {
1268 1266 ASSERT(sptd->spt_ppa == pplist);
1269 1267
1270 1268 spt_base = sptseg->s_base;
1271 1269 pl_built = 1;
1272 1270
1273 1271 /*
1274 1272 * availrmem is decremented once during anon_swap_adjust()
1275 1273 * and is incremented during the anon_unresv(), which is
1276 1274 * called from shm_rm_amp() when the segment is destroyed.
1277 1275 */
1278 1276 amp = sptd->spt_amp;
1279 1277 ASSERT(amp != NULL);
1280 1278
1281 1279 /* pcachecnt is protected by sptd->spt_lock */
1282 1280 ASSERT(sptd->spt_pcachecnt == 0);
1283 1281 pplist = kmem_zalloc(sizeof (page_t *)
1284 1282 * btopr(sptd->spt_amp->size), KM_SLEEP);
1285 1283 pl = pplist;
1286 1284
1287 1285 anon_index = seg_page(sptseg, spt_base);
1288 1286
1289 1287 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1290 1288 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1291 1289 a += PAGESIZE, anon_index++, pplist++) {
1292 1290 ap = anon_get_ptr(amp->ahp, anon_index);
1293 1291 ASSERT(ap != NULL);
1294 1292 swap_xlate(ap, &vp, &off);
1295 1293 pp = page_lookup(vp, off, SE_SHARED);
1296 1294 ASSERT(pp != NULL);
1297 1295 *pplist = pp;
1298 1296 }
1299 1297 ANON_LOCK_EXIT(&amp->a_rwlock);
1300 1298
1301 1299 if (a < (spt_base + sptd->spt_amp->size)) {
1302 1300 ret = ENOTSUP;
1303 1301 goto insert_fail;
1304 1302 }
1305 1303 sptd->spt_ppa = pl;
1306 1304 } else {
1307 1305 /*
1308 1306 * We already have a valid ppa[].
1309 1307 */
1310 1308 pl = sptd->spt_ppa;
1311 1309 }
1312 1310
1313 1311 ASSERT(pl != NULL);
1314 1312
1315 1313 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1316 1314 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1317 1315 segspt_reclaim);
1318 1316 if (ret == SEGP_FAIL) {
1319 1317 /*
1320 1318 * seg_pinsert failed. We return
1321 1319 * ENOTSUP, so that the as_pagelock() code will
1322 1320 * then try the slower F_SOFTLOCK path.
1323 1321 */
1324 1322 if (pl_built) {
1325 1323 /*
1326 1324 * No one else has referenced the ppa[].
1327 1325 * We created it and we need to destroy it.
1328 1326 */
1329 1327 sptd->spt_ppa = NULL;
1330 1328 }
1331 1329 ret = ENOTSUP;
1332 1330 goto insert_fail;
1333 1331 }
1334 1332
1335 1333 /*
1336 1334 * In either case, we increment softlockcnt on the 'real' segment.
1337 1335 */
1338 1336 sptd->spt_pcachecnt++;
1339 1337 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1340 1338
1341 1339 /*
1342 1340 * We can now drop the sptd->spt_lock since the ppa[]
1343 1341 * exists and he have incremented pacachecnt.
1344 1342 */
1345 1343 mutex_exit(&sptd->spt_lock);
1346 1344
1347 1345 /*
1348 1346 * Since we cache the entire segment, we want to
1349 1347 * set ppp to point to the first slot that corresponds
1350 1348 * to the requested addr, i.e. page_index.
1351 1349 */
1352 1350 *ppp = &(sptd->spt_ppa[page_index]);
1353 1351 return (0);
1354 1352
1355 1353 insert_fail:
1356 1354 /*
1357 1355 * We will only reach this code if we tried and failed.
1358 1356 *
1359 1357 * And we can drop the lock on the dummy seg, once we've failed
1360 1358 * to set up a new ppa[].
1361 1359 */
1362 1360 mutex_exit(&sptd->spt_lock);
1363 1361
1364 1362 if (pl_built) {
1365 1363 /*
1366 1364 * We created pl and we need to destroy it.
1367 1365 */
1368 1366 pplist = pl;
1369 1367 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1370 1368 while (np) {
1371 1369 page_unlock(*pplist);
1372 1370 np--;
1373 1371 pplist++;
1374 1372 }
1375 1373 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1376 1374 }
1377 1375 if (shmd->shm_softlockcnt <= 0) {
1378 1376 if (AS_ISUNMAPWAIT(seg->s_as)) {
1379 1377 mutex_enter(&seg->s_as->a_contents);
1380 1378 if (AS_ISUNMAPWAIT(seg->s_as)) {
1381 1379 AS_CLRUNMAPWAIT(seg->s_as);
1382 1380 cv_broadcast(&seg->s_as->a_cv);
1383 1381 }
1384 1382 mutex_exit(&seg->s_as->a_contents);
1385 1383 }
1386 1384 }
1387 1385 *ppp = NULL;
1388 1386 return (ret);
1389 1387 }
1390 1388
1391 1389 /*
1392 1390 * purge any cached pages in the I/O page cache
1393 1391 */
1394 1392 static void
1395 1393 segspt_purge(struct seg *seg)
1396 1394 {
1397 1395 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1398 1396 }
1399 1397
1400 1398 static int
1401 1399 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1402 1400 enum seg_rw rw, int async)
1403 1401 {
1404 1402 struct seg *seg = (struct seg *)ptag;
1405 1403 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1406 1404 struct seg *sptseg;
1407 1405 struct spt_data *sptd;
1408 1406 pgcnt_t npages, i, free_availrmem = 0;
1409 1407 int done = 0;
1410 1408
1411 1409 #ifdef lint
1412 1410 addr = addr;
1413 1411 #endif
1414 1412 sptseg = shmd->shm_sptseg;
1415 1413 sptd = sptseg->s_data;
1416 1414 npages = (len >> PAGESHIFT);
1417 1415 ASSERT(npages);
1418 1416 ASSERT(sptd->spt_pcachecnt != 0);
1419 1417 ASSERT(sptd->spt_ppa == pplist);
1420 1418 ASSERT(npages == btopr(sptd->spt_amp->size));
1421 1419 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1422 1420
1423 1421 /*
1424 1422 * Acquire the lock on the dummy seg and destroy the
1425 1423 * ppa array IF this is the last pcachecnt.
1426 1424 */
1427 1425 mutex_enter(&sptd->spt_lock);
1428 1426 if (--sptd->spt_pcachecnt == 0) {
1429 1427 for (i = 0; i < npages; i++) {
1430 1428 if (pplist[i] == NULL) {
1431 1429 continue;
1432 1430 }
1433 1431 if (rw == S_WRITE) {
1434 1432 hat_setrefmod(pplist[i]);
1435 1433 } else {
1436 1434 hat_setref(pplist[i]);
1437 1435 }
1438 1436 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1439 1437 (sptd->spt_ppa_lckcnt[i] == 0))
1440 1438 free_availrmem++;
1441 1439 page_unlock(pplist[i]);
1442 1440 }
1443 1441 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1444 1442 mutex_enter(&freemem_lock);
1445 1443 availrmem += free_availrmem;
1446 1444 mutex_exit(&freemem_lock);
1447 1445 }
1448 1446 /*
1449 1447 * Since we want to cach/uncache the entire ISM segment,
1450 1448 * we will track the pplist in a segspt specific field
1451 1449 * ppa, that is initialized at the time we add an entry to
1452 1450 * the cache.
1453 1451 */
1454 1452 ASSERT(sptd->spt_pcachecnt == 0);
1455 1453 kmem_free(pplist, sizeof (page_t *) * npages);
1456 1454 sptd->spt_ppa = NULL;
1457 1455 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1458 1456 sptd->spt_gen++;
1459 1457 cv_broadcast(&sptd->spt_cv);
1460 1458 done = 1;
1461 1459 }
1462 1460 mutex_exit(&sptd->spt_lock);
1463 1461
1464 1462 /*
1465 1463 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1466 1464 * may not hold AS lock (in this case async argument is not 0). This
1467 1465 * means if softlockcnt drops to 0 after the decrement below address
1468 1466 * space may get freed. We can't allow it since after softlock
1469 1467 * derement to 0 we still need to access as structure for possible
1470 1468 * wakeup of unmap waiters. To prevent the disappearance of as we take
1471 1469 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1472 1470 * this mutex as a barrier to make sure this routine completes before
1473 1471 * segment is freed.
1474 1472 *
1475 1473 * The second complication we have to deal with in async case is a
1476 1474 * possibility of missed wake up of unmap wait thread. When we don't
1477 1475 * hold as lock here we may take a_contents lock before unmap wait
1478 1476 * thread that was first to see softlockcnt was still not 0. As a
1479 1477 * result we'll fail to wake up an unmap wait thread. To avoid this
1480 1478 * race we set nounmapwait flag in as structure if we drop softlockcnt
1481 1479 * to 0 if async is not 0. unmapwait thread
1482 1480 * will not block if this flag is set.
1483 1481 */
1484 1482 if (async)
1485 1483 mutex_enter(&shmd->shm_segfree_syncmtx);
1486 1484
1487 1485 /*
1488 1486 * Now decrement softlockcnt.
1489 1487 */
1490 1488 ASSERT(shmd->shm_softlockcnt > 0);
1491 1489 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1492 1490
1493 1491 if (shmd->shm_softlockcnt <= 0) {
1494 1492 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1495 1493 mutex_enter(&seg->s_as->a_contents);
1496 1494 if (async)
1497 1495 AS_SETNOUNMAPWAIT(seg->s_as);
1498 1496 if (AS_ISUNMAPWAIT(seg->s_as)) {
1499 1497 AS_CLRUNMAPWAIT(seg->s_as);
1500 1498 cv_broadcast(&seg->s_as->a_cv);
1501 1499 }
1502 1500 mutex_exit(&seg->s_as->a_contents);
1503 1501 }
1504 1502 }
1505 1503
1506 1504 if (async)
1507 1505 mutex_exit(&shmd->shm_segfree_syncmtx);
1508 1506
1509 1507 return (done);
1510 1508 }
1511 1509
1512 1510 /*
1513 1511 * Do a F_SOFTUNLOCK call over the range requested.
1514 1512 * The range must have already been F_SOFTLOCK'ed.
1515 1513 *
1516 1514 * The calls to acquire and release the anon map lock mutex were
1517 1515 * removed in order to avoid a deadly embrace during a DR
1518 1516 * memory delete operation. (Eg. DR blocks while waiting for a
1519 1517 * exclusive lock on a page that is being used for kaio; the
1520 1518 * thread that will complete the kaio and call segspt_softunlock
1521 1519 * blocks on the anon map lock; another thread holding the anon
1522 1520 * map lock blocks on another page lock via the segspt_shmfault
1523 1521 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1524 1522 *
1525 1523 * The appropriateness of the removal is based upon the following:
1526 1524 * 1. If we are holding a segment's reader lock and the page is held
1527 1525 * shared, then the corresponding element in anonmap which points to
1528 1526 * anon struct cannot change and there is no need to acquire the
1529 1527 * anonymous map lock.
1530 1528 * 2. Threads in segspt_softunlock have a reader lock on the segment
1531 1529 * and already have the shared page lock, so we are guaranteed that
1532 1530 * the anon map slot cannot change and therefore can call anon_get_ptr()
1533 1531 * without grabbing the anonymous map lock.
1534 1532 * 3. Threads that softlock a shared page break copy-on-write, even if
1535 1533 * its a read. Thus cow faults can be ignored with respect to soft
1536 1534 * unlocking, since the breaking of cow means that the anon slot(s) will
1537 1535 * not be shared.
1538 1536 */
1539 1537 static void
1540 1538 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1541 1539 size_t len, enum seg_rw rw)
1542 1540 {
1543 1541 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1544 1542 struct seg *sptseg;
1545 1543 struct spt_data *sptd;
1546 1544 page_t *pp;
1547 1545 caddr_t adr;
1548 1546 struct vnode *vp;
1549 1547 u_offset_t offset;
1550 1548 ulong_t anon_index;
1551 1549 struct anon_map *amp; /* XXX - for locknest */
1552 1550 struct anon *ap = NULL;
1553 1551 pgcnt_t npages;
1554 1552
1555 1553 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1556 1554
1557 1555 sptseg = shmd->shm_sptseg;
1558 1556 sptd = sptseg->s_data;
1559 1557
1560 1558 /*
1561 1559 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1562 1560 * and therefore their pages are SE_SHARED locked
1563 1561 * for the entire life of the segment.
1564 1562 */
1565 1563 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1566 1564 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1567 1565 goto softlock_decrement;
1568 1566 }
1569 1567
1570 1568 /*
1571 1569 * Any thread is free to do a page_find and
1572 1570 * page_unlock() on the pages within this seg.
1573 1571 *
1574 1572 * We are already holding the as->a_lock on the user's
1575 1573 * real segment, but we need to hold the a_lock on the
1576 1574 * underlying dummy as. This is mostly to satisfy the
1577 1575 * underlying HAT layer.
1578 1576 */
1579 1577 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1580 1578 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1581 1579 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1582 1580
1583 1581 amp = sptd->spt_amp;
1584 1582 ASSERT(amp != NULL);
1585 1583 anon_index = seg_page(sptseg, sptseg_addr);
1586 1584
1587 1585 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1588 1586 ap = anon_get_ptr(amp->ahp, anon_index++);
1589 1587 ASSERT(ap != NULL);
1590 1588 swap_xlate(ap, &vp, &offset);
1591 1589
1592 1590 /*
1593 1591 * Use page_find() instead of page_lookup() to
1594 1592 * find the page since we know that it has a
1595 1593 * "shared" lock.
1596 1594 */
1597 1595 pp = page_find(vp, offset);
1598 1596 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1599 1597 if (pp == NULL) {
1600 1598 panic("segspt_softunlock: "
1601 1599 "addr %p, ap %p, vp %p, off %llx",
1602 1600 (void *)adr, (void *)ap, (void *)vp, offset);
1603 1601 /*NOTREACHED*/
1604 1602 }
1605 1603
1606 1604 if (rw == S_WRITE) {
1607 1605 hat_setrefmod(pp);
1608 1606 } else if (rw != S_OTHER) {
1609 1607 hat_setref(pp);
1610 1608 }
1611 1609 page_unlock(pp);
1612 1610 }
1613 1611
1614 1612 softlock_decrement:
1615 1613 npages = btopr(len);
1616 1614 ASSERT(shmd->shm_softlockcnt >= npages);
1617 1615 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1618 1616 if (shmd->shm_softlockcnt == 0) {
1619 1617 /*
1620 1618 * All SOFTLOCKS are gone. Wakeup any waiting
1621 1619 * unmappers so they can try again to unmap.
1622 1620 * Check for waiters first without the mutex
1623 1621 * held so we don't always grab the mutex on
1624 1622 * softunlocks.
1625 1623 */
1626 1624 if (AS_ISUNMAPWAIT(seg->s_as)) {
1627 1625 mutex_enter(&seg->s_as->a_contents);
1628 1626 if (AS_ISUNMAPWAIT(seg->s_as)) {
1629 1627 AS_CLRUNMAPWAIT(seg->s_as);
1630 1628 cv_broadcast(&seg->s_as->a_cv);
1631 1629 }
1632 1630 mutex_exit(&seg->s_as->a_contents);
1633 1631 }
1634 1632 }
1635 1633 }
1636 1634
1637 1635 int
1638 1636 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1639 1637 {
1640 1638 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1641 1639 struct shm_data *shmd;
1642 1640 struct anon_map *shm_amp = shmd_arg->shm_amp;
1643 1641 struct spt_data *sptd;
1644 1642 int error = 0;
1645 1643
1646 1644 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1647 1645
1648 1646 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1649 1647 if (shmd == NULL)
1650 1648 return (ENOMEM);
1651 1649
1652 1650 shmd->shm_sptas = shmd_arg->shm_sptas;
1653 1651 shmd->shm_amp = shm_amp;
1654 1652 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1655 1653
1656 1654 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1657 1655 NULL, 0, seg->s_size);
1658 1656
1659 1657 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1660 1658
1661 1659 seg->s_data = (void *)shmd;
1662 1660 seg->s_ops = &segspt_shmops;
1663 1661 seg->s_szc = shmd->shm_sptseg->s_szc;
1664 1662 sptd = shmd->shm_sptseg->s_data;
1665 1663
1666 1664 if (sptd->spt_flags & SHM_PAGEABLE) {
1667 1665 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1668 1666 KM_NOSLEEP)) == NULL) {
1669 1667 seg->s_data = (void *)NULL;
1670 1668 kmem_free(shmd, (sizeof (*shmd)));
1671 1669 return (ENOMEM);
1672 1670 }
1673 1671 shmd->shm_lckpgs = 0;
1674 1672 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1675 1673 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1676 1674 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1677 1675 seg->s_size, seg->s_szc)) != 0) {
1678 1676 kmem_free(shmd->shm_vpage,
1679 1677 btopr(shm_amp->size));
1680 1678 }
1681 1679 }
1682 1680 } else {
1683 1681 error = hat_share(seg->s_as->a_hat, seg->s_base,
1684 1682 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1685 1683 seg->s_size, seg->s_szc);
1686 1684 }
1687 1685 if (error) {
1688 1686 seg->s_szc = 0;
1689 1687 seg->s_data = (void *)NULL;
1690 1688 kmem_free(shmd, (sizeof (*shmd)));
1691 1689 } else {
1692 1690 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1693 1691 shm_amp->refcnt++;
1694 1692 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1695 1693 }
1696 1694 return (error);
1697 1695 }
1698 1696
1699 1697 int
1700 1698 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1701 1699 {
1702 1700 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1703 1701 int reclaim = 1;
1704 1702
1705 1703 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1706 1704 retry:
1707 1705 if (shmd->shm_softlockcnt > 0) {
1708 1706 if (reclaim == 1) {
1709 1707 segspt_purge(seg);
1710 1708 reclaim = 0;
1711 1709 goto retry;
1712 1710 }
1713 1711 return (EAGAIN);
1714 1712 }
1715 1713
1716 1714 if (ssize != seg->s_size) {
1717 1715 #ifdef DEBUG
1718 1716 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1719 1717 ssize, seg->s_size);
1720 1718 #endif
1721 1719 return (EINVAL);
1722 1720 }
1723 1721
1724 1722 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1725 1723 NULL, 0);
1726 1724 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1727 1725
1728 1726 seg_free(seg);
1729 1727
1730 1728 return (0);
1731 1729 }
1732 1730
1733 1731 void
1734 1732 segspt_shmfree(struct seg *seg)
1735 1733 {
1736 1734 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1737 1735 struct anon_map *shm_amp = shmd->shm_amp;
1738 1736
1739 1737 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1740 1738
1741 1739 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1742 1740 MC_UNLOCK, NULL, 0);
1743 1741
1744 1742 /*
1745 1743 * Need to increment refcnt when attaching
1746 1744 * and decrement when detaching because of dup().
1747 1745 */
1748 1746 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1749 1747 shm_amp->refcnt--;
1750 1748 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1751 1749
1752 1750 if (shmd->shm_vpage) { /* only for DISM */
1753 1751 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1754 1752 shmd->shm_vpage = NULL;
1755 1753 }
1756 1754
1757 1755 /*
1758 1756 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1759 1757 * still working with this segment without holding as lock.
1760 1758 */
1761 1759 ASSERT(shmd->shm_softlockcnt == 0);
1762 1760 mutex_enter(&shmd->shm_segfree_syncmtx);
1763 1761 mutex_destroy(&shmd->shm_segfree_syncmtx);
1764 1762
1765 1763 kmem_free(shmd, sizeof (*shmd));
1766 1764 }
1767 1765
1768 1766 /*ARGSUSED*/
1769 1767 int
1770 1768 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1771 1769 {
1772 1770 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1773 1771
1774 1772 /*
1775 1773 * Shared page table is more than shared mapping.
1776 1774 	 * Individual processes sharing page tables can't change prot
1777 1775 * because there is only one set of page tables.
1778 1776 * This will be allowed after private page table is
1779 1777 * supported.
1780 1778 */
1781 1779 /* need to return correct status error? */
1782 1780 return (0);
1783 1781 }
1784 1782
1785 1783
1786 1784 faultcode_t
1787 1785 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1788 1786 size_t len, enum fault_type type, enum seg_rw rw)
1789 1787 {
1790 1788 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1791 1789 struct seg *sptseg = shmd->shm_sptseg;
1792 1790 struct as *curspt = shmd->shm_sptas;
1793 1791 struct spt_data *sptd = sptseg->s_data;
1794 1792 pgcnt_t npages;
1795 1793 size_t size;
1796 1794 caddr_t segspt_addr, shm_addr;
1797 1795 page_t **ppa;
1798 1796 int i;
1799 1797 ulong_t an_idx = 0;
1800 1798 int err = 0;
1801 1799 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1802 1800 size_t pgsz;
1803 1801 pgcnt_t pgcnt;
1804 1802 caddr_t a;
1805 1803 pgcnt_t pidx;
1806 1804
1807 1805 #ifdef lint
1808 1806 hat = hat;
1809 1807 #endif
1810 1808 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1811 1809
1812 1810 /*
1813 1811 * Because of the way spt is implemented
1814 1812 * the realsize of the segment does not have to be
1815 1813 * equal to the segment size itself. The segment size is
1816 1814 * often in multiples of a page size larger than PAGESIZE.
1817 1815 * The realsize is rounded up to the nearest PAGESIZE
1818 1816 * based on what the user requested. This is a bit of
1819 1817 	 * ugliness that is historical but not easily fixed
1820 1818 * without re-designing the higher levels of ISM.
1821 1819 */
1822 1820 ASSERT(addr >= seg->s_base);
1823 1821 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1824 1822 return (FC_NOMAP);
1825 1823 /*
1826 1824 * For all of the following cases except F_PROT, we need to
1827 1825 * make any necessary adjustments to addr and len
1828 1826 * and get all of the necessary page_t's into an array called ppa[].
1829 1827 *
1830 1828 * The code in shmat() forces base addr and len of ISM segment
1831 1829 * to be aligned to largest page size supported. Therefore,
1832 1830 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1833 1831 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1834 1832 * in large pagesize chunks, or else we will screw up the HAT
1835 1833 * layer by calling hat_memload_array() with differing page sizes
1836 1834 * over a given virtual range.
1837 1835 */
1838 1836 pgsz = page_get_pagesize(sptseg->s_szc);
1839 1837 pgcnt = page_get_pagecnt(sptseg->s_szc);
1840 1838 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1841 1839 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1842 1840 npages = btopr(size);
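	/*
	 * Worked example (hypothetical numbers): with a 4M underlying page
	 * (pgsz == 0x400000) and a fault at addr == seg->s_base + 0x5000 for
	 * len == 0x2000, P2ALIGN() pulls shm_addr back to seg->s_base and
	 * P2ROUNDUP() grows size to 0x400000, so the whole large page is
	 * handled as a single chunk (shmat() guarantees s_base is aligned
	 * to the largest supported page size).
	 */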
1843 1841
1844 1842 /*
1845 1843 * Now we need to convert from addr in segshm to addr in segspt.
1846 1844 */
1847 1845 an_idx = seg_page(seg, shm_addr);
1848 1846 segspt_addr = sptseg->s_base + ptob(an_idx);
1849 1847
1850 1848 ASSERT((segspt_addr + ptob(npages)) <=
1851 1849 (sptseg->s_base + sptd->spt_realsize));
1852 1850 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1853 1851
1854 1852 switch (type) {
1855 1853
1856 1854 case F_SOFTLOCK:
1857 1855
1858 1856 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1859 1857 /*
1860 1858 * Fall through to the F_INVAL case to load up the hat layer
1861 1859 * entries with the HAT_LOAD_LOCK flag.
1862 1860 */
1863 1861 /* FALLTHRU */
1864 1862 case F_INVAL:
1865 1863
1866 1864 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1867 1865 return (FC_NOMAP);
1868 1866
1869 1867 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1870 1868
1871 1869 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1872 1870 if (err != 0) {
1873 1871 if (type == F_SOFTLOCK) {
1874 1872 atomic_add_long((ulong_t *)(
1875 1873 &(shmd->shm_softlockcnt)), -npages);
1876 1874 }
1877 1875 goto dism_err;
1878 1876 }
1879 1877 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1880 1878 a = segspt_addr;
1881 1879 pidx = 0;
1882 1880 if (type == F_SOFTLOCK) {
1883 1881
1884 1882 /*
1885 1883 * Load up the translation keeping it
1886 1884 * locked and don't unlock the page.
1887 1885 */
1888 1886 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1889 1887 hat_memload_array(sptseg->s_as->a_hat,
1890 1888 a, pgsz, &ppa[pidx], sptd->spt_prot,
1891 1889 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1892 1890 }
1893 1891 } else {
1894 1892 /*
1895 1893 * Migrate pages marked for migration
1896 1894 */
1897 1895 if (lgrp_optimizations())
1898 1896 page_migrate(seg, shm_addr, ppa, npages);
1899 1897
1900 1898 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1901 1899 hat_memload_array(sptseg->s_as->a_hat,
1902 1900 a, pgsz, &ppa[pidx],
1903 1901 sptd->spt_prot,
1904 1902 HAT_LOAD_SHARE);
1905 1903 }
1906 1904
1907 1905 /*
1908 1906 * And now drop the SE_SHARED lock(s).
1909 1907 */
1910 1908 if (dyn_ism_unmap) {
1911 1909 for (i = 0; i < npages; i++) {
1912 1910 page_unlock(ppa[i]);
1913 1911 }
1914 1912 }
1915 1913 }
1916 1914
1917 1915 if (!dyn_ism_unmap) {
1918 1916 if (hat_share(seg->s_as->a_hat, shm_addr,
1919 1917 curspt->a_hat, segspt_addr, ptob(npages),
1920 1918 seg->s_szc) != 0) {
1921 1919 panic("hat_share err in DISM fault");
1922 1920 /* NOTREACHED */
1923 1921 }
1924 1922 if (type == F_INVAL) {
1925 1923 for (i = 0; i < npages; i++) {
1926 1924 page_unlock(ppa[i]);
1927 1925 }
1928 1926 }
1929 1927 }
1930 1928 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1931 1929 dism_err:
1932 1930 kmem_free(ppa, npages * sizeof (page_t *));
1933 1931 return (err);
1934 1932
1935 1933 case F_SOFTUNLOCK:
1936 1934
1937 1935 /*
1938 1936 * This is a bit ugly, we pass in the real seg pointer,
1939 1937 * but the segspt_addr is the virtual address within the
1940 1938 * dummy seg.
1941 1939 */
1942 1940 segspt_softunlock(seg, segspt_addr, size, rw);
1943 1941 return (0);
1944 1942
1945 1943 case F_PROT:
1946 1944
1947 1945 /*
1948 1946 * This takes care of the unusual case where a user
1949 1947 * allocates a stack in shared memory and a register
1950 1948 * window overflow is written to that stack page before
1951 1949 * it is otherwise modified.
1952 1950 *
1953 1951 * We can get away with this because ISM segments are
1954 1952 * always rw. Other than this unusual case, there
1955 1953 * should be no instances of protection violations.
1956 1954 */
1957 1955 return (0);
1958 1956
1959 1957 default:
1960 1958 #ifdef DEBUG
1961 1959 panic("segspt_dismfault default type?");
1962 1960 #else
1963 1961 return (FC_NOMAP);
1964 1962 #endif
1965 1963 }
1966 1964 }
1967 1965
1968 1966
1969 1967 faultcode_t
1970 1968 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
1971 1969 size_t len, enum fault_type type, enum seg_rw rw)
1972 1970 {
1973 1971 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1974 1972 struct seg *sptseg = shmd->shm_sptseg;
1975 1973 struct as *curspt = shmd->shm_sptas;
1976 1974 struct spt_data *sptd = sptseg->s_data;
1977 1975 pgcnt_t npages;
1978 1976 size_t size;
1979 1977 caddr_t sptseg_addr, shm_addr;
1980 1978 page_t *pp, **ppa;
1981 1979 int i;
1982 1980 u_offset_t offset;
1983 1981 ulong_t anon_index = 0;
1984 1982 struct vnode *vp;
1985 1983 struct anon_map *amp; /* XXX - for locknest */
1986 1984 struct anon *ap = NULL;
1987 1985 size_t pgsz;
1988 1986 pgcnt_t pgcnt;
1989 1987 caddr_t a;
1990 1988 pgcnt_t pidx;
1991 1989 size_t sz;
1992 1990
1993 1991 #ifdef lint
1994 1992 hat = hat;
1995 1993 #endif
1996 1994
1997 1995 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1998 1996
1999 1997 if (sptd->spt_flags & SHM_PAGEABLE) {
2000 1998 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2001 1999 }
2002 2000
2003 2001 /*
2004 2002 * Because of the way spt is implemented
2005 2003 * the realsize of the segment does not have to be
2006 2004 * equal to the segment size itself. The segment size is
2007 2005 * often in multiples of a page size larger than PAGESIZE.
2008 2006 * The realsize is rounded up to the nearest PAGESIZE
2009 2007 * based on what the user requested. This is a bit of
2010 2008 	 * ugliness that is historical but not easily fixed
2011 2009 * without re-designing the higher levels of ISM.
2012 2010 */
2013 2011 ASSERT(addr >= seg->s_base);
2014 2012 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2015 2013 return (FC_NOMAP);
2016 2014 /*
2017 2015 * For all of the following cases except F_PROT, we need to
2018 2016 * make any necessary adjustments to addr and len
2019 2017 * and get all of the necessary page_t's into an array called ppa[].
2020 2018 *
2021 2019 * The code in shmat() forces base addr and len of ISM segment
2022 2020 * to be aligned to largest page size supported. Therefore,
2023 2021 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2024 2022 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2025 2023 * in large pagesize chunks, or else we will screw up the HAT
2026 2024 * layer by calling hat_memload_array() with differing page sizes
2027 2025 * over a given virtual range.
2028 2026 */
2029 2027 pgsz = page_get_pagesize(sptseg->s_szc);
2030 2028 pgcnt = page_get_pagecnt(sptseg->s_szc);
2031 2029 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2032 2030 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2033 2031 npages = btopr(size);
2034 2032
2035 2033 /*
2036 2034 * Now we need to convert from addr in segshm to addr in segspt.
2037 2035 */
2038 2036 anon_index = seg_page(seg, shm_addr);
2039 2037 sptseg_addr = sptseg->s_base + ptob(anon_index);
2040 2038
2041 2039 /*
2042 2040 * And now we may have to adjust npages downward if we have
2043 2041 * exceeded the realsize of the segment or initial anon
2044 2042 * allocations.
2045 2043 */
2046 2044 if ((sptseg_addr + ptob(npages)) >
2047 2045 (sptseg->s_base + sptd->spt_realsize))
2048 2046 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2049 2047
2050 2048 npages = btopr(size);
2051 2049
2052 2050 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2053 2051 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2054 2052
2055 2053 switch (type) {
2056 2054
2057 2055 case F_SOFTLOCK:
2058 2056
2059 2057 /*
2060 2058 * availrmem is decremented once during anon_swap_adjust()
2061 2059 * and is incremented during the anon_unresv(), which is
2062 2060 * called from shm_rm_amp() when the segment is destroyed.
2063 2061 */
2064 2062 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2065 2063 /*
2066 2064 * Some platforms assume that ISM pages are SE_SHARED
2067 2065 * locked for the entire life of the segment.
2068 2066 */
2069 2067 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2070 2068 return (0);
2071 2069 /*
2072 2070 * Fall through to the F_INVAL case to load up the hat layer
2073 2071 * entries with the HAT_LOAD_LOCK flag.
2074 2072 */
2075 2073
2076 2074 /* FALLTHRU */
2077 2075 case F_INVAL:
2078 2076
2079 2077 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2080 2078 return (FC_NOMAP);
2081 2079
2082 2080 /*
2083 2081 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2084 2082 * may still rely on this call to hat_share(). That
2085 2083 		 * would imply that those hats can fault on a
2086 2084 * HAT_LOAD_LOCK translation, which would seem
2087 2085 * contradictory.
2088 2086 */
2089 2087 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2090 2088 if (hat_share(seg->s_as->a_hat, seg->s_base,
2091 2089 curspt->a_hat, sptseg->s_base,
2092 2090 sptseg->s_size, sptseg->s_szc) != 0) {
2093 2091 panic("hat_share error in ISM fault");
2094 2092 /*NOTREACHED*/
2095 2093 }
2096 2094 return (0);
2097 2095 }
2098 2096 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2099 2097
2100 2098 /*
2101 2099 * I see no need to lock the real seg,
2102 2100 * here, because all of our work will be on the underlying
2103 2101 * dummy seg.
2104 2102 *
2105 2103 * sptseg_addr and npages now account for large pages.
2106 2104 */
2107 2105 amp = sptd->spt_amp;
2108 2106 ASSERT(amp != NULL);
2109 2107 anon_index = seg_page(sptseg, sptseg_addr);
2110 2108
2111 2109 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2112 2110 for (i = 0; i < npages; i++) {
2113 2111 ap = anon_get_ptr(amp->ahp, anon_index++);
2114 2112 ASSERT(ap != NULL);
2115 2113 swap_xlate(ap, &vp, &offset);
2116 2114 pp = page_lookup(vp, offset, SE_SHARED);
2117 2115 ASSERT(pp != NULL);
2118 2116 ppa[i] = pp;
2119 2117 }
2120 2118 		ANON_LOCK_EXIT(&amp->a_rwlock);
2121 2119 ASSERT(i == npages);
2122 2120
2123 2121 /*
2124 2122 * We are already holding the as->a_lock on the user's
2125 2123 * real segment, but we need to hold the a_lock on the
2126 2124 * underlying dummy as. This is mostly to satisfy the
2127 2125 * underlying HAT layer.
2128 2126 */
2129 2127 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2130 2128 a = sptseg_addr;
2131 2129 pidx = 0;
2132 2130 if (type == F_SOFTLOCK) {
2133 2131 /*
2134 2132 * Load up the translation keeping it
2135 2133 * locked and don't unlock the page.
2136 2134 */
2137 2135 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2138 2136 sz = MIN(pgsz, ptob(npages - pidx));
2139 2137 hat_memload_array(sptseg->s_as->a_hat, a,
2140 2138 sz, &ppa[pidx], sptd->spt_prot,
2141 2139 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2142 2140 }
2143 2141 } else {
2144 2142 /*
2145 2143 * Migrate pages marked for migration.
2146 2144 */
2147 2145 if (lgrp_optimizations())
2148 2146 page_migrate(seg, shm_addr, ppa, npages);
2149 2147
2150 2148 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2151 2149 sz = MIN(pgsz, ptob(npages - pidx));
2152 2150 hat_memload_array(sptseg->s_as->a_hat,
2153 2151 a, sz, &ppa[pidx],
2154 2152 sptd->spt_prot, HAT_LOAD_SHARE);
2155 2153 }
2156 2154
2157 2155 /*
2158 2156 * And now drop the SE_SHARED lock(s).
2159 2157 */
2160 2158 for (i = 0; i < npages; i++)
2161 2159 page_unlock(ppa[i]);
2162 2160 }
2163 2161 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2164 2162
2165 2163 kmem_free(ppa, sizeof (page_t *) * npages);
2166 2164 return (0);
2167 2165 case F_SOFTUNLOCK:
2168 2166
2169 2167 /*
2170 2168 * This is a bit ugly, we pass in the real seg pointer,
2171 2169 * but the sptseg_addr is the virtual address within the
2172 2170 * dummy seg.
2173 2171 */
2174 2172 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2175 2173 return (0);
2176 2174
2177 2175 case F_PROT:
2178 2176
2179 2177 /*
2180 2178 * This takes care of the unusual case where a user
2181 2179 * allocates a stack in shared memory and a register
2182 2180 * window overflow is written to that stack page before
2183 2181 * it is otherwise modified.
2184 2182 *
2185 2183 * We can get away with this because ISM segments are
2186 2184 * always rw. Other than this unusual case, there
2187 2185 * should be no instances of protection violations.
2188 2186 */
2189 2187 return (0);
2190 2188
2191 2189 default:
2192 2190 #ifdef DEBUG
2193 2191 cmn_err(CE_WARN, "segspt_shmfault default type?");
2194 2192 #endif
2195 2193 return (FC_NOMAP);
2196 2194 }
2197 2195 }
2198 2196
2199 2197 /*ARGSUSED*/
2200 2198 static faultcode_t
2201 2199 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2202 2200 {
2203 2201 return (0);
2204 2202 }
2205 2203
2206 2204 /*ARGSUSED*/
2207 2205 static int
2208 2206 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2209 2207 {
2210 2208 return (0);
2211 2209 }
2212 2210
2213 2211 /*
2214 2212 * duplicate the shared page tables
2215 2213 */
2216 2214 int
2217 2215 segspt_shmdup(struct seg *seg, struct seg *newseg)
2218 2216 {
2219 2217 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2220 2218 struct anon_map *amp = shmd->shm_amp;
2221 2219 struct shm_data *shmd_new;
2222 2220 struct seg *spt_seg = shmd->shm_sptseg;
2223 2221 struct spt_data *sptd = spt_seg->s_data;
2224 2222 int error = 0;
2225 2223
2226 2224 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2227 2225
2228 2226 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2229 2227 newseg->s_data = (void *)shmd_new;
2230 2228 shmd_new->shm_sptas = shmd->shm_sptas;
2231 2229 shmd_new->shm_amp = amp;
2232 2230 shmd_new->shm_sptseg = shmd->shm_sptseg;
2233 2231 newseg->s_ops = &segspt_shmops;
2234 2232 newseg->s_szc = seg->s_szc;
2235 2233 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2236 2234
2237 2235 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2238 2236 amp->refcnt++;
2239 2237 	ANON_LOCK_EXIT(&amp->a_rwlock);
2240 2238
2241 2239 if (sptd->spt_flags & SHM_PAGEABLE) {
2242 2240 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2243 2241 shmd_new->shm_lckpgs = 0;
2244 2242 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2245 2243 if ((error = hat_share(newseg->s_as->a_hat,
2246 2244 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2247 2245 seg->s_size, seg->s_szc)) != 0) {
2248 2246 kmem_free(shmd_new->shm_vpage,
2249 2247 btopr(amp->size));
2250 2248 }
2251 2249 }
2252 2250 return (error);
2253 2251 } else {
2254 2252 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2255 2253 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2256 2254 seg->s_szc));
2257 2255
2258 2256 }
2259 2257 }
2260 2258
2261 2259 /*ARGSUSED*/
2262 2260 int
2263 2261 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2264 2262 {
2265 2263 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2266 2264 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2267 2265
2268 2266 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2269 2267
2270 2268 /*
2271 2269 * ISM segment is always rw.
2272 2270 */
2273 2271 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2274 2272 }
2275 2273
2276 2274 /*
2277 2275 * Return an array of locked large pages, for empty slots allocate
2278 2276 * private zero-filled anon pages.
2279 2277 */
2280 2278 static int
2281 2279 spt_anon_getpages(
2282 2280 struct seg *sptseg,
2283 2281 caddr_t sptaddr,
2284 2282 size_t len,
2285 2283 page_t *ppa[])
2286 2284 {
2287 2285 struct spt_data *sptd = sptseg->s_data;
2288 2286 struct anon_map *amp = sptd->spt_amp;
2289 2287 enum seg_rw rw = sptd->spt_prot;
2290 2288 uint_t szc = sptseg->s_szc;
2291 2289 size_t pg_sz, share_sz = page_get_pagesize(szc);
2292 2290 pgcnt_t lp_npgs;
2293 2291 caddr_t lp_addr, e_sptaddr;
2294 2292 uint_t vpprot, ppa_szc = 0;
2295 2293 struct vpage *vpage = NULL;
2296 2294 ulong_t j, ppa_idx;
2297 2295 int err, ierr = 0;
2298 2296 pgcnt_t an_idx;
2299 2297 anon_sync_obj_t cookie;
2300 2298 int anon_locked = 0;
2301 2299 pgcnt_t amp_pgs;
2302 2300
2303 2301
2304 2302 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2305 2303 ASSERT(len != 0);
2306 2304
2307 2305 pg_sz = share_sz;
2308 2306 lp_npgs = btop(pg_sz);
2309 2307 lp_addr = sptaddr;
2310 2308 e_sptaddr = sptaddr + len;
2311 2309 an_idx = seg_page(sptseg, sptaddr);
2312 2310 ppa_idx = 0;
2313 2311
2314 2312 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2315 2313
2316 2314 amp_pgs = page_get_pagecnt(amp->a_szc);
2317 2315
2318 2316 /*CONSTCOND*/
2319 2317 while (1) {
2320 2318 for (; lp_addr < e_sptaddr;
2321 2319 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2322 2320
2323 2321 /*
2324 2322 * If we're currently locked, and we get to a new
2325 2323 * page, unlock our current anon chunk.
2326 2324 */
2327 2325 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2328 2326 anon_array_exit(&cookie);
2329 2327 anon_locked = 0;
2330 2328 }
2331 2329 if (!anon_locked) {
2332 2330 anon_array_enter(amp, an_idx, &cookie);
2333 2331 anon_locked = 1;
2334 2332 }
2335 2333 ppa_szc = (uint_t)-1;
2336 2334 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2337 2335 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2338 2336 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2339 2337
2340 2338 if (ierr != 0) {
2341 2339 if (ierr > 0) {
2342 2340 err = FC_MAKE_ERR(ierr);
2343 2341 goto lpgs_err;
2344 2342 }
2345 2343 break;
2346 2344 }
2347 2345 }
2348 2346 if (lp_addr == e_sptaddr) {
2349 2347 break;
2350 2348 }
2351 2349 ASSERT(lp_addr < e_sptaddr);
2352 2350
2353 2351 /*
2354 2352 		 * ierr == -1 means we failed to allocate a large page,
2355 2353 		 * so do a size down operation.
2356 2354 *
2357 2355 * ierr == -2 means some other process that privately shares
2358 2356 * pages with this process has allocated a larger page and we
2359 2357 * need to retry with larger pages. So do a size up
2360 2358 * operation. This relies on the fact that large pages are
2361 2359 * never partially shared i.e. if we share any constituent
2362 2360 * page of a large page with another process we must share the
2363 2361 * entire large page. Note this cannot happen for SOFTLOCK
2364 2362 * case, unless current address (lpaddr) is at the beginning
2365 2363 * of the next page size boundary because the other process
2366 2364 * couldn't have relocated locked pages.
2367 2365 */
2368 2366 ASSERT(ierr == -1 || ierr == -2);
2369 2367 if (segvn_anypgsz) {
2370 2368 ASSERT(ierr == -2 || szc != 0);
2371 2369 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2372 2370 szc = (ierr == -1) ? szc - 1 : szc + 1;
2373 2371 } else {
2374 2372 /*
2375 2373 * For faults and segvn_anypgsz == 0
2376 2374 * we need to be careful not to loop forever
2377 2375 * if existing page is found with szc other
2378 2376 * than 0 or seg->s_szc. This could be due
2379 2377 * to page relocations on behalf of DR or
2380 2378 * more likely large page creation. For this
2381 2379 * case simply re-size to existing page's szc
2382 2380 * if returned by anon_map_getpages().
2383 2381 */
2384 2382 if (ppa_szc == (uint_t)-1) {
2385 2383 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2386 2384 } else {
2387 2385 ASSERT(ppa_szc <= sptseg->s_szc);
2388 2386 ASSERT(ierr == -2 || ppa_szc < szc);
2389 2387 ASSERT(ierr == -1 || ppa_szc > szc);
2390 2388 szc = ppa_szc;
2391 2389 }
2392 2390 }
2393 2391 pg_sz = page_get_pagesize(szc);
2394 2392 lp_npgs = btop(pg_sz);
2395 2393 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2396 2394 }
2397 2395 if (anon_locked) {
2398 2396 anon_array_exit(&cookie);
2399 2397 }
2400 2398 	ANON_LOCK_EXIT(&amp->a_rwlock);
2401 2399 return (0);
2402 2400
2403 2401 lpgs_err:
2404 2402 if (anon_locked) {
2405 2403 anon_array_exit(&cookie);
2406 2404 }
2407 2405 	ANON_LOCK_EXIT(&amp->a_rwlock);
2408 2406 for (j = 0; j < ppa_idx; j++)
2409 2407 page_unlock(ppa[j]);
2410 2408 return (err);
2411 2409 }
2412 2410
2413 2411 /*
2414 2412 * count the number of bytes in a set of spt pages that are currently not
2415 2413 * locked
2416 2414 */
2417 2415 static rctl_qty_t
2418 2416 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2419 2417 {
2420 2418 ulong_t i;
2421 2419 rctl_qty_t unlocked = 0;
2422 2420
2423 2421 for (i = 0; i < npages; i++) {
2424 2422 if (ppa[i]->p_lckcnt == 0)
2425 2423 unlocked += PAGESIZE;
2426 2424 }
2427 2425 return (unlocked);
2428 2426 }
2429 2427
2430 2428 extern u_longlong_t randtick(void);
2431 2429 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2432 2430 #define NLCK (NCPU_P2)
2433 2431 /* Random number with a range [0, n-1], n must be power of two */
2434 2432 #define RAND_P2(n) \
2435 2433 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
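/*
 * Example (assuming NCPU_P2 == 64): NLCK + RAND_P2(NLCK) yields a value in
 * the range 64..127, so concurrent lockers exhaust and refresh their
 * freemem_lock reservations at staggered points rather than in lock step.
 */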
2436 2434
2437 2435 int
2438 2436 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2439 2437 page_t **ppa, ulong_t *lockmap, size_t pos,
2440 2438 rctl_qty_t *locked)
2441 2439 {
2442 2440 struct shm_data *shmd = seg->s_data;
2443 2441 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2444 2442 ulong_t i;
2445 2443 int kernel;
2446 2444 pgcnt_t nlck = 0;
2447 2445 int rv = 0;
2448 2446 int use_reserved = 1;
2449 2447
2450 2448 /* return the number of bytes actually locked */
2451 2449 *locked = 0;
2452 2450
2453 2451 /*
2454 2452 * To avoid contention on freemem_lock, availrmem and pages_locked
2455 2453 * global counters are updated only every nlck locked pages instead of
2456 2454 * every time. Reserve nlck locks up front and deduct from this
2457 2455 * reservation for each page that requires a lock. When the reservation
2458 2456 * is consumed, reserve again. nlck is randomized, so the competing
2459 2457 * threads do not fall into a cyclic lock contention pattern. When
2460 2458 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2461 2459 * is used to lock pages.
2462 2460 */
2463 2461 for (i = 0; i < npages; anon_index++, pos++, i++) {
2464 2462 if (nlck == 0 && use_reserved == 1) {
2465 2463 nlck = NLCK + RAND_P2(NLCK);
2466 2464 /* if fewer loops left, decrease nlck */
2467 2465 nlck = MIN(nlck, npages - i);
2468 2466 /*
2469 2467 * Reserve nlck locks up front and deduct from this
2470 2468 * reservation for each page that requires a lock. When
2471 2469 * the reservation is consumed, reserve again.
2472 2470 */
2473 2471 mutex_enter(&freemem_lock);
2474 2472 if ((availrmem - nlck) < pages_pp_maximum) {
2475 2473 /* Do not do advance memory reserves */
2476 2474 use_reserved = 0;
2477 2475 } else {
2478 2476 availrmem -= nlck;
2479 2477 pages_locked += nlck;
2480 2478 }
2481 2479 mutex_exit(&freemem_lock);
2482 2480 }
2483 2481 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2484 2482 if (sptd->spt_ppa_lckcnt[anon_index] <
2485 2483 (ushort_t)DISM_LOCK_MAX) {
2486 2484 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2487 2485 (ushort_t)DISM_LOCK_MAX) {
2488 2486 cmn_err(CE_WARN,
2489 2487 "DISM page lock limit "
2490 2488 "reached on DISM offset 0x%lx\n",
2491 2489 anon_index << PAGESHIFT);
2492 2490 }
2493 2491 kernel = (sptd->spt_ppa &&
2494 2492 sptd->spt_ppa[anon_index]);
2495 2493 if (!page_pp_lock(ppa[i], 0, kernel ||
2496 2494 use_reserved)) {
2497 2495 sptd->spt_ppa_lckcnt[anon_index]--;
2498 2496 rv = EAGAIN;
2499 2497 break;
2500 2498 }
2501 2499 /* if this is a newly locked page, count it */
2502 2500 if (ppa[i]->p_lckcnt == 1) {
2503 2501 if (kernel == 0 && use_reserved == 1)
2504 2502 nlck--;
2505 2503 *locked += PAGESIZE;
2506 2504 }
2507 2505 shmd->shm_lckpgs++;
2508 2506 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2509 2507 if (lockmap != NULL)
2510 2508 BT_SET(lockmap, pos);
2511 2509 }
2512 2510 }
2513 2511 }
2514 2512 /* Return unused lock reservation */
2515 2513 if (nlck != 0 && use_reserved == 1) {
2516 2514 mutex_enter(&freemem_lock);
2517 2515 availrmem += nlck;
2518 2516 pages_locked -= nlck;
2519 2517 mutex_exit(&freemem_lock);
2520 2518 }
2521 2519
2522 2520 return (rv);
2523 2521 }
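
The reservation scheme used by spt_lockpages() can be read in isolation as the
following simplified sketch (hypothetical helper name; the shm_vpage/lockmap
bookkeeping, the pcache "kernel" special case, the low-memory fallback that
skips advance reserves, and error handling are all omitted):

	static size_t
	spt_lock_with_reserve(page_t **ppa, pgcnt_t npages)
	{
		pgcnt_t nlck = 0;		/* locks still reserved for us */
		size_t locked = 0;
		pgcnt_t i;

		for (i = 0; i < npages; i++) {
			if (nlck == 0) {
				/* reserve a randomized batch of locks up front */
				nlck = MIN(NLCK + RAND_P2(NLCK), npages - i);
				mutex_enter(&freemem_lock);
				availrmem -= nlck;
				pages_locked += nlck;
				mutex_exit(&freemem_lock);
			}
			/* third arg == 1: availrmem was already adjusted above */
			if (page_pp_lock(ppa[i], 0, 1) && ppa[i]->p_lckcnt == 1) {
				nlck--;			/* consume one reserved lock */
				locked += PAGESIZE;	/* newly locked page */
			}
		}
		/* hand back whatever was reserved but never consumed */
		if (nlck != 0) {
			mutex_enter(&freemem_lock);
			availrmem += nlck;
			pages_locked -= nlck;
			mutex_exit(&freemem_lock);
		}
		return (locked);
	}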
2524 2522
2525 2523 int
2526 2524 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2527 2525 rctl_qty_t *unlocked)
2528 2526 {
2529 2527 struct shm_data *shmd = seg->s_data;
2530 2528 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2531 2529 struct anon_map *amp = sptd->spt_amp;
2532 2530 struct anon *ap;
2533 2531 struct vnode *vp;
2534 2532 u_offset_t off;
2535 2533 struct page *pp;
2536 2534 int kernel;
2537 2535 anon_sync_obj_t cookie;
2538 2536 ulong_t i;
2539 2537 pgcnt_t nlck = 0;
2540 2538 pgcnt_t nlck_limit = NLCK;
2541 2539
2542 2540 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2543 2541 for (i = 0; i < npages; i++, anon_index++) {
2544 2542 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2545 2543 anon_array_enter(amp, anon_index, &cookie);
2546 2544 ap = anon_get_ptr(amp->ahp, anon_index);
2547 2545 ASSERT(ap);
2548 2546
2549 2547 swap_xlate(ap, &vp, &off);
2550 2548 anon_array_exit(&cookie);
2551 2549 pp = page_lookup(vp, off, SE_SHARED);
2552 2550 ASSERT(pp);
2553 2551 /*
2554 2552 * availrmem is decremented only for pages which are not
2555 2553 * in seg pcache, for pages in seg pcache availrmem was
2556 2554 * decremented in _dismpagelock()
2557 2555 */
2558 2556 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2559 2557 ASSERT(pp->p_lckcnt > 0);
2560 2558
2561 2559 /*
2562 2560 			 * unlock page but do not change availrmem, we do it
2563 2561 * ourselves every nlck loops.
2564 2562 */
2565 2563 page_pp_unlock(pp, 0, 1);
2566 2564 if (pp->p_lckcnt == 0) {
2567 2565 if (kernel == 0)
2568 2566 nlck++;
2569 2567 *unlocked += PAGESIZE;
2570 2568 }
2571 2569 page_unlock(pp);
2572 2570 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2573 2571 sptd->spt_ppa_lckcnt[anon_index]--;
2574 2572 shmd->shm_lckpgs--;
2575 2573 }
2576 2574
2577 2575 /*
2578 2576 * To reduce freemem_lock contention, do not update availrmem
2579 2577 * until at least NLCK pages have been unlocked.
2580 2578 * 1. No need to update if nlck is zero
2581 2579 * 2. Always update if the last iteration
2582 2580 */
2583 2581 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2584 2582 mutex_enter(&freemem_lock);
2585 2583 availrmem += nlck;
2586 2584 pages_locked -= nlck;
2587 2585 mutex_exit(&freemem_lock);
2588 2586 nlck = 0;
2589 2587 nlck_limit = NLCK + RAND_P2(NLCK);
2590 2588 }
2591 2589 }
2592 2590 	ANON_LOCK_EXIT(&amp->a_rwlock);
2593 2591
2594 2592 return (0);
2595 2593 }
2596 2594
2597 2595 /*ARGSUSED*/
2598 2596 static int
2599 2597 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2600 2598 int attr, int op, ulong_t *lockmap, size_t pos)
2601 2599 {
2602 2600 struct shm_data *shmd = seg->s_data;
2603 2601 struct seg *sptseg = shmd->shm_sptseg;
2604 2602 struct spt_data *sptd = sptseg->s_data;
2605 2603 struct kshmid *sp = sptd->spt_amp->a_sp;
2606 2604 pgcnt_t npages, a_npages;
2607 2605 page_t **ppa;
2608 2606 pgcnt_t an_idx, a_an_idx, ppa_idx;
2609 2607 caddr_t spt_addr, a_addr; /* spt and aligned address */
2610 2608 size_t a_len; /* aligned len */
2611 2609 size_t share_sz;
2612 2610 ulong_t i;
2613 2611 int sts = 0;
2614 2612 rctl_qty_t unlocked = 0;
2615 2613 rctl_qty_t locked = 0;
2616 2614 struct proc *p = curproc;
2617 2615 kproject_t *proj;
2618 2616
2619 2617 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2620 2618 ASSERT(sp != NULL);
2621 2619
2622 2620 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2623 2621 return (0);
2624 2622 }
2625 2623
2626 2624 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2627 2625 an_idx = seg_page(seg, addr);
2628 2626 npages = btopr(len);
2629 2627
2630 2628 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2631 2629 return (ENOMEM);
2632 2630 }
2633 2631
2634 2632 /*
2635 2633 * A shm's project never changes, so no lock needed.
2636 2634 * The shm has a hold on the project, so it will not go away.
2637 2635 * Since we have a mapping to shm within this zone, we know
2638 2636 * that the zone will not go away.
2639 2637 */
2640 2638 proj = sp->shm_perm.ipc_proj;
2641 2639
2642 2640 if (op == MC_LOCK) {
2643 2641
2644 2642 /*
2645 2643 		 * Need to align the addr and size request if they are not
2646 2644 		 * aligned, so we can always allocate large page(s); however,
2647 2645 		 * we only lock what was requested in the initial request.
2648 2646 */
2649 2647 share_sz = page_get_pagesize(sptseg->s_szc);
2650 2648 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2651 2649 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2652 2650 share_sz);
2653 2651 a_npages = btop(a_len);
2654 2652 a_an_idx = seg_page(seg, a_addr);
2655 2653 spt_addr = sptseg->s_base + ptob(a_an_idx);
2656 2654 ppa_idx = an_idx - a_an_idx;
2657 2655
2658 2656 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2659 2657 KM_NOSLEEP)) == NULL) {
2660 2658 return (ENOMEM);
2661 2659 }
2662 2660
2663 2661 /*
2664 2662 * Don't cache any new pages for IO and
2665 2663 * flush any cached pages.
2666 2664 */
2667 2665 mutex_enter(&sptd->spt_lock);
2668 2666 if (sptd->spt_ppa != NULL)
2669 2667 sptd->spt_flags |= DISM_PPA_CHANGED;
2670 2668
2671 2669 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2672 2670 if (sts != 0) {
2673 2671 mutex_exit(&sptd->spt_lock);
2674 2672 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2675 2673 return (sts);
2676 2674 }
2677 2675
2678 2676 mutex_enter(&sp->shm_mlock);
2679 2677 /* enforce locked memory rctl */
2680 2678 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2681 2679
2682 2680 mutex_enter(&p->p_lock);
2683 2681 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2684 2682 mutex_exit(&p->p_lock);
2685 2683 sts = EAGAIN;
2686 2684 } else {
2687 2685 mutex_exit(&p->p_lock);
2688 2686 sts = spt_lockpages(seg, an_idx, npages,
2689 2687 &ppa[ppa_idx], lockmap, pos, &locked);
2690 2688
2691 2689 /*
2692 2690 * correct locked count if not all pages could be
2693 2691 * locked
2694 2692 */
2695 2693 if ((unlocked - locked) > 0) {
2696 2694 rctl_decr_locked_mem(NULL, proj,
2697 2695 (unlocked - locked), 0);
2698 2696 }
2699 2697 }
2700 2698 /*
2701 2699 * unlock pages
2702 2700 */
2703 2701 for (i = 0; i < a_npages; i++)
2704 2702 page_unlock(ppa[i]);
2705 2703 if (sptd->spt_ppa != NULL)
2706 2704 sptd->spt_flags |= DISM_PPA_CHANGED;
2707 2705 mutex_exit(&sp->shm_mlock);
2708 2706 mutex_exit(&sptd->spt_lock);
2709 2707
2710 2708 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2711 2709
2712 2710 } else if (op == MC_UNLOCK) { /* unlock */
2713 2711 page_t **ppa;
2714 2712
2715 2713 mutex_enter(&sptd->spt_lock);
2716 2714 if (shmd->shm_lckpgs == 0) {
2717 2715 mutex_exit(&sptd->spt_lock);
2718 2716 return (0);
2719 2717 }
2720 2718 /*
2721 2719 * Don't cache new IO pages.
2722 2720 */
2723 2721 if (sptd->spt_ppa != NULL)
2724 2722 sptd->spt_flags |= DISM_PPA_CHANGED;
2725 2723
2726 2724 mutex_enter(&sp->shm_mlock);
2727 2725 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2728 2726 if ((ppa = sptd->spt_ppa) != NULL)
2729 2727 sptd->spt_flags |= DISM_PPA_CHANGED;
2730 2728 mutex_exit(&sptd->spt_lock);
2731 2729
2732 2730 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2733 2731 mutex_exit(&sp->shm_mlock);
2734 2732
2735 2733 if (ppa != NULL)
2736 2734 seg_ppurge_wiredpp(ppa);
2737 2735 }
2738 2736 return (sts);
2739 2737 }
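
For context, a rough user-level sketch of what reaches segspt_shmlockop():
attaching with the SHM_PAGEABLE flag to shmat(2) creates a DISM mapping, and
mlock(3C)/munlock(3C) on that mapping arrive here as MC_LOCK/MC_UNLOCK lockops
(error handling omitted):

	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/shm.h>
	#include <sys/mman.h>

	int
	main(void)
	{
		size_t sz = 64UL * 1024 * 1024;
		int id = shmget(IPC_PRIVATE, sz, IPC_CREAT | 0600);
		char *base = shmat(id, NULL, SHM_PAGEABLE);	/* DISM */

		(void) mlock(base, 16UL * 1024 * 1024);		/* MC_LOCK */
		(void) munlock(base, 16UL * 1024 * 1024);	/* MC_UNLOCK */

		(void) shmdt(base);
		(void) shmctl(id, IPC_RMID, NULL);
		return (0);
	}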
2740 2738
2741 2739 /*ARGSUSED*/
2742 2740 int
2743 2741 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2744 2742 {
2745 2743 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2746 2744 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2747 2745 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2748 2746
2749 2747 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2750 2748
2751 2749 /*
2752 2750 * ISM segment is always rw.
2753 2751 */
2754 2752 while (--pgno >= 0)
2755 2753 *protv++ = sptd->spt_prot;
2756 2754 return (0);
2757 2755 }
2758 2756
2759 2757 /*ARGSUSED*/
2760 2758 u_offset_t
2761 2759 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2762 2760 {
2763 2761 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2764 2762
2765 2763 /* Offset does not matter in ISM memory */
2766 2764
2767 2765 return ((u_offset_t)0);
2768 2766 }
2769 2767
2770 2768 /* ARGSUSED */
2771 2769 int
2772 2770 segspt_shmgettype(struct seg *seg, caddr_t addr)
2773 2771 {
2774 2772 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2775 2773 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2776 2774
2777 2775 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2778 2776
2779 2777 /*
2780 2778 * The shared memory mapping is always MAP_SHARED, SWAP is only
2781 2779 * reserved for DISM
2782 2780 */
2783 2781 return (MAP_SHARED |
2784 2782 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2785 2783 }
2786 2784
2787 2785 /*ARGSUSED*/
2788 2786 int
2789 2787 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2790 2788 {
2791 2789 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2792 2790 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2793 2791
2794 2792 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2795 2793
2796 2794 *vpp = sptd->spt_vp;
2797 2795 return (0);
2798 2796 }
2799 2797
2800 2798 /*
2801 2799 * We need to wait for pending IO to complete to a DISM segment in order for
2802 2800 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2803 2801 * than enough time to wait.
2804 2802 */
2805 2803 static clock_t spt_pcache_wait = 120;
2806 2804
2807 2805 /*ARGSUSED*/
2808 2806 static int
2809 2807 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2810 2808 {
2811 2809 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2812 2810 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2813 2811 struct anon_map *amp;
2814 2812 pgcnt_t pg_idx;
2815 2813 ushort_t gen;
2816 2814 clock_t end_lbolt;
2817 2815 int writer;
2818 2816 page_t **ppa;
2819 2817
2820 2818 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2821 2819
2822 2820 if (behav == MADV_FREE) {
2823 2821 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2824 2822 return (0);
2825 2823
2826 2824 amp = sptd->spt_amp;
2827 2825 pg_idx = seg_page(seg, addr);
2828 2826
2829 2827 mutex_enter(&sptd->spt_lock);
2830 2828 if ((ppa = sptd->spt_ppa) == NULL) {
2831 2829 mutex_exit(&sptd->spt_lock);
2832 2830 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2833 2831 anon_disclaim(amp, pg_idx, len);
2834 2832 			ANON_LOCK_EXIT(&amp->a_rwlock);
2835 2833 return (0);
2836 2834 }
2837 2835
2838 2836 sptd->spt_flags |= DISM_PPA_CHANGED;
2839 2837 gen = sptd->spt_gen;
2840 2838
2841 2839 mutex_exit(&sptd->spt_lock);
2842 2840
2843 2841 /*
2844 2842 * Purge all DISM cached pages
2845 2843 */
2846 2844 seg_ppurge_wiredpp(ppa);
2847 2845
2848 2846 /*
2849 2847 * Drop the AS_LOCK so that other threads can grab it
2850 2848 * in the as_pageunlock path and hopefully get the segment
2851 2849 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2852 2850 * to keep this segment resident.
2853 2851 */
2854 2852 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2855 2853 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2856 2854 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2857 2855
2858 2856 mutex_enter(&sptd->spt_lock);
2859 2857
2860 2858 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2861 2859
2862 2860 /*
2863 2861 * Try to wait for pages to get kicked out of the seg_pcache.
2864 2862 */
2865 2863 while (sptd->spt_gen == gen &&
2866 2864 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2867 2865 ddi_get_lbolt() < end_lbolt) {
2868 2866 if (!cv_timedwait_sig(&sptd->spt_cv,
2869 2867 &sptd->spt_lock, end_lbolt)) {
2870 2868 break;
2871 2869 }
2872 2870 }
2873 2871
2874 2872 mutex_exit(&sptd->spt_lock);
2875 2873
2876 2874 /* Regrab the AS_LOCK and release our hold on the segment */
2877 2875 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2878 2876 writer ? RW_WRITER : RW_READER);
2879 2877 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2880 2878 if (shmd->shm_softlockcnt <= 0) {
2881 2879 if (AS_ISUNMAPWAIT(seg->s_as)) {
2882 2880 mutex_enter(&seg->s_as->a_contents);
2883 2881 if (AS_ISUNMAPWAIT(seg->s_as)) {
2884 2882 AS_CLRUNMAPWAIT(seg->s_as);
2885 2883 cv_broadcast(&seg->s_as->a_cv);
2886 2884 }
2887 2885 mutex_exit(&seg->s_as->a_contents);
2888 2886 }
2889 2887 }
2890 2888
2891 2889 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2892 2890 anon_disclaim(amp, pg_idx, len);
2893 2891 		ANON_LOCK_EXIT(&amp->a_rwlock);
2894 2892 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2895 2893 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2896 2894 int already_set;
2897 2895 ulong_t anon_index;
2898 2896 lgrp_mem_policy_t policy;
2899 2897 caddr_t shm_addr;
2900 2898 size_t share_size;
2901 2899 size_t size;
2902 2900 struct seg *sptseg = shmd->shm_sptseg;
2903 2901 caddr_t sptseg_addr;
2904 2902
2905 2903 /*
2906 2904 * Align address and length to page size of underlying segment
2907 2905 */
2908 2906 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2909 2907 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2910 2908 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2911 2909 share_size);
2912 2910
2913 2911 amp = shmd->shm_amp;
2914 2912 anon_index = seg_page(seg, shm_addr);
2915 2913
2916 2914 /*
2917 2915 * And now we may have to adjust size downward if we have
2918 2916 * exceeded the realsize of the segment or initial anon
2919 2917 * allocations.
2920 2918 */
2921 2919 sptseg_addr = sptseg->s_base + ptob(anon_index);
2922 2920 if ((sptseg_addr + size) >
2923 2921 (sptseg->s_base + sptd->spt_realsize))
2924 2922 size = (sptseg->s_base + sptd->spt_realsize) -
2925 2923 sptseg_addr;
2926 2924
2927 2925 /*
2928 2926 * Set memory allocation policy for this segment
2929 2927 */
2930 2928 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2931 2929 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2932 2930 NULL, 0, len);
2933 2931
2934 2932 /*
2935 2933 * If random memory allocation policy set already,
2936 2934 * don't bother reapplying it.
2937 2935 */
2938 2936 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2939 2937 return (0);
2940 2938
2941 2939 /*
2942 2940 * Mark any existing pages in the given range for
2943 2941 		 * migration, flushing the I/O page cache, and using the
2944 2942 		 * underlying segment to calculate the anon index and to
2945 2943 		 * look up the anonmap and vnode pointer.
2946 2944 */
2947 2945 if (shmd->shm_softlockcnt > 0)
2948 2946 segspt_purge(seg);
2949 2947
2950 2948 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2951 2949 }
2952 2950
2953 2951 return (0);
2954 2952 }
2955 2953
2956 2954 /*ARGSUSED*/
2957 2955 void
2958 2956 segspt_shmdump(struct seg *seg)
2959 2957 {
2960 2958 /* no-op for ISM segment */
2961 2959 }
2962 2960
2963 2961 /*ARGSUSED*/
2964 2962 static faultcode_t
2965 2963 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2966 2964 {
2967 2965 return (ENOTSUP);
2968 2966 }
2969 2967
2970 2968 /*
2971 2969 * get a memory ID for an addr in a given segment
2972 2970 */
2973 2971 static int
2974 2972 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2975 2973 {
2976 2974 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2977 2975 struct anon *ap;
2978 2976 size_t anon_index;
2979 2977 struct anon_map *amp = shmd->shm_amp;
2980 2978 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2981 2979 struct seg *sptseg = shmd->shm_sptseg;
2982 2980 anon_sync_obj_t cookie;
2983 2981
2984 2982 anon_index = seg_page(seg, addr);
2985 2983
2986 2984 if (addr > (seg->s_base + sptd->spt_realsize)) {
2987 2985 return (EFAULT);
2988 2986 }
2989 2987
2990 2988 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2991 2989 anon_array_enter(amp, anon_index, &cookie);
2992 2990 ap = anon_get_ptr(amp->ahp, anon_index);
2993 2991 if (ap == NULL) {
2994 2992 struct page *pp;
2995 2993 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
2996 2994
2997 2995 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
2998 2996 if (pp == NULL) {
2999 2997 anon_array_exit(&cookie);
3000 2998 			ANON_LOCK_EXIT(&amp->a_rwlock);
3001 2999 return (ENOMEM);
3002 3000 }
3003 3001 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3004 3002 page_unlock(pp);
3005 3003 }
3006 3004 anon_array_exit(&cookie);
3007 3005 	ANON_LOCK_EXIT(&amp->a_rwlock);
3008 3006 memidp->val[0] = (uintptr_t)ap;
3009 3007 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3010 3008 return (0);
3011 3009 }
3012 3010
3013 3011 /*
3014 3012 * Get memory allocation policy info for specified address in given segment
3015 3013 */
3016 3014 static lgrp_mem_policy_info_t *
3017 3015 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3018 3016 {
3019 3017 struct anon_map *amp;
3020 3018 ulong_t anon_index;
3021 3019 lgrp_mem_policy_info_t *policy_info;
3022 3020 struct shm_data *shm_data;
3023 3021
3024 3022 ASSERT(seg != NULL);
3025 3023
3026 3024 /*
3027 3025 * Get anon_map from segshm
3028 3026 *
3029 3027 * Assume that no lock needs to be held on anon_map, since
3030 3028 * it should be protected by its reference count which must be
3031 3029 * nonzero for an existing segment
3032 3030 * Need to grab readers lock on policy tree though
3033 3031 */
3034 3032 shm_data = (struct shm_data *)seg->s_data;
3035 3033 if (shm_data == NULL)
3036 3034 return (NULL);
3037 3035 amp = shm_data->shm_amp;
3038 3036 ASSERT(amp->refcnt != 0);
3039 3037
3040 3038 /*
3041 3039 * Get policy info
3042 3040 *
3043 3041 * Assume starting anon index of 0
3044 3042 */
3045 3043 anon_index = seg_page(seg, addr);
3046 3044 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3047 3045
3048 3046 return (policy_info);
3049 -}
3050 -
3051 -/*ARGSUSED*/
3052 -static int
3053 -segspt_shmcapable(struct seg *seg, segcapability_t capability)
3054 -{
3055 - return (0);
3056 3047 }