6151 use NULL setpagesize segop as a shorthand for ENOTSUP
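The change below removes segspt's .setpagesize entry and the matching segspt_shmsetpgsz declaration; per the synopsis, a setpagesize slot left NULL in a seg_ops table is now taken by the generic segment layer to mean ENOTSUP. A minimal sketch of what that shorthand implies on the dispatch side follows (the wrapper name segop_setpagesize and its exact placement are assumptions for illustration, not something shown in this webrev):

	/* Hypothetical dispatch sketch -- not part of this webrev. */
	int
	segop_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
	{
		/* A NULL slot in seg_ops is shorthand for "not supported". */
		if (seg->s_ops->setpagesize == NULL)
			return (ENOTSUP);

		return (seg->s_ops->setpagesize(seg, addr, len, szc));
	}

With that convention, an ENOTSUP-only stub and its ops-table initializer can simply be dropped, which is what the two removed lines in this diff do. Note that the dummy segspt_ops still uses SEGSPT_BADOP(int) for setpagesize, since that op should never be reached on the underlying spt segment.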
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
 61 61 * segspt_minfree is the memory left for the system after ISM
 62 62 * locked its pages; it is set to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
 67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
 68 68 * If somebody wants even more memory for ISM (risking hanging
 69 69 * the system) they can patch segspt_minfree to a smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 89 .dup = SEGSPT_BADOP(int),
90 90 .unmap = segspt_unmap,
91 91 .free = segspt_free,
92 92 .fault = SEGSPT_BADOP(int),
93 93 .faulta = SEGSPT_BADOP(faultcode_t),
94 94 .setprot = SEGSPT_BADOP(int),
95 95 .checkprot = SEGSPT_BADOP(int),
96 96 .kluster = SEGSPT_BADOP(int),
97 97 .swapout = SEGSPT_BADOP(size_t),
98 98 .sync = SEGSPT_BADOP(int),
99 99 .incore = SEGSPT_BADOP(size_t),
100 100 .lockop = SEGSPT_BADOP(int),
101 101 .getprot = SEGSPT_BADOP(int),
102 102 .getoffset = SEGSPT_BADOP(u_offset_t),
103 103 .gettype = SEGSPT_BADOP(int),
104 104 .getvp = SEGSPT_BADOP(int),
105 105 .advise = SEGSPT_BADOP(int),
106 106 .dump = SEGSPT_BADOP(void),
107 107 .pagelock = SEGSPT_BADOP(int),
108 108 .setpagesize = SEGSPT_BADOP(int),
109 109 .getmemid = SEGSPT_BADOP(int),
110 110 .getpolicy = segspt_getpolicy,
111 111 .capable = SEGSPT_BADOP(int),
112 112 };
113 113
114 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
115 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
116 116 static void segspt_shmfree(struct seg *seg);
117 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
118 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
119 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 121 register size_t len, register uint_t prot);
122 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 123 uint_t prot);
124 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 125 static size_t segspt_shmswapout(struct seg *seg);
126 126 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
127 127 register char *vec);
128 128 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
129 129 int attr, uint_t flags);
130 130 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
131 131 int attr, int op, ulong_t *lockmap, size_t pos);
132 132 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
133 133 uint_t *protv);
134 134 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
135 135 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
136 136 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
137 137 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
138 138 uint_t behav);
139 139 static void segspt_shmdump(struct seg *seg);
140 140 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
141 141 struct page ***, enum lock_type, enum seg_rw);
142 -static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
143 142 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
144 143 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
145 144
146 145 struct seg_ops segspt_shmops = {
147 146 .dup = segspt_shmdup,
148 147 .unmap = segspt_shmunmap,
149 148 .free = segspt_shmfree,
150 149 .fault = segspt_shmfault,
151 150 .faulta = segspt_shmfaulta,
152 151 .setprot = segspt_shmsetprot,
153 152 .checkprot = segspt_shmcheckprot,
154 153 .kluster = segspt_shmkluster,
155 154 .swapout = segspt_shmswapout,
156 155 .sync = segspt_shmsync,
157 156 .incore = segspt_shmincore,
158 157 .lockop = segspt_shmlockop,
159 158 .getprot = segspt_shmgetprot,
160 159 .getoffset = segspt_shmgetoffset,
161 160 .gettype = segspt_shmgettype,
162 161 .getvp = segspt_shmgetvp,
163 162 .advise = segspt_shmadvise,
164 163 .dump = segspt_shmdump,
165 164 .pagelock = segspt_shmpagelock,
166 - .setpagesize = segspt_shmsetpgsz,
167 165 .getmemid = segspt_shmgetmemid,
168 166 .getpolicy = segspt_shmgetpolicy,
169 167 };
170 168
171 169 static void segspt_purge(struct seg *seg);
172 170 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
173 171 enum seg_rw, int);
174 172 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
175 173 page_t **ppa);
176 174
177 175
178 176
179 177 /*ARGSUSED*/
180 178 int
181 179 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
182 180 uint_t prot, uint_t flags, uint_t share_szc)
183 181 {
184 182 int err;
185 183 struct as *newas;
186 184 struct segspt_crargs sptcargs;
187 185
188 186 #ifdef DEBUG
189 187 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
190 188 tnf_ulong, size, size );
191 189 #endif
 192 190 	if (segspt_minfree == 0)	/* leave min 5% of availrmem */
 193 191 		segspt_minfree = availrmem/20;	/* for the system */
194 192
195 193 if (!hat_supported(HAT_SHARED_PT, (void *)0))
196 194 return (EINVAL);
197 195
198 196 /*
199 197 * get a new as for this shared memory segment
200 198 */
201 199 newas = as_alloc();
202 200 newas->a_proc = NULL;
203 201 sptcargs.amp = amp;
204 202 sptcargs.prot = prot;
205 203 sptcargs.flags = flags;
206 204 sptcargs.szc = share_szc;
207 205 /*
208 206 * create a shared page table (spt) segment
209 207 */
210 208
211 209 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
212 210 as_free(newas);
213 211 return (err);
214 212 }
215 213 *sptseg = sptcargs.seg_spt;
216 214 return (0);
217 215 }
218 216
219 217 void
220 218 sptdestroy(struct as *as, struct anon_map *amp)
221 219 {
222 220
223 221 #ifdef DEBUG
224 222 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
225 223 #endif
226 224 (void) as_unmap(as, SEGSPTADDR, amp->size);
227 225 as_free(as);
228 226 }
229 227
230 228 /*
231 229 * called from seg_free().
232 230 * free (i.e., unlock, unmap, return to free list)
233 231 * all the pages in the given seg.
234 232 */
235 233 void
236 234 segspt_free(struct seg *seg)
237 235 {
238 236 struct spt_data *sptd = (struct spt_data *)seg->s_data;
239 237
240 238 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
241 239
242 240 if (sptd != NULL) {
243 241 if (sptd->spt_realsize)
244 242 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
245 243
246 244 if (sptd->spt_ppa_lckcnt)
247 245 kmem_free(sptd->spt_ppa_lckcnt,
248 246 sizeof (*sptd->spt_ppa_lckcnt)
249 247 * btopr(sptd->spt_amp->size));
250 248 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
251 249 cv_destroy(&sptd->spt_cv);
252 250 mutex_destroy(&sptd->spt_lock);
253 251 kmem_free(sptd, sizeof (*sptd));
254 252 }
255 253 }
256 254
257 255 /*ARGSUSED*/
258 256 static int
259 257 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
260 258 uint_t flags)
261 259 {
262 260 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
263 261
264 262 return (0);
265 263 }
266 264
267 265 /*ARGSUSED*/
268 266 static size_t
269 267 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
270 268 {
271 269 caddr_t eo_seg;
272 270 pgcnt_t npages;
273 271 struct shm_data *shmd = (struct shm_data *)seg->s_data;
274 272 struct seg *sptseg;
275 273 struct spt_data *sptd;
276 274
277 275 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
278 276 #ifdef lint
279 277 seg = seg;
280 278 #endif
281 279 sptseg = shmd->shm_sptseg;
282 280 sptd = sptseg->s_data;
283 281
284 282 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
285 283 eo_seg = addr + len;
286 284 while (addr < eo_seg) {
287 285 /* page exists, and it's locked. */
288 286 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
289 287 SEG_PAGE_ANON;
290 288 addr += PAGESIZE;
291 289 }
292 290 return (len);
293 291 } else {
294 292 struct anon_map *amp = shmd->shm_amp;
295 293 struct anon *ap;
296 294 page_t *pp;
297 295 pgcnt_t anon_index;
298 296 struct vnode *vp;
299 297 u_offset_t off;
300 298 ulong_t i;
301 299 int ret;
302 300 anon_sync_obj_t cookie;
303 301
304 302 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
305 303 anon_index = seg_page(seg, addr);
306 304 npages = btopr(len);
307 305 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
308 306 return (EINVAL);
309 307 }
 310 308 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
311 309 for (i = 0; i < npages; i++, anon_index++) {
312 310 ret = 0;
313 311 anon_array_enter(amp, anon_index, &cookie);
314 312 ap = anon_get_ptr(amp->ahp, anon_index);
315 313 if (ap != NULL) {
316 314 swap_xlate(ap, &vp, &off);
317 315 anon_array_exit(&cookie);
318 316 pp = page_lookup_nowait(vp, off, SE_SHARED);
319 317 if (pp != NULL) {
320 318 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
321 319 page_unlock(pp);
322 320 }
323 321 } else {
324 322 anon_array_exit(&cookie);
325 323 }
326 324 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
327 325 ret |= SEG_PAGE_LOCKED;
328 326 }
329 327 *vec++ = (char)ret;
330 328 }
 331 329 	ANON_LOCK_EXIT(&amp->a_rwlock);
332 330 return (len);
333 331 }
334 332 }
335 333
336 334 static int
337 335 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
338 336 {
339 337 size_t share_size;
340 338
341 339 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
342 340
343 341 /*
344 342 * seg.s_size may have been rounded up to the largest page size
345 343 * in shmat().
 346 344 	 * XXX This should be cleaned up. sptdestroy should take a length
347 345 * argument which should be the same as sptcreate. Then
348 346 * this rounding would not be needed (or is done in shm.c)
349 347 * Only the check for full segment will be needed.
350 348 *
351 349 * XXX -- shouldn't raddr == 0 always? These tests don't seem
352 350 * to be useful at all.
353 351 */
354 352 share_size = page_get_pagesize(seg->s_szc);
355 353 ssize = P2ROUNDUP(ssize, share_size);
356 354
357 355 if (raddr == seg->s_base && ssize == seg->s_size) {
358 356 seg_free(seg);
359 357 return (0);
360 358 } else
361 359 return (EINVAL);
362 360 }
363 361
364 362 int
365 363 segspt_create(struct seg *seg, caddr_t argsp)
366 364 {
367 365 int err;
368 366 caddr_t addr = seg->s_base;
369 367 struct spt_data *sptd;
370 368 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
371 369 struct anon_map *amp = sptcargs->amp;
372 370 struct kshmid *sp = amp->a_sp;
373 371 struct cred *cred = CRED();
374 372 ulong_t i, j, anon_index = 0;
375 373 pgcnt_t npages = btopr(amp->size);
376 374 struct vnode *vp;
377 375 page_t **ppa;
378 376 uint_t hat_flags;
379 377 size_t pgsz;
380 378 pgcnt_t pgcnt;
381 379 caddr_t a;
382 380 pgcnt_t pidx;
383 381 size_t sz;
384 382 proc_t *procp = curproc;
385 383 rctl_qty_t lockedbytes = 0;
386 384 kproject_t *proj;
387 385
388 386 /*
389 387 * We are holding the a_lock on the underlying dummy as,
390 388 * so we can make calls to the HAT layer.
391 389 */
392 390 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
393 391 ASSERT(sp != NULL);
394 392
395 393 #ifdef DEBUG
396 394 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
397 395 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
398 396 #endif
399 397 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
400 398 if (err = anon_swap_adjust(npages))
401 399 return (err);
402 400 }
403 401 err = ENOMEM;
404 402
405 403 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
406 404 goto out1;
407 405
408 406 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
409 407 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
410 408 KM_NOSLEEP)) == NULL)
411 409 goto out2;
412 410 }
413 411
414 412 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
415 413
416 414 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
417 415 goto out3;
418 416
419 417 seg->s_ops = &segspt_ops;
420 418 sptd->spt_vp = vp;
421 419 sptd->spt_amp = amp;
422 420 sptd->spt_prot = sptcargs->prot;
423 421 sptd->spt_flags = sptcargs->flags;
424 422 seg->s_data = (caddr_t)sptd;
425 423 sptd->spt_ppa = NULL;
426 424 sptd->spt_ppa_lckcnt = NULL;
427 425 seg->s_szc = sptcargs->szc;
428 426 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
429 427 sptd->spt_gen = 0;
430 428
 431 429 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
432 430 if (seg->s_szc > amp->a_szc) {
433 431 amp->a_szc = seg->s_szc;
434 432 }
 435 433 	ANON_LOCK_EXIT(&amp->a_rwlock);
436 434
437 435 /*
438 436 * Set policy to affect initial allocation of pages in
439 437 * anon_map_createpages()
440 438 */
441 439 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
442 440 NULL, 0, ptob(npages));
443 441
444 442 if (sptcargs->flags & SHM_PAGEABLE) {
445 443 size_t share_sz;
446 444 pgcnt_t new_npgs, more_pgs;
447 445 struct anon_hdr *nahp;
448 446 zone_t *zone;
449 447
450 448 share_sz = page_get_pagesize(seg->s_szc);
451 449 if (!IS_P2ALIGNED(amp->size, share_sz)) {
452 450 /*
453 451 * We are rounding up the size of the anon array
454 452 * on 4 M boundary because we always create 4 M
455 453 * of page(s) when locking, faulting pages and we
456 454 * don't have to check for all corner cases e.g.
457 455 * if there is enough space to allocate 4 M
458 456 * page.
459 457 */
460 458 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
461 459 more_pgs = new_npgs - npages;
462 460
463 461 /*
464 462 * The zone will never be NULL, as a fully created
465 463 * shm always has an owning zone.
466 464 */
467 465 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
468 466 ASSERT(zone != NULL);
469 467 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
470 468 err = ENOMEM;
471 469 goto out4;
472 470 }
473 471
474 472 nahp = anon_create(new_npgs, ANON_SLEEP);
 475 473 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
476 474 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
477 475 ANON_SLEEP);
478 476 anon_release(amp->ahp, npages);
479 477 amp->ahp = nahp;
480 478 ASSERT(amp->swresv == ptob(npages));
481 479 amp->swresv = amp->size = ptob(new_npgs);
 482 480 		ANON_LOCK_EXIT(&amp->a_rwlock);
483 481 npages = new_npgs;
484 482 }
485 483
486 484 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
487 485 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
488 486 sptd->spt_pcachecnt = 0;
489 487 sptd->spt_realsize = ptob(npages);
490 488 sptcargs->seg_spt = seg;
491 489 return (0);
492 490 }
493 491
494 492 /*
495 493 * get array of pages for each anon slot in amp
496 494 */
497 495 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
498 496 seg, addr, S_CREATE, cred)) != 0)
499 497 goto out4;
500 498
501 499 mutex_enter(&sp->shm_mlock);
502 500
503 501 /* May be partially locked, so, count bytes to charge for locking */
504 502 for (i = 0; i < npages; i++)
505 503 if (ppa[i]->p_lckcnt == 0)
506 504 lockedbytes += PAGESIZE;
507 505
508 506 proj = sp->shm_perm.ipc_proj;
509 507
510 508 if (lockedbytes > 0) {
511 509 mutex_enter(&procp->p_lock);
512 510 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
513 511 mutex_exit(&procp->p_lock);
514 512 mutex_exit(&sp->shm_mlock);
515 513 for (i = 0; i < npages; i++)
516 514 page_unlock(ppa[i]);
517 515 err = ENOMEM;
518 516 goto out4;
519 517 }
520 518 mutex_exit(&procp->p_lock);
521 519 }
522 520
523 521 /*
524 522 * addr is initial address corresponding to the first page on ppa list
525 523 */
526 524 for (i = 0; i < npages; i++) {
527 525 /* attempt to lock all pages */
528 526 if (page_pp_lock(ppa[i], 0, 1) == 0) {
529 527 /*
530 528 * if unable to lock any page, unlock all
531 529 * of them and return error
532 530 */
533 531 for (j = 0; j < i; j++)
534 532 page_pp_unlock(ppa[j], 0, 1);
535 533 for (i = 0; i < npages; i++)
536 534 page_unlock(ppa[i]);
537 535 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
538 536 mutex_exit(&sp->shm_mlock);
539 537 err = ENOMEM;
540 538 goto out4;
541 539 }
542 540 }
543 541 mutex_exit(&sp->shm_mlock);
544 542
545 543 /*
546 544 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
547 545 * for the entire life of the segment. For example platforms
548 546 * that do not support Dynamic Reconfiguration.
549 547 */
550 548 hat_flags = HAT_LOAD_SHARE;
551 549 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
552 550 hat_flags |= HAT_LOAD_LOCK;
553 551
554 552 /*
 555 553 	 * Load translations one large page at a time
556 554 * to make sure we don't create mappings bigger than
557 555 * segment's size code in case underlying pages
558 556 * are shared with segvn's segment that uses bigger
559 557 * size code than we do.
560 558 */
561 559 pgsz = page_get_pagesize(seg->s_szc);
562 560 pgcnt = page_get_pagecnt(seg->s_szc);
563 561 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
564 562 sz = MIN(pgsz, ptob(npages - pidx));
565 563 hat_memload_array(seg->s_as->a_hat, a, sz,
566 564 &ppa[pidx], sptd->spt_prot, hat_flags);
567 565 }
568 566
569 567 /*
570 568 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
571 569 * we will leave the pages locked SE_SHARED for the life
572 570 * of the ISM segment. This will prevent any calls to
573 571 * hat_pageunload() on this ISM segment for those platforms.
574 572 */
575 573 if (!(hat_flags & HAT_LOAD_LOCK)) {
576 574 /*
577 575 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
578 576 * we no longer need to hold the SE_SHARED lock on the pages,
579 577 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
580 578 * SE_SHARED lock on the pages as necessary.
581 579 */
582 580 for (i = 0; i < npages; i++)
583 581 page_unlock(ppa[i]);
584 582 }
585 583 sptd->spt_pcachecnt = 0;
586 584 kmem_free(ppa, ((sizeof (page_t *)) * npages));
587 585 sptd->spt_realsize = ptob(npages);
588 586 atomic_add_long(&spt_used, npages);
589 587 sptcargs->seg_spt = seg;
590 588 return (0);
591 589
592 590 out4:
593 591 seg->s_data = NULL;
594 592 kmem_free(vp, sizeof (*vp));
595 593 cv_destroy(&sptd->spt_cv);
596 594 out3:
597 595 mutex_destroy(&sptd->spt_lock);
598 596 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
599 597 kmem_free(ppa, (sizeof (*ppa) * npages));
600 598 out2:
601 599 kmem_free(sptd, sizeof (*sptd));
602 600 out1:
603 601 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
604 602 anon_swap_restore(npages);
605 603 return (err);
606 604 }
607 605
608 606 /*ARGSUSED*/
609 607 void
610 608 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
611 609 {
612 610 struct page *pp;
613 611 struct spt_data *sptd = (struct spt_data *)seg->s_data;
614 612 pgcnt_t npages;
615 613 ulong_t anon_idx;
616 614 struct anon_map *amp;
617 615 struct anon *ap;
618 616 struct vnode *vp;
619 617 u_offset_t off;
620 618 uint_t hat_flags;
621 619 int root = 0;
622 620 pgcnt_t pgs, curnpgs = 0;
623 621 page_t *rootpp;
624 622 rctl_qty_t unlocked_bytes = 0;
625 623 kproject_t *proj;
626 624 kshmid_t *sp;
627 625
628 626 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
629 627
630 628 len = P2ROUNDUP(len, PAGESIZE);
631 629
632 630 npages = btop(len);
633 631
634 632 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
635 633 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
636 634 (sptd->spt_flags & SHM_PAGEABLE)) {
637 635 hat_flags = HAT_UNLOAD_UNMAP;
638 636 }
639 637
640 638 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
641 639
642 640 amp = sptd->spt_amp;
643 641 if (sptd->spt_flags & SHM_PAGEABLE)
644 642 npages = btop(amp->size);
645 643
646 644 ASSERT(amp != NULL);
647 645
648 646 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
649 647 sp = amp->a_sp;
650 648 proj = sp->shm_perm.ipc_proj;
651 649 mutex_enter(&sp->shm_mlock);
652 650 }
653 651 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
654 652 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
655 653 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
656 654 panic("segspt_free_pages: null app");
657 655 /*NOTREACHED*/
658 656 }
659 657 } else {
660 658 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
661 659 == NULL)
662 660 continue;
663 661 }
664 662 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
665 663 swap_xlate(ap, &vp, &off);
666 664
667 665 /*
668 666 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
669 667 * the pages won't be having SE_SHARED lock at this
670 668 * point.
671 669 *
672 670 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
673 671 * the pages are still held SE_SHARED locked from the
674 672 * original segspt_create()
675 673 *
676 674 * Our goal is to get SE_EXCL lock on each page, remove
677 675 * permanent lock on it and invalidate the page.
678 676 */
679 677 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
680 678 if (hat_flags == HAT_UNLOAD_UNMAP)
681 679 pp = page_lookup(vp, off, SE_EXCL);
682 680 else {
683 681 if ((pp = page_find(vp, off)) == NULL) {
684 682 panic("segspt_free_pages: "
685 683 "page not locked");
686 684 /*NOTREACHED*/
687 685 }
688 686 if (!page_tryupgrade(pp)) {
689 687 page_unlock(pp);
690 688 pp = page_lookup(vp, off, SE_EXCL);
691 689 }
692 690 }
693 691 if (pp == NULL) {
694 692 panic("segspt_free_pages: "
695 693 "page not in the system");
696 694 /*NOTREACHED*/
697 695 }
698 696 ASSERT(pp->p_lckcnt > 0);
699 697 page_pp_unlock(pp, 0, 1);
700 698 if (pp->p_lckcnt == 0)
701 699 unlocked_bytes += PAGESIZE;
702 700 } else {
703 701 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
704 702 continue;
705 703 }
706 704 /*
707 705 * It's logical to invalidate the pages here as in most cases
708 706 * these were created by segspt.
709 707 */
710 708 if (pp->p_szc != 0) {
711 709 if (root == 0) {
712 710 ASSERT(curnpgs == 0);
713 711 root = 1;
714 712 rootpp = pp;
715 713 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
716 714 ASSERT(pgs > 1);
717 715 ASSERT(IS_P2ALIGNED(pgs, pgs));
718 716 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
719 717 curnpgs--;
720 718 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
721 719 ASSERT(curnpgs == 1);
722 720 ASSERT(page_pptonum(pp) ==
723 721 page_pptonum(rootpp) + (pgs - 1));
724 722 page_destroy_pages(rootpp);
725 723 root = 0;
726 724 curnpgs = 0;
727 725 } else {
728 726 ASSERT(curnpgs > 1);
729 727 ASSERT(page_pptonum(pp) ==
730 728 page_pptonum(rootpp) + (pgs - curnpgs));
731 729 curnpgs--;
732 730 }
733 731 } else {
734 732 if (root != 0 || curnpgs != 0) {
735 733 panic("segspt_free_pages: bad large page");
736 734 /*NOTREACHED*/
737 735 }
738 736 /*
739 737 * Before destroying the pages, we need to take care
740 738 * of the rctl locked memory accounting. For that
 741 739 	 * we need to calculate the unlocked_bytes.
742 740 */
743 741 if (pp->p_lckcnt > 0)
744 742 unlocked_bytes += PAGESIZE;
745 743 /*LINTED: constant in conditional context */
746 744 VN_DISPOSE(pp, B_INVAL, 0, kcred);
747 745 }
748 746 }
749 747 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
750 748 if (unlocked_bytes > 0)
751 749 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
752 750 mutex_exit(&sp->shm_mlock);
753 751 }
754 752 if (root != 0 || curnpgs != 0) {
755 753 panic("segspt_free_pages: bad large page");
756 754 /*NOTREACHED*/
757 755 }
758 756
759 757 /*
760 758 * mark that pages have been released
761 759 */
762 760 sptd->spt_realsize = 0;
763 761
764 762 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
765 763 atomic_add_long(&spt_used, -npages);
766 764 anon_swap_restore(npages);
767 765 }
768 766 }
769 767
770 768 /*
771 769 * Get memory allocation policy info for specified address in given segment
772 770 */
773 771 static lgrp_mem_policy_info_t *
774 772 segspt_getpolicy(struct seg *seg, caddr_t addr)
775 773 {
776 774 struct anon_map *amp;
777 775 ulong_t anon_index;
778 776 lgrp_mem_policy_info_t *policy_info;
779 777 struct spt_data *spt_data;
780 778
781 779 ASSERT(seg != NULL);
782 780
783 781 /*
784 782 * Get anon_map from segspt
785 783 *
786 784 * Assume that no lock needs to be held on anon_map, since
787 785 * it should be protected by its reference count which must be
788 786 * nonzero for an existing segment
789 787 * Need to grab readers lock on policy tree though
790 788 */
791 789 spt_data = (struct spt_data *)seg->s_data;
792 790 if (spt_data == NULL)
793 791 return (NULL);
794 792 amp = spt_data->spt_amp;
795 793 ASSERT(amp->refcnt != 0);
796 794
797 795 /*
798 796 * Get policy info
799 797 *
800 798 * Assume starting anon index of 0
801 799 */
802 800 anon_index = seg_page(seg, addr);
803 801 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
804 802
805 803 return (policy_info);
806 804 }
807 805
808 806 /*
809 807 * DISM only.
810 808 * Return locked pages over a given range.
811 809 *
812 810 * We will cache all DISM locked pages and save the pplist for the
813 811 * entire segment in the ppa field of the underlying DISM segment structure.
814 812 * Later, during a call to segspt_reclaim() we will use this ppa array
815 813 * to page_unlock() all of the pages and then we will free this ppa list.
816 814 */
817 815 /*ARGSUSED*/
818 816 static int
819 817 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
820 818 struct page ***ppp, enum lock_type type, enum seg_rw rw)
821 819 {
822 820 struct shm_data *shmd = (struct shm_data *)seg->s_data;
823 821 struct seg *sptseg = shmd->shm_sptseg;
824 822 struct spt_data *sptd = sptseg->s_data;
825 823 pgcnt_t pg_idx, npages, tot_npages, npgs;
826 824 struct page **pplist, **pl, **ppa, *pp;
827 825 struct anon_map *amp;
828 826 spgcnt_t an_idx;
829 827 int ret = ENOTSUP;
830 828 uint_t pl_built = 0;
831 829 struct anon *ap;
832 830 struct vnode *vp;
833 831 u_offset_t off;
834 832 pgcnt_t claim_availrmem = 0;
835 833 uint_t szc;
836 834
837 835 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
838 836 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
839 837
840 838 /*
841 839 * We want to lock/unlock the entire ISM segment. Therefore,
 842 840 	 * we will be using the underlying sptseg and its base address
843 841 * and length for the caching arguments.
844 842 */
845 843 ASSERT(sptseg);
846 844 ASSERT(sptd);
847 845
848 846 pg_idx = seg_page(seg, addr);
849 847 npages = btopr(len);
850 848
851 849 /*
852 850 * check if the request is larger than number of pages covered
853 851 * by amp
854 852 */
855 853 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
856 854 *ppp = NULL;
857 855 return (ENOTSUP);
858 856 }
859 857
860 858 if (type == L_PAGEUNLOCK) {
861 859 ASSERT(sptd->spt_ppa != NULL);
862 860
863 861 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
864 862 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
865 863
866 864 /*
867 865 * If someone is blocked while unmapping, we purge
868 866 * segment page cache and thus reclaim pplist synchronously
869 867 * without waiting for seg_pasync_thread. This speeds up
870 868 * unmapping in cases where munmap(2) is called, while
871 869 * raw async i/o is still in progress or where a thread
872 870 * exits on data fault in a multithreaded application.
873 871 */
874 872 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
875 873 (AS_ISUNMAPWAIT(seg->s_as) &&
876 874 shmd->shm_softlockcnt > 0)) {
877 875 segspt_purge(seg);
878 876 }
879 877 return (0);
880 878 }
881 879
882 880 /* The L_PAGELOCK case ... */
883 881
884 882 if (sptd->spt_flags & DISM_PPA_CHANGED) {
885 883 segspt_purge(seg);
886 884 /*
 887 885 		 * for DISM the ppa needs to be rebuilt since the
 888 886 		 * number of locked pages could have changed
889 887 */
890 888 *ppp = NULL;
891 889 return (ENOTSUP);
892 890 }
893 891
894 892 /*
895 893 * First try to find pages in segment page cache, without
896 894 * holding the segment lock.
897 895 */
898 896 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
899 897 S_WRITE, SEGP_FORCE_WIRED);
900 898 if (pplist != NULL) {
901 899 ASSERT(sptd->spt_ppa != NULL);
902 900 ASSERT(sptd->spt_ppa == pplist);
903 901 ppa = sptd->spt_ppa;
904 902 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
905 903 if (ppa[an_idx] == NULL) {
906 904 seg_pinactive(seg, NULL, seg->s_base,
907 905 sptd->spt_amp->size, ppa,
908 906 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
909 907 *ppp = NULL;
910 908 return (ENOTSUP);
911 909 }
912 910 if ((szc = ppa[an_idx]->p_szc) != 0) {
913 911 npgs = page_get_pagecnt(szc);
914 912 an_idx = P2ROUNDUP(an_idx + 1, npgs);
915 913 } else {
916 914 an_idx++;
917 915 }
918 916 }
919 917 /*
920 918 * Since we cache the entire DISM segment, we want to
921 919 * set ppp to point to the first slot that corresponds
922 920 * to the requested addr, i.e. pg_idx.
923 921 */
924 922 *ppp = &(sptd->spt_ppa[pg_idx]);
925 923 return (0);
926 924 }
927 925
928 926 mutex_enter(&sptd->spt_lock);
929 927 /*
930 928 * try to find pages in segment page cache with mutex
931 929 */
932 930 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
933 931 S_WRITE, SEGP_FORCE_WIRED);
934 932 if (pplist != NULL) {
935 933 ASSERT(sptd->spt_ppa != NULL);
936 934 ASSERT(sptd->spt_ppa == pplist);
937 935 ppa = sptd->spt_ppa;
938 936 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
939 937 if (ppa[an_idx] == NULL) {
940 938 mutex_exit(&sptd->spt_lock);
941 939 seg_pinactive(seg, NULL, seg->s_base,
942 940 sptd->spt_amp->size, ppa,
943 941 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
944 942 *ppp = NULL;
945 943 return (ENOTSUP);
946 944 }
947 945 if ((szc = ppa[an_idx]->p_szc) != 0) {
948 946 npgs = page_get_pagecnt(szc);
949 947 an_idx = P2ROUNDUP(an_idx + 1, npgs);
950 948 } else {
951 949 an_idx++;
952 950 }
953 951 }
954 952 /*
955 953 * Since we cache the entire DISM segment, we want to
956 954 * set ppp to point to the first slot that corresponds
957 955 * to the requested addr, i.e. pg_idx.
958 956 */
959 957 mutex_exit(&sptd->spt_lock);
960 958 *ppp = &(sptd->spt_ppa[pg_idx]);
961 959 return (0);
962 960 }
963 961 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
964 962 SEGP_FORCE_WIRED) == SEGP_FAIL) {
965 963 mutex_exit(&sptd->spt_lock);
966 964 *ppp = NULL;
967 965 return (ENOTSUP);
968 966 }
969 967
970 968 /*
971 969 * No need to worry about protections because DISM pages are always rw.
972 970 */
973 971 pl = pplist = NULL;
974 972 amp = sptd->spt_amp;
975 973
976 974 /*
977 975 * Do we need to build the ppa array?
978 976 */
979 977 if (sptd->spt_ppa == NULL) {
980 978 pgcnt_t lpg_cnt = 0;
981 979
982 980 pl_built = 1;
983 981 tot_npages = btopr(sptd->spt_amp->size);
984 982
985 983 ASSERT(sptd->spt_pcachecnt == 0);
986 984 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
987 985 pl = pplist;
988 986
 989 987 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
990 988 for (an_idx = 0; an_idx < tot_npages; ) {
991 989 ap = anon_get_ptr(amp->ahp, an_idx);
992 990 /*
993 991 * Cache only mlocked pages. For large pages
994 992 * if one (constituent) page is mlocked
995 993 * all pages for that large page
996 994 * are cached also. This is for quick
997 995 * lookups of ppa array;
998 996 */
999 997 if ((ap != NULL) && (lpg_cnt != 0 ||
1000 998 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1001 999
1002 1000 swap_xlate(ap, &vp, &off);
1003 1001 pp = page_lookup(vp, off, SE_SHARED);
1004 1002 ASSERT(pp != NULL);
1005 1003 if (lpg_cnt == 0) {
1006 1004 lpg_cnt++;
1007 1005 /*
1008 1006 * For a small page, we are done --
 1009 1007 					 * lpg_cnt is reset to 0 below.
1010 1008 *
1011 1009 * For a large page, we are guaranteed
1012 1010 * to find the anon structures of all
1013 1011 * constituent pages and a non-zero
1014 1012 * lpg_cnt ensures that we don't test
1015 1013 * for mlock for these. We are done
 1016 1014 					 * when lpg_cnt reaches (npgs + 1).
1017 1015 * If we are not the first constituent
1018 1016 * page, restart at the first one.
1019 1017 */
1020 1018 npgs = page_get_pagecnt(pp->p_szc);
1021 1019 if (!IS_P2ALIGNED(an_idx, npgs)) {
1022 1020 an_idx = P2ALIGN(an_idx, npgs);
1023 1021 page_unlock(pp);
1024 1022 continue;
1025 1023 }
1026 1024 }
1027 1025 if (++lpg_cnt > npgs)
1028 1026 lpg_cnt = 0;
1029 1027
1030 1028 /*
1031 1029 * availrmem is decremented only
1032 1030 * for unlocked pages
1033 1031 */
1034 1032 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1035 1033 claim_availrmem++;
1036 1034 pplist[an_idx] = pp;
1037 1035 }
1038 1036 an_idx++;
1039 1037 }
 1040 1038 		ANON_LOCK_EXIT(&amp->a_rwlock);
1041 1039
1042 1040 if (claim_availrmem) {
1043 1041 mutex_enter(&freemem_lock);
1044 1042 if (availrmem < tune.t_minarmem + claim_availrmem) {
1045 1043 mutex_exit(&freemem_lock);
1046 1044 ret = ENOTSUP;
1047 1045 claim_availrmem = 0;
1048 1046 goto insert_fail;
1049 1047 } else {
1050 1048 availrmem -= claim_availrmem;
1051 1049 }
1052 1050 mutex_exit(&freemem_lock);
1053 1051 }
1054 1052
1055 1053 sptd->spt_ppa = pl;
1056 1054 } else {
1057 1055 /*
1058 1056 * We already have a valid ppa[].
1059 1057 */
1060 1058 pl = sptd->spt_ppa;
1061 1059 }
1062 1060
1063 1061 ASSERT(pl != NULL);
1064 1062
1065 1063 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1066 1064 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1067 1065 segspt_reclaim);
1068 1066 if (ret == SEGP_FAIL) {
1069 1067 /*
1070 1068 * seg_pinsert failed. We return
1071 1069 * ENOTSUP, so that the as_pagelock() code will
1072 1070 * then try the slower F_SOFTLOCK path.
1073 1071 */
1074 1072 if (pl_built) {
1075 1073 /*
1076 1074 * No one else has referenced the ppa[].
1077 1075 * We created it and we need to destroy it.
1078 1076 */
1079 1077 sptd->spt_ppa = NULL;
1080 1078 }
1081 1079 ret = ENOTSUP;
1082 1080 goto insert_fail;
1083 1081 }
1084 1082
1085 1083 /*
1086 1084 * In either case, we increment softlockcnt on the 'real' segment.
1087 1085 */
1088 1086 sptd->spt_pcachecnt++;
1089 1087 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1090 1088
1091 1089 ppa = sptd->spt_ppa;
1092 1090 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1093 1091 if (ppa[an_idx] == NULL) {
1094 1092 mutex_exit(&sptd->spt_lock);
1095 1093 seg_pinactive(seg, NULL, seg->s_base,
1096 1094 sptd->spt_amp->size,
1097 1095 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1098 1096 *ppp = NULL;
1099 1097 return (ENOTSUP);
1100 1098 }
1101 1099 if ((szc = ppa[an_idx]->p_szc) != 0) {
1102 1100 npgs = page_get_pagecnt(szc);
1103 1101 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1104 1102 } else {
1105 1103 an_idx++;
1106 1104 }
1107 1105 }
1108 1106 /*
1109 1107 * We can now drop the sptd->spt_lock since the ppa[]
 1110 1108 	 * exists and we have incremented pcachecnt.
1111 1109 */
1112 1110 mutex_exit(&sptd->spt_lock);
1113 1111
1114 1112 /*
1115 1113 * Since we cache the entire segment, we want to
1116 1114 * set ppp to point to the first slot that corresponds
1117 1115 * to the requested addr, i.e. pg_idx.
1118 1116 */
1119 1117 *ppp = &(sptd->spt_ppa[pg_idx]);
1120 1118 return (0);
1121 1119
1122 1120 insert_fail:
1123 1121 /*
1124 1122 * We will only reach this code if we tried and failed.
1125 1123 *
1126 1124 * And we can drop the lock on the dummy seg, once we've failed
1127 1125 * to set up a new ppa[].
1128 1126 */
1129 1127 mutex_exit(&sptd->spt_lock);
1130 1128
1131 1129 if (pl_built) {
1132 1130 if (claim_availrmem) {
1133 1131 mutex_enter(&freemem_lock);
1134 1132 availrmem += claim_availrmem;
1135 1133 mutex_exit(&freemem_lock);
1136 1134 }
1137 1135
1138 1136 /*
1139 1137 * We created pl and we need to destroy it.
1140 1138 */
1141 1139 pplist = pl;
1142 1140 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1143 1141 if (pplist[an_idx] != NULL)
1144 1142 page_unlock(pplist[an_idx]);
1145 1143 }
1146 1144 kmem_free(pl, sizeof (page_t *) * tot_npages);
1147 1145 }
1148 1146
1149 1147 if (shmd->shm_softlockcnt <= 0) {
1150 1148 if (AS_ISUNMAPWAIT(seg->s_as)) {
1151 1149 mutex_enter(&seg->s_as->a_contents);
1152 1150 if (AS_ISUNMAPWAIT(seg->s_as)) {
1153 1151 AS_CLRUNMAPWAIT(seg->s_as);
1154 1152 cv_broadcast(&seg->s_as->a_cv);
1155 1153 }
1156 1154 mutex_exit(&seg->s_as->a_contents);
1157 1155 }
1158 1156 }
1159 1157 *ppp = NULL;
1160 1158 return (ret);
1161 1159 }
1162 1160
1163 1161
1164 1162
1165 1163 /*
1166 1164 * return locked pages over a given range.
1167 1165 *
1168 1166 * We will cache the entire ISM segment and save the pplist for the
1169 1167 * entire segment in the ppa field of the underlying ISM segment structure.
1170 1168 * Later, during a call to segspt_reclaim() we will use this ppa array
1171 1169 * to page_unlock() all of the pages and then we will free this ppa list.
1172 1170 */
1173 1171 /*ARGSUSED*/
1174 1172 static int
1175 1173 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1176 1174 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1177 1175 {
1178 1176 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1179 1177 struct seg *sptseg = shmd->shm_sptseg;
1180 1178 struct spt_data *sptd = sptseg->s_data;
1181 1179 pgcnt_t np, page_index, npages;
1182 1180 caddr_t a, spt_base;
1183 1181 struct page **pplist, **pl, *pp;
1184 1182 struct anon_map *amp;
1185 1183 ulong_t anon_index;
1186 1184 int ret = ENOTSUP;
1187 1185 uint_t pl_built = 0;
1188 1186 struct anon *ap;
1189 1187 struct vnode *vp;
1190 1188 u_offset_t off;
1191 1189
1192 1190 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1193 1191 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1194 1192
1195 1193
1196 1194 /*
1197 1195 * We want to lock/unlock the entire ISM segment. Therefore,
 1198 1196 	 * we will be using the underlying sptseg and its base address
1199 1197 * and length for the caching arguments.
1200 1198 */
1201 1199 ASSERT(sptseg);
1202 1200 ASSERT(sptd);
1203 1201
1204 1202 if (sptd->spt_flags & SHM_PAGEABLE) {
1205 1203 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1206 1204 }
1207 1205
1208 1206 page_index = seg_page(seg, addr);
1209 1207 npages = btopr(len);
1210 1208
1211 1209 /*
1212 1210 * check if the request is larger than number of pages covered
1213 1211 * by amp
1214 1212 */
1215 1213 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1216 1214 *ppp = NULL;
1217 1215 return (ENOTSUP);
1218 1216 }
1219 1217
1220 1218 if (type == L_PAGEUNLOCK) {
1221 1219
1222 1220 ASSERT(sptd->spt_ppa != NULL);
1223 1221
1224 1222 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1225 1223 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1226 1224
1227 1225 /*
1228 1226 * If someone is blocked while unmapping, we purge
1229 1227 * segment page cache and thus reclaim pplist synchronously
1230 1228 * without waiting for seg_pasync_thread. This speeds up
1231 1229 * unmapping in cases where munmap(2) is called, while
1232 1230 * raw async i/o is still in progress or where a thread
1233 1231 * exits on data fault in a multithreaded application.
1234 1232 */
1235 1233 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1236 1234 segspt_purge(seg);
1237 1235 }
1238 1236 return (0);
1239 1237 }
1240 1238
1241 1239 /* The L_PAGELOCK case... */
1242 1240
1243 1241 /*
1244 1242 * First try to find pages in segment page cache, without
1245 1243 * holding the segment lock.
1246 1244 */
1247 1245 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1248 1246 S_WRITE, SEGP_FORCE_WIRED);
1249 1247 if (pplist != NULL) {
1250 1248 ASSERT(sptd->spt_ppa == pplist);
1251 1249 ASSERT(sptd->spt_ppa[page_index]);
1252 1250 /*
1253 1251 * Since we cache the entire ISM segment, we want to
1254 1252 * set ppp to point to the first slot that corresponds
1255 1253 * to the requested addr, i.e. page_index.
1256 1254 */
1257 1255 *ppp = &(sptd->spt_ppa[page_index]);
1258 1256 return (0);
1259 1257 }
1260 1258
1261 1259 mutex_enter(&sptd->spt_lock);
1262 1260
1263 1261 /*
1264 1262 * try to find pages in segment page cache
1265 1263 */
1266 1264 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1267 1265 S_WRITE, SEGP_FORCE_WIRED);
1268 1266 if (pplist != NULL) {
1269 1267 ASSERT(sptd->spt_ppa == pplist);
1270 1268 /*
1271 1269 * Since we cache the entire segment, we want to
1272 1270 * set ppp to point to the first slot that corresponds
1273 1271 * to the requested addr, i.e. page_index.
1274 1272 */
1275 1273 mutex_exit(&sptd->spt_lock);
1276 1274 *ppp = &(sptd->spt_ppa[page_index]);
1277 1275 return (0);
1278 1276 }
1279 1277
1280 1278 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1281 1279 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1282 1280 mutex_exit(&sptd->spt_lock);
1283 1281 *ppp = NULL;
1284 1282 return (ENOTSUP);
1285 1283 }
1286 1284
1287 1285 /*
1288 1286 * No need to worry about protections because ISM pages
1289 1287 * are always rw.
1290 1288 */
1291 1289 pl = pplist = NULL;
1292 1290
1293 1291 /*
1294 1292 * Do we need to build the ppa array?
1295 1293 */
1296 1294 if (sptd->spt_ppa == NULL) {
1297 1295 ASSERT(sptd->spt_ppa == pplist);
1298 1296
1299 1297 spt_base = sptseg->s_base;
1300 1298 pl_built = 1;
1301 1299
1302 1300 /*
1303 1301 * availrmem is decremented once during anon_swap_adjust()
1304 1302 * and is incremented during the anon_unresv(), which is
1305 1303 * called from shm_rm_amp() when the segment is destroyed.
1306 1304 */
1307 1305 amp = sptd->spt_amp;
1308 1306 ASSERT(amp != NULL);
1309 1307
1310 1308 /* pcachecnt is protected by sptd->spt_lock */
1311 1309 ASSERT(sptd->spt_pcachecnt == 0);
1312 1310 pplist = kmem_zalloc(sizeof (page_t *)
1313 1311 * btopr(sptd->spt_amp->size), KM_SLEEP);
1314 1312 pl = pplist;
1315 1313
1316 1314 anon_index = seg_page(sptseg, spt_base);
1317 1315
 1318 1316 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1319 1317 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1320 1318 a += PAGESIZE, anon_index++, pplist++) {
1321 1319 ap = anon_get_ptr(amp->ahp, anon_index);
1322 1320 ASSERT(ap != NULL);
1323 1321 swap_xlate(ap, &vp, &off);
1324 1322 pp = page_lookup(vp, off, SE_SHARED);
1325 1323 ASSERT(pp != NULL);
1326 1324 *pplist = pp;
1327 1325 }
 1328 1326 		ANON_LOCK_EXIT(&amp->a_rwlock);
1329 1327
1330 1328 if (a < (spt_base + sptd->spt_amp->size)) {
1331 1329 ret = ENOTSUP;
1332 1330 goto insert_fail;
1333 1331 }
1334 1332 sptd->spt_ppa = pl;
1335 1333 } else {
1336 1334 /*
1337 1335 * We already have a valid ppa[].
1338 1336 */
1339 1337 pl = sptd->spt_ppa;
1340 1338 }
1341 1339
1342 1340 ASSERT(pl != NULL);
1343 1341
1344 1342 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1345 1343 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1346 1344 segspt_reclaim);
1347 1345 if (ret == SEGP_FAIL) {
1348 1346 /*
1349 1347 * seg_pinsert failed. We return
1350 1348 * ENOTSUP, so that the as_pagelock() code will
1351 1349 * then try the slower F_SOFTLOCK path.
1352 1350 */
1353 1351 if (pl_built) {
1354 1352 /*
1355 1353 * No one else has referenced the ppa[].
1356 1354 * We created it and we need to destroy it.
1357 1355 */
1358 1356 sptd->spt_ppa = NULL;
1359 1357 }
1360 1358 ret = ENOTSUP;
1361 1359 goto insert_fail;
1362 1360 }
1363 1361
1364 1362 /*
1365 1363 * In either case, we increment softlockcnt on the 'real' segment.
1366 1364 */
1367 1365 sptd->spt_pcachecnt++;
1368 1366 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1369 1367
1370 1368 /*
1371 1369 * We can now drop the sptd->spt_lock since the ppa[]
 1372 1370 	 * exists and we have incremented pcachecnt.
1373 1371 */
1374 1372 mutex_exit(&sptd->spt_lock);
1375 1373
1376 1374 /*
1377 1375 * Since we cache the entire segment, we want to
1378 1376 * set ppp to point to the first slot that corresponds
1379 1377 * to the requested addr, i.e. page_index.
1380 1378 */
1381 1379 *ppp = &(sptd->spt_ppa[page_index]);
1382 1380 return (0);
1383 1381
1384 1382 insert_fail:
1385 1383 /*
1386 1384 * We will only reach this code if we tried and failed.
1387 1385 *
1388 1386 * And we can drop the lock on the dummy seg, once we've failed
1389 1387 * to set up a new ppa[].
1390 1388 */
1391 1389 mutex_exit(&sptd->spt_lock);
1392 1390
1393 1391 if (pl_built) {
1394 1392 /*
1395 1393 * We created pl and we need to destroy it.
1396 1394 */
1397 1395 pplist = pl;
1398 1396 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1399 1397 while (np) {
1400 1398 page_unlock(*pplist);
1401 1399 np--;
1402 1400 pplist++;
1403 1401 }
1404 1402 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1405 1403 }
1406 1404 if (shmd->shm_softlockcnt <= 0) {
1407 1405 if (AS_ISUNMAPWAIT(seg->s_as)) {
1408 1406 mutex_enter(&seg->s_as->a_contents);
1409 1407 if (AS_ISUNMAPWAIT(seg->s_as)) {
1410 1408 AS_CLRUNMAPWAIT(seg->s_as);
1411 1409 cv_broadcast(&seg->s_as->a_cv);
1412 1410 }
1413 1411 mutex_exit(&seg->s_as->a_contents);
1414 1412 }
1415 1413 }
1416 1414 *ppp = NULL;
1417 1415 return (ret);
1418 1416 }
1419 1417
1420 1418 /*
1421 1419 * purge any cached pages in the I/O page cache
1422 1420 */
1423 1421 static void
1424 1422 segspt_purge(struct seg *seg)
1425 1423 {
1426 1424 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1427 1425 }
1428 1426
1429 1427 static int
1430 1428 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1431 1429 enum seg_rw rw, int async)
1432 1430 {
1433 1431 struct seg *seg = (struct seg *)ptag;
1434 1432 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1435 1433 struct seg *sptseg;
1436 1434 struct spt_data *sptd;
1437 1435 pgcnt_t npages, i, free_availrmem = 0;
1438 1436 int done = 0;
1439 1437
1440 1438 #ifdef lint
1441 1439 addr = addr;
1442 1440 #endif
1443 1441 sptseg = shmd->shm_sptseg;
1444 1442 sptd = sptseg->s_data;
1445 1443 npages = (len >> PAGESHIFT);
1446 1444 ASSERT(npages);
1447 1445 ASSERT(sptd->spt_pcachecnt != 0);
1448 1446 ASSERT(sptd->spt_ppa == pplist);
1449 1447 ASSERT(npages == btopr(sptd->spt_amp->size));
1450 1448 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1451 1449
1452 1450 /*
1453 1451 * Acquire the lock on the dummy seg and destroy the
1454 1452 * ppa array IF this is the last pcachecnt.
1455 1453 */
1456 1454 mutex_enter(&sptd->spt_lock);
1457 1455 if (--sptd->spt_pcachecnt == 0) {
1458 1456 for (i = 0; i < npages; i++) {
1459 1457 if (pplist[i] == NULL) {
1460 1458 continue;
1461 1459 }
1462 1460 if (rw == S_WRITE) {
1463 1461 hat_setrefmod(pplist[i]);
1464 1462 } else {
1465 1463 hat_setref(pplist[i]);
1466 1464 }
1467 1465 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1468 1466 (sptd->spt_ppa_lckcnt[i] == 0))
1469 1467 free_availrmem++;
1470 1468 page_unlock(pplist[i]);
1471 1469 }
1472 1470 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1473 1471 mutex_enter(&freemem_lock);
1474 1472 availrmem += free_availrmem;
1475 1473 mutex_exit(&freemem_lock);
1476 1474 }
1477 1475 /*
 1478 1476 		 * Since we want to cache/uncache the entire ISM segment,
1479 1477 * we will track the pplist in a segspt specific field
1480 1478 * ppa, that is initialized at the time we add an entry to
1481 1479 * the cache.
1482 1480 */
1483 1481 ASSERT(sptd->spt_pcachecnt == 0);
1484 1482 kmem_free(pplist, sizeof (page_t *) * npages);
1485 1483 sptd->spt_ppa = NULL;
1486 1484 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1487 1485 sptd->spt_gen++;
1488 1486 cv_broadcast(&sptd->spt_cv);
1489 1487 done = 1;
1490 1488 }
1491 1489 mutex_exit(&sptd->spt_lock);
1492 1490
1493 1491 /*
1494 1492 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1495 1493 * may not hold AS lock (in this case async argument is not 0). This
1496 1494 * means if softlockcnt drops to 0 after the decrement below address
1497 1495 * space may get freed. We can't allow it since after softlock
 1498 1496 	 * decrement to 0 we still need to access the as structure for possible
1499 1497 * wakeup of unmap waiters. To prevent the disappearance of as we take
1500 1498 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1501 1499 * this mutex as a barrier to make sure this routine completes before
1502 1500 * segment is freed.
1503 1501 *
1504 1502 * The second complication we have to deal with in async case is a
1505 1503 * possibility of missed wake up of unmap wait thread. When we don't
1506 1504 * hold as lock here we may take a_contents lock before unmap wait
1507 1505 * thread that was first to see softlockcnt was still not 0. As a
1508 1506 * result we'll fail to wake up an unmap wait thread. To avoid this
1509 1507 * race we set nounmapwait flag in as structure if we drop softlockcnt
1510 1508 * to 0 if async is not 0. unmapwait thread
1511 1509 * will not block if this flag is set.
1512 1510 */
1513 1511 if (async)
1514 1512 mutex_enter(&shmd->shm_segfree_syncmtx);
1515 1513
1516 1514 /*
1517 1515 * Now decrement softlockcnt.
1518 1516 */
1519 1517 ASSERT(shmd->shm_softlockcnt > 0);
1520 1518 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1521 1519
1522 1520 if (shmd->shm_softlockcnt <= 0) {
1523 1521 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1524 1522 mutex_enter(&seg->s_as->a_contents);
1525 1523 if (async)
1526 1524 AS_SETNOUNMAPWAIT(seg->s_as);
1527 1525 if (AS_ISUNMAPWAIT(seg->s_as)) {
1528 1526 AS_CLRUNMAPWAIT(seg->s_as);
1529 1527 cv_broadcast(&seg->s_as->a_cv);
1530 1528 }
1531 1529 mutex_exit(&seg->s_as->a_contents);
1532 1530 }
1533 1531 }
1534 1532
1535 1533 if (async)
1536 1534 mutex_exit(&shmd->shm_segfree_syncmtx);
1537 1535
1538 1536 return (done);
1539 1537 }
1540 1538
1541 1539 /*
1542 1540 * Do a F_SOFTUNLOCK call over the range requested.
1543 1541 * The range must have already been F_SOFTLOCK'ed.
1544 1542 *
1545 1543 * The calls to acquire and release the anon map lock mutex were
1546 1544 * removed in order to avoid a deadly embrace during a DR
 1547 1545 	 * memory delete operation. (E.g. DR blocks while waiting for an
1548 1546 * exclusive lock on a page that is being used for kaio; the
1549 1547 * thread that will complete the kaio and call segspt_softunlock
1550 1548 * blocks on the anon map lock; another thread holding the anon
1551 1549 * map lock blocks on another page lock via the segspt_shmfault
1552 1550 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1553 1551 *
1554 1552 * The appropriateness of the removal is based upon the following:
1555 1553 * 1. If we are holding a segment's reader lock and the page is held
1556 1554 * shared, then the corresponding element in anonmap which points to
1557 1555 * anon struct cannot change and there is no need to acquire the
1558 1556 * anonymous map lock.
1559 1557 * 2. Threads in segspt_softunlock have a reader lock on the segment
1560 1558 * and already have the shared page lock, so we are guaranteed that
1561 1559 * the anon map slot cannot change and therefore can call anon_get_ptr()
1562 1560 * without grabbing the anonymous map lock.
1563 1561 * 3. Threads that softlock a shared page break copy-on-write, even if
 1564 1562 	 * it's a read. Thus cow faults can be ignored with respect to soft
1565 1563 * unlocking, since the breaking of cow means that the anon slot(s) will
1566 1564 * not be shared.
1567 1565 */
1568 1566 static void
1569 1567 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1570 1568 size_t len, enum seg_rw rw)
1571 1569 {
1572 1570 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1573 1571 struct seg *sptseg;
1574 1572 struct spt_data *sptd;
1575 1573 page_t *pp;
1576 1574 caddr_t adr;
1577 1575 struct vnode *vp;
1578 1576 u_offset_t offset;
1579 1577 ulong_t anon_index;
1580 1578 struct anon_map *amp; /* XXX - for locknest */
1581 1579 struct anon *ap = NULL;
1582 1580 pgcnt_t npages;
1583 1581
1584 1582 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1585 1583
1586 1584 sptseg = shmd->shm_sptseg;
1587 1585 sptd = sptseg->s_data;
1588 1586
1589 1587 /*
1590 1588 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1591 1589 * and therefore their pages are SE_SHARED locked
1592 1590 * for the entire life of the segment.
1593 1591 */
1594 1592 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1595 1593 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1596 1594 goto softlock_decrement;
1597 1595 }
1598 1596
1599 1597 /*
1600 1598 * Any thread is free to do a page_find and
1601 1599 * page_unlock() on the pages within this seg.
1602 1600 *
1603 1601 * We are already holding the as->a_lock on the user's
1604 1602 * real segment, but we need to hold the a_lock on the
1605 1603 * underlying dummy as. This is mostly to satisfy the
1606 1604 * underlying HAT layer.
1607 1605 */
1608 1606 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1609 1607 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1610 1608 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1611 1609
1612 1610 amp = sptd->spt_amp;
1613 1611 ASSERT(amp != NULL);
1614 1612 anon_index = seg_page(sptseg, sptseg_addr);
1615 1613
1616 1614 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1617 1615 ap = anon_get_ptr(amp->ahp, anon_index++);
1618 1616 ASSERT(ap != NULL);
1619 1617 swap_xlate(ap, &vp, &offset);
1620 1618
1621 1619 /*
1622 1620 * Use page_find() instead of page_lookup() to
1623 1621 * find the page since we know that it has a
1624 1622 * "shared" lock.
1625 1623 */
1626 1624 pp = page_find(vp, offset);
1627 1625 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1628 1626 if (pp == NULL) {
1629 1627 panic("segspt_softunlock: "
1630 1628 "addr %p, ap %p, vp %p, off %llx",
1631 1629 (void *)adr, (void *)ap, (void *)vp, offset);
1632 1630 /*NOTREACHED*/
1633 1631 }
1634 1632
1635 1633 if (rw == S_WRITE) {
1636 1634 hat_setrefmod(pp);
1637 1635 } else if (rw != S_OTHER) {
1638 1636 hat_setref(pp);
1639 1637 }
1640 1638 page_unlock(pp);
1641 1639 }
1642 1640
1643 1641 softlock_decrement:
1644 1642 npages = btopr(len);
1645 1643 ASSERT(shmd->shm_softlockcnt >= npages);
1646 1644 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1647 1645 if (shmd->shm_softlockcnt == 0) {
1648 1646 /*
 1649 1647 		 * All SOFTLOCKS are gone. Wake up any waiting
1650 1648 * unmappers so they can try again to unmap.
1651 1649 * Check for waiters first without the mutex
1652 1650 * held so we don't always grab the mutex on
1653 1651 * softunlocks.
1654 1652 */
1655 1653 if (AS_ISUNMAPWAIT(seg->s_as)) {
1656 1654 mutex_enter(&seg->s_as->a_contents);
1657 1655 if (AS_ISUNMAPWAIT(seg->s_as)) {
1658 1656 AS_CLRUNMAPWAIT(seg->s_as);
1659 1657 cv_broadcast(&seg->s_as->a_cv);
1660 1658 }
1661 1659 mutex_exit(&seg->s_as->a_contents);
1662 1660 }
1663 1661 }
1664 1662 }
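
The softlock_decrement path above uses a check-lock-recheck pattern: AS_ISUNMAPWAIT() is tested once without a_contents held, and only if a waiter looks likely is the mutex taken and the flag re-tested before the broadcast. A minimal stand-alone sketch of the same pattern follows, written against pthreads rather than kernel synchronization; the names wait_flag and wakeup_unmappers are invented for illustration and are not part of seg_spt.c.

    #include <pthread.h>
    #include <stdatomic.h>

    static pthread_mutex_t contents = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  unmap_cv = PTHREAD_COND_INITIALIZER;
    static atomic_int      wait_flag;       /* analogue of AS_ISUNMAPWAIT() */

    /*
     * Called when the last softlock is released.  The unlocked read of
     * wait_flag is only a hint so the mutex is not taken on every call;
     * the authoritative test is repeated under the mutex before the flag
     * is cleared and the waiters are woken.
     */
    static void
    wakeup_unmappers(void)
    {
            if (atomic_load(&wait_flag) != 0) {             /* cheap pre-check */
                    (void) pthread_mutex_lock(&contents);
                    if (atomic_load(&wait_flag) != 0) {     /* re-check, locked */
                            atomic_store(&wait_flag, 0);
                            (void) pthread_cond_broadcast(&unmap_cv);
                    }
                    (void) pthread_mutex_unlock(&contents);
            }
    }

    int
    main(void)
    {
            atomic_store(&wait_flag, 1);
            wakeup_unmappers();             /* clears the flag, wakes waiters */
            return (0);
    }
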
1665 1663
1666 1664 int
1667 1665 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1668 1666 {
1669 1667 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1670 1668 struct shm_data *shmd;
1671 1669 struct anon_map *shm_amp = shmd_arg->shm_amp;
1672 1670 struct spt_data *sptd;
1673 1671 int error = 0;
1674 1672
1675 1673 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1676 1674
1677 1675 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1678 1676 if (shmd == NULL)
1679 1677 return (ENOMEM);
1680 1678
1681 1679 shmd->shm_sptas = shmd_arg->shm_sptas;
1682 1680 shmd->shm_amp = shm_amp;
1683 1681 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1684 1682
1685 1683 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1686 1684 NULL, 0, seg->s_size);
1687 1685
1688 1686 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1689 1687
1690 1688 seg->s_data = (void *)shmd;
1691 1689 seg->s_ops = &segspt_shmops;
1692 1690 seg->s_szc = shmd->shm_sptseg->s_szc;
1693 1691 sptd = shmd->shm_sptseg->s_data;
1694 1692
1695 1693 if (sptd->spt_flags & SHM_PAGEABLE) {
1696 1694 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1697 1695 KM_NOSLEEP)) == NULL) {
1698 1696 seg->s_data = (void *)NULL;
1699 1697 kmem_free(shmd, (sizeof (*shmd)));
1700 1698 return (ENOMEM);
1701 1699 }
1702 1700 shmd->shm_lckpgs = 0;
1703 1701 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1704 1702 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1705 1703 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1706 1704 seg->s_size, seg->s_szc)) != 0) {
1707 1705 kmem_free(shmd->shm_vpage,
1708 1706 btopr(shm_amp->size));
1709 1707 }
1710 1708 }
1711 1709 } else {
1712 1710 error = hat_share(seg->s_as->a_hat, seg->s_base,
1713 1711 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1714 1712 seg->s_size, seg->s_szc);
1715 1713 }
1716 1714 if (error) {
1717 1715 seg->s_szc = 0;
1718 1716 seg->s_data = (void *)NULL;
1719 1717 kmem_free(shmd, (sizeof (*shmd)));
1720 1718 } else {
1721 1719 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1722 1720 shm_amp->refcnt++;
1723 1721 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1724 1722 }
1725 1723 return (error);
1726 1724 }
1727 1725
1728 1726 int
1729 1727 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1730 1728 {
1731 1729 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1732 1730 int reclaim = 1;
1733 1731
1734 1732 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1735 1733 retry:
1736 1734 if (shmd->shm_softlockcnt > 0) {
1737 1735 if (reclaim == 1) {
1738 1736 segspt_purge(seg);
1739 1737 reclaim = 0;
1740 1738 goto retry;
1741 1739 }
1742 1740 return (EAGAIN);
1743 1741 }
1744 1742
1745 1743 if (ssize != seg->s_size) {
1746 1744 #ifdef DEBUG
1747 1745 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1748 1746 ssize, seg->s_size);
1749 1747 #endif
1750 1748 return (EINVAL);
1751 1749 }
1752 1750
1753 1751 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1754 1752 NULL, 0);
1755 1753 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1756 1754
1757 1755 seg_free(seg);
1758 1756
1759 1757 return (0);
1760 1758 }
1761 1759
1762 1760 void
1763 1761 segspt_shmfree(struct seg *seg)
1764 1762 {
1765 1763 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1766 1764 struct anon_map *shm_amp = shmd->shm_amp;
1767 1765
1768 1766 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1769 1767
1770 1768 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1771 1769 MC_UNLOCK, NULL, 0);
1772 1770
1773 1771 /*
1774 1772 * Need to increment refcnt when attaching
1775 1773 * and decrement when detaching because of dup().
1776 1774 */
1777 1775 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1778 1776 shm_amp->refcnt--;
1779 1777 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1780 1778
1781 1779 if (shmd->shm_vpage) { /* only for DISM */
1782 1780 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1783 1781 shmd->shm_vpage = NULL;
1784 1782 }
1785 1783
1786 1784 /*
1787 1785 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1788 1786 * still working with this segment without holding as lock.
1789 1787 */
1790 1788 ASSERT(shmd->shm_softlockcnt == 0);
1791 1789 mutex_enter(&shmd->shm_segfree_syncmtx);
1792 1790 mutex_destroy(&shmd->shm_segfree_syncmtx);
1793 1791
1794 1792 kmem_free(shmd, sizeof (*shmd));
1795 1793 }
1796 1794
1797 1795 /*ARGSUSED*/
1798 1796 int
1799 1797 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1800 1798 {
1801 1799 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1802 1800
1803 1801 /*
1804 1802 * Shared page table is more than shared mapping.
1805 1803 * Individual process sharing page tables can't change prot
1806 1804 * because there is only one set of page tables.
1807 1805 * This will be allowed after private page table is
1808 1806 * supported.
1809 1807 */
1810 1808 /* need to return correct status error? */
1811 1809 return (0);
1812 1810 }
1813 1811
1814 1812
1815 1813 faultcode_t
1816 1814 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1817 1815 size_t len, enum fault_type type, enum seg_rw rw)
1818 1816 {
1819 1817 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1820 1818 struct seg *sptseg = shmd->shm_sptseg;
1821 1819 struct as *curspt = shmd->shm_sptas;
1822 1820 struct spt_data *sptd = sptseg->s_data;
1823 1821 pgcnt_t npages;
1824 1822 size_t size;
1825 1823 caddr_t segspt_addr, shm_addr;
1826 1824 page_t **ppa;
1827 1825 int i;
1828 1826 ulong_t an_idx = 0;
1829 1827 int err = 0;
1830 1828 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1831 1829 size_t pgsz;
1832 1830 pgcnt_t pgcnt;
1833 1831 caddr_t a;
1834 1832 pgcnt_t pidx;
1835 1833
1836 1834 #ifdef lint
1837 1835 hat = hat;
1838 1836 #endif
1839 1837 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1840 1838
1841 1839 /*
1842 1840 * Because of the way spt is implemented
1843 1841 * the realsize of the segment does not have to be
1844 1842 * equal to the segment size itself. The segment size is
1845 1843 * often in multiples of a page size larger than PAGESIZE.
1846 1844 * The realsize is rounded up to the nearest PAGESIZE
1847 1845 * based on what the user requested. This is a bit of
1848 1846 * ugliness that is historical but not easily fixed
1849 1847 * without re-designing the higher levels of ISM.
1850 1848 */
1851 1849 ASSERT(addr >= seg->s_base);
1852 1850 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1853 1851 return (FC_NOMAP);
1854 1852 /*
1855 1853 * For all of the following cases except F_PROT, we need to
1856 1854 * make any necessary adjustments to addr and len
1857 1855 * and get all of the necessary page_t's into an array called ppa[].
1858 1856 *
1859 1857 * The code in shmat() forces base addr and len of ISM segment
1860 1858 * to be aligned to largest page size supported. Therefore,
1861 1859 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1862 1860 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1863 1861 * in large pagesize chunks, or else we will screw up the HAT
1864 1862 * layer by calling hat_memload_array() with differing page sizes
1865 1863 * over a given virtual range.
1866 1864 */
1867 1865 pgsz = page_get_pagesize(sptseg->s_szc);
1868 1866 pgcnt = page_get_pagecnt(sptseg->s_szc);
1869 1867 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1870 1868 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1871 1869 npages = btopr(size);
1872 1870
1873 1871 /*
1874 1872 * Now we need to convert from addr in segshm to addr in segspt.
1875 1873 */
1876 1874 an_idx = seg_page(seg, shm_addr);
1877 1875 segspt_addr = sptseg->s_base + ptob(an_idx);
1878 1876
1879 1877 ASSERT((segspt_addr + ptob(npages)) <=
1880 1878 (sptseg->s_base + sptd->spt_realsize));
1881 1879 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1882 1880
1883 1881 switch (type) {
1884 1882
1885 1883 case F_SOFTLOCK:
1886 1884
1887 1885 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1888 1886 /*
1889 1887 * Fall through to the F_INVAL case to load up the hat layer
1890 1888 * entries with the HAT_LOAD_LOCK flag.
1891 1889 */
1892 1890 /* FALLTHRU */
1893 1891 case F_INVAL:
1894 1892
1895 1893 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1896 1894 return (FC_NOMAP);
1897 1895
1898 1896 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1899 1897
1900 1898 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1901 1899 if (err != 0) {
1902 1900 if (type == F_SOFTLOCK) {
1903 1901 atomic_add_long((ulong_t *)(
1904 1902 &(shmd->shm_softlockcnt)), -npages);
1905 1903 }
1906 1904 goto dism_err;
1907 1905 }
1908 1906 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1909 1907 a = segspt_addr;
1910 1908 pidx = 0;
1911 1909 if (type == F_SOFTLOCK) {
1912 1910
1913 1911 /*
1914 1912 * Load up the translation keeping it
1915 1913 * locked and don't unlock the page.
1916 1914 */
1917 1915 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1918 1916 hat_memload_array(sptseg->s_as->a_hat,
1919 1917 a, pgsz, &ppa[pidx], sptd->spt_prot,
1920 1918 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1921 1919 }
1922 1920 } else {
1923 1921 if (hat == seg->s_as->a_hat) {
1924 1922
1925 1923 /*
1926 1924 * Migrate pages marked for migration
1927 1925 */
1928 1926 if (lgrp_optimizations())
1929 1927 page_migrate(seg, shm_addr, ppa,
1930 1928 npages);
1931 1929
1932 1930 /* CPU HAT */
1933 1931 for (; pidx < npages;
1934 1932 a += pgsz, pidx += pgcnt) {
1935 1933 hat_memload_array(sptseg->s_as->a_hat,
1936 1934 a, pgsz, &ppa[pidx],
1937 1935 sptd->spt_prot,
1938 1936 HAT_LOAD_SHARE);
1939 1937 }
1940 1938 } else {
1941 1939 /* XHAT. Pass real address */
1942 1940 hat_memload_array(hat, shm_addr,
1943 1941 size, ppa, sptd->spt_prot, HAT_LOAD_SHARE);
1944 1942 }
1945 1943
1946 1944 /*
1947 1945 * And now drop the SE_SHARED lock(s).
1948 1946 */
1949 1947 if (dyn_ism_unmap) {
1950 1948 for (i = 0; i < npages; i++) {
1951 1949 page_unlock(ppa[i]);
1952 1950 }
1953 1951 }
1954 1952 }
1955 1953
1956 1954 if (!dyn_ism_unmap) {
1957 1955 if (hat_share(seg->s_as->a_hat, shm_addr,
1958 1956 curspt->a_hat, segspt_addr, ptob(npages),
1959 1957 seg->s_szc) != 0) {
1960 1958 panic("hat_share err in DISM fault");
1961 1959 /* NOTREACHED */
1962 1960 }
1963 1961 if (type == F_INVAL) {
1964 1962 for (i = 0; i < npages; i++) {
1965 1963 page_unlock(ppa[i]);
1966 1964 }
1967 1965 }
1968 1966 }
1969 1967 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1970 1968 dism_err:
1971 1969 kmem_free(ppa, npages * sizeof (page_t *));
1972 1970 return (err);
1973 1971
1974 1972 case F_SOFTUNLOCK:
1975 1973
1976 1974 /*
1977 1975 * This is a bit ugly, we pass in the real seg pointer,
1978 1976 * but the segspt_addr is the virtual address within the
1979 1977 * dummy seg.
1980 1978 */
1981 1979 segspt_softunlock(seg, segspt_addr, size, rw);
1982 1980 return (0);
1983 1981
1984 1982 case F_PROT:
1985 1983
1986 1984 /*
1987 1985 * This takes care of the unusual case where a user
1988 1986 * allocates a stack in shared memory and a register
1989 1987 * window overflow is written to that stack page before
1990 1988 * it is otherwise modified.
1991 1989 *
1992 1990 * We can get away with this because ISM segments are
1993 1991 * always rw. Other than this unusual case, there
1994 1992 * should be no instances of protection violations.
1995 1993 */
1996 1994 return (0);
1997 1995
1998 1996 default:
1999 1997 #ifdef DEBUG
2000 1998 panic("segspt_dismfault default type?");
2001 1999 #else
2002 2000 return (FC_NOMAP);
2003 2001 #endif
2004 2002 }
2005 2003 }
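
Both fault handlers above widen the faulting range to large-page boundaries with P2ALIGN()/P2ROUNDUP() before loading translations, so that hat_memload_array() always sees whole large pages. The short stand-alone program below reproduces that arithmetic with the usual <sys/sysmacros.h>-style macro definitions; the 4 MB page size and the addresses are made-up values chosen only to show a fault that straddles a large-page boundary.

    #include <stdio.h>

    /* same shape as the <sys/sysmacros.h> macros; align must be a power of two */
    #define P2ALIGN(x, align)       ((x) & -(align))
    #define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

    int
    main(void)
    {
            unsigned long long pgsz = 4ULL << 20;           /* hypothetical sptseg page size */
            unsigned long long addr = 0x10007ff000ULL;      /* hypothetical faulting address */
            unsigned long long len = 0x2000;                /* hypothetical faulting length */

            unsigned long long shm_addr = P2ALIGN(addr, pgsz);
            unsigned long long size = P2ROUNDUP(addr + len - shm_addr, pgsz);

            /*
             * Prints shm_addr=0x1000400000 size=0x800000: the 8 KB fault
             * straddles a 4 MB boundary, so two whole large pages end up
             * being faulted in.
             */
            (void) printf("shm_addr=%#llx size=%#llx\n", shm_addr, size);
            return (0);
    }
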
2006 2004
2007 2005
2008 2006 faultcode_t
2009 2007 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2010 2008 size_t len, enum fault_type type, enum seg_rw rw)
2011 2009 {
2012 2010 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2013 2011 struct seg *sptseg = shmd->shm_sptseg;
2014 2012 struct as *curspt = shmd->shm_sptas;
2015 2013 struct spt_data *sptd = sptseg->s_data;
2016 2014 pgcnt_t npages;
2017 2015 size_t size;
2018 2016 caddr_t sptseg_addr, shm_addr;
2019 2017 page_t *pp, **ppa;
2020 2018 int i;
2021 2019 u_offset_t offset;
2022 2020 ulong_t anon_index = 0;
2023 2021 struct vnode *vp;
2024 2022 struct anon_map *amp; /* XXX - for locknest */
2025 2023 struct anon *ap = NULL;
2026 2024 size_t pgsz;
2027 2025 pgcnt_t pgcnt;
2028 2026 caddr_t a;
2029 2027 pgcnt_t pidx;
2030 2028 size_t sz;
2031 2029
2032 2030 #ifdef lint
2033 2031 hat = hat;
2034 2032 #endif
2035 2033
2036 2034 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2037 2035
2038 2036 if (sptd->spt_flags & SHM_PAGEABLE) {
2039 2037 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2040 2038 }
2041 2039
2042 2040 /*
2043 2041 * Because of the way spt is implemented
2044 2042 * the realsize of the segment does not have to be
2045 2043 * equal to the segment size itself. The segment size is
2046 2044 * often in multiples of a page size larger than PAGESIZE.
2047 2045 * The realsize is rounded up to the nearest PAGESIZE
2048 2046 * based on what the user requested. This is a bit of
2049 2047 * ugliness that is historical but not easily fixed
2050 2048 * without re-designing the higher levels of ISM.
2051 2049 */
2052 2050 ASSERT(addr >= seg->s_base);
2053 2051 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2054 2052 return (FC_NOMAP);
2055 2053 /*
2056 2054 * For all of the following cases except F_PROT, we need to
2057 2055 * make any necessary adjustments to addr and len
2058 2056 * and get all of the necessary page_t's into an array called ppa[].
2059 2057 *
2060 2058 * The code in shmat() forces base addr and len of ISM segment
2061 2059 * to be aligned to largest page size supported. Therefore,
2062 2060 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2063 2061 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2064 2062 * in large pagesize chunks, or else we will screw up the HAT
2065 2063 * layer by calling hat_memload_array() with differing page sizes
2066 2064 * over a given virtual range.
2067 2065 */
2068 2066 pgsz = page_get_pagesize(sptseg->s_szc);
2069 2067 pgcnt = page_get_pagecnt(sptseg->s_szc);
2070 2068 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2071 2069 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2072 2070 npages = btopr(size);
2073 2071
2074 2072 /*
2075 2073 * Now we need to convert from addr in segshm to addr in segspt.
2076 2074 */
2077 2075 anon_index = seg_page(seg, shm_addr);
2078 2076 sptseg_addr = sptseg->s_base + ptob(anon_index);
2079 2077
2080 2078 /*
2081 2079 * And now we may have to adjust npages downward if we have
2082 2080 * exceeded the realsize of the segment or initial anon
2083 2081 * allocations.
2084 2082 */
2085 2083 if ((sptseg_addr + ptob(npages)) >
2086 2084 (sptseg->s_base + sptd->spt_realsize))
2087 2085 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2088 2086
2089 2087 npages = btopr(size);
2090 2088
2091 2089 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2092 2090 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2093 2091
2094 2092 switch (type) {
2095 2093
2096 2094 case F_SOFTLOCK:
2097 2095
2098 2096 /*
2099 2097 * availrmem is decremented once during anon_swap_adjust()
2100 2098 * and is incremented during the anon_unresv(), which is
2101 2099 * called from shm_rm_amp() when the segment is destroyed.
2102 2100 */
2103 2101 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2104 2102 /*
2105 2103 * Some platforms assume that ISM pages are SE_SHARED
2106 2104 * locked for the entire life of the segment.
2107 2105 */
2108 2106 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2109 2107 return (0);
2110 2108 /*
2111 2109 * Fall through to the F_INVAL case to load up the hat layer
2112 2110 * entries with the HAT_LOAD_LOCK flag.
2113 2111 */
2114 2112
2115 2113 /* FALLTHRU */
2116 2114 case F_INVAL:
2117 2115
2118 2116 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2119 2117 return (FC_NOMAP);
2120 2118
2121 2119 /*
2122 2120 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2123 2121 * may still rely on this call to hat_share(). That
2124 2122 * would imply that those hats can fault on a
2125 2123 * HAT_LOAD_LOCK translation, which would seem
2126 2124 * contradictory.
2127 2125 */
2128 2126 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2129 2127 if (hat_share(seg->s_as->a_hat, seg->s_base,
2130 2128 curspt->a_hat, sptseg->s_base,
2131 2129 sptseg->s_size, sptseg->s_szc) != 0) {
2132 2130 panic("hat_share error in ISM fault");
2133 2131 /*NOTREACHED*/
2134 2132 }
2135 2133 return (0);
2136 2134 }
2137 2135 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2138 2136
2139 2137 /*
2140 2138 * I see no need to lock the real seg,
2141 2139 * here, because all of our work will be on the underlying
2142 2140 * dummy seg.
2143 2141 *
2144 2142 * sptseg_addr and npages now account for large pages.
2145 2143 */
2146 2144 amp = sptd->spt_amp;
2147 2145 ASSERT(amp != NULL);
2148 2146 anon_index = seg_page(sptseg, sptseg_addr);
2149 2147
2150 2148 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2151 2149 for (i = 0; i < npages; i++) {
2152 2150 ap = anon_get_ptr(amp->ahp, anon_index++);
2153 2151 ASSERT(ap != NULL);
2154 2152 swap_xlate(ap, &vp, &offset);
2155 2153 pp = page_lookup(vp, offset, SE_SHARED);
2156 2154 ASSERT(pp != NULL);
2157 2155 ppa[i] = pp;
2158 2156 }
2159 2157 ANON_LOCK_EXIT(&amp->a_rwlock);
2160 2158 ASSERT(i == npages);
2161 2159
2162 2160 /*
2163 2161 * We are already holding the as->a_lock on the user's
2164 2162 * real segment, but we need to hold the a_lock on the
2165 2163 * underlying dummy as. This is mostly to satisfy the
2166 2164 * underlying HAT layer.
2167 2165 */
2168 2166 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2169 2167 a = sptseg_addr;
2170 2168 pidx = 0;
2171 2169 if (type == F_SOFTLOCK) {
2172 2170 /*
2173 2171 * Load up the translation keeping it
2174 2172 * locked and don't unlock the page.
2175 2173 */
2176 2174 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2177 2175 sz = MIN(pgsz, ptob(npages - pidx));
2178 2176 hat_memload_array(sptseg->s_as->a_hat, a,
2179 2177 sz, &ppa[pidx], sptd->spt_prot,
2180 2178 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2181 2179 }
2182 2180 } else {
2183 2181 if (hat == seg->s_as->a_hat) {
2184 2182
2185 2183 /*
2186 2184 * Migrate pages marked for migration.
2187 2185 */
2188 2186 if (lgrp_optimizations())
2189 2187 page_migrate(seg, shm_addr, ppa,
2190 2188 npages);
2191 2189
2192 2190 /* CPU HAT */
2193 2191 for (; pidx < npages;
2194 2192 a += pgsz, pidx += pgcnt) {
2195 2193 sz = MIN(pgsz, ptob(npages - pidx));
2196 2194 hat_memload_array(sptseg->s_as->a_hat,
2197 2195 a, sz, &ppa[pidx],
2198 2196 sptd->spt_prot, HAT_LOAD_SHARE);
2199 2197 }
2200 2198 } else {
2201 2199 /* XHAT. Pass real address */
2202 2200 hat_memload_array(hat, shm_addr,
2203 2201 ptob(npages), ppa, sptd->spt_prot,
2204 2202 HAT_LOAD_SHARE);
2205 2203 }
2206 2204
2207 2205 /*
2208 2206 * And now drop the SE_SHARED lock(s).
2209 2207 */
2210 2208 for (i = 0; i < npages; i++)
2211 2209 page_unlock(ppa[i]);
2212 2210 }
2213 2211 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2214 2212
2215 2213 kmem_free(ppa, sizeof (page_t *) * npages);
2216 2214 return (0);
2217 2215 case F_SOFTUNLOCK:
2218 2216
2219 2217 /*
2220 2218 * This is a bit ugly, we pass in the real seg pointer,
2221 2219 * but the sptseg_addr is the virtual address within the
2222 2220 * dummy seg.
2223 2221 */
2224 2222 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2225 2223 return (0);
2226 2224
2227 2225 case F_PROT:
2228 2226
2229 2227 /*
2230 2228 * This takes care of the unusual case where a user
2231 2229 * allocates a stack in shared memory and a register
2232 2230 * window overflow is written to that stack page before
2233 2231 * it is otherwise modified.
2234 2232 *
2235 2233 * We can get away with this because ISM segments are
2236 2234 * always rw. Other than this unusual case, there
2237 2235 * should be no instances of protection violations.
2238 2236 */
2239 2237 return (0);
2240 2238
2241 2239 default:
2242 2240 #ifdef DEBUG
2243 2241 cmn_err(CE_WARN, "segspt_shmfault default type?");
2244 2242 #endif
2245 2243 return (FC_NOMAP);
2246 2244 }
2247 2245 }
2248 2246
2249 2247 /*ARGSUSED*/
2250 2248 static faultcode_t
2251 2249 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2252 2250 {
2253 2251 return (0);
2254 2252 }
2255 2253
2256 2254 /*ARGSUSED*/
2257 2255 static int
2258 2256 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2259 2257 {
2260 2258 return (0);
2261 2259 }
2262 2260
2263 2261 /*ARGSUSED*/
2264 2262 static size_t
2265 2263 segspt_shmswapout(struct seg *seg)
2266 2264 {
2267 2265 return (0);
2268 2266 }
2269 2267
2270 2268 /*
2271 2269 * duplicate the shared page tables
2272 2270 */
2273 2271 int
2274 2272 segspt_shmdup(struct seg *seg, struct seg *newseg)
2275 2273 {
2276 2274 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2277 2275 struct anon_map *amp = shmd->shm_amp;
2278 2276 struct shm_data *shmd_new;
2279 2277 struct seg *spt_seg = shmd->shm_sptseg;
2280 2278 struct spt_data *sptd = spt_seg->s_data;
2281 2279 int error = 0;
2282 2280
2283 2281 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2284 2282
2285 2283 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2286 2284 newseg->s_data = (void *)shmd_new;
2287 2285 shmd_new->shm_sptas = shmd->shm_sptas;
2288 2286 shmd_new->shm_amp = amp;
2289 2287 shmd_new->shm_sptseg = shmd->shm_sptseg;
2290 2288 newseg->s_ops = &segspt_shmops;
2291 2289 newseg->s_szc = seg->s_szc;
2292 2290 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2293 2291
2294 2292 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2295 2293 amp->refcnt++;
2296 2294 ANON_LOCK_EXIT(&amp->a_rwlock);
2297 2295
2298 2296 if (sptd->spt_flags & SHM_PAGEABLE) {
2299 2297 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2300 2298 shmd_new->shm_lckpgs = 0;
2301 2299 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2302 2300 if ((error = hat_share(newseg->s_as->a_hat,
2303 2301 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2304 2302 seg->s_size, seg->s_szc)) != 0) {
2305 2303 kmem_free(shmd_new->shm_vpage,
2306 2304 btopr(amp->size));
2307 2305 }
2308 2306 }
2309 2307 return (error);
2310 2308 } else {
2311 2309 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2312 2310 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2313 2311 seg->s_szc));
2314 2312
2315 2313 }
2316 2314 }
2317 2315
2318 2316 /*ARGSUSED*/
2319 2317 int
2320 2318 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2321 2319 {
2322 2320 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2323 2321 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2324 2322
2325 2323 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2326 2324
2327 2325 /*
2328 2326 * ISM segment is always rw.
2329 2327 */
2330 2328 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2331 2329 }
2332 2330
2333 2331 /*
2334 2332 * Return an array of locked large pages, for empty slots allocate
2335 2333 * private zero-filled anon pages.
2336 2334 */
2337 2335 static int
2338 2336 spt_anon_getpages(
2339 2337 struct seg *sptseg,
2340 2338 caddr_t sptaddr,
2341 2339 size_t len,
2342 2340 page_t *ppa[])
2343 2341 {
2344 2342 struct spt_data *sptd = sptseg->s_data;
2345 2343 struct anon_map *amp = sptd->spt_amp;
2346 2344 enum seg_rw rw = sptd->spt_prot;
2347 2345 uint_t szc = sptseg->s_szc;
2348 2346 size_t pg_sz, share_sz = page_get_pagesize(szc);
2349 2347 pgcnt_t lp_npgs;
2350 2348 caddr_t lp_addr, e_sptaddr;
2351 2349 uint_t vpprot, ppa_szc = 0;
2352 2350 struct vpage *vpage = NULL;
2353 2351 ulong_t j, ppa_idx;
2354 2352 int err, ierr = 0;
2355 2353 pgcnt_t an_idx;
2356 2354 anon_sync_obj_t cookie;
2357 2355 int anon_locked = 0;
2358 2356 pgcnt_t amp_pgs;
2359 2357
2360 2358
2361 2359 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2362 2360 ASSERT(len != 0);
2363 2361
2364 2362 pg_sz = share_sz;
2365 2363 lp_npgs = btop(pg_sz);
2366 2364 lp_addr = sptaddr;
2367 2365 e_sptaddr = sptaddr + len;
2368 2366 an_idx = seg_page(sptseg, sptaddr);
2369 2367 ppa_idx = 0;
2370 2368
2371 2369 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2372 2370
2373 2371 amp_pgs = page_get_pagecnt(amp->a_szc);
2374 2372
2375 2373 /*CONSTCOND*/
2376 2374 while (1) {
2377 2375 for (; lp_addr < e_sptaddr;
2378 2376 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2379 2377
2380 2378 /*
2381 2379 * If we're currently locked, and we get to a new
2382 2380 * page, unlock our current anon chunk.
2383 2381 */
2384 2382 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2385 2383 anon_array_exit(&cookie);
2386 2384 anon_locked = 0;
2387 2385 }
2388 2386 if (!anon_locked) {
2389 2387 anon_array_enter(amp, an_idx, &cookie);
2390 2388 anon_locked = 1;
2391 2389 }
2392 2390 ppa_szc = (uint_t)-1;
2393 2391 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2394 2392 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2395 2393 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2396 2394
2397 2395 if (ierr != 0) {
2398 2396 if (ierr > 0) {
2399 2397 err = FC_MAKE_ERR(ierr);
2400 2398 goto lpgs_err;
2401 2399 }
2402 2400 break;
2403 2401 }
2404 2402 }
2405 2403 if (lp_addr == e_sptaddr) {
2406 2404 break;
2407 2405 }
2408 2406 ASSERT(lp_addr < e_sptaddr);
2409 2407
2410 2408 /*
2411 2409 * ierr == -1 means we failed to allocate a large page.
2412 2410 * so do a size down operation.
2413 2411 *
2414 2412 * ierr == -2 means some other process that privately shares
2415 2413 * pages with this process has allocated a larger page and we
2416 2414 * need to retry with larger pages. So do a size up
2417 2415 * operation. This relies on the fact that large pages are
2418 2416 * never partially shared i.e. if we share any constituent
2419 2417 * page of a large page with another process we must share the
2420 2418 * entire large page. Note this cannot happen for SOFTLOCK
2421 2419 * case, unless current address (lpaddr) is at the beginning
2422 2420 * of the next page size boundary because the other process
2423 2421 * couldn't have relocated locked pages.
2424 2422 */
2425 2423 ASSERT(ierr == -1 || ierr == -2);
2426 2424 if (segvn_anypgsz) {
2427 2425 ASSERT(ierr == -2 || szc != 0);
2428 2426 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2429 2427 szc = (ierr == -1) ? szc - 1 : szc + 1;
2430 2428 } else {
2431 2429 /*
2432 2430 * For faults and segvn_anypgsz == 0
2433 2431 * we need to be careful not to loop forever
2434 2432 * if existing page is found with szc other
2435 2433 * than 0 or seg->s_szc. This could be due
2436 2434 * to page relocations on behalf of DR or
2437 2435 * more likely large page creation. For this
2438 2436 * case simply re-size to existing page's szc
2439 2437 * if returned by anon_map_getpages().
2440 2438 */
2441 2439 if (ppa_szc == (uint_t)-1) {
2442 2440 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2443 2441 } else {
2444 2442 ASSERT(ppa_szc <= sptseg->s_szc);
2445 2443 ASSERT(ierr == -2 || ppa_szc < szc);
2446 2444 ASSERT(ierr == -1 || ppa_szc > szc);
2447 2445 szc = ppa_szc;
2448 2446 }
2449 2447 }
2450 2448 pg_sz = page_get_pagesize(szc);
2451 2449 lp_npgs = btop(pg_sz);
2452 2450 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2453 2451 }
2454 2452 if (anon_locked) {
2455 2453 anon_array_exit(&cookie);
2456 2454 }
2457 2455 ANON_LOCK_EXIT(&amp->a_rwlock);
2458 2456 return (0);
2459 2457
2460 2458 lpgs_err:
2461 2459 if (anon_locked) {
2462 2460 anon_array_exit(&cookie);
2463 2461 }
2464 2462 ANON_LOCK_EXIT(&amp->a_rwlock);
2465 2463 for (j = 0; j < ppa_idx; j++)
2466 2464 page_unlock(ppa[j]);
2467 2465 return (err);
2468 2466 }
2469 2467
2470 2468 /*
2471 2469 * count the number of bytes in a set of spt pages that are currently not
2472 2470 * locked
2473 2471 */
2474 2472 static rctl_qty_t
2475 2473 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2476 2474 {
2477 2475 ulong_t i;
2478 2476 rctl_qty_t unlocked = 0;
2479 2477
2480 2478 for (i = 0; i < npages; i++) {
2481 2479 if (ppa[i]->p_lckcnt == 0)
2482 2480 unlocked += PAGESIZE;
2483 2481 }
2484 2482 return (unlocked);
2485 2483 }
2486 2484
2487 2485 extern u_longlong_t randtick(void);
2488 2486 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2489 2487 #define NLCK (NCPU_P2)
2490 2488 /* Random number with a range [0, n-1], n must be power of two */
2491 2489 #define RAND_P2(n) \
2492 2490 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
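
For a power-of-two n, the & ((n) - 1) mask keeps only the low log2(n) bits, so RAND_P2(n) always lands in [0, n-1] and the batch size NLCK + RAND_P2(NLCK) chosen in spt_lockpages() and spt_unlockpages() below always falls in [NLCK, 2*NLCK - 1]. Below is a stand-alone sketch of the same masking trick with an invented rand_p2() stand-in; the xor of a stack address and a timestamp is merely a cheap, non-cryptographic entropy source, as in the macro above.

    #include <stdio.h>
    #include <stdint.h>
    #include <time.h>

    /* illustrative stand-in for RAND_P2(); n must be a power of two */
    static unsigned long
    rand_p2(unsigned long n)
    {
            int on_stack;
            unsigned long mix =
                (unsigned long)(uintptr_t)&on_stack ^ (unsigned long)clock();

            return (mix & (n - 1));
    }

    int
    main(void)
    {
            unsigned long nlck = 8;                 /* hypothetical NCPU_P2 */
            unsigned long batch = nlck + rand_p2(nlck);

            /* batch is always in [8, 15] */
            (void) printf("batch=%lu\n", batch);
            return (0);
    }
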
2493 2491
2494 2492 int
2495 2493 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2496 2494 page_t **ppa, ulong_t *lockmap, size_t pos,
2497 2495 rctl_qty_t *locked)
2498 2496 {
2499 2497 struct shm_data *shmd = seg->s_data;
2500 2498 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2501 2499 ulong_t i;
2502 2500 int kernel;
2503 2501 pgcnt_t nlck = 0;
2504 2502 int rv = 0;
2505 2503 int use_reserved = 1;
2506 2504
2507 2505 /* return the number of bytes actually locked */
2508 2506 *locked = 0;
2509 2507
2510 2508 /*
2511 2509 * To avoid contention on freemem_lock, availrmem and pages_locked
2512 2510 * global counters are updated only every nlck locked pages instead of
2513 2511 * every time. Reserve nlck locks up front and deduct from this
2514 2512 * reservation for each page that requires a lock. When the reservation
2515 2513 * is consumed, reserve again. nlck is randomized, so the competing
2516 2514 * threads do not fall into a cyclic lock contention pattern. When
2517 2515 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2518 2516 * is used to lock pages.
2519 2517 */
2520 2518 for (i = 0; i < npages; anon_index++, pos++, i++) {
2521 2519 if (nlck == 0 && use_reserved == 1) {
2522 2520 nlck = NLCK + RAND_P2(NLCK);
2523 2521 /* if fewer loops left, decrease nlck */
2524 2522 nlck = MIN(nlck, npages - i);
2525 2523 /*
2526 2524 * Reserve nlck locks up front and deduct from this
2527 2525 * reservation for each page that requires a lock. When
2528 2526 * the reservation is consumed, reserve again.
2529 2527 */
2530 2528 mutex_enter(&freemem_lock);
2531 2529 if ((availrmem - nlck) < pages_pp_maximum) {
2532 2530 /* Do not do advance memory reserves */
2533 2531 use_reserved = 0;
2534 2532 } else {
2535 2533 availrmem -= nlck;
2536 2534 pages_locked += nlck;
2537 2535 }
2538 2536 mutex_exit(&freemem_lock);
2539 2537 }
2540 2538 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2541 2539 if (sptd->spt_ppa_lckcnt[anon_index] <
2542 2540 (ushort_t)DISM_LOCK_MAX) {
2543 2541 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2544 2542 (ushort_t)DISM_LOCK_MAX) {
2545 2543 cmn_err(CE_WARN,
2546 2544 "DISM page lock limit "
2547 2545 "reached on DISM offset 0x%lx\n",
2548 2546 anon_index << PAGESHIFT);
2549 2547 }
2550 2548 kernel = (sptd->spt_ppa &&
2551 2549 sptd->spt_ppa[anon_index]);
2552 2550 if (!page_pp_lock(ppa[i], 0, kernel ||
2553 2551 use_reserved)) {
2554 2552 sptd->spt_ppa_lckcnt[anon_index]--;
2555 2553 rv = EAGAIN;
2556 2554 break;
2557 2555 }
2558 2556 /* if this is a newly locked page, count it */
2559 2557 if (ppa[i]->p_lckcnt == 1) {
2560 2558 if (kernel == 0 && use_reserved == 1)
2561 2559 nlck--;
2562 2560 *locked += PAGESIZE;
2563 2561 }
2564 2562 shmd->shm_lckpgs++;
2565 2563 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2566 2564 if (lockmap != NULL)
2567 2565 BT_SET(lockmap, pos);
2568 2566 }
2569 2567 }
2570 2568 }
2571 2569 /* Return unused lock reservation */
2572 2570 if (nlck != 0 && use_reserved == 1) {
2573 2571 mutex_enter(&freemem_lock);
2574 2572 availrmem += nlck;
2575 2573 pages_locked -= nlck;
2576 2574 mutex_exit(&freemem_lock);
2577 2575 }
2578 2576
2579 2577 return (rv);
2580 2578 }
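
spt_lockpages() above keeps freemem_lock traffic low with a batched reservation: a randomized number of lock credits is carved out of availrmem up front, one credit is consumed for each page that is newly locked, and whatever is left over is returned in a single trip at the end. The following stand-alone sketch shows that pattern against an invented global counter, using pthreads in place of the kernel mutex; names such as global_avail and lock_items are illustrative only.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
    static long global_avail = 1000;        /* analogue of availrmem */

    static void
    lock_items(const int *needs_lock, int nitems)
    {
            int batch = 0;

            for (int i = 0; i < nitems; i++) {
                    if (batch == 0) {
                            /* refill: reserve up to 8 credits in one trip */
                            batch = (nitems - i < 8) ? nitems - i : 8;
                            (void) pthread_mutex_lock(&counter_lock);
                            global_avail -= batch;
                            (void) pthread_mutex_unlock(&counter_lock);
                    }
                    /* only newly locked items consume a credit (cf. p_lckcnt == 1) */
                    if (needs_lock[i])
                            batch--;
            }
            if (batch != 0) {
                    /* return the unused part of the reservation */
                    (void) pthread_mutex_lock(&counter_lock);
                    global_avail += batch;
                    (void) pthread_mutex_unlock(&counter_lock);
            }
    }

    int
    main(void)
    {
            int needs_lock[5] = { 1, 0, 1, 1, 0 };

            lock_items(needs_lock, 5);
            (void) printf("global_avail=%ld\n", global_avail);      /* 997 */
            return (0);
    }
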
2581 2579
2582 2580 int
2583 2581 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2584 2582 rctl_qty_t *unlocked)
2585 2583 {
2586 2584 struct shm_data *shmd = seg->s_data;
2587 2585 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2588 2586 struct anon_map *amp = sptd->spt_amp;
2589 2587 struct anon *ap;
2590 2588 struct vnode *vp;
2591 2589 u_offset_t off;
2592 2590 struct page *pp;
2593 2591 int kernel;
2594 2592 anon_sync_obj_t cookie;
2595 2593 ulong_t i;
2596 2594 pgcnt_t nlck = 0;
2597 2595 pgcnt_t nlck_limit = NLCK;
2598 2596
2599 2597 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2600 2598 for (i = 0; i < npages; i++, anon_index++) {
2601 2599 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2602 2600 anon_array_enter(amp, anon_index, &cookie);
2603 2601 ap = anon_get_ptr(amp->ahp, anon_index);
2604 2602 ASSERT(ap);
2605 2603
2606 2604 swap_xlate(ap, &vp, &off);
2607 2605 anon_array_exit(&cookie);
2608 2606 pp = page_lookup(vp, off, SE_SHARED);
2609 2607 ASSERT(pp);
2610 2608 /*
2611 2609 * availrmem is decremented only for pages which are not
2612 2610 * in seg pcache, for pages in seg pcache availrmem was
2613 2611 * decremented in _dismpagelock()
2614 2612 */
2615 2613 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2616 2614 ASSERT(pp->p_lckcnt > 0);
2617 2615
2618 2616 /*
2619 2617 * lock page but do not change availrmem, we do it
2620 2618 * ourselves every nlck loops.
2621 2619 */
2622 2620 page_pp_unlock(pp, 0, 1);
2623 2621 if (pp->p_lckcnt == 0) {
2624 2622 if (kernel == 0)
2625 2623 nlck++;
2626 2624 *unlocked += PAGESIZE;
2627 2625 }
2628 2626 page_unlock(pp);
2629 2627 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2630 2628 sptd->spt_ppa_lckcnt[anon_index]--;
2631 2629 shmd->shm_lckpgs--;
2632 2630 }
2633 2631
2634 2632 /*
2635 2633 * To reduce freemem_lock contention, do not update availrmem
2636 2634 * until at least NLCK pages have been unlocked.
2637 2635 * 1. No need to update if nlck is zero
2638 2636 * 2. Always update if the last iteration
2639 2637 */
2640 2638 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2641 2639 mutex_enter(&freemem_lock);
2642 2640 availrmem += nlck;
2643 2641 pages_locked -= nlck;
2644 2642 mutex_exit(&freemem_lock);
2645 2643 nlck = 0;
2646 2644 nlck_limit = NLCK + RAND_P2(NLCK);
2647 2645 }
2648 2646 }
2649 2647 ANON_LOCK_EXIT(&amp->a_rwlock);
2650 2648
2651 2649 return (0);
2652 2650 }
2653 2651
2654 2652 /*ARGSUSED*/
2655 2653 static int
2656 2654 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2657 2655 int attr, int op, ulong_t *lockmap, size_t pos)
2658 2656 {
2659 2657 struct shm_data *shmd = seg->s_data;
2660 2658 struct seg *sptseg = shmd->shm_sptseg;
2661 2659 struct spt_data *sptd = sptseg->s_data;
2662 2660 struct kshmid *sp = sptd->spt_amp->a_sp;
2663 2661 pgcnt_t npages, a_npages;
2664 2662 page_t **ppa;
2665 2663 pgcnt_t an_idx, a_an_idx, ppa_idx;
2666 2664 caddr_t spt_addr, a_addr; /* spt and aligned address */
2667 2665 size_t a_len; /* aligned len */
2668 2666 size_t share_sz;
2669 2667 ulong_t i;
2670 2668 int sts = 0;
2671 2669 rctl_qty_t unlocked = 0;
2672 2670 rctl_qty_t locked = 0;
2673 2671 struct proc *p = curproc;
2674 2672 kproject_t *proj;
2675 2673
2676 2674 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2677 2675 ASSERT(sp != NULL);
2678 2676
2679 2677 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2680 2678 return (0);
2681 2679 }
2682 2680
2683 2681 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2684 2682 an_idx = seg_page(seg, addr);
2685 2683 npages = btopr(len);
2686 2684
2687 2685 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2688 2686 return (ENOMEM);
2689 2687 }
2690 2688
2691 2689 /*
2692 2690 * A shm's project never changes, so no lock needed.
2693 2691 * The shm has a hold on the project, so it will not go away.
2694 2692 * Since we have a mapping to shm within this zone, we know
2695 2693 * that the zone will not go away.
2696 2694 */
2697 2695 proj = sp->shm_perm.ipc_proj;
2698 2696
2699 2697 if (op == MC_LOCK) {
2700 2698
2701 2699 /*
2702 2700 * Need to align addr and size request if they are not
2703 2701 * aligned so we can always allocate large page(s) however
2704 2702 * we only lock what was requested in initial request.
2705 2703 */
2706 2704 share_sz = page_get_pagesize(sptseg->s_szc);
2707 2705 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2708 2706 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2709 2707 share_sz);
2710 2708 a_npages = btop(a_len);
2711 2709 a_an_idx = seg_page(seg, a_addr);
2712 2710 spt_addr = sptseg->s_base + ptob(a_an_idx);
2713 2711 ppa_idx = an_idx - a_an_idx;
2714 2712
2715 2713 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2716 2714 KM_NOSLEEP)) == NULL) {
2717 2715 return (ENOMEM);
2718 2716 }
2719 2717
2720 2718 /*
2721 2719 * Don't cache any new pages for IO and
2722 2720 * flush any cached pages.
2723 2721 */
2724 2722 mutex_enter(&sptd->spt_lock);
2725 2723 if (sptd->spt_ppa != NULL)
2726 2724 sptd->spt_flags |= DISM_PPA_CHANGED;
2727 2725
2728 2726 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2729 2727 if (sts != 0) {
2730 2728 mutex_exit(&sptd->spt_lock);
2731 2729 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2732 2730 return (sts);
2733 2731 }
2734 2732
2735 2733 mutex_enter(&sp->shm_mlock);
2736 2734 /* enforce locked memory rctl */
2737 2735 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2738 2736
2739 2737 mutex_enter(&p->p_lock);
2740 2738 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2741 2739 mutex_exit(&p->p_lock);
2742 2740 sts = EAGAIN;
2743 2741 } else {
2744 2742 mutex_exit(&p->p_lock);
2745 2743 sts = spt_lockpages(seg, an_idx, npages,
2746 2744 &ppa[ppa_idx], lockmap, pos, &locked);
2747 2745
2748 2746 /*
2749 2747 * correct locked count if not all pages could be
2750 2748 * locked
2751 2749 */
2752 2750 if ((unlocked - locked) > 0) {
2753 2751 rctl_decr_locked_mem(NULL, proj,
2754 2752 (unlocked - locked), 0);
2755 2753 }
2756 2754 }
2757 2755 /*
2758 2756 * unlock pages
2759 2757 */
2760 2758 for (i = 0; i < a_npages; i++)
2761 2759 page_unlock(ppa[i]);
2762 2760 if (sptd->spt_ppa != NULL)
2763 2761 sptd->spt_flags |= DISM_PPA_CHANGED;
2764 2762 mutex_exit(&sp->shm_mlock);
2765 2763 mutex_exit(&sptd->spt_lock);
2766 2764
2767 2765 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2768 2766
2769 2767 } else if (op == MC_UNLOCK) { /* unlock */
2770 2768 page_t **ppa;
2771 2769
2772 2770 mutex_enter(&sptd->spt_lock);
2773 2771 if (shmd->shm_lckpgs == 0) {
2774 2772 mutex_exit(&sptd->spt_lock);
2775 2773 return (0);
2776 2774 }
2777 2775 /*
2778 2776 * Don't cache new IO pages.
2779 2777 */
2780 2778 if (sptd->spt_ppa != NULL)
2781 2779 sptd->spt_flags |= DISM_PPA_CHANGED;
2782 2780
2783 2781 mutex_enter(&sp->shm_mlock);
2784 2782 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2785 2783 if ((ppa = sptd->spt_ppa) != NULL)
2786 2784 sptd->spt_flags |= DISM_PPA_CHANGED;
2787 2785 mutex_exit(&sptd->spt_lock);
2788 2786
2789 2787 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2790 2788 mutex_exit(&sp->shm_mlock);
2791 2789
2792 2790 if (ppa != NULL)
2793 2791 seg_ppurge_wiredpp(ppa);
2794 2792 }
2795 2793 return (sts);
2796 2794 }
2797 2795
2798 2796 /*ARGSUSED*/
2799 2797 int
2800 2798 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2801 2799 {
2802 2800 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2803 2801 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2804 2802 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2805 2803
2806 2804 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2807 2805
2808 2806 /*
2809 2807 * ISM segment is always rw.
2810 2808 */
2811 2809 while (--pgno >= 0)
2812 2810 *protv++ = sptd->spt_prot;
2813 2811 return (0);
2814 2812 }
2815 2813
2816 2814 /*ARGSUSED*/
2817 2815 u_offset_t
2818 2816 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2819 2817 {
2820 2818 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2821 2819
2822 2820 /* Offset does not matter in ISM memory */
2823 2821
2824 2822 return ((u_offset_t)0);
2825 2823 }
2826 2824
2827 2825 /* ARGSUSED */
2828 2826 int
2829 2827 segspt_shmgettype(struct seg *seg, caddr_t addr)
2830 2828 {
2831 2829 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2832 2830 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2833 2831
2834 2832 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2835 2833
2836 2834 /*
2837 2835 * The shared memory mapping is always MAP_SHARED, SWAP is only
2838 2836 * reserved for DISM
2839 2837 */
2840 2838 return (MAP_SHARED |
2841 2839 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2842 2840 }
2843 2841
2844 2842 /*ARGSUSED*/
2845 2843 int
2846 2844 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2847 2845 {
2848 2846 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2849 2847 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2850 2848
2851 2849 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2852 2850
2853 2851 *vpp = sptd->spt_vp;
2854 2852 return (0);
2855 2853 }
2856 2854
2857 2855 /*
2858 2856 * We need to wait for pending IO to complete to a DISM segment in order for
2859 2857 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2860 2858 * than enough time to wait.
2861 2859 */
2862 2860 static clock_t spt_pcache_wait = 120;
2863 2861
2864 2862 /*ARGSUSED*/
2865 2863 static int
2866 2864 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2867 2865 {
2868 2866 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2869 2867 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2870 2868 struct anon_map *amp;
2871 2869 pgcnt_t pg_idx;
2872 2870 ushort_t gen;
2873 2871 clock_t end_lbolt;
2874 2872 int writer;
2875 2873 page_t **ppa;
2876 2874
2877 2875 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2878 2876
2879 2877 if (behav == MADV_FREE) {
2880 2878 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2881 2879 return (0);
2882 2880
2883 2881 amp = sptd->spt_amp;
2884 2882 pg_idx = seg_page(seg, addr);
2885 2883
2886 2884 mutex_enter(&sptd->spt_lock);
2887 2885 if ((ppa = sptd->spt_ppa) == NULL) {
2888 2886 mutex_exit(&sptd->spt_lock);
2889 2887 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2890 2888 anon_disclaim(amp, pg_idx, len);
2891 2889 ANON_LOCK_EXIT(&amp->a_rwlock);
2892 2890 return (0);
2893 2891 }
2894 2892
2895 2893 sptd->spt_flags |= DISM_PPA_CHANGED;
2896 2894 gen = sptd->spt_gen;
2897 2895
2898 2896 mutex_exit(&sptd->spt_lock);
2899 2897
2900 2898 /*
2901 2899 * Purge all DISM cached pages
2902 2900 */
2903 2901 seg_ppurge_wiredpp(ppa);
2904 2902
2905 2903 /*
2906 2904 * Drop the AS_LOCK so that other threads can grab it
2907 2905 * in the as_pageunlock path and hopefully get the segment
2908 2906 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2909 2907 * to keep this segment resident.
2910 2908 */
2911 2909 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2912 2910 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2913 2911 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2914 2912
2915 2913 mutex_enter(&sptd->spt_lock);
2916 2914
2917 2915 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2918 2916
2919 2917 /*
2920 2918 * Try to wait for pages to get kicked out of the seg_pcache.
2921 2919 */
2922 2920 while (sptd->spt_gen == gen &&
2923 2921 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2924 2922 ddi_get_lbolt() < end_lbolt) {
2925 2923 if (!cv_timedwait_sig(&sptd->spt_cv,
2926 2924 &sptd->spt_lock, end_lbolt)) {
2927 2925 break;
2928 2926 }
2929 2927 }
2930 2928
2931 2929 mutex_exit(&sptd->spt_lock);
2932 2930
2933 2931 /* Regrab the AS_LOCK and release our hold on the segment */
2934 2932 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2935 2933 writer ? RW_WRITER : RW_READER);
2936 2934 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2937 2935 if (shmd->shm_softlockcnt <= 0) {
2938 2936 if (AS_ISUNMAPWAIT(seg->s_as)) {
2939 2937 mutex_enter(&seg->s_as->a_contents);
2940 2938 if (AS_ISUNMAPWAIT(seg->s_as)) {
2941 2939 AS_CLRUNMAPWAIT(seg->s_as);
2942 2940 cv_broadcast(&seg->s_as->a_cv);
2943 2941 }
2944 2942 mutex_exit(&seg->s_as->a_contents);
2945 2943 }
2946 2944 }
2947 2945
2948 2946 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2949 2947 anon_disclaim(amp, pg_idx, len);
2950 2948 ANON_LOCK_EXIT(&amp->a_rwlock);
2951 2949 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2952 2950 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2953 2951 int already_set;
2954 2952 ulong_t anon_index;
2955 2953 lgrp_mem_policy_t policy;
2956 2954 caddr_t shm_addr;
2957 2955 size_t share_size;
2958 2956 size_t size;
2959 2957 struct seg *sptseg = shmd->shm_sptseg;
2960 2958 caddr_t sptseg_addr;
2961 2959
2962 2960 /*
2963 2961 * Align address and length to page size of underlying segment
2964 2962 */
2965 2963 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2966 2964 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2967 2965 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2968 2966 share_size);
2969 2967
2970 2968 amp = shmd->shm_amp;
2971 2969 anon_index = seg_page(seg, shm_addr);
2972 2970
2973 2971 /*
2974 2972 * And now we may have to adjust size downward if we have
2975 2973 * exceeded the realsize of the segment or initial anon
2976 2974 * allocations.
2977 2975 */
2978 2976 sptseg_addr = sptseg->s_base + ptob(anon_index);
2979 2977 if ((sptseg_addr + size) >
2980 2978 (sptseg->s_base + sptd->spt_realsize))
2981 2979 size = (sptseg->s_base + sptd->spt_realsize) -
2982 2980 sptseg_addr;
2983 2981
2984 2982 /*
2985 2983 * Set memory allocation policy for this segment
2986 2984 */
2987 2985 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2988 2986 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2989 2987 NULL, 0, len);
2990 2988
2991 2989 /*
2992 2990 * If random memory allocation policy set already,
2993 2991 * don't bother reapplying it.
2994 2992 */
2995 2993 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2996 2994 return (0);
2997 2995
2998 2996 /*
2999 2997 * Mark any existing pages in the given range for
3000 2998 * migration, flushing the I/O page cache, and using
3001 2999 * underlying segment to calculate anon index and get
3002 3000 * anonmap and vnode pointer from
3003 3001 */
3004 3002 if (shmd->shm_softlockcnt > 0)
3005 3003 segspt_purge(seg);
3006 3004
3007 3005 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
3008 3006 }
3009 3007
3010 3008 return (0);
3011 3009 }
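
In the MADV_FREE path above the segment bumps shm_softlockcnt, drops the address-space lock, and then waits on spt_cv for up to spt_pcache_wait seconds, rechecking spt_gen and DISM_PPA_CHANGED after every wakeup, so the pcache purge is given a bounded amount of time to drain. A generic stand-alone version of such a wait-for-condition-or-deadline loop follows, using pthreads; the predicate purge_done and the one-second budget in main() are illustrative only.

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static bool purge_done;                 /* set by another thread */

    /* Wait until purge_done is set or `budget' seconds have elapsed. */
    static void
    wait_for_purge(time_t budget)
    {
            struct timespec deadline;

            (void) clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += budget;

            (void) pthread_mutex_lock(&lock);
            while (!purge_done) {
                    /* returns ETIMEDOUT once the deadline passes */
                    if (pthread_cond_timedwait(&cv, &lock, &deadline) != 0)
                            break;
            }
            (void) pthread_mutex_unlock(&lock);
    }

    int
    main(void)
    {
            wait_for_purge(1);      /* gives up after at most about a second */
            return (0);
    }
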
3012 3010
3013 3011 /*ARGSUSED*/
3014 3012 void
3015 3013 segspt_shmdump(struct seg *seg)
3016 3014 {
3017 3015 /* no-op for ISM segment */
3018 -}
3019 -
3020 -/*ARGSUSED*/
3021 -static faultcode_t
3022 -segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
3023 -{
3024 - return (ENOTSUP);
3025 3016 }
3026 3017
3027 3018 /*
3028 3019 * get a memory ID for an addr in a given segment
3029 3020 */
3030 3021 static int
3031 3022 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3032 3023 {
3033 3024 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3034 3025 struct anon *ap;
3035 3026 size_t anon_index;
3036 3027 struct anon_map *amp = shmd->shm_amp;
3037 3028 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3038 3029 struct seg *sptseg = shmd->shm_sptseg;
3039 3030 anon_sync_obj_t cookie;
3040 3031
3041 3032 anon_index = seg_page(seg, addr);
3042 3033
3043 3034 if (addr > (seg->s_base + sptd->spt_realsize)) {
3044 3035 return (EFAULT);
3045 3036 }
3046 3037
3047 3038 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3048 3039 anon_array_enter(amp, anon_index, &cookie);
3049 3040 ap = anon_get_ptr(amp->ahp, anon_index);
3050 3041 if (ap == NULL) {
3051 3042 struct page *pp;
3052 3043 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3053 3044
3054 3045 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3055 3046 if (pp == NULL) {
3056 3047 anon_array_exit(&cookie);
3057 3048 ANON_LOCK_EXIT(&amp->a_rwlock);
3058 3049 return (ENOMEM);
3059 3050 }
3060 3051 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3061 3052 page_unlock(pp);
3062 3053 }
3063 3054 anon_array_exit(&cookie);
3064 3055 ANON_LOCK_EXIT(&amp->a_rwlock);
3065 3056 memidp->val[0] = (uintptr_t)ap;
3066 3057 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3067 3058 return (0);
3068 3059 }
3069 3060
3070 3061 /*
3071 3062 * Get memory allocation policy info for specified address in given segment
3072 3063 */
3073 3064 static lgrp_mem_policy_info_t *
3074 3065 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3075 3066 {
3076 3067 struct anon_map *amp;
3077 3068 ulong_t anon_index;
3078 3069 lgrp_mem_policy_info_t *policy_info;
3079 3070 struct shm_data *shm_data;
3080 3071
3081 3072 ASSERT(seg != NULL);
3082 3073
3083 3074 /*
3084 3075 * Get anon_map from segshm
3085 3076 *
3086 3077 * Assume that no lock needs to be held on anon_map, since
3087 3078 * it should be protected by its reference count which must be
3088 3079 * nonzero for an existing segment
3089 3080 * Need to grab readers lock on policy tree though
3090 3081 */
3091 3082 shm_data = (struct shm_data *)seg->s_data;
3092 3083 if (shm_data == NULL)
3093 3084 return (NULL);
3094 3085 amp = shm_data->shm_amp;
3095 3086 ASSERT(amp->refcnt != 0);
3096 3087
3097 3088 /*
3098 3089 * Get policy info
3099 3090 *
3100 3091 * Assume starting anon index of 0
3101 3092 */
3102 3093 anon_index = seg_page(seg, addr);
3103 3094 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3104 3095
3105 3096 return (policy_info);
3106 3097 }