use C99 initializers in segment ops structures
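
For reviewer context, this change replaces the positional initializers of the two seg_ops tables below with C99 designated initializers. The following is a minimal, self-contained sketch of the two styles; demo_ops, demo_read and demo_write are made-up names used only for illustration and are not part of seg_spt.c or the seg_ops interface:

#include <stddef.h>

/* Hypothetical ops table, illustration only. */
struct demo_ops {
	int	(*read)(void *, size_t);
	int	(*write)(const void *, size_t);
	void	(*close)(void);
};

static int
demo_read(void *buf, size_t len)
{
	(void) buf; (void) len;
	return (0);
}

static int
demo_write(const void *buf, size_t len)
{
	(void) buf; (void) len;
	return (0);
}

/* Old style: positional; each slot must stay in declaration order. */
static struct demo_ops demo_positional = {
	demo_read,
	demo_write,
	NULL,			/* close */
};

/*
 * New style: C99 designated initializers; members are bound by name, and
 * any member left out (close here) is implicitly zero-initialized.
 */
static struct demo_ops demo_designated = {
	.read = demo_read,
	.write = demo_write,
};

Binding members by name means a later reordering of, or addition to, struct seg_ops cannot silently shift the function pointers, which is the usual motivation for this kind of conversion.
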
--- old/usr/src/uts/common/vm/seg_spt.c
+++ new/usr/src/uts/common/vm/seg_spt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 #include <sys/param.h>
26 26 #include <sys/user.h>
27 27 #include <sys/mman.h>
28 28 #include <sys/kmem.h>
29 29 #include <sys/sysmacros.h>
30 30 #include <sys/cmn_err.h>
31 31 #include <sys/systm.h>
32 32 #include <sys/tuneable.h>
33 33 #include <vm/hat.h>
34 34 #include <vm/seg.h>
35 35 #include <vm/as.h>
36 36 #include <vm/anon.h>
37 37 #include <vm/page.h>
38 38 #include <sys/buf.h>
39 39 #include <sys/swap.h>
40 40 #include <sys/atomic.h>
41 41 #include <vm/seg_spt.h>
42 42 #include <sys/debug.h>
43 43 #include <sys/vtrace.h>
44 44 #include <sys/shm.h>
45 45 #include <sys/shm_impl.h>
46 46 #include <sys/lgrp.h>
47 47 #include <sys/vmsystm.h>
48 48 #include <sys/policy.h>
49 49 #include <sys/project.h>
50 50 #include <sys/tnf_probe.h>
51 51 #include <sys/zone.h>
52 52
53 53 #define SEGSPTADDR (caddr_t)0x0
54 54
55 55 /*
56 56 * # pages used for spt
57 57 */
58 58 size_t spt_used;
59 59
60 60 /*
61 61 * segspt_minfree is the memory left for system after ISM
62 62 * locked its pages; it is set up to 5% of availrmem in
63 63 * sptcreate when ISM is created. ISM should not use more
64 64 * than ~90% of availrmem; if it does, then the performance
65 65 * of the system may decrease. Machines with large memories may
66 66 * be able to use up more memory for ISM so we set the default
67 67 * segspt_minfree to 5% (which gives ISM max 95% of availrmem).
68 68 * If somebody wants even more memory for ISM (risking hanging
69 69 * the system) they can patch the segspt_minfree to smaller number.
70 70 */
71 71 pgcnt_t segspt_minfree = 0;
72 72
73 73 static int segspt_create(struct seg *seg, caddr_t argsp);
74 74 static int segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize);
75 75 static void segspt_free(struct seg *seg);
76 76 static void segspt_free_pages(struct seg *seg, caddr_t addr, size_t len);
77 77 static lgrp_mem_policy_info_t *segspt_getpolicy(struct seg *seg, caddr_t addr);
78 78
[ 78 lines elided ]
79 79 static void
80 80 segspt_badop()
81 81 {
82 82 panic("segspt_badop called");
83 83 /*NOTREACHED*/
84 84 }
85 85
86 86 #define SEGSPT_BADOP(t) (t(*)())segspt_badop
87 87
88 88 struct seg_ops segspt_ops = {
89 - SEGSPT_BADOP(int), /* dup */
90 - segspt_unmap,
91 - segspt_free,
92 - SEGSPT_BADOP(int), /* fault */
93 - SEGSPT_BADOP(faultcode_t), /* faulta */
94 - SEGSPT_BADOP(int), /* setprot */
95 - SEGSPT_BADOP(int), /* checkprot */
96 - SEGSPT_BADOP(int), /* kluster */
97 - SEGSPT_BADOP(int), /* sync */
98 - SEGSPT_BADOP(size_t), /* incore */
99 - SEGSPT_BADOP(int), /* lockop */
100 - SEGSPT_BADOP(int), /* getprot */
101 - SEGSPT_BADOP(u_offset_t), /* getoffset */
102 - SEGSPT_BADOP(int), /* gettype */
103 - SEGSPT_BADOP(int), /* getvp */
104 - SEGSPT_BADOP(int), /* advise */
105 - SEGSPT_BADOP(void), /* dump */
106 - SEGSPT_BADOP(int), /* pagelock */
107 - SEGSPT_BADOP(int), /* setpgsz */
108 - SEGSPT_BADOP(int), /* getmemid */
109 - segspt_getpolicy, /* getpolicy */
110 - SEGSPT_BADOP(int), /* capable */
111 - seg_inherit_notsup /* inherit */
89 + .dup = SEGSPT_BADOP(int),
90 + .unmap = segspt_unmap,
91 + .free = segspt_free,
92 + .fault = SEGSPT_BADOP(int),
93 + .faulta = SEGSPT_BADOP(faultcode_t),
94 + .setprot = SEGSPT_BADOP(int),
95 + .checkprot = SEGSPT_BADOP(int),
96 + .kluster = SEGSPT_BADOP(int),
97 + .sync = SEGSPT_BADOP(int),
98 + .incore = SEGSPT_BADOP(size_t),
99 + .lockop = SEGSPT_BADOP(int),
100 + .getprot = SEGSPT_BADOP(int),
101 + .getoffset = SEGSPT_BADOP(u_offset_t),
102 + .gettype = SEGSPT_BADOP(int),
103 + .getvp = SEGSPT_BADOP(int),
104 + .advise = SEGSPT_BADOP(int),
105 + .dump = SEGSPT_BADOP(void),
106 + .pagelock = SEGSPT_BADOP(int),
107 + .setpagesize = SEGSPT_BADOP(int),
108 + .getmemid = SEGSPT_BADOP(int),
109 + .getpolicy = segspt_getpolicy,
110 + .capable = SEGSPT_BADOP(int),
111 + .inherit = seg_inherit_notsup,
112 112 };
113 113
114 114 static int segspt_shmdup(struct seg *seg, struct seg *newseg);
115 115 static int segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize);
116 116 static void segspt_shmfree(struct seg *seg);
117 117 static faultcode_t segspt_shmfault(struct hat *hat, struct seg *seg,
118 118 caddr_t addr, size_t len, enum fault_type type, enum seg_rw rw);
119 119 static faultcode_t segspt_shmfaulta(struct seg *seg, caddr_t addr);
120 120 static int segspt_shmsetprot(register struct seg *seg, register caddr_t addr,
121 121 register size_t len, register uint_t prot);
122 122 static int segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size,
123 123 uint_t prot);
124 124 static int segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta);
125 125 static size_t segspt_shmincore(struct seg *seg, caddr_t addr, size_t len,
126 126 register char *vec);
127 127 static int segspt_shmsync(struct seg *seg, register caddr_t addr, size_t len,
128 128 int attr, uint_t flags);
129 129 static int segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
130 130 int attr, int op, ulong_t *lockmap, size_t pos);
131 131 static int segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len,
132 132 uint_t *protv);
133 133 static u_offset_t segspt_shmgetoffset(struct seg *seg, caddr_t addr);
134 134 static int segspt_shmgettype(struct seg *seg, caddr_t addr);
135 135 static int segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
136 136 static int segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len,
[ 15 lines elided ]
137 137 uint_t behav);
138 138 static void segspt_shmdump(struct seg *seg);
139 139 static int segspt_shmpagelock(struct seg *, caddr_t, size_t,
140 140 struct page ***, enum lock_type, enum seg_rw);
141 141 static int segspt_shmsetpgsz(struct seg *, caddr_t, size_t, uint_t);
142 142 static int segspt_shmgetmemid(struct seg *, caddr_t, memid_t *);
143 143 static lgrp_mem_policy_info_t *segspt_shmgetpolicy(struct seg *, caddr_t);
144 144 static int segspt_shmcapable(struct seg *, segcapability_t);
145 145
146 146 struct seg_ops segspt_shmops = {
147 - segspt_shmdup,
148 - segspt_shmunmap,
149 - segspt_shmfree,
150 - segspt_shmfault,
151 - segspt_shmfaulta,
152 - segspt_shmsetprot,
153 - segspt_shmcheckprot,
154 - segspt_shmkluster,
155 - segspt_shmsync,
156 - segspt_shmincore,
157 - segspt_shmlockop,
158 - segspt_shmgetprot,
159 - segspt_shmgetoffset,
160 - segspt_shmgettype,
161 - segspt_shmgetvp,
162 - segspt_shmadvise, /* advise */
163 - segspt_shmdump,
164 - segspt_shmpagelock,
165 - segspt_shmsetpgsz,
166 - segspt_shmgetmemid,
167 - segspt_shmgetpolicy,
168 - segspt_shmcapable,
169 - seg_inherit_notsup
147 + .dup = segspt_shmdup,
148 + .unmap = segspt_shmunmap,
149 + .free = segspt_shmfree,
150 + .fault = segspt_shmfault,
151 + .faulta = segspt_shmfaulta,
152 + .setprot = segspt_shmsetprot,
153 + .checkprot = segspt_shmcheckprot,
154 + .kluster = segspt_shmkluster,
155 + .sync = segspt_shmsync,
156 + .incore = segspt_shmincore,
157 + .lockop = segspt_shmlockop,
158 + .getprot = segspt_shmgetprot,
159 + .getoffset = segspt_shmgetoffset,
160 + .gettype = segspt_shmgettype,
161 + .getvp = segspt_shmgetvp,
162 + .advise = segspt_shmadvise,
163 + .dump = segspt_shmdump,
164 + .pagelock = segspt_shmpagelock,
165 + .setpagesize = segspt_shmsetpgsz,
166 + .getmemid = segspt_shmgetmemid,
167 + .getpolicy = segspt_shmgetpolicy,
168 + .capable = segspt_shmcapable,
169 + .inherit = seg_inherit_notsup,
170 170 };
171 171
172 172 static void segspt_purge(struct seg *seg);
173 173 static int segspt_reclaim(void *, caddr_t, size_t, struct page **,
174 174 enum seg_rw, int);
175 175 static int spt_anon_getpages(struct seg *seg, caddr_t addr, size_t len,
176 176 page_t **ppa);
177 177
178 178
179 179
180 180 /*ARGSUSED*/
181 181 int
182 182 sptcreate(size_t size, struct seg **sptseg, struct anon_map *amp,
183 183 uint_t prot, uint_t flags, uint_t share_szc)
184 184 {
185 185 int err;
186 186 struct as *newas;
187 187 struct segspt_crargs sptcargs;
188 188
189 189 #ifdef DEBUG
190 190 TNF_PROBE_1(sptcreate, "spt", /* CSTYLED */,
191 191 tnf_ulong, size, size );
192 192 #endif
193 193 if (segspt_minfree == 0) /* leave min 5% of availrmem */
194 194 segspt_minfree = availrmem/20; /* for the system */
195 195
196 196 if (!hat_supported(HAT_SHARED_PT, (void *)0))
197 197 return (EINVAL);
198 198
199 199 /*
200 200 * get a new as for this shared memory segment
201 201 */
202 202 newas = as_alloc();
203 203 newas->a_proc = NULL;
204 204 sptcargs.amp = amp;
205 205 sptcargs.prot = prot;
206 206 sptcargs.flags = flags;
207 207 sptcargs.szc = share_szc;
208 208 /*
209 209 * create a shared page table (spt) segment
210 210 */
211 211
212 212 if (err = as_map(newas, SEGSPTADDR, size, segspt_create, &sptcargs)) {
213 213 as_free(newas);
214 214 return (err);
215 215 }
216 216 *sptseg = sptcargs.seg_spt;
217 217 return (0);
218 218 }
219 219
220 220 void
221 221 sptdestroy(struct as *as, struct anon_map *amp)
222 222 {
223 223
224 224 #ifdef DEBUG
225 225 TNF_PROBE_0(sptdestroy, "spt", /* CSTYLED */);
226 226 #endif
227 227 (void) as_unmap(as, SEGSPTADDR, amp->size);
228 228 as_free(as);
229 229 }
230 230
231 231 /*
232 232 * called from seg_free().
233 233 * free (i.e., unlock, unmap, return to free list)
234 234 * all the pages in the given seg.
235 235 */
236 236 void
237 237 segspt_free(struct seg *seg)
238 238 {
239 239 struct spt_data *sptd = (struct spt_data *)seg->s_data;
240 240
241 241 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
242 242
243 243 if (sptd != NULL) {
244 244 if (sptd->spt_realsize)
245 245 segspt_free_pages(seg, seg->s_base, sptd->spt_realsize);
246 246
247 247 if (sptd->spt_ppa_lckcnt)
248 248 kmem_free(sptd->spt_ppa_lckcnt,
249 249 sizeof (*sptd->spt_ppa_lckcnt)
250 250 * btopr(sptd->spt_amp->size));
251 251 kmem_free(sptd->spt_vp, sizeof (*sptd->spt_vp));
252 252 cv_destroy(&sptd->spt_cv);
253 253 mutex_destroy(&sptd->spt_lock);
254 254 kmem_free(sptd, sizeof (*sptd));
255 255 }
256 256 }
257 257
258 258 /*ARGSUSED*/
259 259 static int
260 260 segspt_shmsync(struct seg *seg, caddr_t addr, size_t len, int attr,
261 261 uint_t flags)
262 262 {
263 263 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
264 264
265 265 return (0);
266 266 }
267 267
268 268 /*ARGSUSED*/
269 269 static size_t
270 270 segspt_shmincore(struct seg *seg, caddr_t addr, size_t len, char *vec)
271 271 {
272 272 caddr_t eo_seg;
273 273 pgcnt_t npages;
274 274 struct shm_data *shmd = (struct shm_data *)seg->s_data;
275 275 struct seg *sptseg;
276 276 struct spt_data *sptd;
277 277
278 278 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
279 279 #ifdef lint
280 280 seg = seg;
281 281 #endif
282 282 sptseg = shmd->shm_sptseg;
283 283 sptd = sptseg->s_data;
284 284
285 285 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
286 286 eo_seg = addr + len;
287 287 while (addr < eo_seg) {
288 288 /* page exists, and it's locked. */
289 289 *vec++ = SEG_PAGE_INCORE | SEG_PAGE_LOCKED |
290 290 SEG_PAGE_ANON;
291 291 addr += PAGESIZE;
292 292 }
293 293 return (len);
294 294 } else {
295 295 struct anon_map *amp = shmd->shm_amp;
296 296 struct anon *ap;
297 297 page_t *pp;
298 298 pgcnt_t anon_index;
299 299 struct vnode *vp;
300 300 u_offset_t off;
301 301 ulong_t i;
302 302 int ret;
303 303 anon_sync_obj_t cookie;
304 304
305 305 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
306 306 anon_index = seg_page(seg, addr);
307 307 npages = btopr(len);
308 308 if (anon_index + npages > btopr(shmd->shm_amp->size)) {
309 309 return (EINVAL);
310 310 }
311 311 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
312 312 for (i = 0; i < npages; i++, anon_index++) {
313 313 ret = 0;
314 314 anon_array_enter(amp, anon_index, &cookie);
315 315 ap = anon_get_ptr(amp->ahp, anon_index);
316 316 if (ap != NULL) {
317 317 swap_xlate(ap, &vp, &off);
318 318 anon_array_exit(&cookie);
319 319 pp = page_lookup_nowait(vp, off, SE_SHARED);
320 320 if (pp != NULL) {
321 321 ret |= SEG_PAGE_INCORE | SEG_PAGE_ANON;
322 322 page_unlock(pp);
323 323 }
324 324 } else {
325 325 anon_array_exit(&cookie);
326 326 }
327 327 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
328 328 ret |= SEG_PAGE_LOCKED;
329 329 }
330 330 *vec++ = (char)ret;
331 331 }
332 332 ANON_LOCK_EXIT(&amp->a_rwlock);
333 333 return (len);
334 334 }
335 335 }
336 336
337 337 static int
338 338 segspt_unmap(struct seg *seg, caddr_t raddr, size_t ssize)
339 339 {
340 340 size_t share_size;
341 341
342 342 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
343 343
344 344 /*
345 345 * seg.s_size may have been rounded up to the largest page size
346 346 * in shmat().
347 347 * XXX This should be cleaned up. sptdestroy should take a length
348 348 * argument which should be the same as sptcreate. Then
349 349 * this rounding would not be needed (or is done in shm.c)
350 350 * Only the check for full segment will be needed.
351 351 *
352 352 * XXX -- shouldn't raddr == 0 always? These tests don't seem
353 353 * to be useful at all.
354 354 */
355 355 share_size = page_get_pagesize(seg->s_szc);
356 356 ssize = P2ROUNDUP(ssize, share_size);
357 357
358 358 if (raddr == seg->s_base && ssize == seg->s_size) {
359 359 seg_free(seg);
360 360 return (0);
361 361 } else
362 362 return (EINVAL);
363 363 }
364 364
365 365 int
366 366 segspt_create(struct seg *seg, caddr_t argsp)
367 367 {
368 368 int err;
369 369 caddr_t addr = seg->s_base;
370 370 struct spt_data *sptd;
371 371 struct segspt_crargs *sptcargs = (struct segspt_crargs *)argsp;
372 372 struct anon_map *amp = sptcargs->amp;
373 373 struct kshmid *sp = amp->a_sp;
374 374 struct cred *cred = CRED();
375 375 ulong_t i, j, anon_index = 0;
376 376 pgcnt_t npages = btopr(amp->size);
377 377 struct vnode *vp;
378 378 page_t **ppa;
379 379 uint_t hat_flags;
380 380 size_t pgsz;
381 381 pgcnt_t pgcnt;
382 382 caddr_t a;
383 383 pgcnt_t pidx;
384 384 size_t sz;
385 385 proc_t *procp = curproc;
386 386 rctl_qty_t lockedbytes = 0;
387 387 kproject_t *proj;
388 388
389 389 /*
390 390 * We are holding the a_lock on the underlying dummy as,
391 391 * so we can make calls to the HAT layer.
392 392 */
393 393 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
394 394 ASSERT(sp != NULL);
395 395
396 396 #ifdef DEBUG
397 397 TNF_PROBE_2(segspt_create, "spt", /* CSTYLED */,
398 398 tnf_opaque, addr, addr, tnf_ulong, len, seg->s_size);
399 399 #endif
400 400 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
401 401 if (err = anon_swap_adjust(npages))
402 402 return (err);
403 403 }
404 404 err = ENOMEM;
405 405
406 406 if ((sptd = kmem_zalloc(sizeof (*sptd), KM_NOSLEEP)) == NULL)
407 407 goto out1;
408 408
409 409 if ((sptcargs->flags & SHM_PAGEABLE) == 0) {
410 410 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * npages),
411 411 KM_NOSLEEP)) == NULL)
412 412 goto out2;
413 413 }
414 414
415 415 mutex_init(&sptd->spt_lock, NULL, MUTEX_DEFAULT, NULL);
416 416
417 417 if ((vp = kmem_zalloc(sizeof (*vp), KM_NOSLEEP)) == NULL)
418 418 goto out3;
419 419
420 420 seg->s_ops = &segspt_ops;
421 421 sptd->spt_vp = vp;
422 422 sptd->spt_amp = amp;
423 423 sptd->spt_prot = sptcargs->prot;
424 424 sptd->spt_flags = sptcargs->flags;
425 425 seg->s_data = (caddr_t)sptd;
426 426 sptd->spt_ppa = NULL;
427 427 sptd->spt_ppa_lckcnt = NULL;
428 428 seg->s_szc = sptcargs->szc;
429 429 cv_init(&sptd->spt_cv, NULL, CV_DEFAULT, NULL);
430 430 sptd->spt_gen = 0;
431 431
432 432 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
433 433 if (seg->s_szc > amp->a_szc) {
434 434 amp->a_szc = seg->s_szc;
435 435 }
436 436 ANON_LOCK_EXIT(&amp->a_rwlock);
437 437
438 438 /*
439 439 * Set policy to affect initial allocation of pages in
440 440 * anon_map_createpages()
441 441 */
442 442 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, amp, anon_index,
443 443 NULL, 0, ptob(npages));
444 444
445 445 if (sptcargs->flags & SHM_PAGEABLE) {
446 446 size_t share_sz;
447 447 pgcnt_t new_npgs, more_pgs;
448 448 struct anon_hdr *nahp;
449 449 zone_t *zone;
450 450
451 451 share_sz = page_get_pagesize(seg->s_szc);
452 452 if (!IS_P2ALIGNED(amp->size, share_sz)) {
453 453 /*
454 454 * We are rounding up the size of the anon array
455 455 * on 4 M boundary because we always create 4 M
456 456 * of page(s) when locking, faulting pages and we
457 457 * don't have to check for all corner cases e.g.
458 458 * if there is enough space to allocate 4 M
459 459 * page.
460 460 */
461 461 new_npgs = btop(P2ROUNDUP(amp->size, share_sz));
462 462 more_pgs = new_npgs - npages;
463 463
464 464 /*
465 465 * The zone will never be NULL, as a fully created
466 466 * shm always has an owning zone.
467 467 */
468 468 zone = sp->shm_perm.ipc_zone_ref.zref_zone;
469 469 ASSERT(zone != NULL);
470 470 if (anon_resv_zone(ptob(more_pgs), zone) == 0) {
471 471 err = ENOMEM;
472 472 goto out4;
473 473 }
474 474
475 475 nahp = anon_create(new_npgs, ANON_SLEEP);
476 476 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
477 477 (void) anon_copy_ptr(amp->ahp, 0, nahp, 0, npages,
478 478 ANON_SLEEP);
479 479 anon_release(amp->ahp, npages);
480 480 amp->ahp = nahp;
481 481 ASSERT(amp->swresv == ptob(npages));
482 482 amp->swresv = amp->size = ptob(new_npgs);
483 483 ANON_LOCK_EXIT(&amp->a_rwlock);
484 484 npages = new_npgs;
485 485 }
486 486
487 487 sptd->spt_ppa_lckcnt = kmem_zalloc(npages *
488 488 sizeof (*sptd->spt_ppa_lckcnt), KM_SLEEP);
489 489 sptd->spt_pcachecnt = 0;
490 490 sptd->spt_realsize = ptob(npages);
491 491 sptcargs->seg_spt = seg;
492 492 return (0);
493 493 }
494 494
495 495 /*
496 496 * get array of pages for each anon slot in amp
497 497 */
498 498 if ((err = anon_map_createpages(amp, anon_index, ptob(npages), ppa,
499 499 seg, addr, S_CREATE, cred)) != 0)
500 500 goto out4;
501 501
502 502 mutex_enter(&sp->shm_mlock);
503 503
504 504 /* May be partially locked, so, count bytes to charge for locking */
505 505 for (i = 0; i < npages; i++)
506 506 if (ppa[i]->p_lckcnt == 0)
507 507 lockedbytes += PAGESIZE;
508 508
509 509 proj = sp->shm_perm.ipc_proj;
510 510
511 511 if (lockedbytes > 0) {
512 512 mutex_enter(&procp->p_lock);
513 513 if (rctl_incr_locked_mem(procp, proj, lockedbytes, 0)) {
514 514 mutex_exit(&procp->p_lock);
515 515 mutex_exit(&sp->shm_mlock);
516 516 for (i = 0; i < npages; i++)
517 517 page_unlock(ppa[i]);
518 518 err = ENOMEM;
519 519 goto out4;
520 520 }
521 521 mutex_exit(&procp->p_lock);
522 522 }
523 523
524 524 /*
525 525 * addr is initial address corresponding to the first page on ppa list
526 526 */
527 527 for (i = 0; i < npages; i++) {
528 528 /* attempt to lock all pages */
529 529 if (page_pp_lock(ppa[i], 0, 1) == 0) {
530 530 /*
531 531 * if unable to lock any page, unlock all
532 532 * of them and return error
533 533 */
534 534 for (j = 0; j < i; j++)
535 535 page_pp_unlock(ppa[j], 0, 1);
536 536 for (i = 0; i < npages; i++)
537 537 page_unlock(ppa[i]);
538 538 rctl_decr_locked_mem(NULL, proj, lockedbytes, 0);
539 539 mutex_exit(&sp->shm_mlock);
540 540 err = ENOMEM;
541 541 goto out4;
542 542 }
543 543 }
544 544 mutex_exit(&sp->shm_mlock);
545 545
546 546 /*
547 547 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
548 548 * for the entire life of the segment. For example platforms
549 549 * that do not support Dynamic Reconfiguration.
550 550 */
551 551 hat_flags = HAT_LOAD_SHARE;
552 552 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL))
553 553 hat_flags |= HAT_LOAD_LOCK;
554 554
555 555 /*
556 556 * Load translations one large page at a time
557 557 * to make sure we don't create mappings bigger than
558 558 * segment's size code in case underlying pages
559 559 * are shared with segvn's segment that uses bigger
560 560 * size code than we do.
561 561 */
562 562 pgsz = page_get_pagesize(seg->s_szc);
563 563 pgcnt = page_get_pagecnt(seg->s_szc);
564 564 for (a = addr, pidx = 0; pidx < npages; a += pgsz, pidx += pgcnt) {
565 565 sz = MIN(pgsz, ptob(npages - pidx));
566 566 hat_memload_array(seg->s_as->a_hat, a, sz,
567 567 &ppa[pidx], sptd->spt_prot, hat_flags);
568 568 }
569 569
570 570 /*
571 571 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
572 572 * we will leave the pages locked SE_SHARED for the life
573 573 * of the ISM segment. This will prevent any calls to
574 574 * hat_pageunload() on this ISM segment for those platforms.
575 575 */
576 576 if (!(hat_flags & HAT_LOAD_LOCK)) {
577 577 /*
578 578 * On platforms that support HAT_DYNAMIC_ISM_UNMAP,
579 579 * we no longer need to hold the SE_SHARED lock on the pages,
580 580 * since L_PAGELOCK and F_SOFTLOCK calls will grab the
581 581 * SE_SHARED lock on the pages as necessary.
582 582 */
583 583 for (i = 0; i < npages; i++)
584 584 page_unlock(ppa[i]);
585 585 }
586 586 sptd->spt_pcachecnt = 0;
587 587 kmem_free(ppa, ((sizeof (page_t *)) * npages));
588 588 sptd->spt_realsize = ptob(npages);
589 589 atomic_add_long(&spt_used, npages);
590 590 sptcargs->seg_spt = seg;
591 591 return (0);
592 592
593 593 out4:
594 594 seg->s_data = NULL;
595 595 kmem_free(vp, sizeof (*vp));
596 596 cv_destroy(&sptd->spt_cv);
597 597 out3:
598 598 mutex_destroy(&sptd->spt_lock);
599 599 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
600 600 kmem_free(ppa, (sizeof (*ppa) * npages));
601 601 out2:
602 602 kmem_free(sptd, sizeof (*sptd));
603 603 out1:
604 604 if ((sptcargs->flags & SHM_PAGEABLE) == 0)
605 605 anon_swap_restore(npages);
606 606 return (err);
607 607 }
608 608
609 609 /*ARGSUSED*/
610 610 void
611 611 segspt_free_pages(struct seg *seg, caddr_t addr, size_t len)
612 612 {
613 613 struct page *pp;
614 614 struct spt_data *sptd = (struct spt_data *)seg->s_data;
615 615 pgcnt_t npages;
616 616 ulong_t anon_idx;
617 617 struct anon_map *amp;
618 618 struct anon *ap;
619 619 struct vnode *vp;
620 620 u_offset_t off;
621 621 uint_t hat_flags;
622 622 int root = 0;
623 623 pgcnt_t pgs, curnpgs = 0;
624 624 page_t *rootpp;
625 625 rctl_qty_t unlocked_bytes = 0;
626 626 kproject_t *proj;
627 627 kshmid_t *sp;
628 628
629 629 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
630 630
631 631 len = P2ROUNDUP(len, PAGESIZE);
632 632
633 633 npages = btop(len);
634 634
635 635 hat_flags = HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP;
636 636 if ((hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) ||
637 637 (sptd->spt_flags & SHM_PAGEABLE)) {
638 638 hat_flags = HAT_UNLOAD_UNMAP;
639 639 }
640 640
641 641 hat_unload(seg->s_as->a_hat, addr, len, hat_flags);
642 642
643 643 amp = sptd->spt_amp;
644 644 if (sptd->spt_flags & SHM_PAGEABLE)
645 645 npages = btop(amp->size);
646 646
647 647 ASSERT(amp != NULL);
648 648
649 649 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
650 650 sp = amp->a_sp;
651 651 proj = sp->shm_perm.ipc_proj;
652 652 mutex_enter(&sp->shm_mlock);
653 653 }
654 654 for (anon_idx = 0; anon_idx < npages; anon_idx++) {
655 655 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
656 656 if ((ap = anon_get_ptr(amp->ahp, anon_idx)) == NULL) {
657 657 panic("segspt_free_pages: null app");
658 658 /*NOTREACHED*/
659 659 }
660 660 } else {
661 661 if ((ap = anon_get_next_ptr(amp->ahp, &anon_idx))
662 662 == NULL)
663 663 continue;
664 664 }
665 665 ASSERT(ANON_ISBUSY(anon_get_slot(amp->ahp, anon_idx)) == 0);
666 666 swap_xlate(ap, &vp, &off);
667 667
668 668 /*
669 669 * If this platform supports HAT_DYNAMIC_ISM_UNMAP,
670 670 * the pages won't be having SE_SHARED lock at this
671 671 * point.
672 672 *
673 673 * On platforms that do not support HAT_DYNAMIC_ISM_UNMAP,
674 674 * the pages are still held SE_SHARED locked from the
675 675 * original segspt_create()
676 676 *
677 677 * Our goal is to get SE_EXCL lock on each page, remove
678 678 * permanent lock on it and invalidate the page.
679 679 */
680 680 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
681 681 if (hat_flags == HAT_UNLOAD_UNMAP)
682 682 pp = page_lookup(vp, off, SE_EXCL);
683 683 else {
684 684 if ((pp = page_find(vp, off)) == NULL) {
685 685 panic("segspt_free_pages: "
686 686 "page not locked");
687 687 /*NOTREACHED*/
688 688 }
689 689 if (!page_tryupgrade(pp)) {
690 690 page_unlock(pp);
691 691 pp = page_lookup(vp, off, SE_EXCL);
692 692 }
693 693 }
694 694 if (pp == NULL) {
695 695 panic("segspt_free_pages: "
696 696 "page not in the system");
697 697 /*NOTREACHED*/
698 698 }
699 699 ASSERT(pp->p_lckcnt > 0);
700 700 page_pp_unlock(pp, 0, 1);
701 701 if (pp->p_lckcnt == 0)
702 702 unlocked_bytes += PAGESIZE;
703 703 } else {
704 704 if ((pp = page_lookup(vp, off, SE_EXCL)) == NULL)
705 705 continue;
706 706 }
707 707 /*
708 708 * It's logical to invalidate the pages here as in most cases
709 709 * these were created by segspt.
710 710 */
711 711 if (pp->p_szc != 0) {
712 712 if (root == 0) {
713 713 ASSERT(curnpgs == 0);
714 714 root = 1;
715 715 rootpp = pp;
716 716 pgs = curnpgs = page_get_pagecnt(pp->p_szc);
717 717 ASSERT(pgs > 1);
718 718 ASSERT(IS_P2ALIGNED(pgs, pgs));
719 719 ASSERT(!(page_pptonum(pp) & (pgs - 1)));
720 720 curnpgs--;
721 721 } else if ((page_pptonum(pp) & (pgs - 1)) == pgs - 1) {
722 722 ASSERT(curnpgs == 1);
723 723 ASSERT(page_pptonum(pp) ==
724 724 page_pptonum(rootpp) + (pgs - 1));
725 725 page_destroy_pages(rootpp);
726 726 root = 0;
727 727 curnpgs = 0;
728 728 } else {
729 729 ASSERT(curnpgs > 1);
730 730 ASSERT(page_pptonum(pp) ==
731 731 page_pptonum(rootpp) + (pgs - curnpgs));
732 732 curnpgs--;
733 733 }
734 734 } else {
735 735 if (root != 0 || curnpgs != 0) {
736 736 panic("segspt_free_pages: bad large page");
737 737 /*NOTREACHED*/
738 738 }
739 739 /*
740 740 * Before destroying the pages, we need to take care
741 741 * of the rctl locked memory accounting. For that
742 742 * we need to calculate the unlocked_bytes.
743 743 */
744 744 if (pp->p_lckcnt > 0)
745 745 unlocked_bytes += PAGESIZE;
746 746 /*LINTED: constant in conditional context */
747 747 VN_DISPOSE(pp, B_INVAL, 0, kcred);
748 748 }
749 749 }
750 750 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
751 751 if (unlocked_bytes > 0)
752 752 rctl_decr_locked_mem(NULL, proj, unlocked_bytes, 0);
753 753 mutex_exit(&sp->shm_mlock);
754 754 }
755 755 if (root != 0 || curnpgs != 0) {
756 756 panic("segspt_free_pages: bad large page");
757 757 /*NOTREACHED*/
758 758 }
759 759
760 760 /*
761 761 * mark that pages have been released
762 762 */
763 763 sptd->spt_realsize = 0;
764 764
765 765 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
766 766 atomic_add_long(&spt_used, -npages);
767 767 anon_swap_restore(npages);
768 768 }
769 769 }
770 770
771 771 /*
772 772 * Get memory allocation policy info for specified address in given segment
773 773 */
774 774 static lgrp_mem_policy_info_t *
775 775 segspt_getpolicy(struct seg *seg, caddr_t addr)
776 776 {
777 777 struct anon_map *amp;
778 778 ulong_t anon_index;
779 779 lgrp_mem_policy_info_t *policy_info;
780 780 struct spt_data *spt_data;
781 781
782 782 ASSERT(seg != NULL);
783 783
784 784 /*
785 785 * Get anon_map from segspt
786 786 *
787 787 * Assume that no lock needs to be held on anon_map, since
788 788 * it should be protected by its reference count which must be
789 789 * nonzero for an existing segment
790 790 * Need to grab readers lock on policy tree though
791 791 */
792 792 spt_data = (struct spt_data *)seg->s_data;
793 793 if (spt_data == NULL)
794 794 return (NULL);
795 795 amp = spt_data->spt_amp;
796 796 ASSERT(amp->refcnt != 0);
797 797
798 798 /*
799 799 * Get policy info
800 800 *
801 801 * Assume starting anon index of 0
802 802 */
803 803 anon_index = seg_page(seg, addr);
804 804 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
805 805
806 806 return (policy_info);
807 807 }
808 808
809 809 /*
810 810 * DISM only.
811 811 * Return locked pages over a given range.
812 812 *
813 813 * We will cache all DISM locked pages and save the pplist for the
814 814 * entire segment in the ppa field of the underlying DISM segment structure.
815 815 * Later, during a call to segspt_reclaim() we will use this ppa array
816 816 * to page_unlock() all of the pages and then we will free this ppa list.
817 817 */
818 818 /*ARGSUSED*/
819 819 static int
820 820 segspt_dismpagelock(struct seg *seg, caddr_t addr, size_t len,
821 821 struct page ***ppp, enum lock_type type, enum seg_rw rw)
822 822 {
823 823 struct shm_data *shmd = (struct shm_data *)seg->s_data;
824 824 struct seg *sptseg = shmd->shm_sptseg;
825 825 struct spt_data *sptd = sptseg->s_data;
826 826 pgcnt_t pg_idx, npages, tot_npages, npgs;
827 827 struct page **pplist, **pl, **ppa, *pp;
828 828 struct anon_map *amp;
829 829 spgcnt_t an_idx;
830 830 int ret = ENOTSUP;
831 831 uint_t pl_built = 0;
832 832 struct anon *ap;
833 833 struct vnode *vp;
834 834 u_offset_t off;
835 835 pgcnt_t claim_availrmem = 0;
836 836 uint_t szc;
837 837
838 838 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
839 839 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
840 840
841 841 /*
842 842 * We want to lock/unlock the entire ISM segment. Therefore,
843 843 * we will be using the underlying sptseg and its base address
844 844 * and length for the caching arguments.
845 845 */
846 846 ASSERT(sptseg);
847 847 ASSERT(sptd);
848 848
849 849 pg_idx = seg_page(seg, addr);
850 850 npages = btopr(len);
851 851
852 852 /*
853 853 * check if the request is larger than number of pages covered
854 854 * by amp
855 855 */
856 856 if (pg_idx + npages > btopr(sptd->spt_amp->size)) {
857 857 *ppp = NULL;
858 858 return (ENOTSUP);
859 859 }
860 860
861 861 if (type == L_PAGEUNLOCK) {
862 862 ASSERT(sptd->spt_ppa != NULL);
863 863
864 864 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
865 865 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
866 866
867 867 /*
868 868 * If someone is blocked while unmapping, we purge
869 869 * segment page cache and thus reclaim pplist synchronously
870 870 * without waiting for seg_pasync_thread. This speeds up
871 871 * unmapping in cases where munmap(2) is called, while
872 872 * raw async i/o is still in progress or where a thread
873 873 * exits on data fault in a multithreaded application.
874 874 */
875 875 if ((sptd->spt_flags & DISM_PPA_CHANGED) ||
876 876 (AS_ISUNMAPWAIT(seg->s_as) &&
877 877 shmd->shm_softlockcnt > 0)) {
878 878 segspt_purge(seg);
879 879 }
880 880 return (0);
881 881 }
882 882
883 883 /* The L_PAGELOCK case ... */
884 884
885 885 if (sptd->spt_flags & DISM_PPA_CHANGED) {
886 886 segspt_purge(seg);
887 887 /*
888 888 * for DISM the ppa needs to be rebuilt since the
889 889 * number of locked pages could be changed
890 890 */
891 891 *ppp = NULL;
892 892 return (ENOTSUP);
893 893 }
894 894
895 895 /*
896 896 * First try to find pages in segment page cache, without
897 897 * holding the segment lock.
898 898 */
899 899 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
900 900 S_WRITE, SEGP_FORCE_WIRED);
901 901 if (pplist != NULL) {
902 902 ASSERT(sptd->spt_ppa != NULL);
903 903 ASSERT(sptd->spt_ppa == pplist);
904 904 ppa = sptd->spt_ppa;
905 905 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
906 906 if (ppa[an_idx] == NULL) {
907 907 seg_pinactive(seg, NULL, seg->s_base,
908 908 sptd->spt_amp->size, ppa,
909 909 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
910 910 *ppp = NULL;
911 911 return (ENOTSUP);
912 912 }
913 913 if ((szc = ppa[an_idx]->p_szc) != 0) {
914 914 npgs = page_get_pagecnt(szc);
915 915 an_idx = P2ROUNDUP(an_idx + 1, npgs);
916 916 } else {
917 917 an_idx++;
918 918 }
919 919 }
920 920 /*
921 921 * Since we cache the entire DISM segment, we want to
922 922 * set ppp to point to the first slot that corresponds
923 923 * to the requested addr, i.e. pg_idx.
924 924 */
925 925 *ppp = &(sptd->spt_ppa[pg_idx]);
926 926 return (0);
927 927 }
928 928
929 929 mutex_enter(&sptd->spt_lock);
930 930 /*
931 931 * try to find pages in segment page cache with mutex
932 932 */
933 933 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
934 934 S_WRITE, SEGP_FORCE_WIRED);
935 935 if (pplist != NULL) {
936 936 ASSERT(sptd->spt_ppa != NULL);
937 937 ASSERT(sptd->spt_ppa == pplist);
938 938 ppa = sptd->spt_ppa;
939 939 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
940 940 if (ppa[an_idx] == NULL) {
941 941 mutex_exit(&sptd->spt_lock);
942 942 seg_pinactive(seg, NULL, seg->s_base,
943 943 sptd->spt_amp->size, ppa,
944 944 S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
945 945 *ppp = NULL;
946 946 return (ENOTSUP);
947 947 }
948 948 if ((szc = ppa[an_idx]->p_szc) != 0) {
949 949 npgs = page_get_pagecnt(szc);
950 950 an_idx = P2ROUNDUP(an_idx + 1, npgs);
951 951 } else {
952 952 an_idx++;
953 953 }
954 954 }
955 955 /*
956 956 * Since we cache the entire DISM segment, we want to
957 957 * set ppp to point to the first slot that corresponds
958 958 * to the requested addr, i.e. pg_idx.
959 959 */
960 960 mutex_exit(&sptd->spt_lock);
961 961 *ppp = &(sptd->spt_ppa[pg_idx]);
962 962 return (0);
963 963 }
964 964 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
965 965 SEGP_FORCE_WIRED) == SEGP_FAIL) {
966 966 mutex_exit(&sptd->spt_lock);
967 967 *ppp = NULL;
968 968 return (ENOTSUP);
969 969 }
970 970
971 971 /*
972 972 * No need to worry about protections because DISM pages are always rw.
973 973 */
974 974 pl = pplist = NULL;
975 975 amp = sptd->spt_amp;
976 976
977 977 /*
978 978 * Do we need to build the ppa array?
979 979 */
980 980 if (sptd->spt_ppa == NULL) {
981 981 pgcnt_t lpg_cnt = 0;
982 982
983 983 pl_built = 1;
984 984 tot_npages = btopr(sptd->spt_amp->size);
985 985
986 986 ASSERT(sptd->spt_pcachecnt == 0);
987 987 pplist = kmem_zalloc(sizeof (page_t *) * tot_npages, KM_SLEEP);
988 988 pl = pplist;
989 989
990 990 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
991 991 for (an_idx = 0; an_idx < tot_npages; ) {
992 992 ap = anon_get_ptr(amp->ahp, an_idx);
993 993 /*
994 994 * Cache only mlocked pages. For large pages
995 995 * if one (constituent) page is mlocked
996 996 * all pages for that large page
997 997 * are cached also. This is for quick
998 998 * lookups of ppa array;
999 999 */
1000 1000 if ((ap != NULL) && (lpg_cnt != 0 ||
1001 1001 (sptd->spt_ppa_lckcnt[an_idx] != 0))) {
1002 1002
1003 1003 swap_xlate(ap, &vp, &off);
1004 1004 pp = page_lookup(vp, off, SE_SHARED);
1005 1005 ASSERT(pp != NULL);
1006 1006 if (lpg_cnt == 0) {
1007 1007 lpg_cnt++;
1008 1008 /*
1009 1009 * For a small page, we are done --
1010 1010 * lpg_count is reset to 0 below.
1011 1011 *
1012 1012 * For a large page, we are guaranteed
1013 1013 * to find the anon structures of all
1014 1014 * constituent pages and a non-zero
1015 1015 * lpg_cnt ensures that we don't test
1016 1016 * for mlock for these. We are done
1017 1017 * when lpg_count reaches (npgs + 1).
1018 1018 * If we are not the first constituent
1019 1019 * page, restart at the first one.
1020 1020 */
1021 1021 npgs = page_get_pagecnt(pp->p_szc);
1022 1022 if (!IS_P2ALIGNED(an_idx, npgs)) {
1023 1023 an_idx = P2ALIGN(an_idx, npgs);
1024 1024 page_unlock(pp);
1025 1025 continue;
1026 1026 }
1027 1027 }
1028 1028 if (++lpg_cnt > npgs)
1029 1029 lpg_cnt = 0;
1030 1030
1031 1031 /*
1032 1032 * availrmem is decremented only
1033 1033 * for unlocked pages
1034 1034 */
1035 1035 if (sptd->spt_ppa_lckcnt[an_idx] == 0)
1036 1036 claim_availrmem++;
1037 1037 pplist[an_idx] = pp;
1038 1038 }
1039 1039 an_idx++;
1040 1040 }
1041 1041 ANON_LOCK_EXIT(&amp->a_rwlock);
1042 1042
1043 1043 if (claim_availrmem) {
1044 1044 mutex_enter(&freemem_lock);
1045 1045 if (availrmem < tune.t_minarmem + claim_availrmem) {
1046 1046 mutex_exit(&freemem_lock);
1047 1047 ret = ENOTSUP;
1048 1048 claim_availrmem = 0;
1049 1049 goto insert_fail;
1050 1050 } else {
1051 1051 availrmem -= claim_availrmem;
1052 1052 }
1053 1053 mutex_exit(&freemem_lock);
1054 1054 }
1055 1055
1056 1056 sptd->spt_ppa = pl;
1057 1057 } else {
1058 1058 /*
1059 1059 * We already have a valid ppa[].
1060 1060 */
1061 1061 pl = sptd->spt_ppa;
1062 1062 }
1063 1063
1064 1064 ASSERT(pl != NULL);
1065 1065
1066 1066 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1067 1067 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1068 1068 segspt_reclaim);
1069 1069 if (ret == SEGP_FAIL) {
1070 1070 /*
1071 1071 * seg_pinsert failed. We return
1072 1072 * ENOTSUP, so that the as_pagelock() code will
1073 1073 * then try the slower F_SOFTLOCK path.
1074 1074 */
1075 1075 if (pl_built) {
1076 1076 /*
1077 1077 * No one else has referenced the ppa[].
1078 1078 * We created it and we need to destroy it.
1079 1079 */
1080 1080 sptd->spt_ppa = NULL;
1081 1081 }
1082 1082 ret = ENOTSUP;
1083 1083 goto insert_fail;
1084 1084 }
1085 1085
1086 1086 /*
1087 1087 * In either case, we increment softlockcnt on the 'real' segment.
1088 1088 */
1089 1089 sptd->spt_pcachecnt++;
1090 1090 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1091 1091
1092 1092 ppa = sptd->spt_ppa;
1093 1093 for (an_idx = pg_idx; an_idx < pg_idx + npages; ) {
1094 1094 if (ppa[an_idx] == NULL) {
1095 1095 mutex_exit(&sptd->spt_lock);
1096 1096 seg_pinactive(seg, NULL, seg->s_base,
1097 1097 sptd->spt_amp->size,
1098 1098 pl, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1099 1099 *ppp = NULL;
1100 1100 return (ENOTSUP);
1101 1101 }
1102 1102 if ((szc = ppa[an_idx]->p_szc) != 0) {
1103 1103 npgs = page_get_pagecnt(szc);
1104 1104 an_idx = P2ROUNDUP(an_idx + 1, npgs);
1105 1105 } else {
1106 1106 an_idx++;
1107 1107 }
1108 1108 }
1109 1109 /*
1110 1110 * We can now drop the sptd->spt_lock since the ppa[]
1111 1111 * exists and we have incremented pcachecnt.
1112 1112 */
1113 1113 mutex_exit(&sptd->spt_lock);
1114 1114
1115 1115 /*
1116 1116 * Since we cache the entire segment, we want to
1117 1117 * set ppp to point to the first slot that corresponds
1118 1118 * to the requested addr, i.e. pg_idx.
1119 1119 */
1120 1120 *ppp = &(sptd->spt_ppa[pg_idx]);
1121 1121 return (0);
1122 1122
1123 1123 insert_fail:
1124 1124 /*
1125 1125 * We will only reach this code if we tried and failed.
1126 1126 *
1127 1127 * And we can drop the lock on the dummy seg, once we've failed
1128 1128 * to set up a new ppa[].
1129 1129 */
1130 1130 mutex_exit(&sptd->spt_lock);
1131 1131
1132 1132 if (pl_built) {
1133 1133 if (claim_availrmem) {
1134 1134 mutex_enter(&freemem_lock);
1135 1135 availrmem += claim_availrmem;
1136 1136 mutex_exit(&freemem_lock);
1137 1137 }
1138 1138
1139 1139 /*
1140 1140 * We created pl and we need to destroy it.
1141 1141 */
1142 1142 pplist = pl;
1143 1143 for (an_idx = 0; an_idx < tot_npages; an_idx++) {
1144 1144 if (pplist[an_idx] != NULL)
1145 1145 page_unlock(pplist[an_idx]);
1146 1146 }
1147 1147 kmem_free(pl, sizeof (page_t *) * tot_npages);
1148 1148 }
1149 1149
1150 1150 if (shmd->shm_softlockcnt <= 0) {
1151 1151 if (AS_ISUNMAPWAIT(seg->s_as)) {
1152 1152 mutex_enter(&seg->s_as->a_contents);
1153 1153 if (AS_ISUNMAPWAIT(seg->s_as)) {
1154 1154 AS_CLRUNMAPWAIT(seg->s_as);
1155 1155 cv_broadcast(&seg->s_as->a_cv);
1156 1156 }
1157 1157 mutex_exit(&seg->s_as->a_contents);
1158 1158 }
1159 1159 }
1160 1160 *ppp = NULL;
1161 1161 return (ret);
1162 1162 }
1163 1163
1164 1164
1165 1165
1166 1166 /*
1167 1167 * return locked pages over a given range.
1168 1168 *
1169 1169 * We will cache the entire ISM segment and save the pplist for the
1170 1170 * entire segment in the ppa field of the underlying ISM segment structure.
1171 1171 * Later, during a call to segspt_reclaim() we will use this ppa array
1172 1172 * to page_unlock() all of the pages and then we will free this ppa list.
1173 1173 */
1174 1174 /*ARGSUSED*/
1175 1175 static int
1176 1176 segspt_shmpagelock(struct seg *seg, caddr_t addr, size_t len,
1177 1177 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1178 1178 {
1179 1179 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1180 1180 struct seg *sptseg = shmd->shm_sptseg;
1181 1181 struct spt_data *sptd = sptseg->s_data;
1182 1182 pgcnt_t np, page_index, npages;
1183 1183 caddr_t a, spt_base;
1184 1184 struct page **pplist, **pl, *pp;
1185 1185 struct anon_map *amp;
1186 1186 ulong_t anon_index;
1187 1187 int ret = ENOTSUP;
1188 1188 uint_t pl_built = 0;
1189 1189 struct anon *ap;
1190 1190 struct vnode *vp;
1191 1191 u_offset_t off;
1192 1192
1193 1193 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1194 1194 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
1195 1195
1196 1196
1197 1197 /*
1198 1198 * We want to lock/unlock the entire ISM segment. Therefore,
1199 1199 * we will be using the underlying sptseg and its base address
1200 1200 * and length for the caching arguments.
1201 1201 */
1202 1202 ASSERT(sptseg);
1203 1203 ASSERT(sptd);
1204 1204
1205 1205 if (sptd->spt_flags & SHM_PAGEABLE) {
1206 1206 return (segspt_dismpagelock(seg, addr, len, ppp, type, rw));
1207 1207 }
1208 1208
1209 1209 page_index = seg_page(seg, addr);
1210 1210 npages = btopr(len);
1211 1211
1212 1212 /*
1213 1213 * check if the request is larger than number of pages covered
1214 1214 * by amp
1215 1215 */
1216 1216 if (page_index + npages > btopr(sptd->spt_amp->size)) {
1217 1217 *ppp = NULL;
1218 1218 return (ENOTSUP);
1219 1219 }
1220 1220
1221 1221 if (type == L_PAGEUNLOCK) {
1222 1222
1223 1223 ASSERT(sptd->spt_ppa != NULL);
1224 1224
1225 1225 seg_pinactive(seg, NULL, seg->s_base, sptd->spt_amp->size,
1226 1226 sptd->spt_ppa, S_WRITE, SEGP_FORCE_WIRED, segspt_reclaim);
1227 1227
1228 1228 /*
1229 1229 * If someone is blocked while unmapping, we purge
1230 1230 * segment page cache and thus reclaim pplist synchronously
1231 1231 * without waiting for seg_pasync_thread. This speeds up
1232 1232 * unmapping in cases where munmap(2) is called, while
1233 1233 * raw async i/o is still in progress or where a thread
1234 1234 * exits on data fault in a multithreaded application.
1235 1235 */
1236 1236 if (AS_ISUNMAPWAIT(seg->s_as) && (shmd->shm_softlockcnt > 0)) {
1237 1237 segspt_purge(seg);
1238 1238 }
1239 1239 return (0);
1240 1240 }
1241 1241
1242 1242 /* The L_PAGELOCK case... */
1243 1243
1244 1244 /*
1245 1245 * First try to find pages in segment page cache, without
1246 1246 * holding the segment lock.
1247 1247 */
1248 1248 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1249 1249 S_WRITE, SEGP_FORCE_WIRED);
1250 1250 if (pplist != NULL) {
1251 1251 ASSERT(sptd->spt_ppa == pplist);
1252 1252 ASSERT(sptd->spt_ppa[page_index]);
1253 1253 /*
1254 1254 * Since we cache the entire ISM segment, we want to
1255 1255 * set ppp to point to the first slot that corresponds
1256 1256 * to the requested addr, i.e. page_index.
1257 1257 */
1258 1258 *ppp = &(sptd->spt_ppa[page_index]);
1259 1259 return (0);
1260 1260 }
1261 1261
1262 1262 mutex_enter(&sptd->spt_lock);
1263 1263
1264 1264 /*
1265 1265 * try to find pages in segment page cache
1266 1266 */
1267 1267 pplist = seg_plookup(seg, NULL, seg->s_base, sptd->spt_amp->size,
1268 1268 S_WRITE, SEGP_FORCE_WIRED);
1269 1269 if (pplist != NULL) {
1270 1270 ASSERT(sptd->spt_ppa == pplist);
1271 1271 /*
1272 1272 * Since we cache the entire segment, we want to
1273 1273 * set ppp to point to the first slot that corresponds
1274 1274 * to the requested addr, i.e. page_index.
1275 1275 */
1276 1276 mutex_exit(&sptd->spt_lock);
1277 1277 *ppp = &(sptd->spt_ppa[page_index]);
1278 1278 return (0);
1279 1279 }
1280 1280
1281 1281 if (seg_pinsert_check(seg, NULL, seg->s_base, sptd->spt_amp->size,
1282 1282 SEGP_FORCE_WIRED) == SEGP_FAIL) {
1283 1283 mutex_exit(&sptd->spt_lock);
1284 1284 *ppp = NULL;
1285 1285 return (ENOTSUP);
1286 1286 }
1287 1287
1288 1288 /*
1289 1289 * No need to worry about protections because ISM pages
1290 1290 * are always rw.
1291 1291 */
1292 1292 pl = pplist = NULL;
1293 1293
1294 1294 /*
1295 1295 * Do we need to build the ppa array?
1296 1296 */
1297 1297 if (sptd->spt_ppa == NULL) {
1298 1298 ASSERT(sptd->spt_ppa == pplist);
1299 1299
1300 1300 spt_base = sptseg->s_base;
1301 1301 pl_built = 1;
1302 1302
1303 1303 /*
1304 1304 * availrmem is decremented once during anon_swap_adjust()
1305 1305 * and is incremented during the anon_unresv(), which is
1306 1306 * called from shm_rm_amp() when the segment is destroyed.
1307 1307 */
1308 1308 amp = sptd->spt_amp;
1309 1309 ASSERT(amp != NULL);
1310 1310
1311 1311 /* pcachecnt is protected by sptd->spt_lock */
1312 1312 ASSERT(sptd->spt_pcachecnt == 0);
1313 1313 pplist = kmem_zalloc(sizeof (page_t *)
1314 1314 * btopr(sptd->spt_amp->size), KM_SLEEP);
1315 1315 pl = pplist;
1316 1316
1317 1317 anon_index = seg_page(sptseg, spt_base);
1318 1318
1319 1319 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1320 1320 for (a = spt_base; a < (spt_base + sptd->spt_amp->size);
1321 1321 a += PAGESIZE, anon_index++, pplist++) {
1322 1322 ap = anon_get_ptr(amp->ahp, anon_index);
1323 1323 ASSERT(ap != NULL);
1324 1324 swap_xlate(ap, &vp, &off);
1325 1325 pp = page_lookup(vp, off, SE_SHARED);
1326 1326 ASSERT(pp != NULL);
1327 1327 *pplist = pp;
1328 1328 }
1329 1329 ANON_LOCK_EXIT(&amp->a_rwlock);
1330 1330
1331 1331 if (a < (spt_base + sptd->spt_amp->size)) {
1332 1332 ret = ENOTSUP;
1333 1333 goto insert_fail;
1334 1334 }
1335 1335 sptd->spt_ppa = pl;
1336 1336 } else {
1337 1337 /*
1338 1338 * We already have a valid ppa[].
1339 1339 */
1340 1340 pl = sptd->spt_ppa;
1341 1341 }
1342 1342
1343 1343 ASSERT(pl != NULL);
1344 1344
1345 1345 ret = seg_pinsert(seg, NULL, seg->s_base, sptd->spt_amp->size,
1346 1346 sptd->spt_amp->size, pl, S_WRITE, SEGP_FORCE_WIRED,
1347 1347 segspt_reclaim);
1348 1348 if (ret == SEGP_FAIL) {
1349 1349 /*
1350 1350 * seg_pinsert failed. We return
1351 1351 * ENOTSUP, so that the as_pagelock() code will
1352 1352 * then try the slower F_SOFTLOCK path.
1353 1353 */
1354 1354 if (pl_built) {
1355 1355 /*
1356 1356 * No one else has referenced the ppa[].
1357 1357 * We created it and we need to destroy it.
1358 1358 */
1359 1359 sptd->spt_ppa = NULL;
1360 1360 }
1361 1361 ret = ENOTSUP;
1362 1362 goto insert_fail;
1363 1363 }
1364 1364
1365 1365 /*
1366 1366 * In either case, we increment softlockcnt on the 'real' segment.
1367 1367 */
1368 1368 sptd->spt_pcachecnt++;
1369 1369 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1370 1370
1371 1371 /*
1372 1372 * We can now drop the sptd->spt_lock since the ppa[]
1373 1373 * exists and we have incremented pcachecnt.
1374 1374 */
1375 1375 mutex_exit(&sptd->spt_lock);
1376 1376
1377 1377 /*
1378 1378 * Since we cache the entire segment, we want to
1379 1379 * set ppp to point to the first slot that corresponds
1380 1380 * to the requested addr, i.e. page_index.
1381 1381 */
1382 1382 *ppp = &(sptd->spt_ppa[page_index]);
1383 1383 return (0);
1384 1384
1385 1385 insert_fail:
1386 1386 /*
1387 1387 * We will only reach this code if we tried and failed.
1388 1388 *
1389 1389 * And we can drop the lock on the dummy seg, once we've failed
1390 1390 * to set up a new ppa[].
1391 1391 */
1392 1392 mutex_exit(&sptd->spt_lock);
1393 1393
1394 1394 if (pl_built) {
1395 1395 /*
1396 1396 * We created pl and we need to destroy it.
1397 1397 */
1398 1398 pplist = pl;
1399 1399 np = (((uintptr_t)(a - spt_base)) >> PAGESHIFT);
1400 1400 while (np) {
1401 1401 page_unlock(*pplist);
1402 1402 np--;
1403 1403 pplist++;
1404 1404 }
1405 1405 kmem_free(pl, sizeof (page_t *) * btopr(sptd->spt_amp->size));
1406 1406 }
1407 1407 if (shmd->shm_softlockcnt <= 0) {
1408 1408 if (AS_ISUNMAPWAIT(seg->s_as)) {
1409 1409 mutex_enter(&seg->s_as->a_contents);
1410 1410 if (AS_ISUNMAPWAIT(seg->s_as)) {
1411 1411 AS_CLRUNMAPWAIT(seg->s_as);
1412 1412 cv_broadcast(&seg->s_as->a_cv);
1413 1413 }
1414 1414 mutex_exit(&seg->s_as->a_contents);
1415 1415 }
1416 1416 }
1417 1417 *ppp = NULL;
1418 1418 return (ret);
1419 1419 }
1420 1420
1421 1421 /*
1422 1422 * purge any cached pages in the I/O page cache
1423 1423 */
1424 1424 static void
1425 1425 segspt_purge(struct seg *seg)
1426 1426 {
1427 1427 seg_ppurge(seg, NULL, SEGP_FORCE_WIRED);
1428 1428 }
1429 1429
1430 1430 static int
1431 1431 segspt_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
1432 1432 enum seg_rw rw, int async)
1433 1433 {
1434 1434 struct seg *seg = (struct seg *)ptag;
1435 1435 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1436 1436 struct seg *sptseg;
1437 1437 struct spt_data *sptd;
1438 1438 pgcnt_t npages, i, free_availrmem = 0;
1439 1439 int done = 0;
1440 1440
1441 1441 #ifdef lint
1442 1442 addr = addr;
1443 1443 #endif
1444 1444 sptseg = shmd->shm_sptseg;
1445 1445 sptd = sptseg->s_data;
1446 1446 npages = (len >> PAGESHIFT);
1447 1447 ASSERT(npages);
1448 1448 ASSERT(sptd->spt_pcachecnt != 0);
1449 1449 ASSERT(sptd->spt_ppa == pplist);
1450 1450 ASSERT(npages == btopr(sptd->spt_amp->size));
1451 1451 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1452 1452
1453 1453 /*
1454 1454 * Acquire the lock on the dummy seg and destroy the
1455 1455 * ppa array IF this is the last pcachecnt.
1456 1456 */
1457 1457 mutex_enter(&sptd->spt_lock);
1458 1458 if (--sptd->spt_pcachecnt == 0) {
1459 1459 for (i = 0; i < npages; i++) {
1460 1460 if (pplist[i] == NULL) {
1461 1461 continue;
1462 1462 }
1463 1463 if (rw == S_WRITE) {
1464 1464 hat_setrefmod(pplist[i]);
1465 1465 } else {
1466 1466 hat_setref(pplist[i]);
1467 1467 }
1468 1468 if ((sptd->spt_flags & SHM_PAGEABLE) &&
1469 1469 (sptd->spt_ppa_lckcnt[i] == 0))
1470 1470 free_availrmem++;
1471 1471 page_unlock(pplist[i]);
1472 1472 }
1473 1473 if ((sptd->spt_flags & SHM_PAGEABLE) && free_availrmem) {
1474 1474 mutex_enter(&freemem_lock);
1475 1475 availrmem += free_availrmem;
1476 1476 mutex_exit(&freemem_lock);
1477 1477 }
1478 1478 /*
1479 1479 * Since we want to cache/uncache the entire ISM segment,
1480 1480 * we will track the pplist in a segspt specific field
1481 1481 * ppa, that is initialized at the time we add an entry to
1482 1482 * the cache.
1483 1483 */
1484 1484 ASSERT(sptd->spt_pcachecnt == 0);
1485 1485 kmem_free(pplist, sizeof (page_t *) * npages);
1486 1486 sptd->spt_ppa = NULL;
1487 1487 sptd->spt_flags &= ~DISM_PPA_CHANGED;
1488 1488 sptd->spt_gen++;
1489 1489 cv_broadcast(&sptd->spt_cv);
1490 1490 done = 1;
1491 1491 }
1492 1492 mutex_exit(&sptd->spt_lock);
1493 1493
1494 1494 /*
1495 1495 * If we are pcache async thread or called via seg_ppurge_wiredpp() we
1496 1496 * may not hold AS lock (in this case async argument is not 0). This
1497 1497 * means if softlockcnt drops to 0 after the decrement below address
1498 1498 * space may get freed. We can't allow it since after softlock
1499 1499 * decrement to 0 we still need to access the as structure for possible
1500 1500 * wakeup of unmap waiters. To prevent the disappearance of as we take
1501 1501 * this segment's shm_segfree_syncmtx. segspt_shmfree() also takes
1502 1502 * this mutex as a barrier to make sure this routine completes before
1503 1503 * segment is freed.
1504 1504 *
1505 1505 * The second complication we have to deal with in async case is a
1506 1506 * possibility of missed wake up of unmap wait thread. When we don't
1507 1507 * hold as lock here we may take a_contents lock before unmap wait
1508 1508 * thread that was first to see softlockcnt was still not 0. As a
1509 1509 * result we'll fail to wake up an unmap wait thread. To avoid this
1510 1510 * race we set nounmapwait flag in as structure if we drop softlockcnt
1511 1511 * to 0 if async is not 0. unmapwait thread
1512 1512 * will not block if this flag is set.
1513 1513 */
1514 1514 if (async)
1515 1515 mutex_enter(&shmd->shm_segfree_syncmtx);
1516 1516
1517 1517 /*
1518 1518 * Now decrement softlockcnt.
1519 1519 */
1520 1520 ASSERT(shmd->shm_softlockcnt > 0);
1521 1521 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
1522 1522
1523 1523 if (shmd->shm_softlockcnt <= 0) {
1524 1524 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
1525 1525 mutex_enter(&seg->s_as->a_contents);
1526 1526 if (async)
1527 1527 AS_SETNOUNMAPWAIT(seg->s_as);
1528 1528 if (AS_ISUNMAPWAIT(seg->s_as)) {
1529 1529 AS_CLRUNMAPWAIT(seg->s_as);
1530 1530 cv_broadcast(&seg->s_as->a_cv);
1531 1531 }
1532 1532 mutex_exit(&seg->s_as->a_contents);
1533 1533 }
1534 1534 }
1535 1535
1536 1536 if (async)
1537 1537 mutex_exit(&shmd->shm_segfree_syncmtx);
1538 1538
1539 1539 return (done);
1540 1540 }
1541 1541
1542 1542 /*
1543 1543 * Do a F_SOFTUNLOCK call over the range requested.
1544 1544 * The range must have already been F_SOFTLOCK'ed.
1545 1545 *
1546 1546 * The calls to acquire and release the anon map lock mutex were
1547 1547 * removed in order to avoid a deadly embrace during a DR
1548 1548 * memory delete operation. (Eg. DR blocks while waiting for a
1549 1549 * exclusive lock on a page that is being used for kaio; the
1550 1550 * thread that will complete the kaio and call segspt_softunlock
1551 1551 * blocks on the anon map lock; another thread holding the anon
1552 1552 * map lock blocks on another page lock via the segspt_shmfault
1553 1553 * -> page_lookup -> page_lookup_create -> page_lock_es code flow.)
1554 1554 *
1555 1555 * The appropriateness of the removal is based upon the following:
1556 1556 * 1. If we are holding a segment's reader lock and the page is held
1557 1557 * shared, then the corresponding element in anonmap which points to
1558 1558 * anon struct cannot change and there is no need to acquire the
1559 1559 * anonymous map lock.
1560 1560 * 2. Threads in segspt_softunlock have a reader lock on the segment
1561 1561 * and already have the shared page lock, so we are guaranteed that
1562 1562 * the anon map slot cannot change and therefore can call anon_get_ptr()
1563 1563 * without grabbing the anonymous map lock.
1564 1564 * 3. Threads that softlock a shared page break copy-on-write, even if
1565 1565 * its a read. Thus cow faults can be ignored with respect to soft
1566 1566 * unlocking, since the breaking of cow means that the anon slot(s) will
1567 1567 * not be shared.
1568 1568 */
1569 1569 static void
1570 1570 segspt_softunlock(struct seg *seg, caddr_t sptseg_addr,
1571 1571 size_t len, enum seg_rw rw)
1572 1572 {
1573 1573 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1574 1574 struct seg *sptseg;
1575 1575 struct spt_data *sptd;
1576 1576 page_t *pp;
1577 1577 caddr_t adr;
1578 1578 struct vnode *vp;
1579 1579 u_offset_t offset;
1580 1580 ulong_t anon_index;
1581 1581 struct anon_map *amp; /* XXX - for locknest */
1582 1582 struct anon *ap = NULL;
1583 1583 pgcnt_t npages;
1584 1584
1585 1585 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1586 1586
1587 1587 sptseg = shmd->shm_sptseg;
1588 1588 sptd = sptseg->s_data;
1589 1589
1590 1590 /*
1591 1591 * Some platforms assume that ISM mappings are HAT_LOAD_LOCK
1592 1592 * and therefore their pages are SE_SHARED locked
1593 1593 * for the entire life of the segment.
1594 1594 */
1595 1595 if ((!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) &&
1596 1596 ((sptd->spt_flags & SHM_PAGEABLE) == 0)) {
1597 1597 goto softlock_decrement;
1598 1598 }
1599 1599
1600 1600 /*
1601 1601 * Any thread is free to do a page_find and
1602 1602 * page_unlock() on the pages within this seg.
1603 1603 *
1604 1604 * We are already holding the as->a_lock on the user's
1605 1605 * real segment, but we need to hold the a_lock on the
1606 1606 * underlying dummy as. This is mostly to satisfy the
1607 1607 * underlying HAT layer.
1608 1608 */
1609 1609 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1610 1610 hat_unlock(sptseg->s_as->a_hat, sptseg_addr, len);
1611 1611 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1612 1612
1613 1613 amp = sptd->spt_amp;
1614 1614 ASSERT(amp != NULL);
1615 1615 anon_index = seg_page(sptseg, sptseg_addr);
1616 1616
1617 1617 for (adr = sptseg_addr; adr < sptseg_addr + len; adr += PAGESIZE) {
1618 1618 ap = anon_get_ptr(amp->ahp, anon_index++);
1619 1619 ASSERT(ap != NULL);
1620 1620 swap_xlate(ap, &vp, &offset);
1621 1621
1622 1622 /*
1623 1623 * Use page_find() instead of page_lookup() to
1624 1624 * find the page since we know that it has a
1625 1625 * "shared" lock.
1626 1626 */
1627 1627 pp = page_find(vp, offset);
1628 1628 ASSERT(ap == anon_get_ptr(amp->ahp, anon_index - 1));
1629 1629 if (pp == NULL) {
1630 1630 panic("segspt_softunlock: "
1631 1631 "addr %p, ap %p, vp %p, off %llx",
1632 1632 (void *)adr, (void *)ap, (void *)vp, offset);
1633 1633 /*NOTREACHED*/
1634 1634 }
1635 1635
1636 1636 if (rw == S_WRITE) {
1637 1637 hat_setrefmod(pp);
1638 1638 } else if (rw != S_OTHER) {
1639 1639 hat_setref(pp);
1640 1640 }
1641 1641 page_unlock(pp);
1642 1642 }
1643 1643
1644 1644 softlock_decrement:
1645 1645 npages = btopr(len);
1646 1646 ASSERT(shmd->shm_softlockcnt >= npages);
1647 1647 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), -npages);
1648 1648 if (shmd->shm_softlockcnt == 0) {
1649 1649 /*
1650 1650 * All SOFTLOCKS are gone. Wakeup any waiting
1651 1651 * unmappers so they can try again to unmap.
1652 1652 * Check for waiters first without the mutex
1653 1653 * held so we don't always grab the mutex on
1654 1654 * softunlocks.
1655 1655 */
1656 1656 if (AS_ISUNMAPWAIT(seg->s_as)) {
1657 1657 mutex_enter(&seg->s_as->a_contents);
1658 1658 if (AS_ISUNMAPWAIT(seg->s_as)) {
1659 1659 AS_CLRUNMAPWAIT(seg->s_as);
1660 1660 cv_broadcast(&seg->s_as->a_cv);
1661 1661 }
1662 1662 mutex_exit(&seg->s_as->a_contents);
1663 1663 }
1664 1664 }
1665 1665 }
1666 1666
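The wakeup path at the end of segspt_softunlock() checks AS_ISUNMAPWAIT() once without a_contents held and again after taking it, so the common softunlock path never touches the mutex. Below is a minimal user-level sketch of that check / lock / re-check / broadcast pattern; the waiter_state struct, wake_unmap_waiters() name, and the use of pthreads and C11 atomics are illustrative assumptions, not kernel interfaces.

#include <pthread.h>
#include <stdatomic.h>

struct waiter_state {
	pthread_mutex_t	lock;		/* plays the role of a_contents */
	pthread_cond_t	cv;		/* plays the role of a_cv */
	atomic_int	unmap_wait;	/* plays the role of AS_ISUNMAPWAIT() */
};

static void
wake_unmap_waiters(struct waiter_state *ws)
{
	/* Cheap unlocked check first so the common path skips the mutex. */
	if (atomic_load(&ws->unmap_wait) != 0) {
		pthread_mutex_lock(&ws->lock);
		/* Re-check under the lock before clearing and broadcasting. */
		if (atomic_load(&ws->unmap_wait) != 0) {
			atomic_store(&ws->unmap_wait, 0);
			pthread_cond_broadcast(&ws->cv);
		}
		pthread_mutex_unlock(&ws->lock);
	}
}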
1667 1667 int
1668 1668 segspt_shmattach(struct seg *seg, caddr_t *argsp)
1669 1669 {
1670 1670 struct shm_data *shmd_arg = (struct shm_data *)argsp;
1671 1671 struct shm_data *shmd;
1672 1672 struct anon_map *shm_amp = shmd_arg->shm_amp;
1673 1673 struct spt_data *sptd;
1674 1674 int error = 0;
1675 1675
1676 1676 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1677 1677
1678 1678 shmd = kmem_zalloc((sizeof (*shmd)), KM_NOSLEEP);
1679 1679 if (shmd == NULL)
1680 1680 return (ENOMEM);
1681 1681
1682 1682 shmd->shm_sptas = shmd_arg->shm_sptas;
1683 1683 shmd->shm_amp = shm_amp;
1684 1684 shmd->shm_sptseg = shmd_arg->shm_sptseg;
1685 1685
1686 1686 (void) lgrp_shm_policy_set(LGRP_MEM_POLICY_DEFAULT, shm_amp, 0,
1687 1687 NULL, 0, seg->s_size);
1688 1688
1689 1689 mutex_init(&shmd->shm_segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
1690 1690
1691 1691 seg->s_data = (void *)shmd;
1692 1692 seg->s_ops = &segspt_shmops;
1693 1693 seg->s_szc = shmd->shm_sptseg->s_szc;
1694 1694 sptd = shmd->shm_sptseg->s_data;
1695 1695
1696 1696 if (sptd->spt_flags & SHM_PAGEABLE) {
1697 1697 if ((shmd->shm_vpage = kmem_zalloc(btopr(shm_amp->size),
1698 1698 KM_NOSLEEP)) == NULL) {
1699 1699 seg->s_data = (void *)NULL;
1700 1700 kmem_free(shmd, (sizeof (*shmd)));
1701 1701 return (ENOMEM);
1702 1702 }
1703 1703 shmd->shm_lckpgs = 0;
1704 1704 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
1705 1705 if ((error = hat_share(seg->s_as->a_hat, seg->s_base,
1706 1706 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1707 1707 seg->s_size, seg->s_szc)) != 0) {
1708 1708 kmem_free(shmd->shm_vpage,
1709 1709 btopr(shm_amp->size));
1710 1710 }
1711 1711 }
1712 1712 } else {
1713 1713 error = hat_share(seg->s_as->a_hat, seg->s_base,
1714 1714 shmd_arg->shm_sptas->a_hat, SEGSPTADDR,
1715 1715 seg->s_size, seg->s_szc);
1716 1716 }
1717 1717 if (error) {
1718 1718 seg->s_szc = 0;
1719 1719 seg->s_data = (void *)NULL;
1720 1720 kmem_free(shmd, (sizeof (*shmd)));
1721 1721 } else {
1722 1722 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1723 1723 shm_amp->refcnt++;
1724 1724 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1725 1725 }
1726 1726 return (error);
1727 1727 }
1728 1728
1729 1729 int
1730 1730 segspt_shmunmap(struct seg *seg, caddr_t raddr, size_t ssize)
1731 1731 {
1732 1732 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1733 1733 int reclaim = 1;
1734 1734
1735 1735 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1736 1736 retry:
1737 1737 if (shmd->shm_softlockcnt > 0) {
1738 1738 if (reclaim == 1) {
1739 1739 segspt_purge(seg);
1740 1740 reclaim = 0;
1741 1741 goto retry;
1742 1742 }
1743 1743 return (EAGAIN);
1744 1744 }
1745 1745
1746 1746 if (ssize != seg->s_size) {
1747 1747 #ifdef DEBUG
1748 1748 cmn_err(CE_WARN, "Incompatible ssize %lx s_size %lx\n",
1749 1749 ssize, seg->s_size);
1750 1750 #endif
1751 1751 return (EINVAL);
1752 1752 }
1753 1753
1754 1754 (void) segspt_shmlockop(seg, raddr, shmd->shm_amp->size, 0, MC_UNLOCK,
1755 1755 NULL, 0);
1756 1756 hat_unshare(seg->s_as->a_hat, raddr, ssize, seg->s_szc);
1757 1757
1758 1758 seg_free(seg);
1759 1759
1760 1760 return (0);
1761 1761 }
1762 1762
1763 1763 void
1764 1764 segspt_shmfree(struct seg *seg)
1765 1765 {
1766 1766 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1767 1767 struct anon_map *shm_amp = shmd->shm_amp;
1768 1768
1769 1769 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1770 1770
1771 1771 (void) segspt_shmlockop(seg, seg->s_base, shm_amp->size, 0,
1772 1772 MC_UNLOCK, NULL, 0);
1773 1773
1774 1774 /*
1775 1775 * Need to increment refcnt when attaching
1776 1776 * and decrement when detaching because of dup().
1777 1777 */
1778 1778 ANON_LOCK_ENTER(&shm_amp->a_rwlock, RW_WRITER);
1779 1779 shm_amp->refcnt--;
1780 1780 ANON_LOCK_EXIT(&shm_amp->a_rwlock);
1781 1781
1782 1782 if (shmd->shm_vpage) { /* only for DISM */
1783 1783 kmem_free(shmd->shm_vpage, btopr(shm_amp->size));
1784 1784 shmd->shm_vpage = NULL;
1785 1785 }
1786 1786
1787 1787 /*
1788 1788 * Take shm_segfree_syncmtx lock to let segspt_reclaim() finish if it's
1789 1789 * still working with this segment without holding as lock.
1790 1790 */
1791 1791 ASSERT(shmd->shm_softlockcnt == 0);
1792 1792 mutex_enter(&shmd->shm_segfree_syncmtx);
1793 1793 mutex_destroy(&shmd->shm_segfree_syncmtx);
1794 1794
1795 1795 kmem_free(shmd, sizeof (*shmd));
1796 1796 }
1797 1797
1798 1798 /*ARGSUSED*/
1799 1799 int
1800 1800 segspt_shmsetprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
1801 1801 {
1802 1802 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1803 1803
1804 1804 /*
1805 1805 * Shared page table is more than shared mapping.
1806 1806 * Individual process sharing page tables can't change prot
1807 1807 * because there is only one set of page tables.
1808 1808 * This will be allowed after private page table is
1809 1809 * supported.
1810 1810 */
1811 1811 /* need to return correct status error? */
1812 1812 return (0);
1813 1813 }
1814 1814
1815 1815
1816 1816 faultcode_t
1817 1817 segspt_dismfault(struct hat *hat, struct seg *seg, caddr_t addr,
1818 1818 size_t len, enum fault_type type, enum seg_rw rw)
1819 1819 {
1820 1820 struct shm_data *shmd = (struct shm_data *)seg->s_data;
1821 1821 struct seg *sptseg = shmd->shm_sptseg;
1822 1822 struct as *curspt = shmd->shm_sptas;
1823 1823 struct spt_data *sptd = sptseg->s_data;
1824 1824 pgcnt_t npages;
1825 1825 size_t size;
1826 1826 caddr_t segspt_addr, shm_addr;
1827 1827 page_t **ppa;
1828 1828 int i;
1829 1829 ulong_t an_idx = 0;
1830 1830 int err = 0;
1831 1831 int dyn_ism_unmap = hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0);
1832 1832 size_t pgsz;
1833 1833 pgcnt_t pgcnt;
1834 1834 caddr_t a;
1835 1835 pgcnt_t pidx;
1836 1836
1837 1837 #ifdef lint
1838 1838 hat = hat;
1839 1839 #endif
1840 1840 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1841 1841
1842 1842 /*
1843 1843 * Because of the way spt is implemented
1844 1844 * the realsize of the segment does not have to be
1845 1845 * equal to the segment size itself. The segment size is
1846 1846 * often in multiples of a page size larger than PAGESIZE.
1847 1847 * The realsize is rounded up to the nearest PAGESIZE
1848 1848 * based on what the user requested. This is a bit of
1849 1849 * ugliness that is historical but not easily fixed
1850 1850 * without re-designing the higher levels of ISM.
1851 1851 */
1852 1852 ASSERT(addr >= seg->s_base);
1853 1853 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
1854 1854 return (FC_NOMAP);
1855 1855 /*
1856 1856 * For all of the following cases except F_PROT, we need to
1857 1857 * make any necessary adjustments to addr and len
1858 1858 * and get all of the necessary page_t's into an array called ppa[].
1859 1859 *
1860 1860 * The code in shmat() forces base addr and len of ISM segment
1861 1861 * to be aligned to largest page size supported. Therefore,
1862 1862 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
1863 1863 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
1864 1864 * in large pagesize chunks, or else we will screw up the HAT
1865 1865 * layer by calling hat_memload_array() with differing page sizes
1866 1866 * over a given virtual range.
1867 1867 */
1868 1868 pgsz = page_get_pagesize(sptseg->s_szc);
1869 1869 pgcnt = page_get_pagecnt(sptseg->s_szc);
1870 1870 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
1871 1871 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
1872 1872 npages = btopr(size);
1873 1873
1874 1874 /*
1875 1875 * Now we need to convert from addr in segshm to addr in segspt.
1876 1876 */
1877 1877 an_idx = seg_page(seg, shm_addr);
1878 1878 segspt_addr = sptseg->s_base + ptob(an_idx);
1879 1879
1880 1880 ASSERT((segspt_addr + ptob(npages)) <=
1881 1881 (sptseg->s_base + sptd->spt_realsize));
1882 1882 ASSERT(segspt_addr < (sptseg->s_base + sptseg->s_size));
1883 1883
1884 1884 switch (type) {
1885 1885
1886 1886 case F_SOFTLOCK:
1887 1887
1888 1888 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
1889 1889 /*
1890 1890 * Fall through to the F_INVAL case to load up the hat layer
1891 1891 * entries with the HAT_LOAD_LOCK flag.
1892 1892 */
1893 1893 /* FALLTHRU */
1894 1894 case F_INVAL:
1895 1895
1896 1896 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
1897 1897 return (FC_NOMAP);
1898 1898
1899 1899 ppa = kmem_zalloc(npages * sizeof (page_t *), KM_SLEEP);
1900 1900
1901 1901 err = spt_anon_getpages(sptseg, segspt_addr, size, ppa);
1902 1902 if (err != 0) {
1903 1903 if (type == F_SOFTLOCK) {
1904 1904 atomic_add_long((ulong_t *)(
1905 1905 &(shmd->shm_softlockcnt)), -npages);
1906 1906 }
1907 1907 goto dism_err;
1908 1908 }
1909 1909 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
1910 1910 a = segspt_addr;
1911 1911 pidx = 0;
1912 1912 if (type == F_SOFTLOCK) {
1913 1913
1914 1914 /*
1915 1915 * Load up the translation keeping it
1916 1916 * locked and don't unlock the page.
1917 1917 */
1918 1918 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1919 1919 hat_memload_array(sptseg->s_as->a_hat,
1920 1920 a, pgsz, &ppa[pidx], sptd->spt_prot,
1921 1921 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
1922 1922 }
1923 1923 } else {
1924 1924 /*
1925 1925 * Migrate pages marked for migration
1926 1926 */
1927 1927 if (lgrp_optimizations())
1928 1928 page_migrate(seg, shm_addr, ppa, npages);
1929 1929
1930 1930 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
1931 1931 hat_memload_array(sptseg->s_as->a_hat,
1932 1932 a, pgsz, &ppa[pidx],
1933 1933 sptd->spt_prot,
1934 1934 HAT_LOAD_SHARE);
1935 1935 }
1936 1936
1937 1937 /*
1938 1938 * And now drop the SE_SHARED lock(s).
1939 1939 */
1940 1940 if (dyn_ism_unmap) {
1941 1941 for (i = 0; i < npages; i++) {
1942 1942 page_unlock(ppa[i]);
1943 1943 }
1944 1944 }
1945 1945 }
1946 1946
1947 1947 if (!dyn_ism_unmap) {
1948 1948 if (hat_share(seg->s_as->a_hat, shm_addr,
1949 1949 curspt->a_hat, segspt_addr, ptob(npages),
1950 1950 seg->s_szc) != 0) {
1951 1951 panic("hat_share err in DISM fault");
1952 1952 /* NOTREACHED */
1953 1953 }
1954 1954 if (type == F_INVAL) {
1955 1955 for (i = 0; i < npages; i++) {
1956 1956 page_unlock(ppa[i]);
1957 1957 }
1958 1958 }
1959 1959 }
1960 1960 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
1961 1961 dism_err:
1962 1962 kmem_free(ppa, npages * sizeof (page_t *));
1963 1963 return (err);
1964 1964
1965 1965 case F_SOFTUNLOCK:
1966 1966
1967 1967 /*
1968 1968 * This is a bit ugly, we pass in the real seg pointer,
1969 1969 * but the segspt_addr is the virtual address within the
1970 1970 * dummy seg.
1971 1971 */
1972 1972 segspt_softunlock(seg, segspt_addr, size, rw);
1973 1973 return (0);
1974 1974
1975 1975 case F_PROT:
1976 1976
1977 1977 /*
1978 1978 * This takes care of the unusual case where a user
1979 1979 * allocates a stack in shared memory and a register
1980 1980 * window overflow is written to that stack page before
1981 1981 * it is otherwise modified.
1982 1982 *
1983 1983 * We can get away with this because ISM segments are
1984 1984 * always rw. Other than this unusual case, there
1985 1985 * should be no instances of protection violations.
1986 1986 */
1987 1987 return (0);
1988 1988
1989 1989 default:
1990 1990 #ifdef DEBUG
1991 1991 panic("segspt_dismfault default type?");
1992 1992 #else
1993 1993 return (FC_NOMAP);
1994 1994 #endif
1995 1995 }
1996 1996 }
1997 1997
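As the comment in the fault handler notes, segspt_dismfault() (and segspt_shmfault() below) expand the faulting range to large-page boundaries with P2ALIGN()/P2ROUNDUP() so that hat_memload_array() is always called with uniformly sized chunks. The following is a small user-level sketch of that arithmetic; the macros are local copies of the kernel definitions, and the address, length, and page-size values are made up.

#include <stdio.h>
#include <stdint.h>

#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uintptr_t addr = 0x400123000UL;		/* hypothetical fault address */
	size_t len = 0x2000;			/* hypothetical fault length */
	size_t pgsz = 0x400000;			/* e.g. a 4M large page */

	/* Same expansion the fault handlers apply to (addr, len). */
	uintptr_t shm_addr = P2ALIGN(addr, (uintptr_t)pgsz);
	size_t size = P2ROUNDUP(addr + len - shm_addr, (uintptr_t)pgsz);

	/* Prints shm_addr=0x400000000 size=0x400000 on an LP64 build. */
	printf("shm_addr=0x%lx size=0x%lx\n",
	    (unsigned long)shm_addr, (unsigned long)size);
	return (0);
}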
1998 1998
1999 1999 faultcode_t
2000 2000 segspt_shmfault(struct hat *hat, struct seg *seg, caddr_t addr,
2001 2001 size_t len, enum fault_type type, enum seg_rw rw)
2002 2002 {
2003 2003 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2004 2004 struct seg *sptseg = shmd->shm_sptseg;
2005 2005 struct as *curspt = shmd->shm_sptas;
2006 2006 struct spt_data *sptd = sptseg->s_data;
2007 2007 pgcnt_t npages;
2008 2008 size_t size;
2009 2009 caddr_t sptseg_addr, shm_addr;
2010 2010 page_t *pp, **ppa;
2011 2011 int i;
2012 2012 u_offset_t offset;
2013 2013 ulong_t anon_index = 0;
2014 2014 struct vnode *vp;
2015 2015 struct anon_map *amp; /* XXX - for locknest */
2016 2016 struct anon *ap = NULL;
2017 2017 size_t pgsz;
2018 2018 pgcnt_t pgcnt;
2019 2019 caddr_t a;
2020 2020 pgcnt_t pidx;
2021 2021 size_t sz;
2022 2022
2023 2023 #ifdef lint
2024 2024 hat = hat;
2025 2025 #endif
2026 2026
2027 2027 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2028 2028
2029 2029 if (sptd->spt_flags & SHM_PAGEABLE) {
2030 2030 return (segspt_dismfault(hat, seg, addr, len, type, rw));
2031 2031 }
2032 2032
2033 2033 /*
2034 2034 * Because of the way spt is implemented
2035 2035 * the realsize of the segment does not have to be
2036 2036 * equal to the segment size itself. The segment size is
2037 2037 * often in multiples of a page size larger than PAGESIZE.
2038 2038 * The realsize is rounded up to the nearest PAGESIZE
2039 2039 * based on what the user requested. This is a bit of
2040 2040 * ugliness that is historical but not easily fixed
2041 2041 * without re-designing the higher levels of ISM.
2042 2042 */
2043 2043 ASSERT(addr >= seg->s_base);
2044 2044 if (((addr + len) - seg->s_base) > sptd->spt_realsize)
2045 2045 return (FC_NOMAP);
2046 2046 /*
2047 2047 * For all of the following cases except F_PROT, we need to
2048 2048 * make any necessary adjustments to addr and len
2049 2049 * and get all of the necessary page_t's into an array called ppa[].
2050 2050 *
2051 2051 * The code in shmat() forces base addr and len of ISM segment
2052 2052 * to be aligned to largest page size supported. Therefore,
2053 2053 * we are able to handle F_SOFTLOCK and F_INVAL calls in "large
2054 2054 * pagesize" chunks. We want to make sure that we HAT_LOAD_LOCK
2055 2055 * in large pagesize chunks, or else we will screw up the HAT
2056 2056 * layer by calling hat_memload_array() with differing page sizes
2057 2057 * over a given virtual range.
2058 2058 */
2059 2059 pgsz = page_get_pagesize(sptseg->s_szc);
2060 2060 pgcnt = page_get_pagecnt(sptseg->s_szc);
2061 2061 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz);
2062 2062 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)), pgsz);
2063 2063 npages = btopr(size);
2064 2064
2065 2065 /*
2066 2066 * Now we need to convert from addr in segshm to addr in segspt.
2067 2067 */
2068 2068 anon_index = seg_page(seg, shm_addr);
2069 2069 sptseg_addr = sptseg->s_base + ptob(anon_index);
2070 2070
2071 2071 /*
2072 2072 * And now we may have to adjust npages downward if we have
2073 2073 * exceeded the realsize of the segment or initial anon
2074 2074 * allocations.
2075 2075 */
2076 2076 if ((sptseg_addr + ptob(npages)) >
2077 2077 (sptseg->s_base + sptd->spt_realsize))
2078 2078 size = (sptseg->s_base + sptd->spt_realsize) - sptseg_addr;
2079 2079
2080 2080 npages = btopr(size);
2081 2081
2082 2082 ASSERT(sptseg_addr < (sptseg->s_base + sptseg->s_size));
2083 2083 ASSERT((sptd->spt_flags & SHM_PAGEABLE) == 0);
2084 2084
2085 2085 switch (type) {
2086 2086
2087 2087 case F_SOFTLOCK:
2088 2088
2089 2089 /*
2090 2090 * availrmem is decremented once during anon_swap_adjust()
2091 2091 * and is incremented during the anon_unresv(), which is
2092 2092 * called from shm_rm_amp() when the segment is destroyed.
2093 2093 */
2094 2094 atomic_add_long((ulong_t *)(&(shmd->shm_softlockcnt)), npages);
2095 2095 /*
2096 2096 * Some platforms assume that ISM pages are SE_SHARED
2097 2097 * locked for the entire life of the segment.
2098 2098 */
2099 2099 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0))
2100 2100 return (0);
2101 2101 /*
2102 2102 * Fall through to the F_INVAL case to load up the hat layer
2103 2103 * entries with the HAT_LOAD_LOCK flag.
2104 2104 */
2105 2105
2106 2106 /* FALLTHRU */
2107 2107 case F_INVAL:
2108 2108
2109 2109 if ((rw == S_EXEC) && !(sptd->spt_prot & PROT_EXEC))
2110 2110 return (FC_NOMAP);
2111 2111
2112 2112 /*
2113 2113 * Some platforms that do NOT support DYNAMIC_ISM_UNMAP
2114 2114 * may still rely on this call to hat_share(). That
2115 2115 * would imply that those hats can fault on a
2116 2116 * HAT_LOAD_LOCK translation, which would seem
2117 2117 * contradictory.
2118 2118 */
2119 2119 if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2120 2120 if (hat_share(seg->s_as->a_hat, seg->s_base,
2121 2121 curspt->a_hat, sptseg->s_base,
2122 2122 sptseg->s_size, sptseg->s_szc) != 0) {
2123 2123 panic("hat_share error in ISM fault");
2124 2124 /*NOTREACHED*/
2125 2125 }
2126 2126 return (0);
2127 2127 }
2128 2128 ppa = kmem_zalloc(sizeof (page_t *) * npages, KM_SLEEP);
2129 2129
2130 2130 /*
2131 2131 * I see no need to lock the real seg,
2132 2132 * here, because all of our work will be on the underlying
2133 2133 * dummy seg.
2134 2134 *
2135 2135 * sptseg_addr and npages now account for large pages.
2136 2136 */
2137 2137 amp = sptd->spt_amp;
2138 2138 ASSERT(amp != NULL);
2139 2139 anon_index = seg_page(sptseg, sptseg_addr);
2140 2140
2141 2141 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2142 2142 for (i = 0; i < npages; i++) {
2143 2143 ap = anon_get_ptr(amp->ahp, anon_index++);
2144 2144 ASSERT(ap != NULL);
2145 2145 swap_xlate(ap, &vp, &offset);
2146 2146 pp = page_lookup(vp, offset, SE_SHARED);
2147 2147 ASSERT(pp != NULL);
2148 2148 ppa[i] = pp;
2149 2149 }
2150 2150 ANON_LOCK_EXIT(&amp->a_rwlock);
2151 2151 ASSERT(i == npages);
2152 2152
2153 2153 /*
2154 2154 * We are already holding the as->a_lock on the user's
2155 2155 * real segment, but we need to hold the a_lock on the
2156 2156 * underlying dummy as. This is mostly to satisfy the
2157 2157 * underlying HAT layer.
2158 2158 */
2159 2159 AS_LOCK_ENTER(sptseg->s_as, &sptseg->s_as->a_lock, RW_READER);
2160 2160 a = sptseg_addr;
2161 2161 pidx = 0;
2162 2162 if (type == F_SOFTLOCK) {
2163 2163 /*
2164 2164 * Load up the translation keeping it
2165 2165 * locked and don't unlock the page.
2166 2166 */
2167 2167 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2168 2168 sz = MIN(pgsz, ptob(npages - pidx));
2169 2169 hat_memload_array(sptseg->s_as->a_hat, a,
2170 2170 sz, &ppa[pidx], sptd->spt_prot,
2171 2171 HAT_LOAD_LOCK | HAT_LOAD_SHARE);
2172 2172 }
2173 2173 } else {
2174 2174 /*
2175 2175 * Migrate pages marked for migration.
2176 2176 */
2177 2177 if (lgrp_optimizations())
2178 2178 page_migrate(seg, shm_addr, ppa, npages);
2179 2179
2180 2180 for (; pidx < npages; a += pgsz, pidx += pgcnt) {
2181 2181 sz = MIN(pgsz, ptob(npages - pidx));
2182 2182 hat_memload_array(sptseg->s_as->a_hat,
2183 2183 a, sz, &ppa[pidx],
2184 2184 sptd->spt_prot, HAT_LOAD_SHARE);
2185 2185 }
2186 2186
2187 2187 /*
2188 2188 * And now drop the SE_SHARED lock(s).
2189 2189 */
2190 2190 for (i = 0; i < npages; i++)
2191 2191 page_unlock(ppa[i]);
2192 2192 }
2193 2193 AS_LOCK_EXIT(sptseg->s_as, &sptseg->s_as->a_lock);
2194 2194
2195 2195 kmem_free(ppa, sizeof (page_t *) * npages);
2196 2196 return (0);
2197 2197 case F_SOFTUNLOCK:
2198 2198
2199 2199 /*
2200 2200 * This is a bit ugly, we pass in the real seg pointer,
2201 2201 * but the sptseg_addr is the virtual address within the
2202 2202 * dummy seg.
2203 2203 */
2204 2204 segspt_softunlock(seg, sptseg_addr, ptob(npages), rw);
2205 2205 return (0);
2206 2206
2207 2207 case F_PROT:
2208 2208
2209 2209 /*
2210 2210 * This takes care of the unusual case where a user
2211 2211 * allocates a stack in shared memory and a register
2212 2212 * window overflow is written to that stack page before
2213 2213 * it is otherwise modified.
2214 2214 *
2215 2215 * We can get away with this because ISM segments are
2216 2216 * always rw. Other than this unusual case, there
2217 2217 * should be no instances of protection violations.
2218 2218 */
2219 2219 return (0);
2220 2220
2221 2221 default:
2222 2222 #ifdef DEBUG
2223 2223 cmn_err(CE_WARN, "segspt_shmfault default type?");
2224 2224 #endif
2225 2225 return (FC_NOMAP);
2226 2226 }
2227 2227 }
2228 2228
2229 2229 /*ARGSUSED*/
2230 2230 static faultcode_t
2231 2231 segspt_shmfaulta(struct seg *seg, caddr_t addr)
2232 2232 {
2233 2233 return (0);
2234 2234 }
2235 2235
2236 2236 /*ARGSUSED*/
2237 2237 static int
2238 2238 segspt_shmkluster(struct seg *seg, caddr_t addr, ssize_t delta)
2239 2239 {
2240 2240 return (0);
2241 2241 }
2242 2242
2243 2243 /*
2244 2244 * duplicate the shared page tables
2245 2245 */
2246 2246 int
2247 2247 segspt_shmdup(struct seg *seg, struct seg *newseg)
2248 2248 {
2249 2249 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2250 2250 struct anon_map *amp = shmd->shm_amp;
2251 2251 struct shm_data *shmd_new;
2252 2252 struct seg *spt_seg = shmd->shm_sptseg;
2253 2253 struct spt_data *sptd = spt_seg->s_data;
2254 2254 int error = 0;
2255 2255
2256 2256 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2257 2257
2258 2258 shmd_new = kmem_zalloc((sizeof (*shmd_new)), KM_SLEEP);
2259 2259 newseg->s_data = (void *)shmd_new;
2260 2260 shmd_new->shm_sptas = shmd->shm_sptas;
2261 2261 shmd_new->shm_amp = amp;
2262 2262 shmd_new->shm_sptseg = shmd->shm_sptseg;
2263 2263 newseg->s_ops = &segspt_shmops;
2264 2264 newseg->s_szc = seg->s_szc;
2265 2265 ASSERT(seg->s_szc == shmd->shm_sptseg->s_szc);
2266 2266
2267 2267 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2268 2268 amp->refcnt++;
2269 2269 ANON_LOCK_EXIT(&amp->a_rwlock);
2270 2270
2271 2271 if (sptd->spt_flags & SHM_PAGEABLE) {
2272 2272 shmd_new->shm_vpage = kmem_zalloc(btopr(amp->size), KM_SLEEP);
2273 2273 shmd_new->shm_lckpgs = 0;
2274 2274 if (hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
2275 2275 if ((error = hat_share(newseg->s_as->a_hat,
2276 2276 newseg->s_base, shmd->shm_sptas->a_hat, SEGSPTADDR,
2277 2277 seg->s_size, seg->s_szc)) != 0) {
2278 2278 kmem_free(shmd_new->shm_vpage,
2279 2279 btopr(amp->size));
2280 2280 }
2281 2281 }
2282 2282 return (error);
2283 2283 } else {
2284 2284 return (hat_share(newseg->s_as->a_hat, newseg->s_base,
2285 2285 shmd->shm_sptas->a_hat, SEGSPTADDR, seg->s_size,
2286 2286 seg->s_szc));
2287 2287
2288 2288 }
2289 2289 }
2290 2290
2291 2291 /*ARGSUSED*/
2292 2292 int
2293 2293 segspt_shmcheckprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
2294 2294 {
2295 2295 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2296 2296 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2297 2297
2298 2298 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2299 2299
2300 2300 /*
2301 2301 * ISM segment is always rw.
2302 2302 */
2303 2303 return (((sptd->spt_prot & prot) != prot) ? EACCES : 0);
2304 2304 }
2305 2305
2306 2306 /*
2307 2307 * Return an array of locked large pages, for empty slots allocate
2308 2308 * private zero-filled anon pages.
2309 2309 */
2310 2310 static int
2311 2311 spt_anon_getpages(
2312 2312 struct seg *sptseg,
2313 2313 caddr_t sptaddr,
2314 2314 size_t len,
2315 2315 page_t *ppa[])
2316 2316 {
2317 2317 struct spt_data *sptd = sptseg->s_data;
2318 2318 struct anon_map *amp = sptd->spt_amp;
2319 2319 enum seg_rw rw = sptd->spt_prot;
2320 2320 uint_t szc = sptseg->s_szc;
2321 2321 size_t pg_sz, share_sz = page_get_pagesize(szc);
2322 2322 pgcnt_t lp_npgs;
2323 2323 caddr_t lp_addr, e_sptaddr;
2324 2324 uint_t vpprot, ppa_szc = 0;
2325 2325 struct vpage *vpage = NULL;
2326 2326 ulong_t j, ppa_idx;
2327 2327 int err, ierr = 0;
2328 2328 pgcnt_t an_idx;
2329 2329 anon_sync_obj_t cookie;
2330 2330 int anon_locked = 0;
2331 2331 pgcnt_t amp_pgs;
2332 2332
2333 2333
2334 2334 ASSERT(IS_P2ALIGNED(sptaddr, share_sz) && IS_P2ALIGNED(len, share_sz));
2335 2335 ASSERT(len != 0);
2336 2336
2337 2337 pg_sz = share_sz;
2338 2338 lp_npgs = btop(pg_sz);
2339 2339 lp_addr = sptaddr;
2340 2340 e_sptaddr = sptaddr + len;
2341 2341 an_idx = seg_page(sptseg, sptaddr);
2342 2342 ppa_idx = 0;
2343 2343
2344 2344 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2345 2345
2346 2346 amp_pgs = page_get_pagecnt(amp->a_szc);
2347 2347
2348 2348 /*CONSTCOND*/
2349 2349 while (1) {
2350 2350 for (; lp_addr < e_sptaddr;
2351 2351 an_idx += lp_npgs, lp_addr += pg_sz, ppa_idx += lp_npgs) {
2352 2352
2353 2353 /*
2354 2354 * If we're currently locked, and we get to a new
2355 2355 * page, unlock our current anon chunk.
2356 2356 */
2357 2357 if (anon_locked && P2PHASE(an_idx, amp_pgs) == 0) {
2358 2358 anon_array_exit(&cookie);
2359 2359 anon_locked = 0;
2360 2360 }
2361 2361 if (!anon_locked) {
2362 2362 anon_array_enter(amp, an_idx, &cookie);
2363 2363 anon_locked = 1;
2364 2364 }
2365 2365 ppa_szc = (uint_t)-1;
2366 2366 ierr = anon_map_getpages(amp, an_idx, szc, sptseg,
2367 2367 lp_addr, sptd->spt_prot, &vpprot, &ppa[ppa_idx],
2368 2368 &ppa_szc, vpage, rw, 0, segvn_anypgsz, 0, kcred);
2369 2369
2370 2370 if (ierr != 0) {
2371 2371 if (ierr > 0) {
2372 2372 err = FC_MAKE_ERR(ierr);
2373 2373 goto lpgs_err;
2374 2374 }
2375 2375 break;
2376 2376 }
2377 2377 }
2378 2378 if (lp_addr == e_sptaddr) {
2379 2379 break;
2380 2380 }
2381 2381 ASSERT(lp_addr < e_sptaddr);
2382 2382
2383 2383 /*
2384 2384 * ierr == -1 means we failed to allocate a large page.
2385 2385 * so do a size down operation.
2386 2386 *
2387 2387 * ierr == -2 means some other process that privately shares
2388 2388 * pages with this process has allocated a larger page and we
2389 2389 * need to retry with larger pages. So do a size up
2390 2390 * operation. This relies on the fact that large pages are
2391 2391 * never partially shared i.e. if we share any constituent
2392 2392 * page of a large page with another process we must share the
2393 2393 * entire large page. Note this cannot happen for SOFTLOCK
2394 2394 * case, unless current address (lpaddr) is at the beginning
2395 2395 * of the next page size boundary because the other process
2396 2396 * couldn't have relocated locked pages.
2397 2397 */
2398 2398 ASSERT(ierr == -1 || ierr == -2);
2399 2399 if (segvn_anypgsz) {
2400 2400 ASSERT(ierr == -2 || szc != 0);
2401 2401 ASSERT(ierr == -1 || szc < sptseg->s_szc);
2402 2402 szc = (ierr == -1) ? szc - 1 : szc + 1;
2403 2403 } else {
2404 2404 /*
2405 2405 * For faults and segvn_anypgsz == 0
2406 2406 * we need to be careful not to loop forever
2407 2407 * if existing page is found with szc other
2408 2408 * than 0 or seg->s_szc. This could be due
2409 2409 * to page relocations on behalf of DR or
2410 2410 * more likely large page creation. For this
2411 2411 * case simply re-size to existing page's szc
2412 2412 * if returned by anon_map_getpages().
2413 2413 */
2414 2414 if (ppa_szc == (uint_t)-1) {
2415 2415 szc = (ierr == -1) ? 0 : sptseg->s_szc;
2416 2416 } else {
2417 2417 ASSERT(ppa_szc <= sptseg->s_szc);
2418 2418 ASSERT(ierr == -2 || ppa_szc < szc);
2419 2419 ASSERT(ierr == -1 || ppa_szc > szc);
2420 2420 szc = ppa_szc;
2421 2421 }
2422 2422 }
2423 2423 pg_sz = page_get_pagesize(szc);
2424 2424 lp_npgs = btop(pg_sz);
2425 2425 ASSERT(IS_P2ALIGNED(lp_addr, pg_sz));
2426 2426 }
2427 2427 if (anon_locked) {
2428 2428 anon_array_exit(&cookie);
2429 2429 }
2430 2430 ANON_LOCK_EXIT(&amp->a_rwlock);
2431 2431 return (0);
2432 2432
2433 2433 lpgs_err:
2434 2434 if (anon_locked) {
2435 2435 anon_array_exit(&cookie);
2436 2436 }
2437 2437 ANON_LOCK_EXIT(&amp->a_rwlock);
2438 2438 for (j = 0; j < ppa_idx; j++)
2439 2439 page_unlock(ppa[j]);
2440 2440 return (err);
2441 2441 }
2442 2442
2443 2443 /*
2444 2444 * count the number of bytes in a set of spt pages that are currently not
2445 2445 * locked
2446 2446 */
2447 2447 static rctl_qty_t
2448 2448 spt_unlockedbytes(pgcnt_t npages, page_t **ppa)
2449 2449 {
2450 2450 ulong_t i;
2451 2451 rctl_qty_t unlocked = 0;
2452 2452
2453 2453 for (i = 0; i < npages; i++) {
2454 2454 if (ppa[i]->p_lckcnt == 0)
2455 2455 unlocked += PAGESIZE;
2456 2456 }
2457 2457 return (unlocked);
2458 2458 }
2459 2459
2460 2460 extern u_longlong_t randtick(void);
2461 2461 /* number of locks to reserve/skip by spt_lockpages() and spt_unlockpages() */
2462 2462 #define NLCK (NCPU_P2)
2463 2463 /* Random number with a range [0, n-1], n must be power of two */
2464 2464 #define RAND_P2(n) \
2465 2465 ((((long)curthread >> PTR24_LSB) ^ (long)randtick()) & ((n) - 1))
2466 2466
2467 2467 int
2468 2468 spt_lockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2469 2469 page_t **ppa, ulong_t *lockmap, size_t pos,
2470 2470 rctl_qty_t *locked)
2471 2471 {
2472 2472 struct shm_data *shmd = seg->s_data;
2473 2473 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2474 2474 ulong_t i;
2475 2475 int kernel;
2476 2476 pgcnt_t nlck = 0;
2477 2477 int rv = 0;
2478 2478 int use_reserved = 1;
2479 2479
2480 2480 /* return the number of bytes actually locked */
2481 2481 *locked = 0;
2482 2482
2483 2483 /*
2484 2484 * To avoid contention on freemem_lock, availrmem and pages_locked
2485 2485 * global counters are updated only every nlck locked pages instead of
2486 2486 * every time. Reserve nlck locks up front and deduct from this
2487 2487 * reservation for each page that requires a lock. When the reservation
2488 2488 * is consumed, reserve again. nlck is randomized, so the competing
2489 2489 * threads do not fall into a cyclic lock contention pattern. When
2490 2490 * memory is low, the lock ahead is disabled, and instead page_pp_lock()
2491 2491 * is used to lock pages.
2492 2492 */
2493 2493 for (i = 0; i < npages; anon_index++, pos++, i++) {
2494 2494 if (nlck == 0 && use_reserved == 1) {
2495 2495 nlck = NLCK + RAND_P2(NLCK);
2496 2496 /* if fewer loops left, decrease nlck */
2497 2497 nlck = MIN(nlck, npages - i);
2498 2498 /*
2499 2499 * Reserve nlck locks up front and deduct from this
2500 2500 * reservation for each page that requires a lock. When
2501 2501 * the reservation is consumed, reserve again.
2502 2502 */
2503 2503 mutex_enter(&freemem_lock);
2504 2504 if ((availrmem - nlck) < pages_pp_maximum) {
2505 2505 /* Do not do advance memory reserves */
2506 2506 use_reserved = 0;
2507 2507 } else {
2508 2508 availrmem -= nlck;
2509 2509 pages_locked += nlck;
2510 2510 }
2511 2511 mutex_exit(&freemem_lock);
2512 2512 }
2513 2513 if (!(shmd->shm_vpage[anon_index] & DISM_PG_LOCKED)) {
2514 2514 if (sptd->spt_ppa_lckcnt[anon_index] <
2515 2515 (ushort_t)DISM_LOCK_MAX) {
2516 2516 if (++sptd->spt_ppa_lckcnt[anon_index] ==
2517 2517 (ushort_t)DISM_LOCK_MAX) {
2518 2518 cmn_err(CE_WARN,
2519 2519 "DISM page lock limit "
2520 2520 "reached on DISM offset 0x%lx\n",
2521 2521 anon_index << PAGESHIFT);
2522 2522 }
2523 2523 kernel = (sptd->spt_ppa &&
2524 2524 sptd->spt_ppa[anon_index]);
2525 2525 if (!page_pp_lock(ppa[i], 0, kernel ||
2526 2526 use_reserved)) {
2527 2527 sptd->spt_ppa_lckcnt[anon_index]--;
2528 2528 rv = EAGAIN;
2529 2529 break;
2530 2530 }
2531 2531 /* if this is a newly locked page, count it */
2532 2532 if (ppa[i]->p_lckcnt == 1) {
2533 2533 if (kernel == 0 && use_reserved == 1)
2534 2534 nlck--;
2535 2535 *locked += PAGESIZE;
2536 2536 }
2537 2537 shmd->shm_lckpgs++;
2538 2538 shmd->shm_vpage[anon_index] |= DISM_PG_LOCKED;
2539 2539 if (lockmap != NULL)
2540 2540 BT_SET(lockmap, pos);
2541 2541 }
2542 2542 }
2543 2543 }
2544 2544 /* Return unused lock reservation */
2545 2545 if (nlck != 0 && use_reserved == 1) {
2546 2546 mutex_enter(&freemem_lock);
2547 2547 availrmem += nlck;
2548 2548 pages_locked -= nlck;
2549 2549 mutex_exit(&freemem_lock);
2550 2550 }
2551 2551
2552 2552 return (rv);
2553 2553 }
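spt_lockpages() reserves availrmem in randomized batches (NLCK plus a RAND_P2() jitter) so that concurrent lockers do not hit freemem_lock in lock step, and it hands any unused reservation back when it finishes or bails out. The sketch below shows the same reserve / consume / return-leftover shape at user level; the counter value, batch size, and try_lock_page() stub are assumptions made purely for illustration.

#include <pthread.h>
#include <stdlib.h>

#define	NLCK	8			/* stand-in for NCPU_P2 */

static pthread_mutex_t freemem_lock = PTHREAD_MUTEX_INITIALIZER;
static long availrmem = 1024 * 1024;	/* shared counter, made-up value */

/* Stub standing in for the per-page lock; always succeeds here. */
static int
try_lock_page(size_t idx)
{
	(void) idx;
	return (0);
}

static int
lock_pages_batched(size_t npages)
{
	size_t i, nlck = 0;
	int rv = 0;

	for (i = 0; i < npages; i++) {
		if (nlck == 0) {
			/*
			 * Random batch in [NLCK, 2*NLCK) so competing
			 * threads do not contend in a fixed pattern.
			 */
			nlck = NLCK + ((size_t)rand() & (NLCK - 1));
			if (nlck > npages - i)
				nlck = npages - i;
			pthread_mutex_lock(&freemem_lock);
			if (availrmem < (long)nlck) {
				pthread_mutex_unlock(&freemem_lock);
				return (-1);	/* nothing reserved yet */
			}
			availrmem -= (long)nlck;
			pthread_mutex_unlock(&freemem_lock);
		}
		if (try_lock_page(i) != 0) {
			rv = -1;	/* EAGAIN-like failure, keep leftover */
			break;
		}
		nlck--;			/* consume one unit of the batch */
	}
	/* Return whatever part of the last batch went unused. */
	if (nlck != 0) {
		pthread_mutex_lock(&freemem_lock);
		availrmem += (long)nlck;
		pthread_mutex_unlock(&freemem_lock);
	}
	return (rv);
}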
2554 2554
2555 2555 int
2556 2556 spt_unlockpages(struct seg *seg, pgcnt_t anon_index, pgcnt_t npages,
2557 2557 rctl_qty_t *unlocked)
2558 2558 {
2559 2559 struct shm_data *shmd = seg->s_data;
2560 2560 struct spt_data *sptd = shmd->shm_sptseg->s_data;
2561 2561 struct anon_map *amp = sptd->spt_amp;
2562 2562 struct anon *ap;
2563 2563 struct vnode *vp;
2564 2564 u_offset_t off;
2565 2565 struct page *pp;
2566 2566 int kernel;
2567 2567 anon_sync_obj_t cookie;
2568 2568 ulong_t i;
2569 2569 pgcnt_t nlck = 0;
2570 2570 pgcnt_t nlck_limit = NLCK;
2571 2571
2572 2572 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2573 2573 for (i = 0; i < npages; i++, anon_index++) {
2574 2574 if (shmd->shm_vpage[anon_index] & DISM_PG_LOCKED) {
2575 2575 anon_array_enter(amp, anon_index, &cookie);
2576 2576 ap = anon_get_ptr(amp->ahp, anon_index);
2577 2577 ASSERT(ap);
2578 2578
2579 2579 swap_xlate(ap, &vp, &off);
2580 2580 anon_array_exit(&cookie);
2581 2581 pp = page_lookup(vp, off, SE_SHARED);
2582 2582 ASSERT(pp);
2583 2583 /*
2584 2584 * availrmem is decremented only for pages which are not
2585 2585 * in seg pcache, for pages in seg pcache availrmem was
2586 2586 * decremented in _dismpagelock()
2587 2587 */
2588 2588 kernel = (sptd->spt_ppa && sptd->spt_ppa[anon_index]);
2589 2589 ASSERT(pp->p_lckcnt > 0);
2590 2590
2591 2591 /*
2592 2592 * unlock page but do not change availrmem, we do it
2593 2593 * ourselves every nlck loops.
2594 2594 */
2595 2595 page_pp_unlock(pp, 0, 1);
2596 2596 if (pp->p_lckcnt == 0) {
2597 2597 if (kernel == 0)
2598 2598 nlck++;
2599 2599 *unlocked += PAGESIZE;
2600 2600 }
2601 2601 page_unlock(pp);
2602 2602 shmd->shm_vpage[anon_index] &= ~DISM_PG_LOCKED;
2603 2603 sptd->spt_ppa_lckcnt[anon_index]--;
2604 2604 shmd->shm_lckpgs--;
2605 2605 }
2606 2606
2607 2607 /*
2608 2608 * To reduce freemem_lock contention, do not update availrmem
2609 2609 * until at least NLCK pages have been unlocked.
2610 2610 * 1. No need to update if nlck is zero
2611 2611 * 2. Always update if the last iteration
2612 2612 */
2613 2613 if (nlck > 0 && (nlck == nlck_limit || i == npages - 1)) {
2614 2614 mutex_enter(&freemem_lock);
2615 2615 availrmem += nlck;
2616 2616 pages_locked -= nlck;
2617 2617 mutex_exit(&freemem_lock);
2618 2618 nlck = 0;
2619 2619 nlck_limit = NLCK + RAND_P2(NLCK);
2620 2620 }
2621 2621 }
2622 2622 ANON_LOCK_EXIT(&amp->a_rwlock);
2623 2623
2624 2624 return (0);
2625 2625 }
2626 2626
2627 2627 /*ARGSUSED*/
2628 2628 static int
2629 2629 segspt_shmlockop(struct seg *seg, caddr_t addr, size_t len,
2630 2630 int attr, int op, ulong_t *lockmap, size_t pos)
2631 2631 {
2632 2632 struct shm_data *shmd = seg->s_data;
2633 2633 struct seg *sptseg = shmd->shm_sptseg;
2634 2634 struct spt_data *sptd = sptseg->s_data;
2635 2635 struct kshmid *sp = sptd->spt_amp->a_sp;
2636 2636 pgcnt_t npages, a_npages;
2637 2637 page_t **ppa;
2638 2638 pgcnt_t an_idx, a_an_idx, ppa_idx;
2639 2639 caddr_t spt_addr, a_addr; /* spt and aligned address */
2640 2640 size_t a_len; /* aligned len */
2641 2641 size_t share_sz;
2642 2642 ulong_t i;
2643 2643 int sts = 0;
2644 2644 rctl_qty_t unlocked = 0;
2645 2645 rctl_qty_t locked = 0;
2646 2646 struct proc *p = curproc;
2647 2647 kproject_t *proj;
2648 2648
2649 2649 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2650 2650 ASSERT(sp != NULL);
2651 2651
2652 2652 if ((sptd->spt_flags & SHM_PAGEABLE) == 0) {
2653 2653 return (0);
2654 2654 }
2655 2655
2656 2656 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
2657 2657 an_idx = seg_page(seg, addr);
2658 2658 npages = btopr(len);
2659 2659
2660 2660 if (an_idx + npages > btopr(shmd->shm_amp->size)) {
2661 2661 return (ENOMEM);
2662 2662 }
2663 2663
2664 2664 /*
2665 2665 * A shm's project never changes, so no lock needed.
2666 2666 * The shm has a hold on the project, so it will not go away.
2667 2667 * Since we have a mapping to shm within this zone, we know
2668 2668 * that the zone will not go away.
2669 2669 */
2670 2670 proj = sp->shm_perm.ipc_proj;
2671 2671
2672 2672 if (op == MC_LOCK) {
2673 2673
2674 2674 /*
2675 2675 * Need to align addr and size request if they are not
2676 2676 * aligned so we can always allocate large page(s); however,
2677 2677 * we only lock what was requested in the initial request.
2678 2678 */
2679 2679 share_sz = page_get_pagesize(sptseg->s_szc);
2680 2680 a_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_sz);
2681 2681 a_len = P2ROUNDUP((uintptr_t)(((addr + len) - a_addr)),
2682 2682 share_sz);
2683 2683 a_npages = btop(a_len);
2684 2684 a_an_idx = seg_page(seg, a_addr);
2685 2685 spt_addr = sptseg->s_base + ptob(a_an_idx);
2686 2686 ppa_idx = an_idx - a_an_idx;
2687 2687
2688 2688 if ((ppa = kmem_zalloc(((sizeof (page_t *)) * a_npages),
2689 2689 KM_NOSLEEP)) == NULL) {
2690 2690 return (ENOMEM);
2691 2691 }
2692 2692
2693 2693 /*
2694 2694 * Don't cache any new pages for IO and
2695 2695 * flush any cached pages.
2696 2696 */
2697 2697 mutex_enter(&sptd->spt_lock);
2698 2698 if (sptd->spt_ppa != NULL)
2699 2699 sptd->spt_flags |= DISM_PPA_CHANGED;
2700 2700
2701 2701 sts = spt_anon_getpages(sptseg, spt_addr, a_len, ppa);
2702 2702 if (sts != 0) {
2703 2703 mutex_exit(&sptd->spt_lock);
2704 2704 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2705 2705 return (sts);
2706 2706 }
2707 2707
2708 2708 mutex_enter(&sp->shm_mlock);
2709 2709 /* enforce locked memory rctl */
2710 2710 unlocked = spt_unlockedbytes(npages, &ppa[ppa_idx]);
2711 2711
2712 2712 mutex_enter(&p->p_lock);
2713 2713 if (rctl_incr_locked_mem(p, proj, unlocked, 0)) {
2714 2714 mutex_exit(&p->p_lock);
2715 2715 sts = EAGAIN;
2716 2716 } else {
2717 2717 mutex_exit(&p->p_lock);
2718 2718 sts = spt_lockpages(seg, an_idx, npages,
2719 2719 &ppa[ppa_idx], lockmap, pos, &locked);
2720 2720
2721 2721 /*
2722 2722 * correct locked count if not all pages could be
2723 2723 * locked
2724 2724 */
2725 2725 if ((unlocked - locked) > 0) {
2726 2726 rctl_decr_locked_mem(NULL, proj,
2727 2727 (unlocked - locked), 0);
2728 2728 }
2729 2729 }
2730 2730 /*
2731 2731 * unlock pages
2732 2732 */
2733 2733 for (i = 0; i < a_npages; i++)
2734 2734 page_unlock(ppa[i]);
2735 2735 if (sptd->spt_ppa != NULL)
2736 2736 sptd->spt_flags |= DISM_PPA_CHANGED;
2737 2737 mutex_exit(&sp->shm_mlock);
2738 2738 mutex_exit(&sptd->spt_lock);
2739 2739
2740 2740 kmem_free(ppa, ((sizeof (page_t *)) * a_npages));
2741 2741
2742 2742 } else if (op == MC_UNLOCK) { /* unlock */
2743 2743 page_t **ppa;
2744 2744
2745 2745 mutex_enter(&sptd->spt_lock);
2746 2746 if (shmd->shm_lckpgs == 0) {
2747 2747 mutex_exit(&sptd->spt_lock);
2748 2748 return (0);
2749 2749 }
2750 2750 /*
2751 2751 * Don't cache new IO pages.
2752 2752 */
2753 2753 if (sptd->spt_ppa != NULL)
2754 2754 sptd->spt_flags |= DISM_PPA_CHANGED;
2755 2755
2756 2756 mutex_enter(&sp->shm_mlock);
2757 2757 sts = spt_unlockpages(seg, an_idx, npages, &unlocked);
2758 2758 if ((ppa = sptd->spt_ppa) != NULL)
2759 2759 sptd->spt_flags |= DISM_PPA_CHANGED;
2760 2760 mutex_exit(&sptd->spt_lock);
2761 2761
2762 2762 rctl_decr_locked_mem(NULL, proj, unlocked, 0);
2763 2763 mutex_exit(&sp->shm_mlock);
2764 2764
2765 2765 if (ppa != NULL)
2766 2766 seg_ppurge_wiredpp(ppa);
2767 2767 }
2768 2768 return (sts);
2769 2769 }
2770 2770
2771 2771 /*ARGSUSED*/
2772 2772 int
2773 2773 segspt_shmgetprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2774 2774 {
2775 2775 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2776 2776 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2777 2777 spgcnt_t pgno = seg_page(seg, addr+len) - seg_page(seg, addr) + 1;
2778 2778
2779 2779 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2780 2780
2781 2781 /*
2782 2782 * ISM segment is always rw.
2783 2783 */
2784 2784 while (--pgno >= 0)
2785 2785 *protv++ = sptd->spt_prot;
2786 2786 return (0);
2787 2787 }
2788 2788
2789 2789 /*ARGSUSED*/
2790 2790 u_offset_t
2791 2791 segspt_shmgetoffset(struct seg *seg, caddr_t addr)
2792 2792 {
2793 2793 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2794 2794
2795 2795 /* Offset does not matter in ISM memory */
2796 2796
2797 2797 return ((u_offset_t)0);
2798 2798 }
2799 2799
2800 2800 /* ARGSUSED */
2801 2801 int
2802 2802 segspt_shmgettype(struct seg *seg, caddr_t addr)
2803 2803 {
2804 2804 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2805 2805 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2806 2806
2807 2807 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2808 2808
2809 2809 /*
2810 2810 * The shared memory mapping is always MAP_SHARED, SWAP is only
2811 2811 * reserved for DISM
2812 2812 */
2813 2813 return (MAP_SHARED |
2814 2814 ((sptd->spt_flags & SHM_PAGEABLE) ? 0 : MAP_NORESERVE));
2815 2815 }
2816 2816
2817 2817 /*ARGSUSED*/
2818 2818 int
2819 2819 segspt_shmgetvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
2820 2820 {
2821 2821 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2822 2822 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2823 2823
2824 2824 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2825 2825
2826 2826 *vpp = sptd->spt_vp;
2827 2827 return (0);
2828 2828 }
2829 2829
2830 2830 /*
2831 2831 * We need to wait for pending IO to complete to a DISM segment in order for
2832 2832 * pages to get kicked out of the seg_pcache. 120 seconds should be more
2833 2833 * than enough time to wait.
2834 2834 */
2835 2835 static clock_t spt_pcache_wait = 120;
2836 2836
2837 2837 /*ARGSUSED*/
2838 2838 static int
2839 2839 segspt_shmadvise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2840 2840 {
2841 2841 struct shm_data *shmd = (struct shm_data *)seg->s_data;
2842 2842 struct spt_data *sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2843 2843 struct anon_map *amp;
2844 2844 pgcnt_t pg_idx;
2845 2845 ushort_t gen;
2846 2846 clock_t end_lbolt;
2847 2847 int writer;
2848 2848 page_t **ppa;
2849 2849
2850 2850 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2851 2851
2852 2852 if (behav == MADV_FREE) {
2853 2853 if ((sptd->spt_flags & SHM_PAGEABLE) == 0)
2854 2854 return (0);
2855 2855
2856 2856 amp = sptd->spt_amp;
2857 2857 pg_idx = seg_page(seg, addr);
2858 2858
2859 2859 mutex_enter(&sptd->spt_lock);
2860 2860 if ((ppa = sptd->spt_ppa) == NULL) {
2861 2861 mutex_exit(&sptd->spt_lock);
2862 2862 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2863 2863 anon_disclaim(amp, pg_idx, len);
2864 2864 ANON_LOCK_EXIT(&amp->a_rwlock);
2865 2865 return (0);
2866 2866 }
2867 2867
2868 2868 sptd->spt_flags |= DISM_PPA_CHANGED;
2869 2869 gen = sptd->spt_gen;
2870 2870
2871 2871 mutex_exit(&sptd->spt_lock);
2872 2872
2873 2873 /*
2874 2874 * Purge all DISM cached pages
2875 2875 */
2876 2876 seg_ppurge_wiredpp(ppa);
2877 2877
2878 2878 /*
2879 2879 * Drop the AS_LOCK so that other threads can grab it
2880 2880 * in the as_pageunlock path and hopefully get the segment
2881 2881 * kicked out of the seg_pcache. We bump the shm_softlockcnt
2882 2882 * to keep this segment resident.
2883 2883 */
2884 2884 writer = AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock);
2885 2885 atomic_inc_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2886 2886 AS_LOCK_EXIT(seg->s_as, &seg->s_as->a_lock);
2887 2887
2888 2888 mutex_enter(&sptd->spt_lock);
2889 2889
2890 2890 end_lbolt = ddi_get_lbolt() + (hz * spt_pcache_wait);
2891 2891
2892 2892 /*
2893 2893 * Try to wait for pages to get kicked out of the seg_pcache.
2894 2894 */
2895 2895 while (sptd->spt_gen == gen &&
2896 2896 (sptd->spt_flags & DISM_PPA_CHANGED) &&
2897 2897 ddi_get_lbolt() < end_lbolt) {
2898 2898 if (!cv_timedwait_sig(&sptd->spt_cv,
2899 2899 &sptd->spt_lock, end_lbolt)) {
2900 2900 break;
2901 2901 }
2902 2902 }
2903 2903
2904 2904 mutex_exit(&sptd->spt_lock);
2905 2905
2906 2906 /* Regrab the AS_LOCK and release our hold on the segment */
2907 2907 AS_LOCK_ENTER(seg->s_as, &seg->s_as->a_lock,
2908 2908 writer ? RW_WRITER : RW_READER);
2909 2909 atomic_dec_ulong((ulong_t *)(&(shmd->shm_softlockcnt)));
2910 2910 if (shmd->shm_softlockcnt <= 0) {
2911 2911 if (AS_ISUNMAPWAIT(seg->s_as)) {
2912 2912 mutex_enter(&seg->s_as->a_contents);
2913 2913 if (AS_ISUNMAPWAIT(seg->s_as)) {
2914 2914 AS_CLRUNMAPWAIT(seg->s_as);
2915 2915 cv_broadcast(&seg->s_as->a_cv);
2916 2916 }
2917 2917 mutex_exit(&seg->s_as->a_contents);
2918 2918 }
2919 2919 }
2920 2920
2921 2921 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2922 2922 anon_disclaim(amp, pg_idx, len);
2923 2923 ANON_LOCK_EXIT(&amp->a_rwlock);
2924 2924 } else if (lgrp_optimizations() && (behav == MADV_ACCESS_LWP ||
2925 2925 behav == MADV_ACCESS_MANY || behav == MADV_ACCESS_DEFAULT)) {
2926 2926 int already_set;
2927 2927 ulong_t anon_index;
2928 2928 lgrp_mem_policy_t policy;
2929 2929 caddr_t shm_addr;
2930 2930 size_t share_size;
2931 2931 size_t size;
2932 2932 struct seg *sptseg = shmd->shm_sptseg;
2933 2933 caddr_t sptseg_addr;
2934 2934
2935 2935 /*
2936 2936 * Align address and length to page size of underlying segment
2937 2937 */
2938 2938 share_size = page_get_pagesize(shmd->shm_sptseg->s_szc);
2939 2939 shm_addr = (caddr_t)P2ALIGN((uintptr_t)(addr), share_size);
2940 2940 size = P2ROUNDUP((uintptr_t)(((addr + len) - shm_addr)),
2941 2941 share_size);
2942 2942
2943 2943 amp = shmd->shm_amp;
2944 2944 anon_index = seg_page(seg, shm_addr);
2945 2945
2946 2946 /*
2947 2947 * And now we may have to adjust size downward if we have
2948 2948 * exceeded the realsize of the segment or initial anon
2949 2949 * allocations.
2950 2950 */
2951 2951 sptseg_addr = sptseg->s_base + ptob(anon_index);
2952 2952 if ((sptseg_addr + size) >
2953 2953 (sptseg->s_base + sptd->spt_realsize))
2954 2954 size = (sptseg->s_base + sptd->spt_realsize) -
2955 2955 sptseg_addr;
2956 2956
2957 2957 /*
2958 2958 * Set memory allocation policy for this segment
2959 2959 */
2960 2960 policy = lgrp_madv_to_policy(behav, len, MAP_SHARED);
2961 2961 already_set = lgrp_shm_policy_set(policy, amp, anon_index,
2962 2962 NULL, 0, len);
2963 2963
2964 2964 /*
2965 2965 * If random memory allocation policy set already,
2966 2966 * don't bother reapplying it.
2967 2967 */
2968 2968 if (already_set && !LGRP_MEM_POLICY_REAPPLICABLE(policy))
2969 2969 return (0);
2970 2970
2971 2971 /*
2972 2972 * Mark any existing pages in the given range for
2973 2973 * migration, flushing the I/O page cache, and using the
2974 2974 * underlying segment to calculate the anon index and to get
2975 2975 * the anonmap and vnode pointer.
2976 2976 */
2977 2977 if (shmd->shm_softlockcnt > 0)
2978 2978 segspt_purge(seg);
2979 2979
2980 2980 page_mark_migrate(seg, shm_addr, size, amp, 0, NULL, 0, 0);
2981 2981 }
2982 2982
2983 2983 return (0);
2984 2984 }
2985 2985
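In the MADV_FREE path above, segspt_shmadvise() waits up to spt_pcache_wait seconds for the pcache purge to take effect, giving up early if the generation count moves on or the DISM_PPA_CHANGED flag clears. A rough user-level sketch of that bounded wait follows; the purge_state struct and the use of pthread_cond_timedwait() are illustrative assumptions, not the kernel's cv_timedwait_sig() interface.

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

/* Hypothetical state mirroring spt_gen and DISM_PPA_CHANGED. */
struct purge_state {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	unsigned	gen;		/* bumped when the cached page list is rebuilt */
	bool		purge_pending;	/* true while the purge has not completed */
};

/* Wait until the generation changes, the purge completes, or we time out. */
static void
wait_for_purge(struct purge_state *ps, unsigned start_gen, int timeout_sec)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&ps->lock);
	while (ps->gen == start_gen && ps->purge_pending) {
		if (pthread_cond_timedwait(&ps->cv, &ps->lock, &deadline) != 0)
			break;		/* timed out */
	}
	pthread_mutex_unlock(&ps->lock);
}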
2986 2986 /*ARGSUSED*/
2987 2987 void
2988 2988 segspt_shmdump(struct seg *seg)
2989 2989 {
2990 2990 /* no-op for ISM segment */
2991 2991 }
2992 2992
2993 2993 /*ARGSUSED*/
2994 2994 static faultcode_t
2995 2995 segspt_shmsetpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
2996 2996 {
2997 2997 return (ENOTSUP);
2998 2998 }
2999 2999
3000 3000 /*
3001 3001 * get a memory ID for an addr in a given segment
3002 3002 */
3003 3003 static int
3004 3004 segspt_shmgetmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3005 3005 {
3006 3006 struct shm_data *shmd = (struct shm_data *)seg->s_data;
3007 3007 struct anon *ap;
3008 3008 size_t anon_index;
3009 3009 struct anon_map *amp = shmd->shm_amp;
3010 3010 struct spt_data *sptd = shmd->shm_sptseg->s_data;
3011 3011 struct seg *sptseg = shmd->shm_sptseg;
3012 3012 anon_sync_obj_t cookie;
3013 3013
3014 3014 anon_index = seg_page(seg, addr);
3015 3015
3016 3016 if (addr > (seg->s_base + sptd->spt_realsize)) {
3017 3017 return (EFAULT);
3018 3018 }
3019 3019
3020 3020 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
3021 3021 anon_array_enter(amp, anon_index, &cookie);
3022 3022 ap = anon_get_ptr(amp->ahp, anon_index);
3023 3023 if (ap == NULL) {
3024 3024 struct page *pp;
3025 3025 caddr_t spt_addr = sptseg->s_base + ptob(anon_index);
3026 3026
3027 3027 pp = anon_zero(sptseg, spt_addr, &ap, kcred);
3028 3028 if (pp == NULL) {
3029 3029 anon_array_exit(&cookie);
3030 3030 ANON_LOCK_EXIT(&amp->a_rwlock);
3031 3031 return (ENOMEM);
3032 3032 }
3033 3033 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3034 3034 page_unlock(pp);
3035 3035 }
3036 3036 anon_array_exit(&cookie);
3037 3037 ANON_LOCK_EXIT(&amp->a_rwlock);
3038 3038 memidp->val[0] = (uintptr_t)ap;
3039 3039 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
3040 3040 return (0);
3041 3041 }
3042 3042
3043 3043 /*
3044 3044 * Get memory allocation policy info for specified address in given segment
3045 3045 */
3046 3046 static lgrp_mem_policy_info_t *
3047 3047 segspt_shmgetpolicy(struct seg *seg, caddr_t addr)
3048 3048 {
3049 3049 struct anon_map *amp;
3050 3050 ulong_t anon_index;
3051 3051 lgrp_mem_policy_info_t *policy_info;
3052 3052 struct shm_data *shm_data;
3053 3053
3054 3054 ASSERT(seg != NULL);
3055 3055
3056 3056 /*
3057 3057 * Get anon_map from segshm
3058 3058 *
3059 3059 * Assume that no lock needs to be held on anon_map, since
3060 3060 * it should be protected by its reference count which must be
3061 3061 * nonzero for an existing segment
3062 3062 * Need to grab readers lock on policy tree though
3063 3063 */
3064 3064 shm_data = (struct shm_data *)seg->s_data;
3065 3065 if (shm_data == NULL)
3066 3066 return (NULL);
3067 3067 amp = shm_data->shm_amp;
3068 3068 ASSERT(amp->refcnt != 0);
3069 3069
3070 3070 /*
3071 3071 * Get policy info
3072 3072 *
3073 3073 * Assume starting anon index of 0
3074 3074 */
3075 3075 anon_index = seg_page(seg, addr);
3076 3076 policy_info = lgrp_shm_policy_get(amp, anon_index, NULL, 0);
3077 3077
3078 3078 return (policy_info);
3079 3079 }
3080 3080
3081 3081 /*ARGSUSED*/
3082 3082 static int
3083 3083 segspt_shmcapable(struct seg *seg, segcapability_t capability)
3084 3084 {
3085 3085 return (0);
3086 3086 }