seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
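The .inherit entry removed below only pointed at the generic "not supported" stub. The segop_inherit() dispatch wrapper already falls back to ENOTSUP when a segment driver leaves .inherit unset, so the explicit entry adds nothing. A minimal sketch of that fallback, assuming the current illumos segop_*() dispatch style (the exact signature and argument names here are from memory and may not match the tree):

	int
	segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
	{
		/*
		 * Sketch: a NULL .inherit entry in the seg_ops vector is
		 * treated the same as seg_inherit_notsup() -- the request
		 * is simply refused.
		 */
		if (seg->s_ops->inherit == NULL)
			return (ENOTSUP);

		return (seg->s_ops->inherit(seg, addr, len, op));
	}

With that fallback in place, segkp_ops can omit .inherit entirely, which is what the diff below does.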
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or locked in memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_dump(struct seg *seg);
79 79 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
80 80 uint_t prot);
81 81 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
82 82 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
83 83 struct page ***page, enum lock_type type,
84 84 enum seg_rw rw);
85 85 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
86 86 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
87 87 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
88 88 struct segkp_data **tkpd, struct anon_map *amp);
89 89 static void segkp_release_internal(struct seg *seg,
90 90 struct segkp_data *kpd, size_t len);
91 91 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
92 92 size_t len, struct segkp_data *kpd, uint_t flags);
93 93 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
94 94 size_t len, struct segkp_data *kpd, uint_t flags);
95 95 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
96 96 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
97 97 static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg,
98 98 caddr_t addr);
99 99 static int segkp_capable(struct seg *seg, segcapability_t capability);
100 100
101 101 /*
102 102 * Lock used to protect the hash table(s) and caches.
103 103 */
104 104 static kmutex_t segkp_lock;
105 105
106 106 /*
107 107 * The segkp caches
108 108 */
109 109 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
110 110
111 111 /*
112 112 * When there are fewer than red_minavail bytes left on the stack,
113 113 * segkp_map_red() will map in the redzone (if called). 5000 seems
114 114 * to work reasonably well...
115 115 */
116 116 long red_minavail = 5000;
117 117
118 118 /*
119 119 * will be set to 1 for 32 bit x86 systems only, in startup.c
120 120 */
121 121 int segkp_fromheap = 0;
122 122 ulong_t *segkp_bitmap;
123 123
124 124 /*
125 125 * If segkp_map_red() is called with the redzone already mapped and
126 126 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
127 127 * then the stack situation has become quite serious; if much more stack
128 128 * is consumed, we have the potential of scrogging the next thread/LWP
129 129 * structure. To help debug the "can't happen" panics which may
130 130 * result from this condition, we record hrestime and the calling thread
131 131 * in red_deep_hires and red_deep_thread respectively.
132 132 */
133 133 #define RED_DEEP_THRESHOLD 2000
134 134
135 135 hrtime_t red_deep_hires;
136 136 kthread_t *red_deep_thread;
137 137
138 138 uint32_t red_nmapped;
139 139 uint32_t red_closest = UINT_MAX;
140 140 uint32_t red_ndoubles;
141 141
142 142 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
143 143 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
144 144
145 145 static struct seg_ops segkp_ops = {
146 146 .fault = segkp_fault,
147 147 .checkprot = segkp_checkprot,
148 148 .kluster = segkp_kluster,
149 149 .dump = segkp_dump,
150 150 .pagelock = segkp_pagelock,
151 151 .getmemid = segkp_getmemid,
152 152 .getpolicy = segkp_getpolicy,
153 153 .capable = segkp_capable,
154 - .inherit = seg_inherit_notsup,
155 154 };
156 155
157 156
158 157 static void segkpinit_mem_config(struct seg *);
159 158
160 159 static uint32_t segkp_indel;
161 160
162 161 /*
163 162 * Allocate the segment specific private data struct and fill it in
164 163 * with the per kp segment mutex, anon ptr. array and hash table.
165 164 */
166 165 int
167 166 segkp_create(struct seg *seg)
168 167 {
169 168 struct segkp_segdata *kpsd;
170 169 size_t np;
171 170
172 171 ASSERT(seg != NULL && seg->s_as == &kas);
173 172 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
174 173
175 174 if (seg->s_size & PAGEOFFSET) {
176 175 panic("Bad segkp size");
177 176 /*NOTREACHED*/
178 177 }
179 178
180 179 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
181 180
182 181 /*
183 182 * Allocate the virtual memory for segkp and initialize it
184 183 */
185 184 if (segkp_fromheap) {
186 185 np = btop(kvseg.s_size);
187 186 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
188 187 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
189 188 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
190 189 } else {
191 190 segkp_bitmap = NULL;
192 191 np = btop(seg->s_size);
193 192 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
194 193 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
195 194 VM_SLEEP);
196 195 }
197 196
198 197 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
199 198
200 199 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
201 200 KM_SLEEP);
202 201 seg->s_data = (void *)kpsd;
203 202 seg->s_ops = &segkp_ops;
204 203 segkpinit_mem_config(seg);
205 204 return (0);
206 205 }
207 206
208 207
209 208 /*
210 209 * Find a free 'freelist' and initialize it with the appropriate attributes
211 210 */
212 211 void *
213 212 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
214 213 {
215 214 int i;
216 215
217 216 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
218 217 return ((void *)-1);
219 218
220 219 mutex_enter(&segkp_lock);
221 220 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
222 221 if (segkp_cache[i].kpf_inuse)
223 222 continue;
224 223 segkp_cache[i].kpf_inuse = 1;
225 224 segkp_cache[i].kpf_max = maxsize;
226 225 segkp_cache[i].kpf_flags = flags;
227 226 segkp_cache[i].kpf_seg = seg;
228 227 segkp_cache[i].kpf_len = len;
229 228 mutex_exit(&segkp_lock);
230 229 return ((void *)(uintptr_t)i);
231 230 }
232 231 mutex_exit(&segkp_lock);
233 232 return ((void *)-1);
234 233 }
235 234
236 235 /*
237 236 * Free all the cache resources.
238 237 */
239 238 void
240 239 segkp_cache_free(void)
241 240 {
242 241 struct segkp_data *kpd;
243 242 struct seg *seg;
244 243 int i;
245 244
246 245 mutex_enter(&segkp_lock);
247 246 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
248 247 if (!segkp_cache[i].kpf_inuse)
249 248 continue;
250 249 /*
251 250 * Disconnect the freelist and process each element
252 251 */
253 252 kpd = segkp_cache[i].kpf_list;
254 253 seg = segkp_cache[i].kpf_seg;
255 254 segkp_cache[i].kpf_list = NULL;
256 255 segkp_cache[i].kpf_count = 0;
257 256 mutex_exit(&segkp_lock);
258 257
259 258 while (kpd != NULL) {
260 259 struct segkp_data *next;
261 260
262 261 next = kpd->kp_next;
263 262 segkp_release_internal(seg, kpd, kpd->kp_len);
264 263 kpd = next;
265 264 }
266 265 mutex_enter(&segkp_lock);
267 266 }
268 267 mutex_exit(&segkp_lock);
269 268 }
270 269
271 270 /*
272 271 * There are 2 entries into segkp_get_internal. The first includes a cookie
273 272 * used to access a pool of cached segkp resources. The second does not
274 273 * use the cache.
275 274 */
276 275 caddr_t
277 276 segkp_get(struct seg *seg, size_t len, uint_t flags)
278 277 {
279 278 struct segkp_data *kpd = NULL;
280 279
281 280 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
282 281 kpd->kp_cookie = -1;
283 282 return (stom(kpd->kp_base, flags));
284 283 }
285 284 return (NULL);
286 285 }
287 286
288 287 /*
289 288 * Return a 'cached' segkp address
290 289 */
291 290 caddr_t
292 291 segkp_cache_get(void *cookie)
293 292 {
294 293 struct segkp_cache *freelist = NULL;
295 294 struct segkp_data *kpd = NULL;
296 295 int index = (int)(uintptr_t)cookie;
297 296 struct seg *seg;
298 297 size_t len;
299 298 uint_t flags;
300 299
301 300 if (index < 0 || index >= SEGKP_MAX_CACHE)
302 301 return (NULL);
303 302 freelist = &segkp_cache[index];
304 303
305 304 mutex_enter(&segkp_lock);
306 305 seg = freelist->kpf_seg;
307 306 flags = freelist->kpf_flags;
308 307 if (freelist->kpf_list != NULL) {
309 308 kpd = freelist->kpf_list;
310 309 freelist->kpf_list = kpd->kp_next;
311 310 freelist->kpf_count--;
312 311 mutex_exit(&segkp_lock);
313 312 kpd->kp_next = NULL;
314 313 segkp_insert(seg, kpd);
315 314 return (stom(kpd->kp_base, flags));
316 315 }
317 316 len = freelist->kpf_len;
318 317 mutex_exit(&segkp_lock);
319 318 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
320 319 kpd->kp_cookie = index;
321 320 return (stom(kpd->kp_base, flags));
322 321 }
323 322 return (NULL);
324 323 }
325 324
326 325 caddr_t
327 326 segkp_get_withanonmap(
328 327 struct seg *seg,
329 328 size_t len,
330 329 uint_t flags,
331 330 struct anon_map *amp)
332 331 {
333 332 struct segkp_data *kpd = NULL;
334 333
335 334 ASSERT(amp != NULL);
336 335 flags |= KPD_HASAMP;
337 336 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
338 337 kpd->kp_cookie = -1;
339 338 return (stom(kpd->kp_base, flags));
340 339 }
341 340 return (NULL);
342 341 }
343 342
344 343 /*
345 344 * This does the real work of segkp allocation.
346 345 * Return to client base addr. len must be page-aligned. A null value is
347 346 * returned if there are no more vm resources (e.g. pages, swap). The len
348 347 * and base recorded in the private data structure include the redzone
349 348 * and the redzone length (if applicable). If the user requests a redzone
 350 349  * either the first or last page is left unmapped, depending on whether stacks
 351 350  * grow toward low or high memory.
352 351 *
353 352 * The client may also specify a no-wait flag. If that is set then the
354 353 * request will choose a non-blocking path when requesting resources.
 355 354  * The default is to make the client wait.
356 355 */
357 356 static caddr_t
358 357 segkp_get_internal(
359 358 struct seg *seg,
360 359 size_t len,
361 360 uint_t flags,
362 361 struct segkp_data **tkpd,
363 362 struct anon_map *amp)
364 363 {
365 364 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
366 365 struct segkp_data *kpd;
367 366 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
368 367 pgcnt_t np = 0; /* number of pages in the resource */
369 368 pgcnt_t segkpindex;
370 369 long i;
371 370 caddr_t va;
372 371 pgcnt_t pages = 0;
373 372 ulong_t anon_idx = 0;
374 373 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
375 374 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
376 375
377 376 if (len & PAGEOFFSET) {
378 377 panic("segkp_get: len is not page-aligned");
379 378 /*NOTREACHED*/
380 379 }
381 380
382 381 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
383 382
384 383 /* Only allow KPD_NO_ANON if we are going to lock it down */
385 384 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
386 385 return (NULL);
387 386
388 387 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
389 388 return (NULL);
390 389 /*
391 390 * Fix up the len to reflect the REDZONE if applicable
392 391 */
393 392 if (flags & KPD_HASREDZONE)
394 393 len += PAGESIZE;
395 394 np = btop(len);
396 395
397 396 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
398 397 if (vbase == NULL) {
399 398 kmem_free(kpd, sizeof (struct segkp_data));
400 399 return (NULL);
401 400 }
402 401
403 402 /* If locking, reserve physical memory */
404 403 if (flags & KPD_LOCKED) {
405 404 pages = btop(SEGKP_MAPLEN(len, flags));
406 405 if (page_resv(pages, kmflag) == 0) {
407 406 vmem_free(SEGKP_VMEM(seg), vbase, len);
408 407 kmem_free(kpd, sizeof (struct segkp_data));
409 408 return (NULL);
410 409 }
411 410 if ((flags & KPD_NO_ANON) == 0)
412 411 atomic_add_long(&anon_segkp_pages_locked, pages);
413 412 }
414 413
415 414 /*
416 415 * Reserve sufficient swap space for this vm resource. We'll
417 416 * actually allocate it in the loop below, but reserving it
418 417 * here allows us to back out more gracefully than if we
419 418 * had an allocation failure in the body of the loop.
420 419 *
421 420 * Note that we don't need swap space for the red zone page.
422 421 */
423 422 if (amp != NULL) {
424 423 /*
425 424 * The swap reservation has been done, if required, and the
426 425 * anon_hdr is separate.
427 426 */
428 427 anon_idx = 0;
429 428 kpd->kp_anon_idx = anon_idx;
430 429 kpd->kp_anon = amp->ahp;
431 430
432 431 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
433 432 kpd, vbase, len, flags, 1);
434 433
435 434 } else if ((flags & KPD_NO_ANON) == 0) {
436 435 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
437 436 if (flags & KPD_LOCKED) {
438 437 atomic_add_long(&anon_segkp_pages_locked,
439 438 -pages);
440 439 page_unresv(pages);
441 440 }
442 441 vmem_free(SEGKP_VMEM(seg), vbase, len);
443 442 kmem_free(kpd, sizeof (struct segkp_data));
444 443 return (NULL);
445 444 }
446 445 atomic_add_long(&anon_segkp_pages_resv,
447 446 btop(SEGKP_MAPLEN(len, flags)));
448 447 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
449 448 kpd->kp_anon_idx = anon_idx;
450 449 kpd->kp_anon = kpsd->kpsd_anon;
451 450
452 451 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
453 452 kpd, vbase, len, flags, 1);
454 453 } else {
455 454 kpd->kp_anon = NULL;
456 455 kpd->kp_anon_idx = 0;
457 456 }
458 457
459 458 /*
460 459 * Allocate page and anon resources for the virtual address range
461 460 * except the redzone
462 461 */
463 462 if (segkp_fromheap)
464 463 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
465 464 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
466 465 page_t *pl[2];
467 466 struct vnode *vp;
468 467 anoff_t off;
469 468 int err;
470 469 page_t *pp = NULL;
471 470
472 471 /*
473 472 * Mark this page to be a segkp page in the bitmap.
474 473 */
475 474 if (segkp_fromheap) {
476 475 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
477 476 segkpindex++;
478 477 }
479 478
480 479 /*
481 480 * If this page is the red zone page, we don't need swap
482 481 * space for it. Note that we skip over the code that
483 482 * establishes MMU mappings, so that the page remains
484 483 * invalid.
485 484 */
486 485 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
487 486 continue;
488 487
489 488 if (kpd->kp_anon != NULL) {
490 489 struct anon *ap;
491 490
492 491 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
493 492 == NULL);
494 493 /*
495 494 * Determine the "vp" and "off" of the anon slot.
496 495 */
497 496 ap = anon_alloc(NULL, 0);
498 497 if (amp != NULL)
 499 498 				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
500 499 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
501 500 ap, ANON_SLEEP);
502 501 if (amp != NULL)
 503 502 				ANON_LOCK_EXIT(&amp->a_rwlock);
504 503 swap_xlate(ap, &vp, &off);
505 504
506 505 /*
507 506 * Create a page with the specified identity. The
508 507 * page is returned with the "shared" lock held.
509 508 */
510 509 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
511 510 NULL, pl, PAGESIZE, seg, va, S_CREATE,
512 511 kcred, NULL);
513 512 if (err) {
514 513 /*
515 514 * XXX - This should not fail.
516 515 */
517 516 panic("segkp_get: no pages");
518 517 /*NOTREACHED*/
519 518 }
520 519 pp = pl[0];
521 520 } else {
522 521 ASSERT(page_exists(&kvp,
523 522 (u_offset_t)(uintptr_t)va) == NULL);
524 523
525 524 if ((pp = page_create_va(&kvp,
526 525 (u_offset_t)(uintptr_t)va, PAGESIZE,
527 526 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
528 527 PG_NORELOC, seg, va)) == NULL) {
529 528 /*
530 529 * Legitimize resource; then destroy it.
531 530 * Easier than trying to unwind here.
532 531 */
533 532 kpd->kp_flags = flags;
534 533 kpd->kp_base = vbase;
535 534 kpd->kp_len = len;
536 535 segkp_release_internal(seg, kpd, va - vbase);
537 536 return (NULL);
538 537 }
539 538 page_io_unlock(pp);
540 539 }
541 540
542 541 if (flags & KPD_ZERO)
543 542 pagezero(pp, 0, PAGESIZE);
544 543
545 544 /*
546 545 * Load and lock an MMU translation for the page.
547 546 */
548 547 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
549 548 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
550 549
551 550 /*
552 551 * Now, release lock on the page.
553 552 */
554 553 if (flags & KPD_LOCKED) {
555 554 /*
556 555 * Indicate to page_retire framework that this
557 556 * page can only be retired when it is freed.
558 557 */
559 558 PP_SETRAF(pp);
560 559 page_downgrade(pp);
561 560 } else
562 561 page_unlock(pp);
563 562 }
564 563
565 564 kpd->kp_flags = flags;
566 565 kpd->kp_base = vbase;
567 566 kpd->kp_len = len;
568 567 segkp_insert(seg, kpd);
569 568 *tkpd = kpd;
570 569 return (stom(kpd->kp_base, flags));
571 570 }
572 571
573 572 /*
 574 573  * Release the resource to the cache if the pool (designated by the cookie)
 575 574  * has less than the maximum allowable. If inserted in cache,
 576 575  * segkp_delete ensures the element is taken off of the active list.
577 576 */
578 577 void
579 578 segkp_release(struct seg *seg, caddr_t vaddr)
580 579 {
581 580 struct segkp_cache *freelist;
582 581 struct segkp_data *kpd = NULL;
583 582
584 583 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
585 584 panic("segkp_release: null kpd");
586 585 /*NOTREACHED*/
587 586 }
588 587
589 588 if (kpd->kp_cookie != -1) {
590 589 freelist = &segkp_cache[kpd->kp_cookie];
591 590 mutex_enter(&segkp_lock);
592 591 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
593 592 segkp_delete(seg, kpd);
594 593 kpd->kp_next = freelist->kpf_list;
595 594 freelist->kpf_list = kpd;
596 595 freelist->kpf_count++;
597 596 mutex_exit(&segkp_lock);
598 597 return;
599 598 } else {
600 599 mutex_exit(&segkp_lock);
601 600 kpd->kp_cookie = -1;
602 601 }
603 602 }
604 603 segkp_release_internal(seg, kpd, kpd->kp_len);
605 604 }
606 605
607 606 /*
608 607 * Free the entire resource. segkp_unlock gets called with the start of the
609 608 * mapped portion of the resource. The length is the size of the mapped
610 609 * portion
611 610 */
612 611 static void
613 612 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
614 613 {
615 614 caddr_t va;
616 615 long i;
617 616 long redzone;
618 617 size_t np;
619 618 page_t *pp;
620 619 struct vnode *vp;
621 620 anoff_t off;
622 621 struct anon *ap;
623 622 pgcnt_t segkpindex;
624 623
625 624 ASSERT(kpd != NULL);
626 625 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
627 626 np = btop(len);
628 627
629 628 /* Remove from active hash list */
630 629 if (kpd->kp_cookie == -1) {
631 630 mutex_enter(&segkp_lock);
632 631 segkp_delete(seg, kpd);
633 632 mutex_exit(&segkp_lock);
634 633 }
635 634
636 635 /*
637 636 * Precompute redzone page index.
638 637 */
639 638 redzone = -1;
640 639 if (kpd->kp_flags & KPD_HASREDZONE)
641 640 redzone = KPD_REDZONE(kpd);
642 641
643 642
644 643 va = kpd->kp_base;
645 644
646 645 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
647 646 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
648 647 /*
649 648 * Free up those anon resources that are quiescent.
650 649 */
651 650 if (segkp_fromheap)
652 651 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
653 652 for (i = 0; i < np; i++, va += PAGESIZE) {
654 653
655 654 /*
656 655 * Clear the bit for this page from the bitmap.
657 656 */
658 657 if (segkp_fromheap) {
659 658 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
660 659 segkpindex++;
661 660 }
662 661
663 662 if (i == redzone)
664 663 continue;
665 664 if (kpd->kp_anon) {
666 665 /*
667 666 * Free up anon resources and destroy the
668 667 * associated pages.
669 668 *
670 669 * Release the lock if there is one. Have to get the
671 670 * page to do this, unfortunately.
672 671 */
673 672 if (kpd->kp_flags & KPD_LOCKED) {
674 673 ap = anon_get_ptr(kpd->kp_anon,
675 674 kpd->kp_anon_idx + i);
676 675 swap_xlate(ap, &vp, &off);
677 676 /* Find the shared-locked page. */
678 677 pp = page_find(vp, (u_offset_t)off);
679 678 if (pp == NULL) {
680 679 panic("segkp_release: "
681 680 "kp_anon: no page to unlock ");
682 681 /*NOTREACHED*/
683 682 }
684 683 if (PP_ISRAF(pp))
685 684 PP_CLRRAF(pp);
686 685
687 686 page_unlock(pp);
688 687 }
689 688 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
690 689 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
691 690 PAGESIZE);
692 691 anon_unresv_zone(PAGESIZE, NULL);
693 692 atomic_dec_ulong(&anon_segkp_pages_resv);
694 693 }
695 694 TRACE_5(TR_FAC_VM,
696 695 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
697 696 kpd, va, PAGESIZE, 0, 0);
698 697 } else {
699 698 if (kpd->kp_flags & KPD_LOCKED) {
700 699 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
701 700 if (pp == NULL) {
702 701 panic("segkp_release: "
703 702 "no page to unlock");
704 703 /*NOTREACHED*/
705 704 }
706 705 if (PP_ISRAF(pp))
707 706 PP_CLRRAF(pp);
708 707 /*
709 708 * We should just upgrade the lock here
710 709 * but there is no upgrade that waits.
711 710 */
712 711 page_unlock(pp);
713 712 }
714 713 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
715 714 SE_EXCL);
716 715 if (pp != NULL)
717 716 page_destroy(pp, 0);
718 717 }
719 718 }
720 719
721 720 /* If locked, release physical memory reservation */
722 721 if (kpd->kp_flags & KPD_LOCKED) {
723 722 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
724 723 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
725 724 atomic_add_long(&anon_segkp_pages_locked, -pages);
726 725 page_unresv(pages);
727 726 }
728 727
729 728 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
730 729 kmem_free(kpd, sizeof (struct segkp_data));
731 730 }
732 731
733 732 /*
734 733 * segkp_map_red() will check the current frame pointer against the
735 734 * stack base. If the amount of stack remaining is questionable
736 735 * (less than red_minavail), then segkp_map_red() will map in the redzone
737 736 * and return 1. Otherwise, it will return 0. segkp_map_red() can
738 737 * _only_ be called when it is safe to sleep on page_create_va().
739 738 *
740 739 * It is up to the caller to remember whether segkp_map_red() successfully
741 740 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
742 741 * time.
743 742 *
744 743 * Currently, this routine is only called from pagefault() (which necessarily
745 744 * satisfies the above conditions).
746 745 */
747 746 #if defined(STACK_GROWTH_DOWN)
748 747 int
749 748 segkp_map_red(void)
750 749 {
751 750 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
752 751 #ifndef _LP64
753 752 caddr_t stkbase;
754 753 #endif
755 754
756 755 /*
757 756 * Optimize for the common case where we simply return.
758 757 */
759 758 if ((curthread->t_red_pp == NULL) &&
760 759 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
761 760 return (0);
762 761
763 762 #if defined(_LP64)
764 763 /*
765 764 * XXX We probably need something better than this.
766 765 */
767 766 panic("kernel stack overflow");
768 767 /*NOTREACHED*/
769 768 #else /* _LP64 */
770 769 if (curthread->t_red_pp == NULL) {
771 770 page_t *red_pp;
772 771 struct seg kseg;
773 772
774 773 caddr_t red_va = (caddr_t)
775 774 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
776 775 PAGESIZE);
777 776
778 777 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
779 778 NULL);
780 779
781 780 /*
782 781 * Allocate the physical for the red page.
783 782 */
784 783 /*
785 784 * No PG_NORELOC here to avoid waits. Unlikely to get
786 785 * a relocate happening in the short time the page exists
787 786 * and it will be OK anyway.
788 787 */
789 788
790 789 kseg.s_as = &kas;
791 790 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
792 791 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
793 792 ASSERT(red_pp != NULL);
794 793
795 794 /*
796 795 * So we now have a page to jam into the redzone...
797 796 */
798 797 page_io_unlock(red_pp);
799 798
800 799 hat_memload(kas.a_hat, red_va, red_pp,
801 800 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
802 801 page_downgrade(red_pp);
803 802
804 803 /*
805 804 * The page is left SE_SHARED locked so we can hold on to
806 805 * the page_t pointer.
807 806 */
808 807 curthread->t_red_pp = red_pp;
809 808
810 809 atomic_inc_32(&red_nmapped);
811 810 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
812 811 (void) atomic_cas_32(&red_closest, red_closest,
813 812 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
814 813 }
815 814 return (1);
816 815 }
817 816
818 817 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
819 818 (uintptr_t)PAGEMASK) - PAGESIZE);
820 819
821 820 atomic_inc_32(&red_ndoubles);
822 821
823 822 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
824 823 /*
825 824 * Oh boy. We're already deep within the mapped-in
826 825 * redzone page, and the caller is trying to prepare
827 826 * for a deep stack run. We're running without a
828 827 * redzone right now: if the caller plows off the
829 828 * end of the stack, it'll plow another thread or
830 829 * LWP structure. That situation could result in
831 830 * a very hard-to-debug panic, so, in the spirit of
832 831 * recording the name of one's killer in one's own
833 832 * blood, we're going to record hrestime and the calling
834 833 * thread.
835 834 */
836 835 red_deep_hires = hrestime.tv_nsec;
837 836 red_deep_thread = curthread;
838 837 }
839 838
840 839 /*
841 840 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
842 841 */
843 842 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
844 843 return (0);
845 844 #endif /* _LP64 */
846 845 }
847 846
848 847 void
849 848 segkp_unmap_red(void)
850 849 {
851 850 page_t *pp;
852 851 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
853 852 (uintptr_t)PAGEMASK) - PAGESIZE);
854 853
855 854 ASSERT(curthread->t_red_pp != NULL);
856 855
857 856 /*
858 857 * Because we locked the mapping down, we can't simply rely
859 858 * on page_destroy() to clean everything up; we need to call
860 859 * hat_unload() to explicitly unlock the mapping resources.
861 860 */
862 861 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
863 862
864 863 pp = curthread->t_red_pp;
865 864
866 865 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
867 866
868 867 /*
869 868 * Need to upgrade the SE_SHARED lock to SE_EXCL.
870 869 */
871 870 if (!page_tryupgrade(pp)) {
872 871 /*
 873 872 		 * As there is no wait for upgrade, release the
874 873 * SE_SHARED lock and wait for SE_EXCL.
875 874 */
876 875 page_unlock(pp);
877 876 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
878 877 /* pp may be NULL here, hence the test below */
879 878 }
880 879
881 880 /*
882 881 * Destroy the page, with dontfree set to zero (i.e. free it).
883 882 */
884 883 if (pp != NULL)
885 884 page_destroy(pp, 0);
886 885 curthread->t_red_pp = NULL;
887 886 }
888 887 #else
889 888 #error Red stacks only supported with downwards stack growth.
890 889 #endif
891 890
892 891 /*
893 892 * Handle a fault on an address corresponding to one of the
894 893 * resources in the segkp segment.
895 894 */
896 895 faultcode_t
897 896 segkp_fault(
898 897 struct hat *hat,
899 898 struct seg *seg,
900 899 caddr_t vaddr,
901 900 size_t len,
902 901 enum fault_type type,
903 902 enum seg_rw rw)
904 903 {
905 904 struct segkp_data *kpd = NULL;
906 905 int err;
907 906
908 907 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
909 908
910 909 /*
911 910 * Sanity checks.
912 911 */
913 912 if (type == F_PROT) {
914 913 panic("segkp_fault: unexpected F_PROT fault");
915 914 /*NOTREACHED*/
916 915 }
917 916
918 917 if ((kpd = segkp_find(seg, vaddr)) == NULL)
919 918 return (FC_NOMAP);
920 919
921 920 mutex_enter(&kpd->kp_lock);
922 921
923 922 if (type == F_SOFTLOCK) {
924 923 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
925 924 /*
926 925 * The F_SOFTLOCK case has more stringent
927 926 * range requirements: the given range must exactly coincide
928 927 * with the resource's mapped portion. Note reference to
929 928 * redzone is handled since vaddr would not equal base
930 929 */
931 930 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
932 931 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
933 932 mutex_exit(&kpd->kp_lock);
934 933 return (FC_MAKE_ERR(EFAULT));
935 934 }
936 935
937 936 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
938 937 mutex_exit(&kpd->kp_lock);
939 938 return (FC_MAKE_ERR(err));
940 939 }
941 940 kpd->kp_flags |= KPD_LOCKED;
942 941 mutex_exit(&kpd->kp_lock);
943 942 return (0);
944 943 }
945 944
946 945 if (type == F_INVAL) {
947 946 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
948 947
949 948 /*
950 949 * Check if we touched the redzone. Somewhat optimistic
951 950 * here if we are touching the redzone of our own stack
952 951 * since we wouldn't have a stack to get this far...
953 952 */
954 953 if ((kpd->kp_flags & KPD_HASREDZONE) &&
955 954 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
956 955 panic("segkp_fault: accessing redzone");
957 956
958 957 /*
959 958 * This fault may occur while the page is being F_SOFTLOCK'ed.
960 959 * Return since a 2nd segkp_load is unnecessary and also would
961 960 * result in the page being locked twice and eventually
962 961 * hang the thread_reaper thread.
963 962 */
964 963 if (kpd->kp_flags & KPD_LOCKED) {
965 964 mutex_exit(&kpd->kp_lock);
966 965 return (0);
967 966 }
968 967
969 968 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
970 969 mutex_exit(&kpd->kp_lock);
971 970 return (err ? FC_MAKE_ERR(err) : 0);
972 971 }
973 972
974 973 if (type == F_SOFTUNLOCK) {
975 974 uint_t flags;
976 975
977 976 /*
978 977 * Make sure the addr is LOCKED and it has anon backing
979 978 * before unlocking
980 979 */
981 980 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
982 981 panic("segkp_fault: bad unlock");
983 982 /*NOTREACHED*/
984 983 }
985 984
986 985 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
987 986 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
988 987 panic("segkp_fault: bad range");
989 988 /*NOTREACHED*/
990 989 }
991 990
992 991 if (rw == S_WRITE)
993 992 flags = kpd->kp_flags | KPD_WRITEDIRTY;
994 993 else
995 994 flags = kpd->kp_flags;
996 995 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
997 996 kpd->kp_flags &= ~KPD_LOCKED;
998 997 mutex_exit(&kpd->kp_lock);
999 998 return (err ? FC_MAKE_ERR(err) : 0);
1000 999 }
1001 1000 mutex_exit(&kpd->kp_lock);
1002 1001 panic("segkp_fault: bogus fault type: %d\n", type);
1003 1002 /*NOTREACHED*/
1004 1003 }
1005 1004
1006 1005 /*
1007 1006 * Check that the given protections suffice over the range specified by
1008 1007 * vaddr and len. For this segment type, the only issue is whether or
1009 1008 * not the range lies completely within the mapped part of an allocated
1010 1009 * resource.
1011 1010 */
1012 1011 /* ARGSUSED */
1013 1012 static int
1014 1013 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1015 1014 {
1016 1015 struct segkp_data *kpd = NULL;
1017 1016 caddr_t mbase;
1018 1017 size_t mlen;
1019 1018
1020 1019 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1021 1020 return (EACCES);
1022 1021
1023 1022 mutex_enter(&kpd->kp_lock);
1024 1023 mbase = stom(kpd->kp_base, kpd->kp_flags);
1025 1024 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1026 1025 if (len > mlen || vaddr < mbase ||
1027 1026 ((vaddr + len) > (mbase + mlen))) {
1028 1027 mutex_exit(&kpd->kp_lock);
1029 1028 return (EACCES);
1030 1029 }
1031 1030 mutex_exit(&kpd->kp_lock);
1032 1031 return (0);
1033 1032 }
1034 1033
1035 1034
1036 1035 /*
1037 1036 * Check to see if it makes sense to do kluster/read ahead to
1038 1037 * addr + delta relative to the mapping at addr. We assume here
1039 1038 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1040 1039 *
1041 1040 * For seg_u we always "approve" of this action from our standpoint.
1042 1041 */
1043 1042 /*ARGSUSED*/
1044 1043 static int
1045 1044 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1046 1045 {
1047 1046 return (0);
1048 1047 }
1049 1048
1050 1049 /*
1051 1050 * Load and possibly lock intra-slot resources in the range given by
1052 1051 * vaddr and len.
1053 1052 */
1054 1053 static int
1055 1054 segkp_load(
1056 1055 struct hat *hat,
1057 1056 struct seg *seg,
1058 1057 caddr_t vaddr,
1059 1058 size_t len,
1060 1059 struct segkp_data *kpd,
1061 1060 uint_t flags)
1062 1061 {
1063 1062 caddr_t va;
1064 1063 caddr_t vlim;
1065 1064 ulong_t i;
1066 1065 uint_t lock;
1067 1066
1068 1067 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1069 1068
1070 1069 len = P2ROUNDUP(len, PAGESIZE);
1071 1070
1072 1071 /* If locking, reserve physical memory */
1073 1072 if (flags & KPD_LOCKED) {
1074 1073 pgcnt_t pages = btop(len);
1075 1074 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1076 1075 atomic_add_long(&anon_segkp_pages_locked, pages);
1077 1076 (void) page_resv(pages, KM_SLEEP);
1078 1077 }
1079 1078
1080 1079 /*
1081 1080 * Loop through the pages in the given range.
1082 1081 */
1083 1082 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1084 1083 vaddr = va;
1085 1084 vlim = va + len;
1086 1085 lock = flags & KPD_LOCKED;
1087 1086 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1088 1087 for (; va < vlim; va += PAGESIZE, i++) {
1089 1088 page_t *pl[2]; /* second element NULL terminator */
1090 1089 struct vnode *vp;
1091 1090 anoff_t off;
1092 1091 int err;
1093 1092 struct anon *ap;
1094 1093
1095 1094 /*
1096 1095 * Summon the page. If it's not resident, arrange
1097 1096 * for synchronous i/o to pull it in.
1098 1097 */
1099 1098 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1100 1099 swap_xlate(ap, &vp, &off);
1101 1100
1102 1101 /*
1103 1102 * The returned page list will have exactly one entry,
1104 1103 * which is returned to us already kept.
1105 1104 */
1106 1105 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1107 1106 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1108 1107
1109 1108 if (err) {
1110 1109 /*
1111 1110 * Back out of what we've done so far.
1112 1111 */
1113 1112 (void) segkp_unlock(hat, seg, vaddr,
1114 1113 (va - vaddr), kpd, flags);
1115 1114 return (err);
1116 1115 }
1117 1116
1118 1117 /*
1119 1118 * Load an MMU translation for the page.
1120 1119 */
1121 1120 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1122 1121 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1123 1122
1124 1123 if (!lock) {
1125 1124 /*
1126 1125 * Now, release "shared" lock on the page.
1127 1126 */
1128 1127 page_unlock(pl[0]);
1129 1128 }
1130 1129 }
1131 1130 return (0);
1132 1131 }
1133 1132
1134 1133 /*
1135 1134 * At the very least unload the mmu-translations and unlock the range if locked
1136 1135 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
1137 1136 * any dirty pages should be written to disk.
1138 1137 */
1139 1138 static int
1140 1139 segkp_unlock(
1141 1140 struct hat *hat,
1142 1141 struct seg *seg,
1143 1142 caddr_t vaddr,
1144 1143 size_t len,
1145 1144 struct segkp_data *kpd,
1146 1145 uint_t flags)
1147 1146 {
1148 1147 caddr_t va;
1149 1148 caddr_t vlim;
1150 1149 ulong_t i;
1151 1150 struct page *pp;
1152 1151 struct vnode *vp;
1153 1152 anoff_t off;
1154 1153 struct anon *ap;
1155 1154
1156 1155 #ifdef lint
1157 1156 seg = seg;
1158 1157 #endif /* lint */
1159 1158
1160 1159 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1161 1160
1162 1161 /*
1163 1162 * Loop through the pages in the given range. It is assumed
1164 1163 * segkp_unlock is called with page aligned base
1165 1164 */
1166 1165 va = vaddr;
1167 1166 vlim = va + len;
1168 1167 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1169 1168 hat_unload(hat, va, len,
1170 1169 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1171 1170 for (; va < vlim; va += PAGESIZE, i++) {
1172 1171 /*
1173 1172 * Find the page associated with this part of the
1174 1173 * slot, tracking it down through its associated swap
1175 1174 * space.
1176 1175 */
1177 1176 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1178 1177 swap_xlate(ap, &vp, &off);
1179 1178
1180 1179 if (flags & KPD_LOCKED) {
1181 1180 if ((pp = page_find(vp, off)) == NULL) {
1182 1181 if (flags & KPD_LOCKED) {
1183 1182 panic("segkp_softunlock: missing page");
1184 1183 /*NOTREACHED*/
1185 1184 }
1186 1185 }
1187 1186 } else {
1188 1187 /*
1189 1188 * Nothing to do if the slot is not locked and the
1190 1189 * page doesn't exist.
1191 1190 */
1192 1191 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1193 1192 continue;
1194 1193 }
1195 1194
1196 1195 /*
1197 1196 * If the page doesn't have any translations, is
1198 1197 * dirty and not being shared, then push it out
1199 1198 * asynchronously and avoid waiting for the
1200 1199 * pageout daemon to do it for us.
1201 1200 *
1202 1201 * XXX - Do we really need to get the "exclusive"
1203 1202 * lock via an upgrade?
1204 1203 */
1205 1204 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1206 1205 hat_ismod(pp) && page_tryupgrade(pp)) {
1207 1206 /*
1208 1207 * Hold the vnode before releasing the page lock to
1209 1208 * prevent it from being freed and re-used by some
1210 1209 * other thread.
1211 1210 */
1212 1211 VN_HOLD(vp);
1213 1212 page_unlock(pp);
1214 1213
1215 1214 /*
1216 1215 * Want most powerful credentials we can get so
1217 1216 * use kcred.
1218 1217 */
1219 1218 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1220 1219 B_ASYNC | B_FREE, kcred, NULL);
1221 1220 VN_RELE(vp);
1222 1221 } else {
1223 1222 page_unlock(pp);
1224 1223 }
1225 1224 }
1226 1225
1227 1226 /* If unlocking, release physical memory */
1228 1227 if (flags & KPD_LOCKED) {
1229 1228 pgcnt_t pages = btopr(len);
1230 1229 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1231 1230 atomic_add_long(&anon_segkp_pages_locked, -pages);
1232 1231 page_unresv(pages);
1233 1232 }
1234 1233 return (0);
1235 1234 }
1236 1235
1237 1236 /*
1238 1237 * Insert the kpd in the hash table.
1239 1238 */
1240 1239 static void
1241 1240 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1242 1241 {
1243 1242 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1244 1243 int index;
1245 1244
1246 1245 /*
1247 1246 * Insert the kpd based on the address that will be returned
1248 1247 * via segkp_release.
1249 1248 */
1250 1249 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1251 1250 mutex_enter(&segkp_lock);
1252 1251 kpd->kp_next = kpsd->kpsd_hash[index];
1253 1252 kpsd->kpsd_hash[index] = kpd;
1254 1253 mutex_exit(&segkp_lock);
1255 1254 }
1256 1255
1257 1256 /*
1258 1257 * Remove kpd from the hash table.
1259 1258 */
1260 1259 static void
1261 1260 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1262 1261 {
1263 1262 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1264 1263 struct segkp_data **kpp;
1265 1264 int index;
1266 1265
1267 1266 ASSERT(MUTEX_HELD(&segkp_lock));
1268 1267
1269 1268 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1270 1269 for (kpp = &kpsd->kpsd_hash[index];
1271 1270 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1272 1271 if (*kpp == kpd) {
1273 1272 *kpp = kpd->kp_next;
1274 1273 return;
1275 1274 }
1276 1275 }
1277 1276 panic("segkp_delete: unable to find element to delete");
1278 1277 /*NOTREACHED*/
1279 1278 }
1280 1279
1281 1280 /*
1282 1281 * Find the kpd associated with a vaddr.
1283 1282 *
1284 1283 * Most of the callers of segkp_find will pass the vaddr that
1285 1284 * hashes to the desired index, but there are cases where
1286 1285 * this is not true in which case we have to (potentially) scan
1287 1286 * the whole table looking for it. This should be very rare
1288 1287 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1289 1288 * middle of the segkp_data region).
1290 1289 */
1291 1290 static struct segkp_data *
1292 1291 segkp_find(struct seg *seg, caddr_t vaddr)
1293 1292 {
1294 1293 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1295 1294 struct segkp_data *kpd;
1296 1295 int i;
1297 1296 int stop;
1298 1297
1299 1298 i = stop = SEGKP_HASH(vaddr);
1300 1299 mutex_enter(&segkp_lock);
1301 1300 do {
1302 1301 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1303 1302 kpd = kpd->kp_next) {
1304 1303 if (vaddr >= kpd->kp_base &&
1305 1304 vaddr < kpd->kp_base + kpd->kp_len) {
1306 1305 mutex_exit(&segkp_lock);
1307 1306 return (kpd);
1308 1307 }
1309 1308 }
1310 1309 if (--i < 0)
1311 1310 i = SEGKP_HASHSZ - 1; /* Wrap */
1312 1311 } while (i != stop);
1313 1312 mutex_exit(&segkp_lock);
1314 1313 return (NULL); /* Not found */
1315 1314 }
1316 1315
1317 1316 /*
1318 1317 * returns size of swappable area.
1319 1318 */
1320 1319 size_t
1321 1320 swapsize(caddr_t v)
1322 1321 {
1323 1322 struct segkp_data *kpd;
1324 1323
1325 1324 if ((kpd = segkp_find(segkp, v)) != NULL)
1326 1325 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1327 1326 else
1328 1327 return (NULL);
1329 1328 }
1330 1329
1331 1330 /*
1332 1331 * Dump out all the active segkp pages
1333 1332 */
1334 1333 static void
1335 1334 segkp_dump(struct seg *seg)
1336 1335 {
1337 1336 int i;
1338 1337 struct segkp_data *kpd;
1339 1338 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1340 1339
1341 1340 for (i = 0; i < SEGKP_HASHSZ; i++) {
1342 1341 for (kpd = kpsd->kpsd_hash[i];
1343 1342 kpd != NULL; kpd = kpd->kp_next) {
1344 1343 pfn_t pfn;
1345 1344 caddr_t addr;
1346 1345 caddr_t eaddr;
1347 1346
1348 1347 addr = kpd->kp_base;
1349 1348 eaddr = addr + kpd->kp_len;
1350 1349 while (addr < eaddr) {
1351 1350 ASSERT(seg->s_as == &kas);
1352 1351 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1353 1352 if (pfn != PFN_INVALID)
1354 1353 dump_addpage(seg->s_as, addr, pfn);
1355 1354 addr += PAGESIZE;
1356 1355 dump_timeleft = dump_timeout;
1357 1356 }
1358 1357 }
1359 1358 }
1360 1359 }
1361 1360
1362 1361 /*ARGSUSED*/
1363 1362 static int
1364 1363 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1365 1364 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1366 1365 {
1367 1366 return (ENOTSUP);
1368 1367 }
1369 1368
1370 1369 /*ARGSUSED*/
1371 1370 static int
1372 1371 segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1373 1372 {
1374 1373 return (ENODEV);
1375 1374 }
1376 1375
1377 1376 /*ARGSUSED*/
1378 1377 static lgrp_mem_policy_info_t *
1379 1378 segkp_getpolicy(struct seg *seg, caddr_t addr)
1380 1379 {
1381 1380 return (NULL);
1382 1381 }
1383 1382
1384 1383 /*ARGSUSED*/
1385 1384 static int
1386 1385 segkp_capable(struct seg *seg, segcapability_t capability)
1387 1386 {
1388 1387 return (0);
1389 1388 }
1390 1389
1391 1390 #include <sys/mem_config.h>
1392 1391
1393 1392 /*ARGSUSED*/
1394 1393 static void
1395 1394 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1396 1395 {}
1397 1396
1398 1397 /*
1399 1398 * During memory delete, turn off caches so that pages are not held.
1400 1399 * A better solution may be to unlock the pages while they are
1401 1400 * in the cache so that they may be collected naturally.
1402 1401 */
1403 1402
1404 1403 /*ARGSUSED*/
1405 1404 static int
1406 1405 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1407 1406 {
1408 1407 atomic_inc_32(&segkp_indel);
1409 1408 segkp_cache_free();
1410 1409 return (0);
1411 1410 }
1412 1411
1413 1412 /*ARGSUSED*/
1414 1413 static void
1415 1414 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1416 1415 {
1417 1416 atomic_dec_32(&segkp_indel);
1418 1417 }
1419 1418
1420 1419 static kphysm_setup_vector_t segkp_mem_config_vec = {
1421 1420 KPHYSM_SETUP_VECTOR_VERSION,
1422 1421 segkp_mem_config_post_add,
1423 1422 segkp_mem_config_pre_del,
1424 1423 segkp_mem_config_post_del,
1425 1424 };
1426 1425
1427 1426 static void
1428 1427 segkpinit_mem_config(struct seg *seg)
1429 1428 {
1430 1429 int ret;
1431 1430
1432 1431 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1433 1432 ASSERT(ret == 0);
1434 1433 }