use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENODEV, treat a NULL getmemid segop function pointer as
"return ENODEV" shorthand.
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or locked in memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_dump(struct seg *seg);
79 79 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
80 80 uint_t prot);
81 81 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
82 82 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
83 83 struct page ***page, enum lock_type type,
84 84 enum seg_rw rw);
85 85 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
86 86 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
87 87 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
88 88 struct segkp_data **tkpd, struct anon_map *amp);
89 89 static void segkp_release_internal(struct seg *seg,
90 90 struct segkp_data *kpd, size_t len);
91 91 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
92 92 size_t len, struct segkp_data *kpd, uint_t flags);
93 93 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
94 94 size_t len, struct segkp_data *kpd, uint_t flags);
95 95 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
96 -static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
97 96
98 97 /*
99 98 * Lock used to protect the hash table(s) and caches.
100 99 */
101 100 static kmutex_t segkp_lock;
102 101
103 102 /*
104 103 * The segkp caches
105 104 */
106 105 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
107 106
108 107 /*
109 108 * When there are fewer than red_minavail bytes left on the stack,
110 109 * segkp_map_red() will map in the redzone (if called). 5000 seems
111 110 * to work reasonably well...
112 111 */
113 112 long red_minavail = 5000;
114 113
115 114 /*
116 115 * will be set to 1 for 32 bit x86 systems only, in startup.c
117 116 */
118 117 int segkp_fromheap = 0;
119 118 ulong_t *segkp_bitmap;
120 119
121 120 /*
122 121 * If segkp_map_red() is called with the redzone already mapped and
123 122 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
124 123 * then the stack situation has become quite serious; if much more stack
125 124 * is consumed, we have the potential of scrogging the next thread/LWP
126 125 * structure. To help debug the "can't happen" panics which may
127 126 * result from this condition, we record hrestime and the calling thread
128 127 * in red_deep_hires and red_deep_thread respectively.
129 128 */
130 129 #define RED_DEEP_THRESHOLD 2000
131 130
132 131 hrtime_t red_deep_hires;
133 132 kthread_t *red_deep_thread;
134 133
135 134 uint32_t red_nmapped;
136 135 uint32_t red_closest = UINT_MAX;
137 136 uint32_t red_ndoubles;
138 137
139 138 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
140 139 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
141 140
142 141 static struct seg_ops segkp_ops = {
143 142 .fault = segkp_fault,
144 143 .checkprot = segkp_checkprot,
145 144 .kluster = segkp_kluster,
146 145 .dump = segkp_dump,
147 146 .pagelock = segkp_pagelock,
148 - .getmemid = segkp_getmemid,
149 147 };
150 148
151 149
152 150 static void segkpinit_mem_config(struct seg *);
153 151
154 152 static uint32_t segkp_indel;
155 153
156 154 /*
157 155 * Allocate the segment specific private data struct and fill it in
158 156 * with the per kp segment mutex, anon ptr. array and hash table.
159 157 */
160 158 int
161 159 segkp_create(struct seg *seg)
162 160 {
163 161 struct segkp_segdata *kpsd;
164 162 size_t np;
165 163
166 164 ASSERT(seg != NULL && seg->s_as == &kas);
167 165 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
168 166
169 167 if (seg->s_size & PAGEOFFSET) {
170 168 panic("Bad segkp size");
171 169 /*NOTREACHED*/
172 170 }
173 171
174 172 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
175 173
176 174 /*
177 175 * Allocate the virtual memory for segkp and initialize it
178 176 */
179 177 if (segkp_fromheap) {
180 178 np = btop(kvseg.s_size);
181 179 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
182 180 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
183 181 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
184 182 } else {
185 183 segkp_bitmap = NULL;
186 184 np = btop(seg->s_size);
187 185 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
188 186 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
189 187 VM_SLEEP);
190 188 }
191 189
192 190 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
193 191
194 192 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
195 193 KM_SLEEP);
196 194 seg->s_data = (void *)kpsd;
197 195 seg->s_ops = &segkp_ops;
198 196 segkpinit_mem_config(seg);
199 197 return (0);
200 198 }
201 199
202 200
203 201 /*
204 202 * Find a free 'freelist' and initialize it with the appropriate attributes
205 203 */
206 204 void *
207 205 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
208 206 {
209 207 int i;
210 208
211 209 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
212 210 return ((void *)-1);
213 211
214 212 mutex_enter(&segkp_lock);
215 213 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
216 214 if (segkp_cache[i].kpf_inuse)
217 215 continue;
218 216 segkp_cache[i].kpf_inuse = 1;
219 217 segkp_cache[i].kpf_max = maxsize;
220 218 segkp_cache[i].kpf_flags = flags;
221 219 segkp_cache[i].kpf_seg = seg;
222 220 segkp_cache[i].kpf_len = len;
223 221 mutex_exit(&segkp_lock);
224 222 return ((void *)(uintptr_t)i);
225 223 }
226 224 mutex_exit(&segkp_lock);
227 225 return ((void *)-1);
228 226 }
229 227
230 228 /*
231 229 * Free all the cache resources.
232 230 */
233 231 void
234 232 segkp_cache_free(void)
235 233 {
236 234 struct segkp_data *kpd;
237 235 struct seg *seg;
238 236 int i;
239 237
240 238 mutex_enter(&segkp_lock);
241 239 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
242 240 if (!segkp_cache[i].kpf_inuse)
243 241 continue;
244 242 /*
245 243 * Disconnect the freelist and process each element
246 244 */
247 245 kpd = segkp_cache[i].kpf_list;
248 246 seg = segkp_cache[i].kpf_seg;
249 247 segkp_cache[i].kpf_list = NULL;
250 248 segkp_cache[i].kpf_count = 0;
251 249 mutex_exit(&segkp_lock);
252 250
253 251 while (kpd != NULL) {
254 252 struct segkp_data *next;
255 253
256 254 next = kpd->kp_next;
257 255 segkp_release_internal(seg, kpd, kpd->kp_len);
258 256 kpd = next;
259 257 }
260 258 mutex_enter(&segkp_lock);
261 259 }
262 260 mutex_exit(&segkp_lock);
263 261 }
264 262
265 263 /*
 266  264  * There are two entry points into segkp_get_internal. The first includes a cookie
267 265 * used to access a pool of cached segkp resources. The second does not
268 266 * use the cache.
269 267 */
270 268 caddr_t
271 269 segkp_get(struct seg *seg, size_t len, uint_t flags)
272 270 {
273 271 struct segkp_data *kpd = NULL;
274 272
275 273 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
276 274 kpd->kp_cookie = -1;
277 275 return (stom(kpd->kp_base, flags));
278 276 }
279 277 return (NULL);
280 278 }
281 279
282 280 /*
283 281 * Return a 'cached' segkp address
284 282 */
285 283 caddr_t
286 284 segkp_cache_get(void *cookie)
287 285 {
288 286 struct segkp_cache *freelist = NULL;
289 287 struct segkp_data *kpd = NULL;
290 288 int index = (int)(uintptr_t)cookie;
291 289 struct seg *seg;
292 290 size_t len;
293 291 uint_t flags;
294 292
295 293 if (index < 0 || index >= SEGKP_MAX_CACHE)
296 294 return (NULL);
297 295 freelist = &segkp_cache[index];
298 296
299 297 mutex_enter(&segkp_lock);
300 298 seg = freelist->kpf_seg;
301 299 flags = freelist->kpf_flags;
302 300 if (freelist->kpf_list != NULL) {
303 301 kpd = freelist->kpf_list;
304 302 freelist->kpf_list = kpd->kp_next;
305 303 freelist->kpf_count--;
306 304 mutex_exit(&segkp_lock);
307 305 kpd->kp_next = NULL;
308 306 segkp_insert(seg, kpd);
309 307 return (stom(kpd->kp_base, flags));
310 308 }
311 309 len = freelist->kpf_len;
312 310 mutex_exit(&segkp_lock);
313 311 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
314 312 kpd->kp_cookie = index;
315 313 return (stom(kpd->kp_base, flags));
316 314 }
317 315 return (NULL);
318 316 }
319 317
320 318 caddr_t
321 319 segkp_get_withanonmap(
322 320 struct seg *seg,
323 321 size_t len,
324 322 uint_t flags,
325 323 struct anon_map *amp)
326 324 {
327 325 struct segkp_data *kpd = NULL;
328 326
329 327 ASSERT(amp != NULL);
330 328 flags |= KPD_HASAMP;
331 329 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
332 330 kpd->kp_cookie = -1;
333 331 return (stom(kpd->kp_base, flags));
334 332 }
335 333 return (NULL);
336 334 }
337 335
338 336 /*
339 337 * This does the real work of segkp allocation.
340 338 * Return to client base addr. len must be page-aligned. A null value is
341 339 * returned if there are no more vm resources (e.g. pages, swap). The len
342 340 * and base recorded in the private data structure include the redzone
343 341 * and the redzone length (if applicable). If the user requests a redzone
 344  342  * either the first or last page is left unmapped depending on whether stacks
345 343 * grow to low or high memory.
346 344 *
347 345 * The client may also specify a no-wait flag. If that is set then the
348 346 * request will choose a non-blocking path when requesting resources.
 349  347  * The default is to make the client wait.
350 348 */
351 349 static caddr_t
352 350 segkp_get_internal(
353 351 struct seg *seg,
354 352 size_t len,
355 353 uint_t flags,
356 354 struct segkp_data **tkpd,
357 355 struct anon_map *amp)
358 356 {
359 357 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
360 358 struct segkp_data *kpd;
361 359 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
362 360 pgcnt_t np = 0; /* number of pages in the resource */
363 361 pgcnt_t segkpindex;
364 362 long i;
365 363 caddr_t va;
366 364 pgcnt_t pages = 0;
367 365 ulong_t anon_idx = 0;
368 366 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
369 367 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
370 368
371 369 if (len & PAGEOFFSET) {
372 370 panic("segkp_get: len is not page-aligned");
373 371 /*NOTREACHED*/
374 372 }
375 373
376 374 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
377 375
378 376 /* Only allow KPD_NO_ANON if we are going to lock it down */
379 377 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
380 378 return (NULL);
381 379
382 380 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
383 381 return (NULL);
384 382 /*
385 383 * Fix up the len to reflect the REDZONE if applicable
386 384 */
387 385 if (flags & KPD_HASREDZONE)
388 386 len += PAGESIZE;
389 387 np = btop(len);
390 388
391 389 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
392 390 if (vbase == NULL) {
393 391 kmem_free(kpd, sizeof (struct segkp_data));
394 392 return (NULL);
395 393 }
396 394
397 395 /* If locking, reserve physical memory */
398 396 if (flags & KPD_LOCKED) {
399 397 pages = btop(SEGKP_MAPLEN(len, flags));
400 398 if (page_resv(pages, kmflag) == 0) {
401 399 vmem_free(SEGKP_VMEM(seg), vbase, len);
402 400 kmem_free(kpd, sizeof (struct segkp_data));
403 401 return (NULL);
404 402 }
405 403 if ((flags & KPD_NO_ANON) == 0)
406 404 atomic_add_long(&anon_segkp_pages_locked, pages);
407 405 }
408 406
409 407 /*
410 408 * Reserve sufficient swap space for this vm resource. We'll
411 409 * actually allocate it in the loop below, but reserving it
412 410 * here allows us to back out more gracefully than if we
413 411 * had an allocation failure in the body of the loop.
414 412 *
415 413 * Note that we don't need swap space for the red zone page.
416 414 */
417 415 if (amp != NULL) {
418 416 /*
419 417 * The swap reservation has been done, if required, and the
420 418 * anon_hdr is separate.
421 419 */
422 420 anon_idx = 0;
423 421 kpd->kp_anon_idx = anon_idx;
424 422 kpd->kp_anon = amp->ahp;
425 423
426 424 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
427 425 kpd, vbase, len, flags, 1);
428 426
429 427 } else if ((flags & KPD_NO_ANON) == 0) {
430 428 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
431 429 if (flags & KPD_LOCKED) {
432 430 atomic_add_long(&anon_segkp_pages_locked,
433 431 -pages);
434 432 page_unresv(pages);
435 433 }
436 434 vmem_free(SEGKP_VMEM(seg), vbase, len);
437 435 kmem_free(kpd, sizeof (struct segkp_data));
438 436 return (NULL);
439 437 }
440 438 atomic_add_long(&anon_segkp_pages_resv,
441 439 btop(SEGKP_MAPLEN(len, flags)));
442 440 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
443 441 kpd->kp_anon_idx = anon_idx;
444 442 kpd->kp_anon = kpsd->kpsd_anon;
445 443
446 444 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
447 445 kpd, vbase, len, flags, 1);
448 446 } else {
449 447 kpd->kp_anon = NULL;
450 448 kpd->kp_anon_idx = 0;
451 449 }
452 450
453 451 /*
454 452 * Allocate page and anon resources for the virtual address range
455 453 * except the redzone
456 454 */
457 455 if (segkp_fromheap)
458 456 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
459 457 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
460 458 page_t *pl[2];
461 459 struct vnode *vp;
462 460 anoff_t off;
463 461 int err;
464 462 page_t *pp = NULL;
465 463
466 464 /*
467 465 * Mark this page to be a segkp page in the bitmap.
468 466 */
469 467 if (segkp_fromheap) {
470 468 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
471 469 segkpindex++;
472 470 }
473 471
474 472 /*
475 473 * If this page is the red zone page, we don't need swap
476 474 * space for it. Note that we skip over the code that
477 475 * establishes MMU mappings, so that the page remains
478 476 * invalid.
479 477 */
480 478 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
481 479 continue;
482 480
483 481 if (kpd->kp_anon != NULL) {
484 482 struct anon *ap;
485 483
486 484 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
487 485 == NULL);
488 486 /*
489 487 * Determine the "vp" and "off" of the anon slot.
490 488 */
491 489 ap = anon_alloc(NULL, 0);
492 490 if (amp != NULL)
 493  491 				ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
494 492 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
495 493 ap, ANON_SLEEP);
496 494 if (amp != NULL)
 497  495 				ANON_LOCK_EXIT(&amp->a_rwlock);
498 496 swap_xlate(ap, &vp, &off);
499 497
500 498 /*
501 499 * Create a page with the specified identity. The
502 500 * page is returned with the "shared" lock held.
503 501 */
504 502 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
505 503 NULL, pl, PAGESIZE, seg, va, S_CREATE,
506 504 kcred, NULL);
507 505 if (err) {
508 506 /*
509 507 * XXX - This should not fail.
510 508 */
511 509 panic("segkp_get: no pages");
512 510 /*NOTREACHED*/
513 511 }
514 512 pp = pl[0];
515 513 } else {
516 514 ASSERT(page_exists(&kvp,
517 515 (u_offset_t)(uintptr_t)va) == NULL);
518 516
519 517 if ((pp = page_create_va(&kvp,
520 518 (u_offset_t)(uintptr_t)va, PAGESIZE,
521 519 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
522 520 PG_NORELOC, seg, va)) == NULL) {
523 521 /*
524 522 * Legitimize resource; then destroy it.
525 523 * Easier than trying to unwind here.
526 524 */
527 525 kpd->kp_flags = flags;
528 526 kpd->kp_base = vbase;
529 527 kpd->kp_len = len;
530 528 segkp_release_internal(seg, kpd, va - vbase);
531 529 return (NULL);
532 530 }
533 531 page_io_unlock(pp);
534 532 }
535 533
536 534 if (flags & KPD_ZERO)
537 535 pagezero(pp, 0, PAGESIZE);
538 536
539 537 /*
540 538 * Load and lock an MMU translation for the page.
541 539 */
542 540 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
543 541 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
544 542
545 543 /*
546 544 * Now, release lock on the page.
547 545 */
548 546 if (flags & KPD_LOCKED) {
549 547 /*
550 548 * Indicate to page_retire framework that this
551 549 * page can only be retired when it is freed.
552 550 */
553 551 PP_SETRAF(pp);
554 552 page_downgrade(pp);
555 553 } else
556 554 page_unlock(pp);
557 555 }
558 556
559 557 kpd->kp_flags = flags;
560 558 kpd->kp_base = vbase;
561 559 kpd->kp_len = len;
562 560 segkp_insert(seg, kpd);
563 561 *tkpd = kpd;
564 562 return (stom(kpd->kp_base, flags));
565 563 }
566 564
567 565 /*
 568  566  * Release the resource to cache if the pool (designated by the cookie)
 569  567  * has less than the maximum allowable. If inserted in cache,
 570  568  * segkp_delete ensures the element is taken off of the active list.
571 569 */
572 570 void
573 571 segkp_release(struct seg *seg, caddr_t vaddr)
574 572 {
575 573 struct segkp_cache *freelist;
576 574 struct segkp_data *kpd = NULL;
577 575
578 576 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
579 577 panic("segkp_release: null kpd");
580 578 /*NOTREACHED*/
581 579 }
582 580
583 581 if (kpd->kp_cookie != -1) {
584 582 freelist = &segkp_cache[kpd->kp_cookie];
585 583 mutex_enter(&segkp_lock);
586 584 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
587 585 segkp_delete(seg, kpd);
588 586 kpd->kp_next = freelist->kpf_list;
589 587 freelist->kpf_list = kpd;
590 588 freelist->kpf_count++;
591 589 mutex_exit(&segkp_lock);
592 590 return;
593 591 } else {
594 592 mutex_exit(&segkp_lock);
595 593 kpd->kp_cookie = -1;
596 594 }
597 595 }
598 596 segkp_release_internal(seg, kpd, kpd->kp_len);
599 597 }
600 598
601 599 /*
602 600 * Free the entire resource. segkp_unlock gets called with the start of the
603 601 * mapped portion of the resource. The length is the size of the mapped
 604  602  * portion.
605 603 */
606 604 static void
607 605 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
608 606 {
609 607 caddr_t va;
610 608 long i;
611 609 long redzone;
612 610 size_t np;
613 611 page_t *pp;
614 612 struct vnode *vp;
615 613 anoff_t off;
616 614 struct anon *ap;
617 615 pgcnt_t segkpindex;
618 616
619 617 ASSERT(kpd != NULL);
620 618 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
621 619 np = btop(len);
622 620
623 621 /* Remove from active hash list */
624 622 if (kpd->kp_cookie == -1) {
625 623 mutex_enter(&segkp_lock);
626 624 segkp_delete(seg, kpd);
627 625 mutex_exit(&segkp_lock);
628 626 }
629 627
630 628 /*
631 629 * Precompute redzone page index.
632 630 */
633 631 redzone = -1;
634 632 if (kpd->kp_flags & KPD_HASREDZONE)
635 633 redzone = KPD_REDZONE(kpd);
636 634
637 635
638 636 va = kpd->kp_base;
639 637
640 638 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
641 639 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
642 640 /*
643 641 * Free up those anon resources that are quiescent.
644 642 */
645 643 if (segkp_fromheap)
646 644 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
647 645 for (i = 0; i < np; i++, va += PAGESIZE) {
648 646
649 647 /*
650 648 * Clear the bit for this page from the bitmap.
651 649 */
652 650 if (segkp_fromheap) {
653 651 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
654 652 segkpindex++;
655 653 }
656 654
657 655 if (i == redzone)
658 656 continue;
659 657 if (kpd->kp_anon) {
660 658 /*
661 659 * Free up anon resources and destroy the
662 660 * associated pages.
663 661 *
664 662 * Release the lock if there is one. Have to get the
665 663 * page to do this, unfortunately.
666 664 */
667 665 if (kpd->kp_flags & KPD_LOCKED) {
668 666 ap = anon_get_ptr(kpd->kp_anon,
669 667 kpd->kp_anon_idx + i);
670 668 swap_xlate(ap, &vp, &off);
671 669 /* Find the shared-locked page. */
672 670 pp = page_find(vp, (u_offset_t)off);
673 671 if (pp == NULL) {
674 672 panic("segkp_release: "
675 673 "kp_anon: no page to unlock ");
676 674 /*NOTREACHED*/
677 675 }
678 676 if (PP_ISRAF(pp))
679 677 PP_CLRRAF(pp);
680 678
681 679 page_unlock(pp);
682 680 }
683 681 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
684 682 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
685 683 PAGESIZE);
686 684 anon_unresv_zone(PAGESIZE, NULL);
687 685 atomic_dec_ulong(&anon_segkp_pages_resv);
688 686 }
689 687 TRACE_5(TR_FAC_VM,
690 688 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
691 689 kpd, va, PAGESIZE, 0, 0);
692 690 } else {
693 691 if (kpd->kp_flags & KPD_LOCKED) {
694 692 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
695 693 if (pp == NULL) {
696 694 panic("segkp_release: "
697 695 "no page to unlock");
698 696 /*NOTREACHED*/
699 697 }
700 698 if (PP_ISRAF(pp))
701 699 PP_CLRRAF(pp);
702 700 /*
703 701 * We should just upgrade the lock here
704 702 * but there is no upgrade that waits.
705 703 */
706 704 page_unlock(pp);
707 705 }
708 706 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
709 707 SE_EXCL);
710 708 if (pp != NULL)
711 709 page_destroy(pp, 0);
712 710 }
713 711 }
714 712
715 713 /* If locked, release physical memory reservation */
716 714 if (kpd->kp_flags & KPD_LOCKED) {
717 715 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
718 716 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
719 717 atomic_add_long(&anon_segkp_pages_locked, -pages);
720 718 page_unresv(pages);
721 719 }
722 720
723 721 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
724 722 kmem_free(kpd, sizeof (struct segkp_data));
725 723 }
726 724
727 725 /*
728 726 * segkp_map_red() will check the current frame pointer against the
729 727 * stack base. If the amount of stack remaining is questionable
730 728 * (less than red_minavail), then segkp_map_red() will map in the redzone
731 729 * and return 1. Otherwise, it will return 0. segkp_map_red() can
732 730 * _only_ be called when it is safe to sleep on page_create_va().
733 731 *
734 732 * It is up to the caller to remember whether segkp_map_red() successfully
735 733 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
736 734 * time.
737 735 *
738 736 * Currently, this routine is only called from pagefault() (which necessarily
739 737 * satisfies the above conditions).
740 738 */
741 739 #if defined(STACK_GROWTH_DOWN)
742 740 int
743 741 segkp_map_red(void)
744 742 {
745 743 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
746 744 #ifndef _LP64
747 745 caddr_t stkbase;
748 746 #endif
749 747
750 748 /*
751 749 * Optimize for the common case where we simply return.
752 750 */
753 751 if ((curthread->t_red_pp == NULL) &&
754 752 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
755 753 return (0);
756 754
757 755 #if defined(_LP64)
758 756 /*
759 757 * XXX We probably need something better than this.
760 758 */
761 759 panic("kernel stack overflow");
762 760 /*NOTREACHED*/
763 761 #else /* _LP64 */
764 762 if (curthread->t_red_pp == NULL) {
765 763 page_t *red_pp;
766 764 struct seg kseg;
767 765
768 766 caddr_t red_va = (caddr_t)
769 767 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
770 768 PAGESIZE);
771 769
772 770 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
773 771 NULL);
774 772
775 773 /*
 776  774 		 * Allocate the physical page for the red page.
777 775 */
778 776 /*
779 777 * No PG_NORELOC here to avoid waits. Unlikely to get
780 778 * a relocate happening in the short time the page exists
781 779 * and it will be OK anyway.
782 780 */
783 781
784 782 kseg.s_as = &kas;
785 783 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
786 784 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
787 785 ASSERT(red_pp != NULL);
788 786
789 787 /*
790 788 * So we now have a page to jam into the redzone...
791 789 */
792 790 page_io_unlock(red_pp);
793 791
794 792 hat_memload(kas.a_hat, red_va, red_pp,
795 793 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
796 794 page_downgrade(red_pp);
797 795
798 796 /*
799 797 * The page is left SE_SHARED locked so we can hold on to
800 798 * the page_t pointer.
801 799 */
802 800 curthread->t_red_pp = red_pp;
803 801
804 802 atomic_inc_32(&red_nmapped);
805 803 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
806 804 (void) atomic_cas_32(&red_closest, red_closest,
807 805 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
808 806 }
809 807 return (1);
810 808 }
811 809
812 810 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
813 811 (uintptr_t)PAGEMASK) - PAGESIZE);
814 812
815 813 atomic_inc_32(&red_ndoubles);
816 814
817 815 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
818 816 /*
819 817 * Oh boy. We're already deep within the mapped-in
820 818 * redzone page, and the caller is trying to prepare
821 819 * for a deep stack run. We're running without a
822 820 * redzone right now: if the caller plows off the
823 821 * end of the stack, it'll plow another thread or
824 822 * LWP structure. That situation could result in
825 823 * a very hard-to-debug panic, so, in the spirit of
826 824 * recording the name of one's killer in one's own
827 825 * blood, we're going to record hrestime and the calling
828 826 * thread.
829 827 */
830 828 red_deep_hires = hrestime.tv_nsec;
831 829 red_deep_thread = curthread;
832 830 }
833 831
834 832 /*
835 833 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
836 834 */
837 835 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
838 836 return (0);
839 837 #endif /* _LP64 */
840 838 }
841 839
842 840 void
843 841 segkp_unmap_red(void)
844 842 {
845 843 page_t *pp;
846 844 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
847 845 (uintptr_t)PAGEMASK) - PAGESIZE);
848 846
849 847 ASSERT(curthread->t_red_pp != NULL);
850 848
851 849 /*
852 850 * Because we locked the mapping down, we can't simply rely
853 851 * on page_destroy() to clean everything up; we need to call
854 852 * hat_unload() to explicitly unlock the mapping resources.
855 853 */
856 854 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
857 855
858 856 pp = curthread->t_red_pp;
859 857
860 858 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
861 859
862 860 /*
863 861 * Need to upgrade the SE_SHARED lock to SE_EXCL.
864 862 */
865 863 if (!page_tryupgrade(pp)) {
866 864 /*
 867  865 		 * As there is no upgrade that waits, release the
868 866 * SE_SHARED lock and wait for SE_EXCL.
869 867 */
870 868 page_unlock(pp);
871 869 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
872 870 /* pp may be NULL here, hence the test below */
873 871 }
874 872
875 873 /*
876 874 * Destroy the page, with dontfree set to zero (i.e. free it).
877 875 */
878 876 if (pp != NULL)
879 877 page_destroy(pp, 0);
880 878 curthread->t_red_pp = NULL;
881 879 }
882 880 #else
883 881 #error Red stacks only supported with downwards stack growth.
884 882 #endif
885 883
886 884 /*
887 885 * Handle a fault on an address corresponding to one of the
888 886 * resources in the segkp segment.
889 887 */
890 888 faultcode_t
891 889 segkp_fault(
892 890 struct hat *hat,
893 891 struct seg *seg,
894 892 caddr_t vaddr,
895 893 size_t len,
896 894 enum fault_type type,
897 895 enum seg_rw rw)
898 896 {
899 897 struct segkp_data *kpd = NULL;
900 898 int err;
901 899
902 900 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
903 901
904 902 /*
905 903 * Sanity checks.
906 904 */
907 905 if (type == F_PROT) {
908 906 panic("segkp_fault: unexpected F_PROT fault");
909 907 /*NOTREACHED*/
910 908 }
911 909
912 910 if ((kpd = segkp_find(seg, vaddr)) == NULL)
913 911 return (FC_NOMAP);
914 912
915 913 mutex_enter(&kpd->kp_lock);
916 914
917 915 if (type == F_SOFTLOCK) {
918 916 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
919 917 /*
920 918 * The F_SOFTLOCK case has more stringent
921 919 * range requirements: the given range must exactly coincide
922 920 * with the resource's mapped portion. Note reference to
 923  921 		 * with the resource's mapped portion. Note that a reference to
 924  922 		 * the redzone is handled since vaddr would not equal the base.
925 923 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
926 924 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
927 925 mutex_exit(&kpd->kp_lock);
928 926 return (FC_MAKE_ERR(EFAULT));
929 927 }
930 928
931 929 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
932 930 mutex_exit(&kpd->kp_lock);
933 931 return (FC_MAKE_ERR(err));
934 932 }
935 933 kpd->kp_flags |= KPD_LOCKED;
936 934 mutex_exit(&kpd->kp_lock);
937 935 return (0);
938 936 }
939 937
940 938 if (type == F_INVAL) {
941 939 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
942 940
943 941 /*
944 942 * Check if we touched the redzone. Somewhat optimistic
945 943 * here if we are touching the redzone of our own stack
946 944 * since we wouldn't have a stack to get this far...
947 945 */
948 946 if ((kpd->kp_flags & KPD_HASREDZONE) &&
949 947 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
950 948 panic("segkp_fault: accessing redzone");
951 949
952 950 /*
953 951 * This fault may occur while the page is being F_SOFTLOCK'ed.
954 952 * Return since a 2nd segkp_load is unnecessary and also would
955 953 * result in the page being locked twice and eventually
956 954 * hang the thread_reaper thread.
957 955 */
958 956 if (kpd->kp_flags & KPD_LOCKED) {
959 957 mutex_exit(&kpd->kp_lock);
960 958 return (0);
961 959 }
962 960
963 961 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
964 962 mutex_exit(&kpd->kp_lock);
965 963 return (err ? FC_MAKE_ERR(err) : 0);
966 964 }
967 965
968 966 if (type == F_SOFTUNLOCK) {
969 967 uint_t flags;
970 968
971 969 /*
972 970 * Make sure the addr is LOCKED and it has anon backing
973 971 * before unlocking
974 972 */
975 973 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
976 974 panic("segkp_fault: bad unlock");
977 975 /*NOTREACHED*/
978 976 }
979 977
980 978 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
981 979 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
982 980 panic("segkp_fault: bad range");
983 981 /*NOTREACHED*/
984 982 }
985 983
986 984 if (rw == S_WRITE)
987 985 flags = kpd->kp_flags | KPD_WRITEDIRTY;
988 986 else
989 987 flags = kpd->kp_flags;
990 988 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
991 989 kpd->kp_flags &= ~KPD_LOCKED;
992 990 mutex_exit(&kpd->kp_lock);
993 991 return (err ? FC_MAKE_ERR(err) : 0);
994 992 }
995 993 mutex_exit(&kpd->kp_lock);
996 994 panic("segkp_fault: bogus fault type: %d\n", type);
997 995 /*NOTREACHED*/
998 996 }
999 997
1000 998 /*
1001 999 * Check that the given protections suffice over the range specified by
1002 1000 * vaddr and len. For this segment type, the only issue is whether or
1003 1001 * not the range lies completely within the mapped part of an allocated
1004 1002 * resource.
1005 1003 */
1006 1004 /* ARGSUSED */
1007 1005 static int
1008 1006 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1009 1007 {
1010 1008 struct segkp_data *kpd = NULL;
1011 1009 caddr_t mbase;
1012 1010 size_t mlen;
1013 1011
1014 1012 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1015 1013 return (EACCES);
1016 1014
1017 1015 mutex_enter(&kpd->kp_lock);
1018 1016 mbase = stom(kpd->kp_base, kpd->kp_flags);
1019 1017 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1020 1018 if (len > mlen || vaddr < mbase ||
1021 1019 ((vaddr + len) > (mbase + mlen))) {
1022 1020 mutex_exit(&kpd->kp_lock);
1023 1021 return (EACCES);
1024 1022 }
1025 1023 mutex_exit(&kpd->kp_lock);
1026 1024 return (0);
1027 1025 }
1028 1026
1029 1027
1030 1028 /*
1031 1029 * Check to see if it makes sense to do kluster/read ahead to
1032 1030 * addr + delta relative to the mapping at addr. We assume here
1033 1031 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1034 1032 *
1035 1033 * For seg_u we always "approve" of this action from our standpoint.
1036 1034 */
1037 1035 /*ARGSUSED*/
1038 1036 static int
1039 1037 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1040 1038 {
1041 1039 return (0);
1042 1040 }
1043 1041
1044 1042 /*
1045 1043 * Load and possibly lock intra-slot resources in the range given by
1046 1044 * vaddr and len.
1047 1045 */
1048 1046 static int
1049 1047 segkp_load(
1050 1048 struct hat *hat,
1051 1049 struct seg *seg,
1052 1050 caddr_t vaddr,
1053 1051 size_t len,
1054 1052 struct segkp_data *kpd,
1055 1053 uint_t flags)
1056 1054 {
1057 1055 caddr_t va;
1058 1056 caddr_t vlim;
1059 1057 ulong_t i;
1060 1058 uint_t lock;
1061 1059
1062 1060 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1063 1061
1064 1062 len = P2ROUNDUP(len, PAGESIZE);
1065 1063
1066 1064 /* If locking, reserve physical memory */
1067 1065 if (flags & KPD_LOCKED) {
1068 1066 pgcnt_t pages = btop(len);
1069 1067 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1070 1068 atomic_add_long(&anon_segkp_pages_locked, pages);
1071 1069 (void) page_resv(pages, KM_SLEEP);
1072 1070 }
1073 1071
1074 1072 /*
1075 1073 * Loop through the pages in the given range.
1076 1074 */
1077 1075 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1078 1076 vaddr = va;
1079 1077 vlim = va + len;
1080 1078 lock = flags & KPD_LOCKED;
1081 1079 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1082 1080 for (; va < vlim; va += PAGESIZE, i++) {
1083 1081 page_t *pl[2]; /* second element NULL terminator */
1084 1082 struct vnode *vp;
1085 1083 anoff_t off;
1086 1084 int err;
1087 1085 struct anon *ap;
1088 1086
1089 1087 /*
1090 1088 * Summon the page. If it's not resident, arrange
1091 1089 * for synchronous i/o to pull it in.
1092 1090 */
1093 1091 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1094 1092 swap_xlate(ap, &vp, &off);
1095 1093
1096 1094 /*
1097 1095 * The returned page list will have exactly one entry,
1098 1096 * which is returned to us already kept.
1099 1097 */
1100 1098 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1101 1099 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1102 1100
1103 1101 if (err) {
1104 1102 /*
1105 1103 * Back out of what we've done so far.
1106 1104 */
1107 1105 (void) segkp_unlock(hat, seg, vaddr,
1108 1106 (va - vaddr), kpd, flags);
1109 1107 return (err);
1110 1108 }
1111 1109
1112 1110 /*
1113 1111 * Load an MMU translation for the page.
1114 1112 */
1115 1113 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1116 1114 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1117 1115
1118 1116 if (!lock) {
1119 1117 /*
1120 1118 * Now, release "shared" lock on the page.
1121 1119 */
1122 1120 page_unlock(pl[0]);
1123 1121 }
1124 1122 }
1125 1123 return (0);
1126 1124 }
1127 1125
1128 1126 /*
1129 1127  * At the very least, unload the mmu-translations and unlock the range if locked.
1130 1128  * Can be called with the flag value KPD_WRITEDIRTY, which specifies that
1131 1129  * any dirty pages should be written to disk.
1132 1130 */
1133 1131 static int
1134 1132 segkp_unlock(
1135 1133 struct hat *hat,
1136 1134 struct seg *seg,
1137 1135 caddr_t vaddr,
1138 1136 size_t len,
1139 1137 struct segkp_data *kpd,
1140 1138 uint_t flags)
1141 1139 {
1142 1140 caddr_t va;
1143 1141 caddr_t vlim;
1144 1142 ulong_t i;
1145 1143 struct page *pp;
1146 1144 struct vnode *vp;
1147 1145 anoff_t off;
1148 1146 struct anon *ap;
1149 1147
1150 1148 #ifdef lint
1151 1149 seg = seg;
1152 1150 #endif /* lint */
1153 1151
1154 1152 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1155 1153
1156 1154 /*
1157 1155 * Loop through the pages in the given range. It is assumed
1158 1156  * segkp_unlock is called with a page-aligned base
1159 1157 */
1160 1158 va = vaddr;
1161 1159 vlim = va + len;
1162 1160 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1163 1161 hat_unload(hat, va, len,
1164 1162 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1165 1163 for (; va < vlim; va += PAGESIZE, i++) {
1166 1164 /*
1167 1165 * Find the page associated with this part of the
1168 1166 * slot, tracking it down through its associated swap
1169 1167 * space.
1170 1168 */
1171 1169 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1172 1170 swap_xlate(ap, &vp, &off);
1173 1171
1174 1172 if (flags & KPD_LOCKED) {
1175 1173 if ((pp = page_find(vp, off)) == NULL) {
1176 1174 if (flags & KPD_LOCKED) {
1177 1175 panic("segkp_softunlock: missing page");
1178 1176 /*NOTREACHED*/
1179 1177 }
1180 1178 }
1181 1179 } else {
1182 1180 /*
1183 1181 * Nothing to do if the slot is not locked and the
1184 1182 * page doesn't exist.
1185 1183 */
1186 1184 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1187 1185 continue;
1188 1186 }
1189 1187
1190 1188 /*
1191 1189 * If the page doesn't have any translations, is
1192 1190 * dirty and not being shared, then push it out
1193 1191 * asynchronously and avoid waiting for the
1194 1192 * pageout daemon to do it for us.
1195 1193 *
1196 1194 * XXX - Do we really need to get the "exclusive"
1197 1195 * lock via an upgrade?
1198 1196 */
1199 1197 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1200 1198 hat_ismod(pp) && page_tryupgrade(pp)) {
1201 1199 /*
1202 1200 * Hold the vnode before releasing the page lock to
1203 1201 * prevent it from being freed and re-used by some
1204 1202 * other thread.
1205 1203 */
1206 1204 VN_HOLD(vp);
1207 1205 page_unlock(pp);
1208 1206
1209 1207 /*
1210 1208 * Want most powerful credentials we can get so
1211 1209 * use kcred.
1212 1210 */
1213 1211 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1214 1212 B_ASYNC | B_FREE, kcred, NULL);
1215 1213 VN_RELE(vp);
1216 1214 } else {
1217 1215 page_unlock(pp);
1218 1216 }
1219 1217 }
1220 1218
1221 1219 /* If unlocking, release physical memory */
1222 1220 if (flags & KPD_LOCKED) {
1223 1221 pgcnt_t pages = btopr(len);
1224 1222 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1225 1223 atomic_add_long(&anon_segkp_pages_locked, -pages);
1226 1224 page_unresv(pages);
1227 1225 }
1228 1226 return (0);
1229 1227 }
1230 1228
1231 1229 /*
1232 1230 * Insert the kpd in the hash table.
1233 1231 */
1234 1232 static void
1235 1233 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1236 1234 {
1237 1235 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1238 1236 int index;
1239 1237
1240 1238 /*
1241 1239 * Insert the kpd based on the address that will be returned
1242 1240 * via segkp_release.
1243 1241 */
1244 1242 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1245 1243 mutex_enter(&segkp_lock);
1246 1244 kpd->kp_next = kpsd->kpsd_hash[index];
1247 1245 kpsd->kpsd_hash[index] = kpd;
1248 1246 mutex_exit(&segkp_lock);
1249 1247 }
1250 1248
1251 1249 /*
1252 1250 * Remove kpd from the hash table.
1253 1251 */
1254 1252 static void
1255 1253 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1256 1254 {
1257 1255 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1258 1256 struct segkp_data **kpp;
1259 1257 int index;
1260 1258
1261 1259 ASSERT(MUTEX_HELD(&segkp_lock));
1262 1260
1263 1261 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1264 1262 for (kpp = &kpsd->kpsd_hash[index];
1265 1263 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1266 1264 if (*kpp == kpd) {
1267 1265 *kpp = kpd->kp_next;
1268 1266 return;
1269 1267 }
1270 1268 }
1271 1269 panic("segkp_delete: unable to find element to delete");
1272 1270 /*NOTREACHED*/
1273 1271 }
1274 1272
1275 1273 /*
1276 1274 * Find the kpd associated with a vaddr.
1277 1275 *
1278 1276 * Most of the callers of segkp_find will pass the vaddr that
1279 1277 * hashes to the desired index, but there are cases where
1280 1278 * this is not true in which case we have to (potentially) scan
1281 1279 * the whole table looking for it. This should be very rare
1282 1280 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1283 1281 * middle of the segkp_data region).
1284 1282 */
1285 1283 static struct segkp_data *
1286 1284 segkp_find(struct seg *seg, caddr_t vaddr)
1287 1285 {
1288 1286 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1289 1287 struct segkp_data *kpd;
1290 1288 int i;
1291 1289 int stop;
1292 1290
1293 1291 i = stop = SEGKP_HASH(vaddr);
1294 1292 mutex_enter(&segkp_lock);
1295 1293 do {
1296 1294 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1297 1295 kpd = kpd->kp_next) {
1298 1296 if (vaddr >= kpd->kp_base &&
1299 1297 vaddr < kpd->kp_base + kpd->kp_len) {
1300 1298 mutex_exit(&segkp_lock);
1301 1299 return (kpd);
1302 1300 }
1303 1301 }
1304 1302 if (--i < 0)
1305 1303 i = SEGKP_HASHSZ - 1; /* Wrap */
1306 1304 } while (i != stop);
1307 1305 mutex_exit(&segkp_lock);
1308 1306 return (NULL); /* Not found */
1309 1307 }
1310 1308
1311 1309 /*
1312 1310 * returns size of swappable area.
1313 1311 */
1314 1312 size_t
1315 1313 swapsize(caddr_t v)
1316 1314 {
1317 1315 struct segkp_data *kpd;
1318 1316
1319 1317 if ((kpd = segkp_find(segkp, v)) != NULL)
1320 1318 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1321 1319 else
1322 1320 		return (0);
1323 1321 }
1324 1322
1325 1323 /*
1326 1324 * Dump out all the active segkp pages
1327 1325 */
1328 1326 static void
1329 1327 segkp_dump(struct seg *seg)
1330 1328 {
1331 1329 int i;
1332 1330 struct segkp_data *kpd;
1333 1331 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1334 1332
1335 1333 for (i = 0; i < SEGKP_HASHSZ; i++) {
1336 1334 for (kpd = kpsd->kpsd_hash[i];
1337 1335 kpd != NULL; kpd = kpd->kp_next) {
1338 1336 pfn_t pfn;
1339 1337 caddr_t addr;
1340 1338 caddr_t eaddr;
1341 1339
1342 1340 addr = kpd->kp_base;
1343 1341 eaddr = addr + kpd->kp_len;
1344 1342 while (addr < eaddr) {
1345 1343 ASSERT(seg->s_as == &kas);
1346 1344 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1347 1345 if (pfn != PFN_INVALID)
1348 1346 dump_addpage(seg->s_as, addr, pfn);
1349 1347 addr += PAGESIZE;
1350 1348 dump_timeleft = dump_timeout;
1351 1349 }
1352 1350 }
1353 1351 }
1354 1352 }
1355 1353
1356 1354 /*ARGSUSED*/
1357 1355 static int
1358 1356 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1359 1357 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1360 1358 {
1361 1359 return (ENOTSUP);
1362 -}
1363 -
1364 -/*ARGSUSED*/
1365 -static int
1366 -segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1367 -{
1368 - return (ENODEV);
1369 1360 }
1370 1361
1371 1362 #include <sys/mem_config.h>
1372 1363
1373 1364 /*ARGSUSED*/
1374 1365 static void
1375 1366 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1376 1367 {}
1377 1368
1378 1369 /*
1379 1370 * During memory delete, turn off caches so that pages are not held.
1380 1371 * A better solution may be to unlock the pages while they are
1381 1372 * in the cache so that they may be collected naturally.
1382 1373 */
1383 1374
1384 1375 /*ARGSUSED*/
1385 1376 static int
1386 1377 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1387 1378 {
1388 1379 atomic_inc_32(&segkp_indel);
1389 1380 segkp_cache_free();
1390 1381 return (0);
1391 1382 }
1392 1383
1393 1384 /*ARGSUSED*/
1394 1385 static void
1395 1386 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1396 1387 {
1397 1388 atomic_dec_32(&segkp_indel);
1398 1389 }
1399 1390
1400 1391 static kphysm_setup_vector_t segkp_mem_config_vec = {
1401 1392 KPHYSM_SETUP_VECTOR_VERSION,
1402 1393 segkp_mem_config_post_add,
1403 1394 segkp_mem_config_pre_del,
1404 1395 segkp_mem_config_post_del,
1405 1396 };
1406 1397
1407 1398 static void
1408 1399 segkpinit_mem_config(struct seg *seg)
1409 1400 {
1410 1401 int ret;
1411 1402
1412 1403 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1413 1404 ASSERT(ret == 0);
1414 1405 }