6150 use NULL getmemid segop as a shorthand for ENODEV
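The change below removes segkp's private segkp_getmemid() routine, which simply returned ENODEV, along with its forward declaration and the .getmemid entry in segkp_ops. Per the synopsis, a NULL getmemid entry in a seg_ops vector is now taken to mean ENODEV, so segment drivers no longer need a stub. A minimal sketch of how a generic dispatch helper could honor that convention follows; the helper name segop_getmemid() and its exact form are assumptions for illustration only and are not part of this webrev.

	/*
	 * Illustrative sketch only (not from this webrev): treat a NULL
	 * getmemid seg op as shorthand for ENODEV, so drivers such as
	 * segkp can leave the slot unset instead of providing a stub.
	 */
	int
	segop_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
	{
		if (seg->s_ops->getmemid == NULL)
			return (ENODEV);

		return (seg->s_ops->getmemid(seg, addr, memidp));
	}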
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or locked in memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_badop(void);
79 79 static void segkp_dump(struct seg *seg);
80 80 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
81 81 uint_t prot);
82 82 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
83 83 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
84 84 struct page ***page, enum lock_type type,
85 85 enum seg_rw rw);
86 86 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
87 87 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
88 88 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
89 89 struct segkp_data **tkpd, struct anon_map *amp);
90 90 static void segkp_release_internal(struct seg *seg,
91 91 struct segkp_data *kpd, size_t len);
92 92 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
93 93 size_t len, struct segkp_data *kpd, uint_t flags);
94 94 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
95 95 size_t len, struct segkp_data *kpd, uint_t flags);
96 96 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
97 -static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
98 97
99 98 /*
100 99 * Lock used to protect the hash table(s) and caches.
101 100 */
102 101 static kmutex_t segkp_lock;
103 102
104 103 /*
105 104 * The segkp caches
106 105 */
107 106 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
108 107
109 108 #define SEGKP_BADOP(t) (t(*)())segkp_badop
110 109
111 110 /*
112 111 * When there are fewer than red_minavail bytes left on the stack,
113 112 * segkp_map_red() will map in the redzone (if called). 5000 seems
114 113 * to work reasonably well...
115 114 */
116 115 long red_minavail = 5000;
117 116
118 117 /*
119 118 * will be set to 1 for 32 bit x86 systems only, in startup.c
120 119 */
121 120 int segkp_fromheap = 0;
122 121 ulong_t *segkp_bitmap;
123 122
124 123 /*
125 124 * If segkp_map_red() is called with the redzone already mapped and
126 125 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
127 126 * then the stack situation has become quite serious; if much more stack
128 127 * is consumed, we have the potential of scrogging the next thread/LWP
129 128 * structure. To help debug the "can't happen" panics which may
130 129 * result from this condition, we record hrestime and the calling thread
131 130 * in red_deep_hires and red_deep_thread respectively.
132 131 */
133 132 #define RED_DEEP_THRESHOLD 2000
134 133
135 134 hrtime_t red_deep_hires;
136 135 kthread_t *red_deep_thread;
137 136
138 137 uint32_t red_nmapped;
139 138 uint32_t red_closest = UINT_MAX;
140 139 uint32_t red_ndoubles;
141 140
142 141 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
143 142 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
144 143
145 144 static struct seg_ops segkp_ops = {
146 145 .dup = SEGKP_BADOP(int),
147 146 .unmap = SEGKP_BADOP(int),
148 147 .free = SEGKP_BADOP(void),
149 148 .fault = segkp_fault,
150 149 .faulta = SEGKP_BADOP(faultcode_t),
151 150 .setprot = SEGKP_BADOP(int),
152 151 .checkprot = segkp_checkprot,
153 152 .kluster = segkp_kluster,
154 153 .swapout = SEGKP_BADOP(size_t),
155 154 .sync = SEGKP_BADOP(int),
156 155 .incore = SEGKP_BADOP(size_t),
157 156 .lockop = SEGKP_BADOP(int),
158 157 .getprot = SEGKP_BADOP(int),
159 158 .getoffset = SEGKP_BADOP(u_offset_t),
160 159 .gettype = SEGKP_BADOP(int),
161 160 .getvp = SEGKP_BADOP(int),
162 161 .advise = SEGKP_BADOP(int),
163 162 .dump = segkp_dump,
164 163 .pagelock = segkp_pagelock,
165 164 .setpagesize = SEGKP_BADOP(int),
166 - .getmemid = segkp_getmemid,
167 165 };
168 166
169 167
170 168 static void
171 169 segkp_badop(void)
172 170 {
173 171 panic("segkp_badop");
174 172 /*NOTREACHED*/
175 173 }
176 174
177 175 static void segkpinit_mem_config(struct seg *);
178 176
179 177 static uint32_t segkp_indel;
180 178
181 179 /*
182 180 * Allocate the segment specific private data struct and fill it in
183 181 * with the per kp segment mutex, anon ptr. array and hash table.
184 182 */
185 183 int
186 184 segkp_create(struct seg *seg)
187 185 {
188 186 struct segkp_segdata *kpsd;
189 187 size_t np;
190 188
191 189 ASSERT(seg != NULL && seg->s_as == &kas);
192 190 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
193 191
194 192 if (seg->s_size & PAGEOFFSET) {
195 193 panic("Bad segkp size");
196 194 /*NOTREACHED*/
197 195 }
198 196
199 197 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
200 198
201 199 /*
202 200 * Allocate the virtual memory for segkp and initialize it
203 201 */
204 202 if (segkp_fromheap) {
205 203 np = btop(kvseg.s_size);
206 204 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
207 205 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
208 206 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
209 207 } else {
210 208 segkp_bitmap = NULL;
211 209 np = btop(seg->s_size);
212 210 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
213 211 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
214 212 VM_SLEEP);
215 213 }
216 214
217 215 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
218 216
219 217 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
220 218 KM_SLEEP);
221 219 seg->s_data = (void *)kpsd;
222 220 seg->s_ops = &segkp_ops;
223 221 segkpinit_mem_config(seg);
224 222 return (0);
225 223 }
226 224
227 225
228 226 /*
229 227 * Find a free 'freelist' and initialize it with the appropriate attributes
230 228 */
231 229 void *
232 230 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
233 231 {
234 232 int i;
235 233
236 234 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
237 235 return ((void *)-1);
238 236
239 237 mutex_enter(&segkp_lock);
240 238 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
241 239 if (segkp_cache[i].kpf_inuse)
242 240 continue;
243 241 segkp_cache[i].kpf_inuse = 1;
244 242 segkp_cache[i].kpf_max = maxsize;
245 243 segkp_cache[i].kpf_flags = flags;
246 244 segkp_cache[i].kpf_seg = seg;
247 245 segkp_cache[i].kpf_len = len;
248 246 mutex_exit(&segkp_lock);
249 247 return ((void *)(uintptr_t)i);
250 248 }
251 249 mutex_exit(&segkp_lock);
252 250 return ((void *)-1);
253 251 }
254 252
255 253 /*
256 254 * Free all the cache resources.
257 255 */
258 256 void
259 257 segkp_cache_free(void)
260 258 {
261 259 struct segkp_data *kpd;
262 260 struct seg *seg;
263 261 int i;
264 262
265 263 mutex_enter(&segkp_lock);
266 264 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
267 265 if (!segkp_cache[i].kpf_inuse)
268 266 continue;
269 267 /*
270 268 * Disconnect the freelist and process each element
271 269 */
272 270 kpd = segkp_cache[i].kpf_list;
273 271 seg = segkp_cache[i].kpf_seg;
274 272 segkp_cache[i].kpf_list = NULL;
275 273 segkp_cache[i].kpf_count = 0;
276 274 mutex_exit(&segkp_lock);
277 275
278 276 while (kpd != NULL) {
279 277 struct segkp_data *next;
280 278
281 279 next = kpd->kp_next;
282 280 segkp_release_internal(seg, kpd, kpd->kp_len);
283 281 kpd = next;
284 282 }
285 283 mutex_enter(&segkp_lock);
286 284 }
287 285 mutex_exit(&segkp_lock);
288 286 }
289 287
290 288 /*
291 289 * There are 2 entries into segkp_get_internal. The first includes a cookie
292 290 * used to access a pool of cached segkp resources. The second does not
293 291 * use the cache.
294 292 */
295 293 caddr_t
296 294 segkp_get(struct seg *seg, size_t len, uint_t flags)
297 295 {
298 296 struct segkp_data *kpd = NULL;
299 297
300 298 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
301 299 kpd->kp_cookie = -1;
302 300 return (stom(kpd->kp_base, flags));
303 301 }
304 302 return (NULL);
305 303 }
306 304
307 305 /*
308 306 * Return a 'cached' segkp address
309 307 */
310 308 caddr_t
311 309 segkp_cache_get(void *cookie)
312 310 {
313 311 struct segkp_cache *freelist = NULL;
314 312 struct segkp_data *kpd = NULL;
315 313 int index = (int)(uintptr_t)cookie;
316 314 struct seg *seg;
317 315 size_t len;
318 316 uint_t flags;
319 317
320 318 if (index < 0 || index >= SEGKP_MAX_CACHE)
321 319 return (NULL);
322 320 freelist = &segkp_cache[index];
323 321
324 322 mutex_enter(&segkp_lock);
325 323 seg = freelist->kpf_seg;
326 324 flags = freelist->kpf_flags;
327 325 if (freelist->kpf_list != NULL) {
328 326 kpd = freelist->kpf_list;
329 327 freelist->kpf_list = kpd->kp_next;
330 328 freelist->kpf_count--;
331 329 mutex_exit(&segkp_lock);
332 330 kpd->kp_next = NULL;
333 331 segkp_insert(seg, kpd);
334 332 return (stom(kpd->kp_base, flags));
335 333 }
336 334 len = freelist->kpf_len;
337 335 mutex_exit(&segkp_lock);
338 336 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
339 337 kpd->kp_cookie = index;
340 338 return (stom(kpd->kp_base, flags));
341 339 }
342 340 return (NULL);
343 341 }
344 342
345 343 caddr_t
346 344 segkp_get_withanonmap(
347 345 struct seg *seg,
348 346 size_t len,
349 347 uint_t flags,
350 348 struct anon_map *amp)
351 349 {
352 350 struct segkp_data *kpd = NULL;
353 351
354 352 ASSERT(amp != NULL);
355 353 flags |= KPD_HASAMP;
356 354 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
357 355 kpd->kp_cookie = -1;
358 356 return (stom(kpd->kp_base, flags));
359 357 }
360 358 return (NULL);
361 359 }
362 360
363 361 /*
364 362 * This does the real work of segkp allocation.
365 363 * Return to client base addr. len must be page-aligned. A null value is
366 364 * returned if there are no more vm resources (e.g. pages, swap). The len
367 365 * and base recorded in the private data structure include the redzone
368 366 * and the redzone length (if applicable). If the user requests a redzone
369 367 * either the first or last page is left unmapped depending whether stacks
370 368 * grow to low or high memory.
371 369 *
372 370 * The client may also specify a no-wait flag. If that is set then the
373 371 * request will choose a non-blocking path when requesting resources.
374 372  * The default is to make the client wait.
375 373 */
376 374 static caddr_t
377 375 segkp_get_internal(
378 376 struct seg *seg,
379 377 size_t len,
380 378 uint_t flags,
381 379 struct segkp_data **tkpd,
382 380 struct anon_map *amp)
383 381 {
384 382 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
385 383 struct segkp_data *kpd;
386 384 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
387 385 pgcnt_t np = 0; /* number of pages in the resource */
388 386 pgcnt_t segkpindex;
389 387 long i;
390 388 caddr_t va;
391 389 pgcnt_t pages = 0;
392 390 ulong_t anon_idx = 0;
393 391 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
394 392 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
395 393
396 394 if (len & PAGEOFFSET) {
397 395 panic("segkp_get: len is not page-aligned");
398 396 /*NOTREACHED*/
399 397 }
400 398
401 399 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
402 400
403 401 /* Only allow KPD_NO_ANON if we are going to lock it down */
404 402 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
405 403 return (NULL);
406 404
407 405 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
408 406 return (NULL);
409 407 /*
410 408 * Fix up the len to reflect the REDZONE if applicable
411 409 */
412 410 if (flags & KPD_HASREDZONE)
413 411 len += PAGESIZE;
414 412 np = btop(len);
415 413
416 414 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
417 415 if (vbase == NULL) {
418 416 kmem_free(kpd, sizeof (struct segkp_data));
419 417 return (NULL);
420 418 }
421 419
422 420 /* If locking, reserve physical memory */
423 421 if (flags & KPD_LOCKED) {
424 422 pages = btop(SEGKP_MAPLEN(len, flags));
425 423 if (page_resv(pages, kmflag) == 0) {
426 424 vmem_free(SEGKP_VMEM(seg), vbase, len);
427 425 kmem_free(kpd, sizeof (struct segkp_data));
428 426 return (NULL);
429 427 }
430 428 if ((flags & KPD_NO_ANON) == 0)
431 429 atomic_add_long(&anon_segkp_pages_locked, pages);
432 430 }
433 431
434 432 /*
435 433 * Reserve sufficient swap space for this vm resource. We'll
436 434 * actually allocate it in the loop below, but reserving it
437 435 * here allows us to back out more gracefully than if we
438 436 * had an allocation failure in the body of the loop.
439 437 *
440 438 * Note that we don't need swap space for the red zone page.
441 439 */
442 440 if (amp != NULL) {
443 441 /*
444 442 * The swap reservation has been done, if required, and the
445 443 * anon_hdr is separate.
446 444 */
447 445 anon_idx = 0;
448 446 kpd->kp_anon_idx = anon_idx;
449 447 kpd->kp_anon = amp->ahp;
450 448
451 449 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
452 450 kpd, vbase, len, flags, 1);
453 451
454 452 } else if ((flags & KPD_NO_ANON) == 0) {
455 453 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
456 454 if (flags & KPD_LOCKED) {
457 455 atomic_add_long(&anon_segkp_pages_locked,
458 456 -pages);
459 457 page_unresv(pages);
460 458 }
461 459 vmem_free(SEGKP_VMEM(seg), vbase, len);
462 460 kmem_free(kpd, sizeof (struct segkp_data));
463 461 return (NULL);
464 462 }
465 463 atomic_add_long(&anon_segkp_pages_resv,
466 464 btop(SEGKP_MAPLEN(len, flags)));
467 465 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
468 466 kpd->kp_anon_idx = anon_idx;
469 467 kpd->kp_anon = kpsd->kpsd_anon;
470 468
471 469 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
472 470 kpd, vbase, len, flags, 1);
473 471 } else {
474 472 kpd->kp_anon = NULL;
475 473 kpd->kp_anon_idx = 0;
476 474 }
477 475
478 476 /*
479 477 * Allocate page and anon resources for the virtual address range
480 478 * except the redzone
481 479 */
482 480 if (segkp_fromheap)
483 481 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
484 482 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
485 483 page_t *pl[2];
486 484 struct vnode *vp;
487 485 anoff_t off;
488 486 int err;
489 487 page_t *pp = NULL;
490 488
491 489 /*
492 490 * Mark this page to be a segkp page in the bitmap.
493 491 */
494 492 if (segkp_fromheap) {
495 493 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
496 494 segkpindex++;
497 495 }
498 496
499 497 /*
500 498 * If this page is the red zone page, we don't need swap
501 499 * space for it. Note that we skip over the code that
502 500 * establishes MMU mappings, so that the page remains
503 501 * invalid.
504 502 */
505 503 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
506 504 continue;
507 505
508 506 if (kpd->kp_anon != NULL) {
509 507 struct anon *ap;
510 508
511 509 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
512 510 == NULL);
513 511 /*
514 512 * Determine the "vp" and "off" of the anon slot.
515 513 */
516 514 ap = anon_alloc(NULL, 0);
517 515 if (amp != NULL)
518 516 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
519 517 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
520 518 ap, ANON_SLEEP);
521 519 if (amp != NULL)
522 520 			ANON_LOCK_EXIT(&amp->a_rwlock);
523 521 swap_xlate(ap, &vp, &off);
524 522
525 523 /*
526 524 * Create a page with the specified identity. The
527 525 * page is returned with the "shared" lock held.
528 526 */
529 527 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
530 528 NULL, pl, PAGESIZE, seg, va, S_CREATE,
531 529 kcred, NULL);
532 530 if (err) {
533 531 /*
534 532 * XXX - This should not fail.
535 533 */
536 534 panic("segkp_get: no pages");
537 535 /*NOTREACHED*/
538 536 }
539 537 pp = pl[0];
540 538 } else {
541 539 ASSERT(page_exists(&kvp,
542 540 (u_offset_t)(uintptr_t)va) == NULL);
543 541
544 542 if ((pp = page_create_va(&kvp,
545 543 (u_offset_t)(uintptr_t)va, PAGESIZE,
546 544 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
547 545 PG_NORELOC, seg, va)) == NULL) {
548 546 /*
549 547 * Legitimize resource; then destroy it.
550 548 * Easier than trying to unwind here.
551 549 */
552 550 kpd->kp_flags = flags;
553 551 kpd->kp_base = vbase;
554 552 kpd->kp_len = len;
555 553 segkp_release_internal(seg, kpd, va - vbase);
556 554 return (NULL);
557 555 }
558 556 page_io_unlock(pp);
559 557 }
560 558
561 559 if (flags & KPD_ZERO)
562 560 pagezero(pp, 0, PAGESIZE);
563 561
564 562 /*
565 563 * Load and lock an MMU translation for the page.
566 564 */
567 565 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
568 566 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
569 567
570 568 /*
571 569 * Now, release lock on the page.
572 570 */
573 571 if (flags & KPD_LOCKED) {
574 572 /*
575 573 * Indicate to page_retire framework that this
576 574 * page can only be retired when it is freed.
577 575 */
578 576 PP_SETRAF(pp);
579 577 page_downgrade(pp);
580 578 } else
581 579 page_unlock(pp);
582 580 }
583 581
584 582 kpd->kp_flags = flags;
585 583 kpd->kp_base = vbase;
586 584 kpd->kp_len = len;
587 585 segkp_insert(seg, kpd);
588 586 *tkpd = kpd;
589 587 return (stom(kpd->kp_base, flags));
590 588 }
591 589
592 590 /*
593 591  * Release the resource to cache if the pool (designated by the cookie)
594 592 * has less than the maximum allowable. If inserted in cache,
595 593  * segkp_delete ensures the element is taken off of the active list.
596 594 */
597 595 void
598 596 segkp_release(struct seg *seg, caddr_t vaddr)
599 597 {
600 598 struct segkp_cache *freelist;
601 599 struct segkp_data *kpd = NULL;
602 600
603 601 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
604 602 panic("segkp_release: null kpd");
605 603 /*NOTREACHED*/
606 604 }
607 605
608 606 if (kpd->kp_cookie != -1) {
609 607 freelist = &segkp_cache[kpd->kp_cookie];
610 608 mutex_enter(&segkp_lock);
611 609 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
612 610 segkp_delete(seg, kpd);
613 611 kpd->kp_next = freelist->kpf_list;
614 612 freelist->kpf_list = kpd;
615 613 freelist->kpf_count++;
616 614 mutex_exit(&segkp_lock);
617 615 return;
618 616 } else {
619 617 mutex_exit(&segkp_lock);
620 618 kpd->kp_cookie = -1;
621 619 }
622 620 }
623 621 segkp_release_internal(seg, kpd, kpd->kp_len);
624 622 }
625 623
626 624 /*
627 625 * Free the entire resource. segkp_unlock gets called with the start of the
628 626 * mapped portion of the resource. The length is the size of the mapped
629 627 * portion
630 628 */
631 629 static void
632 630 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
633 631 {
634 632 caddr_t va;
635 633 long i;
636 634 long redzone;
637 635 size_t np;
638 636 page_t *pp;
639 637 struct vnode *vp;
640 638 anoff_t off;
641 639 struct anon *ap;
642 640 pgcnt_t segkpindex;
643 641
644 642 ASSERT(kpd != NULL);
645 643 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
646 644 np = btop(len);
647 645
648 646 /* Remove from active hash list */
649 647 if (kpd->kp_cookie == -1) {
650 648 mutex_enter(&segkp_lock);
651 649 segkp_delete(seg, kpd);
652 650 mutex_exit(&segkp_lock);
653 651 }
654 652
655 653 /*
656 654 * Precompute redzone page index.
657 655 */
658 656 redzone = -1;
659 657 if (kpd->kp_flags & KPD_HASREDZONE)
660 658 redzone = KPD_REDZONE(kpd);
661 659
662 660
663 661 va = kpd->kp_base;
664 662
665 663 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
666 664 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
667 665 /*
668 666 * Free up those anon resources that are quiescent.
669 667 */
670 668 if (segkp_fromheap)
671 669 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
672 670 for (i = 0; i < np; i++, va += PAGESIZE) {
673 671
674 672 /*
675 673 * Clear the bit for this page from the bitmap.
676 674 */
677 675 if (segkp_fromheap) {
678 676 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
679 677 segkpindex++;
680 678 }
681 679
682 680 if (i == redzone)
683 681 continue;
684 682 if (kpd->kp_anon) {
685 683 /*
686 684 * Free up anon resources and destroy the
687 685 * associated pages.
688 686 *
689 687 * Release the lock if there is one. Have to get the
690 688 * page to do this, unfortunately.
691 689 */
692 690 if (kpd->kp_flags & KPD_LOCKED) {
693 691 ap = anon_get_ptr(kpd->kp_anon,
694 692 kpd->kp_anon_idx + i);
695 693 swap_xlate(ap, &vp, &off);
696 694 /* Find the shared-locked page. */
697 695 pp = page_find(vp, (u_offset_t)off);
698 696 if (pp == NULL) {
699 697 panic("segkp_release: "
700 698 "kp_anon: no page to unlock ");
701 699 /*NOTREACHED*/
702 700 }
703 701 if (PP_ISRAF(pp))
704 702 PP_CLRRAF(pp);
705 703
706 704 page_unlock(pp);
707 705 }
708 706 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
709 707 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
710 708 PAGESIZE);
711 709 anon_unresv_zone(PAGESIZE, NULL);
712 710 atomic_dec_ulong(&anon_segkp_pages_resv);
713 711 }
714 712 TRACE_5(TR_FAC_VM,
715 713 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
716 714 kpd, va, PAGESIZE, 0, 0);
717 715 } else {
718 716 if (kpd->kp_flags & KPD_LOCKED) {
719 717 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
720 718 if (pp == NULL) {
721 719 panic("segkp_release: "
722 720 "no page to unlock");
723 721 /*NOTREACHED*/
724 722 }
725 723 if (PP_ISRAF(pp))
726 724 PP_CLRRAF(pp);
727 725 /*
728 726 * We should just upgrade the lock here
729 727 * but there is no upgrade that waits.
730 728 */
731 729 page_unlock(pp);
732 730 }
733 731 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
734 732 SE_EXCL);
735 733 if (pp != NULL)
736 734 page_destroy(pp, 0);
737 735 }
738 736 }
739 737
740 738 /* If locked, release physical memory reservation */
741 739 if (kpd->kp_flags & KPD_LOCKED) {
742 740 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
743 741 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
744 742 atomic_add_long(&anon_segkp_pages_locked, -pages);
745 743 page_unresv(pages);
746 744 }
747 745
748 746 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
749 747 kmem_free(kpd, sizeof (struct segkp_data));
750 748 }
751 749
752 750 /*
753 751 * segkp_map_red() will check the current frame pointer against the
754 752 * stack base. If the amount of stack remaining is questionable
755 753 * (less than red_minavail), then segkp_map_red() will map in the redzone
756 754 * and return 1. Otherwise, it will return 0. segkp_map_red() can
757 755 * _only_ be called when:
758 756 *
759 757 * - it is safe to sleep on page_create_va().
760 758 * - the caller is non-swappable.
761 759 *
762 760 * It is up to the caller to remember whether segkp_map_red() successfully
763 761 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
764 762 * time. Note that the caller must _remain_ non-swappable until after
765 763 * calling segkp_unmap_red().
766 764 *
767 765 * Currently, this routine is only called from pagefault() (which necessarily
768 766 * satisfies the above conditions).
769 767 */
770 768 #if defined(STACK_GROWTH_DOWN)
771 769 int
772 770 segkp_map_red(void)
773 771 {
774 772 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
775 773 #ifndef _LP64
776 774 caddr_t stkbase;
777 775 #endif
778 776
779 777 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
780 778
781 779 /*
782 780 * Optimize for the common case where we simply return.
783 781 */
784 782 if ((curthread->t_red_pp == NULL) &&
785 783 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
786 784 return (0);
787 785
788 786 #if defined(_LP64)
789 787 /*
790 788 * XXX We probably need something better than this.
791 789 */
792 790 panic("kernel stack overflow");
793 791 /*NOTREACHED*/
794 792 #else /* _LP64 */
795 793 if (curthread->t_red_pp == NULL) {
796 794 page_t *red_pp;
797 795 struct seg kseg;
798 796
799 797 caddr_t red_va = (caddr_t)
800 798 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
801 799 PAGESIZE);
802 800
803 801 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
804 802 NULL);
805 803
806 804 /*
807 805 * Allocate the physical for the red page.
808 806 */
809 807 /*
810 808 * No PG_NORELOC here to avoid waits. Unlikely to get
811 809 * a relocate happening in the short time the page exists
812 810 * and it will be OK anyway.
813 811 */
814 812
815 813 kseg.s_as = &kas;
816 814 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
817 815 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
818 816 ASSERT(red_pp != NULL);
819 817
820 818 /*
821 819 * So we now have a page to jam into the redzone...
822 820 */
823 821 page_io_unlock(red_pp);
824 822
825 823 hat_memload(kas.a_hat, red_va, red_pp,
826 824 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
827 825 page_downgrade(red_pp);
828 826
829 827 /*
830 828 * The page is left SE_SHARED locked so we can hold on to
831 829 * the page_t pointer.
832 830 */
833 831 curthread->t_red_pp = red_pp;
834 832
835 833 atomic_inc_32(&red_nmapped);
836 834 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
837 835 (void) atomic_cas_32(&red_closest, red_closest,
838 836 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
839 837 }
840 838 return (1);
841 839 }
842 840
843 841 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
844 842 (uintptr_t)PAGEMASK) - PAGESIZE);
845 843
846 844 atomic_inc_32(&red_ndoubles);
847 845
848 846 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
849 847 /*
850 848 * Oh boy. We're already deep within the mapped-in
851 849 * redzone page, and the caller is trying to prepare
852 850 * for a deep stack run. We're running without a
853 851 * redzone right now: if the caller plows off the
854 852 * end of the stack, it'll plow another thread or
855 853 * LWP structure. That situation could result in
856 854 * a very hard-to-debug panic, so, in the spirit of
857 855 * recording the name of one's killer in one's own
858 856 * blood, we're going to record hrestime and the calling
859 857 * thread.
860 858 */
861 859 red_deep_hires = hrestime.tv_nsec;
862 860 red_deep_thread = curthread;
863 861 }
864 862
865 863 /*
866 864 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
867 865 */
868 866 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
869 867 return (0);
870 868 #endif /* _LP64 */
871 869 }
872 870
873 871 void
874 872 segkp_unmap_red(void)
875 873 {
876 874 page_t *pp;
877 875 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
878 876 (uintptr_t)PAGEMASK) - PAGESIZE);
879 877
880 878 ASSERT(curthread->t_red_pp != NULL);
881 879 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
882 880
883 881 /*
884 882 * Because we locked the mapping down, we can't simply rely
885 883 * on page_destroy() to clean everything up; we need to call
886 884 * hat_unload() to explicitly unlock the mapping resources.
887 885 */
888 886 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
889 887
890 888 pp = curthread->t_red_pp;
891 889
892 890 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
893 891
894 892 /*
895 893 * Need to upgrade the SE_SHARED lock to SE_EXCL.
896 894 */
897 895 if (!page_tryupgrade(pp)) {
898 896 /*
899 897  * As there is no wait for upgrade, release the
900 898 * SE_SHARED lock and wait for SE_EXCL.
901 899 */
902 900 page_unlock(pp);
903 901 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
904 902 /* pp may be NULL here, hence the test below */
905 903 }
906 904
907 905 /*
908 906 * Destroy the page, with dontfree set to zero (i.e. free it).
909 907 */
910 908 if (pp != NULL)
911 909 page_destroy(pp, 0);
912 910 curthread->t_red_pp = NULL;
913 911 }
914 912 #else
915 913 #error Red stacks only supported with downwards stack growth.
916 914 #endif
917 915
918 916 /*
919 917 * Handle a fault on an address corresponding to one of the
920 918 * resources in the segkp segment.
921 919 */
922 920 faultcode_t
923 921 segkp_fault(
924 922 struct hat *hat,
925 923 struct seg *seg,
926 924 caddr_t vaddr,
927 925 size_t len,
928 926 enum fault_type type,
929 927 enum seg_rw rw)
930 928 {
931 929 struct segkp_data *kpd = NULL;
932 930 int err;
933 931
934 932 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
935 933
936 934 /*
937 935 * Sanity checks.
938 936 */
939 937 if (type == F_PROT) {
940 938 panic("segkp_fault: unexpected F_PROT fault");
941 939 /*NOTREACHED*/
942 940 }
943 941
944 942 if ((kpd = segkp_find(seg, vaddr)) == NULL)
945 943 return (FC_NOMAP);
946 944
947 945 mutex_enter(&kpd->kp_lock);
948 946
949 947 if (type == F_SOFTLOCK) {
950 948 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
951 949 /*
952 950 * The F_SOFTLOCK case has more stringent
953 951 * range requirements: the given range must exactly coincide
954 952 * with the resource's mapped portion. Note reference to
955 953 * redzone is handled since vaddr would not equal base
956 954 */
957 955 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
958 956 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
959 957 mutex_exit(&kpd->kp_lock);
960 958 return (FC_MAKE_ERR(EFAULT));
961 959 }
962 960
963 961 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
964 962 mutex_exit(&kpd->kp_lock);
965 963 return (FC_MAKE_ERR(err));
966 964 }
967 965 kpd->kp_flags |= KPD_LOCKED;
968 966 mutex_exit(&kpd->kp_lock);
969 967 return (0);
970 968 }
971 969
972 970 if (type == F_INVAL) {
973 971 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
974 972
975 973 /*
976 974 * Check if we touched the redzone. Somewhat optimistic
977 975 * here if we are touching the redzone of our own stack
978 976 * since we wouldn't have a stack to get this far...
979 977 */
980 978 if ((kpd->kp_flags & KPD_HASREDZONE) &&
981 979 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
982 980 panic("segkp_fault: accessing redzone");
983 981
984 982 /*
985 983 * This fault may occur while the page is being F_SOFTLOCK'ed.
986 984 * Return since a 2nd segkp_load is unnecessary and also would
987 985 * result in the page being locked twice and eventually
988 986 * hang the thread_reaper thread.
989 987 */
990 988 if (kpd->kp_flags & KPD_LOCKED) {
991 989 mutex_exit(&kpd->kp_lock);
992 990 return (0);
993 991 }
994 992
995 993 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
996 994 mutex_exit(&kpd->kp_lock);
997 995 return (err ? FC_MAKE_ERR(err) : 0);
998 996 }
999 997
1000 998 if (type == F_SOFTUNLOCK) {
1001 999 uint_t flags;
1002 1000
1003 1001 /*
1004 1002 * Make sure the addr is LOCKED and it has anon backing
1005 1003 * before unlocking
1006 1004 */
1007 1005 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
1008 1006 panic("segkp_fault: bad unlock");
1009 1007 /*NOTREACHED*/
1010 1008 }
1011 1009
1012 1010 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
1013 1011 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
1014 1012 panic("segkp_fault: bad range");
1015 1013 /*NOTREACHED*/
1016 1014 }
1017 1015
1018 1016 if (rw == S_WRITE)
1019 1017 flags = kpd->kp_flags | KPD_WRITEDIRTY;
1020 1018 else
1021 1019 flags = kpd->kp_flags;
1022 1020 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
1023 1021 kpd->kp_flags &= ~KPD_LOCKED;
1024 1022 mutex_exit(&kpd->kp_lock);
1025 1023 return (err ? FC_MAKE_ERR(err) : 0);
1026 1024 }
1027 1025 mutex_exit(&kpd->kp_lock);
1028 1026 panic("segkp_fault: bogus fault type: %d\n", type);
1029 1027 /*NOTREACHED*/
1030 1028 }
1031 1029
1032 1030 /*
1033 1031 * Check that the given protections suffice over the range specified by
1034 1032 * vaddr and len. For this segment type, the only issue is whether or
1035 1033 * not the range lies completely within the mapped part of an allocated
1036 1034 * resource.
1037 1035 */
1038 1036 /* ARGSUSED */
1039 1037 static int
1040 1038 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1041 1039 {
1042 1040 struct segkp_data *kpd = NULL;
1043 1041 caddr_t mbase;
1044 1042 size_t mlen;
1045 1043
1046 1044 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1047 1045 return (EACCES);
1048 1046
1049 1047 mutex_enter(&kpd->kp_lock);
1050 1048 mbase = stom(kpd->kp_base, kpd->kp_flags);
1051 1049 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1052 1050 if (len > mlen || vaddr < mbase ||
1053 1051 ((vaddr + len) > (mbase + mlen))) {
1054 1052 mutex_exit(&kpd->kp_lock);
1055 1053 return (EACCES);
1056 1054 }
1057 1055 mutex_exit(&kpd->kp_lock);
1058 1056 return (0);
1059 1057 }
1060 1058
1061 1059
1062 1060 /*
1063 1061 * Check to see if it makes sense to do kluster/read ahead to
1064 1062 * addr + delta relative to the mapping at addr. We assume here
1065 1063 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1066 1064 *
1067 1065 * For seg_u we always "approve" of this action from our standpoint.
1068 1066 */
1069 1067 /*ARGSUSED*/
1070 1068 static int
1071 1069 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1072 1070 {
1073 1071 return (0);
1074 1072 }
1075 1073
1076 1074 /*
1077 1075 * Load and possibly lock intra-slot resources in the range given by
1078 1076 * vaddr and len.
1079 1077 */
1080 1078 static int
1081 1079 segkp_load(
1082 1080 struct hat *hat,
1083 1081 struct seg *seg,
1084 1082 caddr_t vaddr,
1085 1083 size_t len,
1086 1084 struct segkp_data *kpd,
1087 1085 uint_t flags)
1088 1086 {
1089 1087 caddr_t va;
1090 1088 caddr_t vlim;
1091 1089 ulong_t i;
1092 1090 uint_t lock;
1093 1091
1094 1092 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1095 1093
1096 1094 len = P2ROUNDUP(len, PAGESIZE);
1097 1095
1098 1096 /* If locking, reserve physical memory */
1099 1097 if (flags & KPD_LOCKED) {
1100 1098 pgcnt_t pages = btop(len);
1101 1099 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1102 1100 atomic_add_long(&anon_segkp_pages_locked, pages);
1103 1101 (void) page_resv(pages, KM_SLEEP);
1104 1102 }
1105 1103
1106 1104 /*
1107 1105 * Loop through the pages in the given range.
1108 1106 */
1109 1107 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1110 1108 vaddr = va;
1111 1109 vlim = va + len;
1112 1110 lock = flags & KPD_LOCKED;
1113 1111 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1114 1112 for (; va < vlim; va += PAGESIZE, i++) {
1115 1113 page_t *pl[2]; /* second element NULL terminator */
1116 1114 struct vnode *vp;
1117 1115 anoff_t off;
1118 1116 int err;
1119 1117 struct anon *ap;
1120 1118
1121 1119 /*
1122 1120 * Summon the page. If it's not resident, arrange
1123 1121 * for synchronous i/o to pull it in.
1124 1122 */
1125 1123 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1126 1124 swap_xlate(ap, &vp, &off);
1127 1125
1128 1126 /*
1129 1127 * The returned page list will have exactly one entry,
1130 1128 * which is returned to us already kept.
1131 1129 */
1132 1130 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1133 1131 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1134 1132
1135 1133 if (err) {
1136 1134 /*
1137 1135 * Back out of what we've done so far.
1138 1136 */
1139 1137 (void) segkp_unlock(hat, seg, vaddr,
1140 1138 (va - vaddr), kpd, flags);
1141 1139 return (err);
1142 1140 }
1143 1141
1144 1142 /*
1145 1143 * Load an MMU translation for the page.
1146 1144 */
1147 1145 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1148 1146 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1149 1147
1150 1148 if (!lock) {
1151 1149 /*
1152 1150 * Now, release "shared" lock on the page.
1153 1151 */
1154 1152 page_unlock(pl[0]);
1155 1153 }
1156 1154 }
1157 1155 return (0);
1158 1156 }
1159 1157
1160 1158 /*
1161 1159  * At the very least unload the mmu-translations and unlock the range if locked.
1162 1160 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
1163 1161 * any dirty pages should be written to disk.
1164 1162 */
1165 1163 static int
1166 1164 segkp_unlock(
1167 1165 struct hat *hat,
1168 1166 struct seg *seg,
1169 1167 caddr_t vaddr,
1170 1168 size_t len,
1171 1169 struct segkp_data *kpd,
1172 1170 uint_t flags)
1173 1171 {
1174 1172 caddr_t va;
1175 1173 caddr_t vlim;
1176 1174 ulong_t i;
1177 1175 struct page *pp;
1178 1176 struct vnode *vp;
1179 1177 anoff_t off;
1180 1178 struct anon *ap;
1181 1179
1182 1180 #ifdef lint
1183 1181 seg = seg;
1184 1182 #endif /* lint */
1185 1183
1186 1184 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1187 1185
1188 1186 /*
1189 1187 * Loop through the pages in the given range. It is assumed
1190 1188 * segkp_unlock is called with page aligned base
1191 1189 */
1192 1190 va = vaddr;
1193 1191 vlim = va + len;
1194 1192 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1195 1193 hat_unload(hat, va, len,
1196 1194 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1197 1195 for (; va < vlim; va += PAGESIZE, i++) {
1198 1196 /*
1199 1197 * Find the page associated with this part of the
1200 1198 * slot, tracking it down through its associated swap
1201 1199 * space.
1202 1200 */
1203 1201 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1204 1202 swap_xlate(ap, &vp, &off);
1205 1203
1206 1204 if (flags & KPD_LOCKED) {
1207 1205 if ((pp = page_find(vp, off)) == NULL) {
1208 1206 if (flags & KPD_LOCKED) {
1209 1207 panic("segkp_softunlock: missing page");
1210 1208 /*NOTREACHED*/
1211 1209 }
1212 1210 }
1213 1211 } else {
1214 1212 /*
1215 1213 * Nothing to do if the slot is not locked and the
1216 1214 * page doesn't exist.
1217 1215 */
1218 1216 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1219 1217 continue;
1220 1218 }
1221 1219
1222 1220 /*
1223 1221 * If the page doesn't have any translations, is
1224 1222 * dirty and not being shared, then push it out
1225 1223 * asynchronously and avoid waiting for the
1226 1224 * pageout daemon to do it for us.
1227 1225 *
1228 1226 * XXX - Do we really need to get the "exclusive"
1229 1227 * lock via an upgrade?
1230 1228 */
1231 1229 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1232 1230 hat_ismod(pp) && page_tryupgrade(pp)) {
1233 1231 /*
1234 1232 * Hold the vnode before releasing the page lock to
1235 1233 * prevent it from being freed and re-used by some
1236 1234 * other thread.
1237 1235 */
1238 1236 VN_HOLD(vp);
1239 1237 page_unlock(pp);
1240 1238
1241 1239 /*
1242 1240 * Want most powerful credentials we can get so
1243 1241 * use kcred.
1244 1242 */
1245 1243 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1246 1244 B_ASYNC | B_FREE, kcred, NULL);
1247 1245 VN_RELE(vp);
1248 1246 } else {
1249 1247 page_unlock(pp);
1250 1248 }
1251 1249 }
1252 1250
1253 1251 /* If unlocking, release physical memory */
1254 1252 if (flags & KPD_LOCKED) {
1255 1253 pgcnt_t pages = btopr(len);
1256 1254 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1257 1255 atomic_add_long(&anon_segkp_pages_locked, -pages);
1258 1256 page_unresv(pages);
1259 1257 }
1260 1258 return (0);
1261 1259 }
1262 1260
1263 1261 /*
1264 1262 * Insert the kpd in the hash table.
1265 1263 */
1266 1264 static void
1267 1265 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1268 1266 {
1269 1267 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1270 1268 int index;
1271 1269
1272 1270 /*
1273 1271 * Insert the kpd based on the address that will be returned
1274 1272 * via segkp_release.
1275 1273 */
1276 1274 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1277 1275 mutex_enter(&segkp_lock);
1278 1276 kpd->kp_next = kpsd->kpsd_hash[index];
1279 1277 kpsd->kpsd_hash[index] = kpd;
1280 1278 mutex_exit(&segkp_lock);
1281 1279 }
1282 1280
1283 1281 /*
1284 1282 * Remove kpd from the hash table.
1285 1283 */
1286 1284 static void
1287 1285 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1288 1286 {
1289 1287 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1290 1288 struct segkp_data **kpp;
1291 1289 int index;
1292 1290
1293 1291 ASSERT(MUTEX_HELD(&segkp_lock));
1294 1292
1295 1293 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1296 1294 for (kpp = &kpsd->kpsd_hash[index];
1297 1295 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1298 1296 if (*kpp == kpd) {
1299 1297 *kpp = kpd->kp_next;
1300 1298 return;
1301 1299 }
1302 1300 }
1303 1301 panic("segkp_delete: unable to find element to delete");
1304 1302 /*NOTREACHED*/
1305 1303 }
1306 1304
1307 1305 /*
1308 1306 * Find the kpd associated with a vaddr.
1309 1307 *
1310 1308 * Most of the callers of segkp_find will pass the vaddr that
1311 1309 * hashes to the desired index, but there are cases where
1312 1310 * this is not true in which case we have to (potentially) scan
1313 1311 * the whole table looking for it. This should be very rare
1314 1312 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1315 1313 * middle of the segkp_data region).
1316 1314 */
1317 1315 static struct segkp_data *
1318 1316 segkp_find(struct seg *seg, caddr_t vaddr)
1319 1317 {
1320 1318 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1321 1319 struct segkp_data *kpd;
1322 1320 int i;
1323 1321 int stop;
1324 1322
1325 1323 i = stop = SEGKP_HASH(vaddr);
1326 1324 mutex_enter(&segkp_lock);
1327 1325 do {
1328 1326 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1329 1327 kpd = kpd->kp_next) {
1330 1328 if (vaddr >= kpd->kp_base &&
1331 1329 vaddr < kpd->kp_base + kpd->kp_len) {
1332 1330 mutex_exit(&segkp_lock);
1333 1331 return (kpd);
1334 1332 }
1335 1333 }
1336 1334 if (--i < 0)
1337 1335 i = SEGKP_HASHSZ - 1; /* Wrap */
1338 1336 } while (i != stop);
1339 1337 mutex_exit(&segkp_lock);
1340 1338 return (NULL); /* Not found */
1341 1339 }
1342 1340
1343 1341 /*
1344 1342 * returns size of swappable area.
1345 1343 */
1346 1344 size_t
1347 1345 swapsize(caddr_t v)
1348 1346 {
1349 1347 struct segkp_data *kpd;
1350 1348
1351 1349 if ((kpd = segkp_find(segkp, v)) != NULL)
1352 1350 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1353 1351 else
1354 1352 return (NULL);
1355 1353 }
1356 1354
1357 1355 /*
1358 1356 * Dump out all the active segkp pages
1359 1357 */
1360 1358 static void
1361 1359 segkp_dump(struct seg *seg)
1362 1360 {
1363 1361 int i;
1364 1362 struct segkp_data *kpd;
1365 1363 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1366 1364
1367 1365 for (i = 0; i < SEGKP_HASHSZ; i++) {
1368 1366 for (kpd = kpsd->kpsd_hash[i];
1369 1367 kpd != NULL; kpd = kpd->kp_next) {
1370 1368 pfn_t pfn;
1371 1369 caddr_t addr;
1372 1370 caddr_t eaddr;
1373 1371
1374 1372 addr = kpd->kp_base;
1375 1373 eaddr = addr + kpd->kp_len;
1376 1374 while (addr < eaddr) {
1377 1375 ASSERT(seg->s_as == &kas);
1378 1376 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1379 1377 if (pfn != PFN_INVALID)
1380 1378 dump_addpage(seg->s_as, addr, pfn);
1381 1379 addr += PAGESIZE;
1382 1380 dump_timeleft = dump_timeout;
1383 1381 }
1384 1382 }
1385 1383 }
1386 1384 }
1387 1385
1388 1386 /*ARGSUSED*/
1389 1387 static int
1390 1388 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1391 1389 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1392 1390 {
1393 1391 return (ENOTSUP);
1394 -}
1395 -
1396 -/*ARGSUSED*/
1397 -static int
1398 -segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1399 -{
1400 - return (ENODEV);
1401 1392 }
1402 1393
1403 1394 #include <sys/mem_config.h>
1404 1395
1405 1396 /*ARGSUSED*/
1406 1397 static void
1407 1398 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1408 1399 {}
1409 1400
1410 1401 /*
1411 1402 * During memory delete, turn off caches so that pages are not held.
1412 1403 * A better solution may be to unlock the pages while they are
1413 1404 * in the cache so that they may be collected naturally.
1414 1405 */
1415 1406
1416 1407 /*ARGSUSED*/
1417 1408 static int
1418 1409 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1419 1410 {
1420 1411 atomic_inc_32(&segkp_indel);
1421 1412 segkp_cache_free();
1422 1413 return (0);
1423 1414 }
1424 1415
1425 1416 /*ARGSUSED*/
1426 1417 static void
1427 1418 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1428 1419 {
1429 1420 atomic_dec_32(&segkp_indel);
1430 1421 }
1431 1422
1432 1423 static kphysm_setup_vector_t segkp_mem_config_vec = {
1433 1424 KPHYSM_SETUP_VECTOR_VERSION,
1434 1425 segkp_mem_config_post_add,
1435 1426 segkp_mem_config_pre_del,
1436 1427 segkp_mem_config_post_del,
1437 1428 };
1438 1429
1439 1430 static void
1440 1431 segkpinit_mem_config(struct seg *seg)
1441 1432 {
1442 1433 int ret;
1443 1434
1444 1435 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1445 1436 ASSERT(ret == 0);
1446 1437 }