use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, treat a NULL capable segop function pointer as shorthand for
"no capabilities supported".
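
For readers of the diff below, here is a minimal sketch of the dispatch-side
convention this change relies on. The wrapper name segop_capable() is an
assumption for illustration; the generic dispatch code is not part of this
diff. With a NULL-tolerant dispatcher, a driver may simply omit its .capable
entry.

/*
 * Hypothetical dispatcher sketch (not part of this change): a NULL
 * .capable segop is treated as "no capabilities supported", so a
 * driver no longer needs a dummy "return 0" routine.
 */
static inline int
segop_capable(struct seg *seg, segcapability_t capability)
{
	if (seg->s_ops->capable == NULL)
		return (0);	/* no capabilities supported */

	return (seg->s_ops->capable(seg, capability));
}

With that convention in place, the diff deletes segkp's dummy
segkp_capable() and drops the .capable initializer from segkp_ops.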
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or be locked in memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_dump(struct seg *seg);
79 79 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
80 80 uint_t prot);
81 81 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
82 82 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
83 83 struct page ***page, enum lock_type type,
84 84 enum seg_rw rw);
85 85 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
86 86 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
[86 lines elided]
87 87 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
88 88 struct segkp_data **tkpd, struct anon_map *amp);
89 89 static void segkp_release_internal(struct seg *seg,
90 90 struct segkp_data *kpd, size_t len);
91 91 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
92 92 size_t len, struct segkp_data *kpd, uint_t flags);
93 93 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
94 94 size_t len, struct segkp_data *kpd, uint_t flags);
95 95 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
96 96 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
97 -static int segkp_capable(struct seg *seg, segcapability_t capability);
98 97
99 98 /*
100 99 * Lock used to protect the hash table(s) and caches.
101 100 */
102 101 static kmutex_t segkp_lock;
103 102
104 103 /*
105 104 * The segkp caches
106 105 */
107 106 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
108 107
109 108 /*
110 109 * When there are fewer than red_minavail bytes left on the stack,
111 110 * segkp_map_red() will map in the redzone (if called). 5000 seems
112 111 * to work reasonably well...
113 112 */
114 113 long red_minavail = 5000;
115 114
116 115 /*
117 116 * will be set to 1 for 32 bit x86 systems only, in startup.c
118 117 */
119 118 int segkp_fromheap = 0;
120 119 ulong_t *segkp_bitmap;
121 120
122 121 /*
123 122 * If segkp_map_red() is called with the redzone already mapped and
124 123 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
125 124 * then the stack situation has become quite serious; if much more stack
126 125 * is consumed, we have the potential of scrogging the next thread/LWP
127 126 * structure. To help debug the "can't happen" panics which may
128 127 * result from this condition, we record hrestime and the calling thread
129 128 * in red_deep_hires and red_deep_thread respectively.
130 129 */
131 130 #define RED_DEEP_THRESHOLD 2000
132 131
133 132 hrtime_t red_deep_hires;
134 133 kthread_t *red_deep_thread;
135 134
136 135 uint32_t red_nmapped;
137 136 uint32_t red_closest = UINT_MAX;
138 137 uint32_t red_ndoubles;
139 138
[32 lines elided]
140 139 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
141 140 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
142 141
143 142 static struct seg_ops segkp_ops = {
144 143 .fault = segkp_fault,
145 144 .checkprot = segkp_checkprot,
146 145 .kluster = segkp_kluster,
147 146 .dump = segkp_dump,
148 147 .pagelock = segkp_pagelock,
149 148 .getmemid = segkp_getmemid,
150 - .capable = segkp_capable,
151 149 };
152 150
153 151
154 152 static void segkpinit_mem_config(struct seg *);
155 153
156 154 static uint32_t segkp_indel;
157 155
158 156 /*
159 157 * Allocate the segment-specific private data struct and fill it in
160 158 * with the per-kp-segment mutex, anon ptr. array, and hash table.
161 159 */
162 160 int
163 161 segkp_create(struct seg *seg)
164 162 {
165 163 struct segkp_segdata *kpsd;
166 164 size_t np;
167 165
168 166 ASSERT(seg != NULL && seg->s_as == &kas);
169 167 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
170 168
171 169 if (seg->s_size & PAGEOFFSET) {
172 170 panic("Bad segkp size");
173 171 /*NOTREACHED*/
174 172 }
175 173
176 174 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
177 175
178 176 /*
179 177 * Allocate the virtual memory for segkp and initialize it
180 178 */
181 179 if (segkp_fromheap) {
182 180 np = btop(kvseg.s_size);
183 181 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
184 182 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
185 183 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
186 184 } else {
187 185 segkp_bitmap = NULL;
188 186 np = btop(seg->s_size);
189 187 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
190 188 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
191 189 VM_SLEEP);
192 190 }
193 191
194 192 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
195 193
196 194 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
197 195 KM_SLEEP);
198 196 seg->s_data = (void *)kpsd;
199 197 seg->s_ops = &segkp_ops;
200 198 segkpinit_mem_config(seg);
201 199 return (0);
202 200 }
203 201
204 202
205 203 /*
206 204 * Find a free 'freelist' and initialize it with the appropriate attributes
207 205 */
208 206 void *
209 207 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
210 208 {
211 209 int i;
212 210
213 211 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
214 212 return ((void *)-1);
215 213
216 214 mutex_enter(&segkp_lock);
217 215 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
218 216 if (segkp_cache[i].kpf_inuse)
219 217 continue;
220 218 segkp_cache[i].kpf_inuse = 1;
221 219 segkp_cache[i].kpf_max = maxsize;
222 220 segkp_cache[i].kpf_flags = flags;
223 221 segkp_cache[i].kpf_seg = seg;
224 222 segkp_cache[i].kpf_len = len;
225 223 mutex_exit(&segkp_lock);
226 224 return ((void *)(uintptr_t)i);
227 225 }
228 226 mutex_exit(&segkp_lock);
229 227 return ((void *)-1);
230 228 }
231 229
232 230 /*
233 231 * Free all the cache resources.
234 232 */
235 233 void
236 234 segkp_cache_free(void)
237 235 {
238 236 struct segkp_data *kpd;
239 237 struct seg *seg;
240 238 int i;
241 239
242 240 mutex_enter(&segkp_lock);
243 241 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
244 242 if (!segkp_cache[i].kpf_inuse)
245 243 continue;
246 244 /*
247 245 * Disconnect the freelist and process each element
248 246 */
249 247 kpd = segkp_cache[i].kpf_list;
250 248 seg = segkp_cache[i].kpf_seg;
251 249 segkp_cache[i].kpf_list = NULL;
252 250 segkp_cache[i].kpf_count = 0;
253 251 mutex_exit(&segkp_lock);
254 252
255 253 while (kpd != NULL) {
256 254 struct segkp_data *next;
257 255
258 256 next = kpd->kp_next;
259 257 segkp_release_internal(seg, kpd, kpd->kp_len);
260 258 kpd = next;
261 259 }
262 260 mutex_enter(&segkp_lock);
263 261 }
264 262 mutex_exit(&segkp_lock);
265 263 }
266 264
267 265 /*
268 266 * There are two entry points into segkp_get_internal. The first includes a cookie
269 267 * used to access a pool of cached segkp resources. The second does not
270 268 * use the cache.
271 269 */
272 270 caddr_t
273 271 segkp_get(struct seg *seg, size_t len, uint_t flags)
274 272 {
275 273 struct segkp_data *kpd = NULL;
276 274
277 275 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
278 276 kpd->kp_cookie = -1;
279 277 return (stom(kpd->kp_base, flags));
280 278 }
281 279 return (NULL);
282 280 }
283 281
284 282 /*
285 283 * Return a 'cached' segkp address
286 284 */
287 285 caddr_t
288 286 segkp_cache_get(void *cookie)
289 287 {
290 288 struct segkp_cache *freelist = NULL;
291 289 struct segkp_data *kpd = NULL;
292 290 int index = (int)(uintptr_t)cookie;
293 291 struct seg *seg;
294 292 size_t len;
295 293 uint_t flags;
296 294
297 295 if (index < 0 || index >= SEGKP_MAX_CACHE)
298 296 return (NULL);
299 297 freelist = &segkp_cache[index];
300 298
301 299 mutex_enter(&segkp_lock);
302 300 seg = freelist->kpf_seg;
303 301 flags = freelist->kpf_flags;
304 302 if (freelist->kpf_list != NULL) {
305 303 kpd = freelist->kpf_list;
306 304 freelist->kpf_list = kpd->kp_next;
307 305 freelist->kpf_count--;
308 306 mutex_exit(&segkp_lock);
309 307 kpd->kp_next = NULL;
310 308 segkp_insert(seg, kpd);
311 309 return (stom(kpd->kp_base, flags));
312 310 }
313 311 len = freelist->kpf_len;
314 312 mutex_exit(&segkp_lock);
315 313 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
316 314 kpd->kp_cookie = index;
317 315 return (stom(kpd->kp_base, flags));
318 316 }
319 317 return (NULL);
320 318 }
321 319
322 320 caddr_t
323 321 segkp_get_withanonmap(
324 322 struct seg *seg,
325 323 size_t len,
326 324 uint_t flags,
327 325 struct anon_map *amp)
328 326 {
329 327 struct segkp_data *kpd = NULL;
330 328
331 329 ASSERT(amp != NULL);
332 330 flags |= KPD_HASAMP;
333 331 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
334 332 kpd->kp_cookie = -1;
335 333 return (stom(kpd->kp_base, flags));
336 334 }
337 335 return (NULL);
338 336 }
339 337
340 338 /*
341 339 * This does the real work of segkp allocation.
342 340 * Returns the base addr to the client. len must be page-aligned. A null value is
343 341 * returned if there are no more vm resources (e.g. pages, swap). The len
344 342 * and base recorded in the private data structure include the redzone
345 343 * and the redzone length (if applicable). If the user requests a redzone
346 344 * either the first or last page is left unmapped depending on whether stacks
347 345 * grow to low or high memory.
348 346 *
349 347 * The client may also specify a no-wait flag. If that is set then the
350 348 * request will choose a non-blocking path when requesting resources.
351 349 * The default is make the client wait.
352 350 * The default is to make the client wait.
353 351 static caddr_t
354 352 segkp_get_internal(
355 353 struct seg *seg,
356 354 size_t len,
357 355 uint_t flags,
358 356 struct segkp_data **tkpd,
359 357 struct anon_map *amp)
360 358 {
361 359 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
362 360 struct segkp_data *kpd;
363 361 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
364 362 pgcnt_t np = 0; /* number of pages in the resource */
365 363 pgcnt_t segkpindex;
366 364 long i;
367 365 caddr_t va;
368 366 pgcnt_t pages = 0;
369 367 ulong_t anon_idx = 0;
370 368 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
371 369 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
372 370
373 371 if (len & PAGEOFFSET) {
374 372 panic("segkp_get: len is not page-aligned");
375 373 /*NOTREACHED*/
376 374 }
377 375
378 376 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
379 377
380 378 /* Only allow KPD_NO_ANON if we are going to lock it down */
381 379 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
382 380 return (NULL);
383 381
384 382 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
385 383 return (NULL);
386 384 /*
387 385 * Fix up the len to reflect the REDZONE if applicable
388 386 */
389 387 if (flags & KPD_HASREDZONE)
390 388 len += PAGESIZE;
391 389 np = btop(len);
392 390
393 391 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
394 392 if (vbase == NULL) {
395 393 kmem_free(kpd, sizeof (struct segkp_data));
396 394 return (NULL);
397 395 }
398 396
399 397 /* If locking, reserve physical memory */
400 398 if (flags & KPD_LOCKED) {
401 399 pages = btop(SEGKP_MAPLEN(len, flags));
402 400 if (page_resv(pages, kmflag) == 0) {
403 401 vmem_free(SEGKP_VMEM(seg), vbase, len);
404 402 kmem_free(kpd, sizeof (struct segkp_data));
405 403 return (NULL);
406 404 }
407 405 if ((flags & KPD_NO_ANON) == 0)
408 406 atomic_add_long(&anon_segkp_pages_locked, pages);
409 407 }
410 408
411 409 /*
412 410 * Reserve sufficient swap space for this vm resource. We'll
413 411 * actually allocate it in the loop below, but reserving it
414 412 * here allows us to back out more gracefully than if we
415 413 * had an allocation failure in the body of the loop.
416 414 *
417 415 * Note that we don't need swap space for the red zone page.
418 416 */
419 417 if (amp != NULL) {
420 418 /*
421 419 * The swap reservation has been done, if required, and the
422 420 * anon_hdr is separate.
423 421 */
424 422 anon_idx = 0;
425 423 kpd->kp_anon_idx = anon_idx;
426 424 kpd->kp_anon = amp->ahp;
427 425
428 426 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
429 427 kpd, vbase, len, flags, 1);
430 428
431 429 } else if ((flags & KPD_NO_ANON) == 0) {
432 430 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
433 431 if (flags & KPD_LOCKED) {
434 432 atomic_add_long(&anon_segkp_pages_locked,
435 433 -pages);
436 434 page_unresv(pages);
437 435 }
438 436 vmem_free(SEGKP_VMEM(seg), vbase, len);
439 437 kmem_free(kpd, sizeof (struct segkp_data));
440 438 return (NULL);
441 439 }
442 440 atomic_add_long(&anon_segkp_pages_resv,
443 441 btop(SEGKP_MAPLEN(len, flags)));
444 442 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
445 443 kpd->kp_anon_idx = anon_idx;
446 444 kpd->kp_anon = kpsd->kpsd_anon;
447 445
448 446 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
449 447 kpd, vbase, len, flags, 1);
450 448 } else {
451 449 kpd->kp_anon = NULL;
452 450 kpd->kp_anon_idx = 0;
453 451 }
454 452
455 453 /*
456 454 * Allocate page and anon resources for the virtual address range
457 455 * except the redzone
458 456 */
459 457 if (segkp_fromheap)
460 458 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
461 459 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
462 460 page_t *pl[2];
463 461 struct vnode *vp;
464 462 anoff_t off;
465 463 int err;
466 464 page_t *pp = NULL;
467 465
468 466 /*
469 467 * Mark this page to be a segkp page in the bitmap.
470 468 */
471 469 if (segkp_fromheap) {
472 470 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
473 471 segkpindex++;
474 472 }
475 473
476 474 /*
477 475 * If this page is the red zone page, we don't need swap
478 476 * space for it. Note that we skip over the code that
479 477 * establishes MMU mappings, so that the page remains
480 478 * invalid.
481 479 */
482 480 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
483 481 continue;
484 482
485 483 if (kpd->kp_anon != NULL) {
486 484 struct anon *ap;
487 485
488 486 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
489 487 == NULL);
490 488 /*
491 489 * Determine the "vp" and "off" of the anon slot.
492 490 */
493 491 ap = anon_alloc(NULL, 0);
494 492 if (amp != NULL)
495 493 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
496 494 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
497 495 ap, ANON_SLEEP);
498 496 if (amp != NULL)
499 497 ANON_LOCK_EXIT(&amp->a_rwlock);
500 498 swap_xlate(ap, &vp, &off);
501 499
502 500 /*
503 501 * Create a page with the specified identity. The
504 502 * page is returned with the "shared" lock held.
505 503 */
506 504 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
507 505 NULL, pl, PAGESIZE, seg, va, S_CREATE,
508 506 kcred, NULL);
509 507 if (err) {
510 508 /*
511 509 * XXX - This should not fail.
512 510 */
513 511 panic("segkp_get: no pages");
514 512 /*NOTREACHED*/
515 513 }
516 514 pp = pl[0];
517 515 } else {
518 516 ASSERT(page_exists(&kvp,
519 517 (u_offset_t)(uintptr_t)va) == NULL);
520 518
521 519 if ((pp = page_create_va(&kvp,
522 520 (u_offset_t)(uintptr_t)va, PAGESIZE,
523 521 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
524 522 PG_NORELOC, seg, va)) == NULL) {
525 523 /*
526 524 * Legitimize resource; then destroy it.
527 525 * Easier than trying to unwind here.
528 526 */
529 527 kpd->kp_flags = flags;
530 528 kpd->kp_base = vbase;
531 529 kpd->kp_len = len;
532 530 segkp_release_internal(seg, kpd, va - vbase);
533 531 return (NULL);
534 532 }
535 533 page_io_unlock(pp);
536 534 }
537 535
538 536 if (flags & KPD_ZERO)
539 537 pagezero(pp, 0, PAGESIZE);
540 538
541 539 /*
542 540 * Load and lock an MMU translation for the page.
543 541 */
544 542 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
545 543 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
546 544
547 545 /*
548 546 * Now, release lock on the page.
549 547 */
550 548 if (flags & KPD_LOCKED) {
551 549 /*
552 550 * Indicate to page_retire framework that this
553 551 * page can only be retired when it is freed.
554 552 */
555 553 PP_SETRAF(pp);
556 554 page_downgrade(pp);
557 555 } else
558 556 page_unlock(pp);
559 557 }
560 558
561 559 kpd->kp_flags = flags;
562 560 kpd->kp_base = vbase;
563 561 kpd->kp_len = len;
564 562 segkp_insert(seg, kpd);
565 563 *tkpd = kpd;
566 564 return (stom(kpd->kp_base, flags));
567 565 }
568 566
569 567 /*
570 568 * Release the resource to the cache if the pool (designated by the
571 569 * cookie) has less than the maximum allowable. If inserted in the cache,
572 570 * segkp_delete ensures the element is taken off of the active list.
573 571 */
574 572 void
575 573 segkp_release(struct seg *seg, caddr_t vaddr)
576 574 {
577 575 struct segkp_cache *freelist;
578 576 struct segkp_data *kpd = NULL;
579 577
580 578 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
581 579 panic("segkp_release: null kpd");
582 580 /*NOTREACHED*/
583 581 }
584 582
585 583 if (kpd->kp_cookie != -1) {
586 584 freelist = &segkp_cache[kpd->kp_cookie];
587 585 mutex_enter(&segkp_lock);
588 586 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
589 587 segkp_delete(seg, kpd);
590 588 kpd->kp_next = freelist->kpf_list;
591 589 freelist->kpf_list = kpd;
592 590 freelist->kpf_count++;
593 591 mutex_exit(&segkp_lock);
594 592 return;
595 593 } else {
596 594 mutex_exit(&segkp_lock);
597 595 kpd->kp_cookie = -1;
598 596 }
599 597 }
600 598 segkp_release_internal(seg, kpd, kpd->kp_len);
601 599 }
602 600
603 601 /*
604 602 * Free the entire resource. segkp_unlock gets called with the start of the
605 603 * mapped portion of the resource. The length is the size of the mapped
606 604 * portion
607 605 * portion.
608 606 static void
609 607 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
610 608 {
611 609 caddr_t va;
612 610 long i;
613 611 long redzone;
614 612 size_t np;
615 613 page_t *pp;
616 614 struct vnode *vp;
617 615 anoff_t off;
618 616 struct anon *ap;
619 617 pgcnt_t segkpindex;
620 618
621 619 ASSERT(kpd != NULL);
622 620 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
623 621 np = btop(len);
624 622
625 623 /* Remove from active hash list */
626 624 if (kpd->kp_cookie == -1) {
627 625 mutex_enter(&segkp_lock);
628 626 segkp_delete(seg, kpd);
629 627 mutex_exit(&segkp_lock);
630 628 }
631 629
632 630 /*
633 631 * Precompute redzone page index.
634 632 */
635 633 redzone = -1;
636 634 if (kpd->kp_flags & KPD_HASREDZONE)
637 635 redzone = KPD_REDZONE(kpd);
638 636
639 637
640 638 va = kpd->kp_base;
641 639
642 640 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
643 641 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
644 642 /*
645 643 * Free up those anon resources that are quiescent.
646 644 */
647 645 if (segkp_fromheap)
648 646 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
649 647 for (i = 0; i < np; i++, va += PAGESIZE) {
650 648
651 649 /*
652 650 * Clear the bit for this page from the bitmap.
653 651 */
654 652 if (segkp_fromheap) {
655 653 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
656 654 segkpindex++;
657 655 }
658 656
659 657 if (i == redzone)
660 658 continue;
661 659 if (kpd->kp_anon) {
662 660 /*
663 661 * Free up anon resources and destroy the
664 662 * associated pages.
665 663 *
666 664 * Release the lock if there is one. Have to get the
667 665 * page to do this, unfortunately.
668 666 */
669 667 if (kpd->kp_flags & KPD_LOCKED) {
670 668 ap = anon_get_ptr(kpd->kp_anon,
671 669 kpd->kp_anon_idx + i);
672 670 swap_xlate(ap, &vp, &off);
673 671 /* Find the shared-locked page. */
674 672 pp = page_find(vp, (u_offset_t)off);
675 673 if (pp == NULL) {
676 674 panic("segkp_release: "
677 675 "kp_anon: no page to unlock ");
678 676 /*NOTREACHED*/
679 677 }
680 678 if (PP_ISRAF(pp))
681 679 PP_CLRRAF(pp);
682 680
683 681 page_unlock(pp);
684 682 }
685 683 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
686 684 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
687 685 PAGESIZE);
688 686 anon_unresv_zone(PAGESIZE, NULL);
689 687 atomic_dec_ulong(&anon_segkp_pages_resv);
690 688 }
691 689 TRACE_5(TR_FAC_VM,
692 690 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
693 691 kpd, va, PAGESIZE, 0, 0);
694 692 } else {
695 693 if (kpd->kp_flags & KPD_LOCKED) {
696 694 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
697 695 if (pp == NULL) {
698 696 panic("segkp_release: "
699 697 "no page to unlock");
700 698 /*NOTREACHED*/
701 699 }
702 700 if (PP_ISRAF(pp))
703 701 PP_CLRRAF(pp);
704 702 /*
705 703 * We should just upgrade the lock here
706 704 * but there is no upgrade that waits.
707 705 */
708 706 page_unlock(pp);
709 707 }
710 708 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
711 709 SE_EXCL);
712 710 if (pp != NULL)
713 711 page_destroy(pp, 0);
714 712 }
715 713 }
716 714
717 715 /* If locked, release physical memory reservation */
718 716 if (kpd->kp_flags & KPD_LOCKED) {
719 717 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
720 718 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
721 719 atomic_add_long(&anon_segkp_pages_locked, -pages);
722 720 page_unresv(pages);
723 721 }
724 722
725 723 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
726 724 kmem_free(kpd, sizeof (struct segkp_data));
727 725 }
728 726
729 727 /*
730 728 * segkp_map_red() will check the current frame pointer against the
731 729 * stack base. If the amount of stack remaining is questionable
732 730 * (less than red_minavail), then segkp_map_red() will map in the redzone
733 731 * and return 1. Otherwise, it will return 0. segkp_map_red() can
734 732 * _only_ be called when it is safe to sleep on page_create_va().
735 733 *
736 734 * It is up to the caller to remember whether segkp_map_red() successfully
737 735 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
738 736 * time.
739 737 *
740 738 * Currently, this routine is only called from pagefault() (which necessarily
741 739 * satisfies the above conditions).
742 740 */
743 741 #if defined(STACK_GROWTH_DOWN)
744 742 int
745 743 segkp_map_red(void)
746 744 {
747 745 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
748 746 #ifndef _LP64
749 747 caddr_t stkbase;
750 748 #endif
751 749
752 750 /*
753 751 * Optimize for the common case where we simply return.
754 752 */
755 753 if ((curthread->t_red_pp == NULL) &&
756 754 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
757 755 return (0);
758 756
759 757 #if defined(_LP64)
760 758 /*
761 759 * XXX We probably need something better than this.
762 760 */
763 761 panic("kernel stack overflow");
764 762 /*NOTREACHED*/
765 763 #else /* _LP64 */
766 764 if (curthread->t_red_pp == NULL) {
767 765 page_t *red_pp;
768 766 struct seg kseg;
769 767
770 768 caddr_t red_va = (caddr_t)
771 769 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
772 770 PAGESIZE);
773 771
774 772 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
775 773 NULL);
776 774
777 775 /*
778 776 * Allocate the physical for the red page.
779 777 */
780 778 /*
781 779 * No PG_NORELOC here to avoid waits. Unlikely to get
782 780 * a relocate happening in the short time the page exists
783 781 * and it will be OK anyway.
784 782 */
785 783
786 784 kseg.s_as = &kas;
787 785 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
788 786 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
789 787 ASSERT(red_pp != NULL);
790 788
791 789 /*
792 790 * So we now have a page to jam into the redzone...
793 791 */
794 792 page_io_unlock(red_pp);
795 793
796 794 hat_memload(kas.a_hat, red_va, red_pp,
797 795 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
798 796 page_downgrade(red_pp);
799 797
800 798 /*
801 799 * The page is left SE_SHARED locked so we can hold on to
802 800 * the page_t pointer.
803 801 */
804 802 curthread->t_red_pp = red_pp;
805 803
806 804 atomic_inc_32(&red_nmapped);
807 805 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
808 806 (void) atomic_cas_32(&red_closest, red_closest,
809 807 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
810 808 }
811 809 return (1);
812 810 }
813 811
814 812 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
815 813 (uintptr_t)PAGEMASK) - PAGESIZE);
816 814
817 815 atomic_inc_32(&red_ndoubles);
818 816
819 817 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
820 818 /*
821 819 * Oh boy. We're already deep within the mapped-in
822 820 * redzone page, and the caller is trying to prepare
823 821 * for a deep stack run. We're running without a
824 822 * redzone right now: if the caller plows off the
825 823 * end of the stack, it'll plow another thread or
826 824 * LWP structure. That situation could result in
827 825 * a very hard-to-debug panic, so, in the spirit of
828 826 * recording the name of one's killer in one's own
829 827 * blood, we're going to record hrestime and the calling
830 828 * thread.
831 829 */
832 830 red_deep_hires = hrestime.tv_nsec;
833 831 red_deep_thread = curthread;
834 832 }
835 833
836 834 /*
837 835 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
838 836 */
839 837 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
840 838 return (0);
841 839 #endif /* _LP64 */
842 840 }
843 841
844 842 void
845 843 segkp_unmap_red(void)
846 844 {
847 845 page_t *pp;
848 846 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
849 847 (uintptr_t)PAGEMASK) - PAGESIZE);
850 848
851 849 ASSERT(curthread->t_red_pp != NULL);
852 850
853 851 /*
854 852 * Because we locked the mapping down, we can't simply rely
855 853 * on page_destroy() to clean everything up; we need to call
856 854 * hat_unload() to explicitly unlock the mapping resources.
857 855 */
858 856 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
859 857
860 858 pp = curthread->t_red_pp;
861 859
862 860 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
863 861
864 862 /*
865 863 * Need to upgrade the SE_SHARED lock to SE_EXCL.
866 864 */
867 865 if (!page_tryupgrade(pp)) {
868 866 /*
869 867 * As there is no wait for upgrade, release the
870 868 * SE_SHARED lock and wait for SE_EXCL.
871 869 */
872 870 page_unlock(pp);
873 871 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
874 872 /* pp may be NULL here, hence the test below */
875 873 }
876 874
877 875 /*
878 876 * Destroy the page, with dontfree set to zero (i.e. free it).
879 877 */
880 878 if (pp != NULL)
881 879 page_destroy(pp, 0);
882 880 curthread->t_red_pp = NULL;
883 881 }
884 882 #else
885 883 #error Red stacks only supported with downwards stack growth.
886 884 #endif
887 885
888 886 /*
889 887 * Handle a fault on an address corresponding to one of the
890 888 * resources in the segkp segment.
891 889 */
892 890 faultcode_t
893 891 segkp_fault(
894 892 struct hat *hat,
895 893 struct seg *seg,
896 894 caddr_t vaddr,
897 895 size_t len,
898 896 enum fault_type type,
899 897 enum seg_rw rw)
900 898 {
901 899 struct segkp_data *kpd = NULL;
902 900 int err;
903 901
904 902 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
905 903
906 904 /*
907 905 * Sanity checks.
908 906 */
909 907 if (type == F_PROT) {
910 908 panic("segkp_fault: unexpected F_PROT fault");
911 909 /*NOTREACHED*/
912 910 }
913 911
914 912 if ((kpd = segkp_find(seg, vaddr)) == NULL)
915 913 return (FC_NOMAP);
916 914
917 915 mutex_enter(&kpd->kp_lock);
918 916
919 917 if (type == F_SOFTLOCK) {
920 918 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
921 919 /*
922 920 * The F_SOFTLOCK case has more stringent
923 921 * range requirements: the given range must exactly coincide
924 922 * with the resource's mapped portion. Note reference to
925 923 * with the resource's mapped portion. Note that a reference to the
926 924 * redzone is handled, since vaddr would not equal base.
927 925 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
928 926 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
929 927 mutex_exit(&kpd->kp_lock);
930 928 return (FC_MAKE_ERR(EFAULT));
931 929 }
932 930
933 931 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
934 932 mutex_exit(&kpd->kp_lock);
935 933 return (FC_MAKE_ERR(err));
936 934 }
937 935 kpd->kp_flags |= KPD_LOCKED;
938 936 mutex_exit(&kpd->kp_lock);
939 937 return (0);
940 938 }
941 939
942 940 if (type == F_INVAL) {
943 941 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
944 942
945 943 /*
946 944 * Check if we touched the redzone. Somewhat optimistic
947 945 * here if we are touching the redzone of our own stack
948 946 * since we wouldn't have a stack to get this far...
949 947 */
950 948 if ((kpd->kp_flags & KPD_HASREDZONE) &&
951 949 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
952 950 panic("segkp_fault: accessing redzone");
953 951
954 952 /*
955 953 * This fault may occur while the page is being F_SOFTLOCK'ed.
956 954 * Return since a 2nd segkp_load is unnecessary and also would
957 955 * result in the page being locked twice and eventually
958 956 * hang the thread_reaper thread.
959 957 */
960 958 if (kpd->kp_flags & KPD_LOCKED) {
961 959 mutex_exit(&kpd->kp_lock);
962 960 return (0);
963 961 }
964 962
965 963 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
966 964 mutex_exit(&kpd->kp_lock);
967 965 return (err ? FC_MAKE_ERR(err) : 0);
968 966 }
969 967
970 968 if (type == F_SOFTUNLOCK) {
971 969 uint_t flags;
972 970
973 971 /*
974 972 * Make sure the addr is LOCKED and it has anon backing
975 973 * before unlocking
976 974 */
977 975 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
978 976 panic("segkp_fault: bad unlock");
979 977 /*NOTREACHED*/
980 978 }
981 979
982 980 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
983 981 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
984 982 panic("segkp_fault: bad range");
985 983 /*NOTREACHED*/
986 984 }
987 985
988 986 if (rw == S_WRITE)
989 987 flags = kpd->kp_flags | KPD_WRITEDIRTY;
990 988 else
991 989 flags = kpd->kp_flags;
992 990 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
993 991 kpd->kp_flags &= ~KPD_LOCKED;
994 992 mutex_exit(&kpd->kp_lock);
995 993 return (err ? FC_MAKE_ERR(err) : 0);
996 994 }
997 995 mutex_exit(&kpd->kp_lock);
998 996 panic("segkp_fault: bogus fault type: %d\n", type);
999 997 /*NOTREACHED*/
1000 998 }
1001 999
1002 1000 /*
1003 1001 * Check that the given protections suffice over the range specified by
1004 1002 * vaddr and len. For this segment type, the only issue is whether or
1005 1003 * not the range lies completely within the mapped part of an allocated
1006 1004 * resource.
1007 1005 */
1008 1006 /* ARGSUSED */
1009 1007 static int
1010 1008 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1011 1009 {
1012 1010 struct segkp_data *kpd = NULL;
1013 1011 caddr_t mbase;
1014 1012 size_t mlen;
1015 1013
1016 1014 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1017 1015 return (EACCES);
1018 1016
1019 1017 mutex_enter(&kpd->kp_lock);
1020 1018 mbase = stom(kpd->kp_base, kpd->kp_flags);
1021 1019 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1022 1020 if (len > mlen || vaddr < mbase ||
1023 1021 ((vaddr + len) > (mbase + mlen))) {
1024 1022 mutex_exit(&kpd->kp_lock);
1025 1023 return (EACCES);
1026 1024 }
1027 1025 mutex_exit(&kpd->kp_lock);
1028 1026 return (0);
1029 1027 }
1030 1028
1031 1029
1032 1030 /*
1033 1031 * Check to see if it makes sense to do kluster/read ahead to
1034 1032 * addr + delta relative to the mapping at addr. We assume here
1035 1033 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1036 1034 *
1037 1035 * For seg_u we always "approve" of this action from our standpoint.
1038 1036 */
1039 1037 /*ARGSUSED*/
1040 1038 static int
1041 1039 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1042 1040 {
1043 1041 return (0);
1044 1042 }
1045 1043
1046 1044 /*
1047 1045 * Load and possibly lock intra-slot resources in the range given by
1048 1046 * vaddr and len.
1049 1047 */
1050 1048 static int
1051 1049 segkp_load(
1052 1050 struct hat *hat,
1053 1051 struct seg *seg,
1054 1052 caddr_t vaddr,
1055 1053 size_t len,
1056 1054 struct segkp_data *kpd,
1057 1055 uint_t flags)
1058 1056 {
1059 1057 caddr_t va;
1060 1058 caddr_t vlim;
1061 1059 ulong_t i;
1062 1060 uint_t lock;
1063 1061
1064 1062 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1065 1063
1066 1064 len = P2ROUNDUP(len, PAGESIZE);
1067 1065
1068 1066 /* If locking, reserve physical memory */
1069 1067 if (flags & KPD_LOCKED) {
1070 1068 pgcnt_t pages = btop(len);
1071 1069 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1072 1070 atomic_add_long(&anon_segkp_pages_locked, pages);
1073 1071 (void) page_resv(pages, KM_SLEEP);
1074 1072 }
1075 1073
1076 1074 /*
1077 1075 * Loop through the pages in the given range.
1078 1076 */
1079 1077 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1080 1078 vaddr = va;
1081 1079 vlim = va + len;
1082 1080 lock = flags & KPD_LOCKED;
1083 1081 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1084 1082 for (; va < vlim; va += PAGESIZE, i++) {
1085 1083 page_t *pl[2]; /* second element NULL terminator */
1086 1084 struct vnode *vp;
1087 1085 anoff_t off;
1088 1086 int err;
1089 1087 struct anon *ap;
1090 1088
1091 1089 /*
1092 1090 * Summon the page. If it's not resident, arrange
1093 1091 * for synchronous i/o to pull it in.
1094 1092 */
1095 1093 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1096 1094 swap_xlate(ap, &vp, &off);
1097 1095
1098 1096 /*
1099 1097 * The returned page list will have exactly one entry,
1100 1098 * which is returned to us already kept.
1101 1099 */
1102 1100 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1103 1101 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1104 1102
1105 1103 if (err) {
1106 1104 /*
1107 1105 * Back out of what we've done so far.
1108 1106 */
1109 1107 (void) segkp_unlock(hat, seg, vaddr,
1110 1108 (va - vaddr), kpd, flags);
1111 1109 return (err);
1112 1110 }
1113 1111
1114 1112 /*
1115 1113 * Load an MMU translation for the page.
1116 1114 */
1117 1115 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1118 1116 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1119 1117
1120 1118 if (!lock) {
1121 1119 /*
1122 1120 * Now, release "shared" lock on the page.
1123 1121 */
1124 1122 page_unlock(pl[0]);
1125 1123 }
1126 1124 }
1127 1125 return (0);
1128 1126 }
1129 1127
1130 1128 /*
1131 1129 * At the very least, unload the MMU translations and unlock the range if locked.
1132 1130 * Can be called with the KPD_WRITEDIRTY flag, which specifies that any
1133 1131 * dirty pages should be written to disk.
1134 1132 */
1135 1133 static int
1136 1134 segkp_unlock(
1137 1135 struct hat *hat,
1138 1136 struct seg *seg,
1139 1137 caddr_t vaddr,
1140 1138 size_t len,
1141 1139 struct segkp_data *kpd,
1142 1140 uint_t flags)
1143 1141 {
1144 1142 caddr_t va;
1145 1143 caddr_t vlim;
1146 1144 ulong_t i;
1147 1145 struct page *pp;
1148 1146 struct vnode *vp;
1149 1147 anoff_t off;
1150 1148 struct anon *ap;
1151 1149
1152 1150 #ifdef lint
1153 1151 seg = seg;
1154 1152 #endif /* lint */
1155 1153
1156 1154 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1157 1155
1158 1156 /*
1159 1157 * Loop through the pages in the given range. It is assumed
1160 1158 * segkp_unlock is called with page aligned base
1161 1159 * segkp_unlock is called with a page-aligned base.
1162 1160 va = vaddr;
1163 1161 vlim = va + len;
1164 1162 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1165 1163 hat_unload(hat, va, len,
1166 1164 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1167 1165 for (; va < vlim; va += PAGESIZE, i++) {
1168 1166 /*
1169 1167 * Find the page associated with this part of the
1170 1168 * slot, tracking it down through its associated swap
1171 1169 * space.
1172 1170 */
1173 1171 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1174 1172 swap_xlate(ap, &vp, &off);
1175 1173
1176 1174 if (flags & KPD_LOCKED) {
1177 1175 if ((pp = page_find(vp, off)) == NULL) {
1178 1176 if (flags & KPD_LOCKED) {
1179 1177 panic("segkp_softunlock: missing page");
1180 1178 /*NOTREACHED*/
1181 1179 }
1182 1180 }
1183 1181 } else {
1184 1182 /*
1185 1183 * Nothing to do if the slot is not locked and the
1186 1184 * page doesn't exist.
1187 1185 */
1188 1186 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1189 1187 continue;
1190 1188 }
1191 1189
1192 1190 /*
1193 1191 * If the page doesn't have any translations, is
1194 1192 * dirty and not being shared, then push it out
1195 1193 * asynchronously and avoid waiting for the
1196 1194 * pageout daemon to do it for us.
1197 1195 *
1198 1196 * XXX - Do we really need to get the "exclusive"
1199 1197 * lock via an upgrade?
1200 1198 */
1201 1199 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1202 1200 hat_ismod(pp) && page_tryupgrade(pp)) {
1203 1201 /*
1204 1202 * Hold the vnode before releasing the page lock to
1205 1203 * prevent it from being freed and re-used by some
1206 1204 * other thread.
1207 1205 */
1208 1206 VN_HOLD(vp);
1209 1207 page_unlock(pp);
1210 1208
1211 1209 /*
1212 1210 * Want most powerful credentials we can get so
1213 1211 * use kcred.
1214 1212 */
1215 1213 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1216 1214 B_ASYNC | B_FREE, kcred, NULL);
1217 1215 VN_RELE(vp);
1218 1216 } else {
1219 1217 page_unlock(pp);
1220 1218 }
1221 1219 }
1222 1220
1223 1221 /* If unlocking, release physical memory */
1224 1222 if (flags & KPD_LOCKED) {
1225 1223 pgcnt_t pages = btopr(len);
1226 1224 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1227 1225 atomic_add_long(&anon_segkp_pages_locked, -pages);
1228 1226 page_unresv(pages);
1229 1227 }
1230 1228 return (0);
1231 1229 }
1232 1230
1233 1231 /*
1234 1232 * Insert the kpd in the hash table.
1235 1233 */
1236 1234 static void
1237 1235 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1238 1236 {
1239 1237 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1240 1238 int index;
1241 1239
1242 1240 /*
1243 1241 * Insert the kpd based on the address that will be returned
1244 1242 * via segkp_release.
1245 1243 */
1246 1244 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1247 1245 mutex_enter(&segkp_lock);
1248 1246 kpd->kp_next = kpsd->kpsd_hash[index];
1249 1247 kpsd->kpsd_hash[index] = kpd;
1250 1248 mutex_exit(&segkp_lock);
1251 1249 }
1252 1250
1253 1251 /*
1254 1252 * Remove kpd from the hash table.
1255 1253 */
1256 1254 static void
1257 1255 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1258 1256 {
1259 1257 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1260 1258 struct segkp_data **kpp;
1261 1259 int index;
1262 1260
1263 1261 ASSERT(MUTEX_HELD(&segkp_lock));
1264 1262
1265 1263 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1266 1264 for (kpp = &kpsd->kpsd_hash[index];
1267 1265 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1268 1266 if (*kpp == kpd) {
1269 1267 *kpp = kpd->kp_next;
1270 1268 return;
1271 1269 }
1272 1270 }
1273 1271 panic("segkp_delete: unable to find element to delete");
1274 1272 /*NOTREACHED*/
1275 1273 }
1276 1274
1277 1275 /*
1278 1276 * Find the kpd associated with a vaddr.
1279 1277 *
1280 1278 * Most of the callers of segkp_find will pass the vaddr that
1281 1279 * hashes to the desired index, but there are cases where
1282 1280 * this is not true in which case we have to (potentially) scan
1283 1281 * the whole table looking for it. This should be very rare
1284 1282 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1285 1283 * middle of the segkp_data region).
1286 1284 */
1287 1285 static struct segkp_data *
1288 1286 segkp_find(struct seg *seg, caddr_t vaddr)
1289 1287 {
1290 1288 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1291 1289 struct segkp_data *kpd;
1292 1290 int i;
1293 1291 int stop;
1294 1292
1295 1293 i = stop = SEGKP_HASH(vaddr);
1296 1294 mutex_enter(&segkp_lock);
1297 1295 do {
1298 1296 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1299 1297 kpd = kpd->kp_next) {
1300 1298 if (vaddr >= kpd->kp_base &&
1301 1299 vaddr < kpd->kp_base + kpd->kp_len) {
1302 1300 mutex_exit(&segkp_lock);
1303 1301 return (kpd);
1304 1302 }
1305 1303 }
1306 1304 if (--i < 0)
1307 1305 i = SEGKP_HASHSZ - 1; /* Wrap */
1308 1306 } while (i != stop);
1309 1307 mutex_exit(&segkp_lock);
1310 1308 return (NULL); /* Not found */
1311 1309 }
1312 1310
1313 1311 /*
1314 1312 * Returns the size of the swappable area.
1315 1313 */
1316 1314 size_t
1317 1315 swapsize(caddr_t v)
1318 1316 {
1319 1317 struct segkp_data *kpd;
1320 1318
1321 1319 if ((kpd = segkp_find(segkp, v)) != NULL)
1322 1320 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1323 1321 else
1324 1322 return (0);
1325 1323 }
1326 1324
1327 1325 /*
1328 1326 * Dump out all the active segkp pages
1329 1327 */
1330 1328 static void
1331 1329 segkp_dump(struct seg *seg)
1332 1330 {
1333 1331 int i;
1334 1332 struct segkp_data *kpd;
1335 1333 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1336 1334
1337 1335 for (i = 0; i < SEGKP_HASHSZ; i++) {
1338 1336 for (kpd = kpsd->kpsd_hash[i];
1339 1337 kpd != NULL; kpd = kpd->kp_next) {
1340 1338 pfn_t pfn;
1341 1339 caddr_t addr;
1342 1340 caddr_t eaddr;
1343 1341
1344 1342 addr = kpd->kp_base;
1345 1343 eaddr = addr + kpd->kp_len;
1346 1344 while (addr < eaddr) {
1347 1345 ASSERT(seg->s_as == &kas);
1348 1346 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1349 1347 if (pfn != PFN_INVALID)
1350 1348 dump_addpage(seg->s_as, addr, pfn);
1351 1349 addr += PAGESIZE;
1352 1350 dump_timeleft = dump_timeout;
1353 1351 }
1354 1352 }
1355 1353 }
1356 1354 }
1357 1355
1358 1356 /*ARGSUSED*/
1359 1357 static int
1360 1358 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
[1200 lines elided]
1361 1359 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1362 1360 {
1363 1361 return (ENOTSUP);
1364 1362 }
1365 1363
1366 1364 /*ARGSUSED*/
1367 1365 static int
1368 1366 segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1369 1367 {
1370 1368 return (ENODEV);
1371 -}
1372 -
1373 -/*ARGSUSED*/
1374 -static int
1375 -segkp_capable(struct seg *seg, segcapability_t capability)
1376 -{
1377 - return (0);
1378 1369 }
1379 1370
1380 1371 #include <sys/mem_config.h>
1381 1372
1382 1373 /*ARGSUSED*/
1383 1374 static void
1384 1375 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1385 1376 {}
1386 1377
1387 1378 /*
1388 1379 * During memory delete, turn off caches so that pages are not held.
1389 1380 * A better solution may be to unlock the pages while they are
1390 1381 * in the cache so that they may be collected naturally.
1391 1382 */
1392 1383
1393 1384 /*ARGSUSED*/
1394 1385 static int
1395 1386 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1396 1387 {
1397 1388 atomic_inc_32(&segkp_indel);
1398 1389 segkp_cache_free();
1399 1390 return (0);
1400 1391 }
1401 1392
1402 1393 /*ARGSUSED*/
1403 1394 static void
1404 1395 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1405 1396 {
1406 1397 atomic_dec_32(&segkp_indel);
1407 1398 }
1408 1399
1409 1400 static kphysm_setup_vector_t segkp_mem_config_vec = {
1410 1401 KPHYSM_SETUP_VECTOR_VERSION,
1411 1402 segkp_mem_config_post_add,
1412 1403 segkp_mem_config_pre_del,
1413 1404 segkp_mem_config_post_del,
1414 1405 };
1415 1406
1416 1407 static void
1417 1408 segkpinit_mem_config(struct seg *seg)
1418 1409 {
1419 1410 int ret;
1420 1411
1421 1412 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1422 1413 ASSERT(ret == 0);
1423 1414 }
[36 lines elided]