segop_getpolicy already checks for a NULL op
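The per-segment stub becomes removable because the generic seg-ops wrapper is expected to tolerate a missing getpolicy op. As a rough sketch (the assumed shape of the common-code dispatcher, not part of this webrev), the wrapper presumably reads:

	lgrp_mem_policy_info_t *
	segop_getpolicy(struct seg *seg, caddr_t addr)
	{
		if (seg->s_ops->getpolicy == NULL)
			return (NULL);

		return (seg->s_ops->getpolicy(seg, addr));
	}

With that NULL check in the wrapper, dropping segkp's stub (which only ever returned NULL) gives callers exactly the same result as before.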
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or locked in memory.
40 40 */
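/*
 * Illustrative sketch only (not part of this file): a typical caller,
 * e.g. one setting up a pageable kernel thread stack, would use the
 * driver roughly as follows -- "stksz" here is a hypothetical size:
 *
 *	caddr_t va;
 *
 *	va = segkp_get(segkp, P2ROUNDUP(stksz, PAGESIZE),
 *	    KPD_HASREDZONE | KPD_ZERO | KPD_LOCKED);
 *	if (va == NULL)
 *		return (ENOMEM);	-- no more VM resources
 *	...
 *	segkp_release(segkp, va);
 */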
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_dump(struct seg *seg);
79 79 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
80 80 uint_t prot);
81 81 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
82 82 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
83 83 struct page ***page, enum lock_type type,
84 84 enum seg_rw rw);
85 85 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
86 86 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
87 87 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
88 88 struct segkp_data **tkpd, struct anon_map *amp);
89 89 static void segkp_release_internal(struct seg *seg,
90 90 struct segkp_data *kpd, size_t len);
91 91 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
92 92 size_t len, struct segkp_data *kpd, uint_t flags);
93 93 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
94 94 size_t len, struct segkp_data *kpd, uint_t flags);
95 95 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
96 96 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
97 -static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg,
98 - caddr_t addr);
99 97 static int segkp_capable(struct seg *seg, segcapability_t capability);
100 98
101 99 /*
102 100 * Lock used to protect the hash table(s) and caches.
103 101 */
104 102 static kmutex_t segkp_lock;
105 103
106 104 /*
107 105 * The segkp caches
108 106 */
109 107 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
110 108
111 109 /*
112 110 * When there are fewer than red_minavail bytes left on the stack,
113 111 * segkp_map_red() will map in the redzone (if called). 5000 seems
114 112 * to work reasonably well...
115 113 */
116 114 long red_minavail = 5000;
117 115
118 116 /*
119 117 * will be set to 1 for 32 bit x86 systems only, in startup.c
120 118 */
121 119 int segkp_fromheap = 0;
122 120 ulong_t *segkp_bitmap;
123 121
124 122 /*
125 123 * If segkp_map_red() is called with the redzone already mapped and
126 124 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
127 125 * then the stack situation has become quite serious; if much more stack
128 126 * is consumed, we have the potential of scrogging the next thread/LWP
129 127 * structure. To help debug the "can't happen" panics which may
130 128 * result from this condition, we record hrestime and the calling thread
131 129 * in red_deep_hires and red_deep_thread respectively.
132 130 */
133 131 #define RED_DEEP_THRESHOLD 2000
134 132
135 133 hrtime_t red_deep_hires;
136 134 kthread_t *red_deep_thread;
137 135
138 136 uint32_t red_nmapped;
139 137 uint32_t red_closest = UINT_MAX;
140 138 uint32_t red_ndoubles;
141 139
142 140 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
143 141 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
144 142
145 143 static struct seg_ops segkp_ops = {
146 144 .fault = segkp_fault,
147 145 .checkprot = segkp_checkprot,
148 146 .kluster = segkp_kluster,
149 147 .dump = segkp_dump,
150 148 .pagelock = segkp_pagelock,
151 149 .getmemid = segkp_getmemid,
152 - .getpolicy = segkp_getpolicy,
153 150 .capable = segkp_capable,
154 151 };
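/*
 * With the designated initializer above, any seg_ops member that is not
 * named -- now including .getpolicy -- is implicitly zero-initialized,
 * i.e. left as a NULL function pointer, per C initialization rules.
 * A minimal standalone illustration (hypothetical names, not part of
 * this file):
 *
 *	struct ops { int (*get)(void); int (*put)(void); };
 *	static int get_one(void) { return (1); }
 *	static struct ops o = { .get = get_one };	-- o.put is NULL
 *
 * The generic segop_getpolicy() wrapper is then responsible for testing
 * the op for NULL before calling through it, which is what makes the
 * segkp_getpolicy() stub below removable.
 */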
155 152
156 153
157 154 static void segkpinit_mem_config(struct seg *);
158 155
159 156 static uint32_t segkp_indel;
160 157
161 158 /*
162 159 * Allocate the segment specific private data struct and fill it in
163 160 * with the per kp segment mutex, anon ptr. array and hash table.
164 161 */
165 162 int
166 163 segkp_create(struct seg *seg)
167 164 {
168 165 struct segkp_segdata *kpsd;
169 166 size_t np;
170 167
171 168 ASSERT(seg != NULL && seg->s_as == &kas);
172 169 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
173 170
174 171 if (seg->s_size & PAGEOFFSET) {
175 172 panic("Bad segkp size");
176 173 /*NOTREACHED*/
177 174 }
178 175
179 176 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
180 177
181 178 /*
182 179 * Allocate the virtual memory for segkp and initialize it
183 180 */
184 181 if (segkp_fromheap) {
185 182 np = btop(kvseg.s_size);
186 183 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
187 184 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
188 185 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
189 186 } else {
190 187 segkp_bitmap = NULL;
191 188 np = btop(seg->s_size);
192 189 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
193 190 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
194 191 VM_SLEEP);
195 192 }
196 193
197 194 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
198 195
199 196 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
200 197 KM_SLEEP);
201 198 seg->s_data = (void *)kpsd;
202 199 seg->s_ops = &segkp_ops;
203 200 segkpinit_mem_config(seg);
204 201 return (0);
205 202 }
206 203
207 204
208 205 /*
209 206 * Find a free 'freelist' and initialize it with the appropriate attributes
210 207 */
211 208 void *
212 209 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
213 210 {
214 211 int i;
215 212
216 213 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
217 214 return ((void *)-1);
218 215
219 216 mutex_enter(&segkp_lock);
220 217 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
221 218 if (segkp_cache[i].kpf_inuse)
222 219 continue;
223 220 segkp_cache[i].kpf_inuse = 1;
224 221 segkp_cache[i].kpf_max = maxsize;
225 222 segkp_cache[i].kpf_flags = flags;
226 223 segkp_cache[i].kpf_seg = seg;
227 224 segkp_cache[i].kpf_len = len;
228 225 mutex_exit(&segkp_lock);
229 226 return ((void *)(uintptr_t)i);
230 227 }
231 228 mutex_exit(&segkp_lock);
232 229 return ((void *)-1);
233 230 }
234 231
235 232 /*
236 233 * Free all the cache resources.
237 234 */
238 235 void
239 236 segkp_cache_free(void)
240 237 {
241 238 struct segkp_data *kpd;
242 239 struct seg *seg;
243 240 int i;
244 241
245 242 mutex_enter(&segkp_lock);
246 243 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
247 244 if (!segkp_cache[i].kpf_inuse)
248 245 continue;
249 246 /*
250 247 * Disconnect the freelist and process each element
251 248 */
252 249 kpd = segkp_cache[i].kpf_list;
253 250 seg = segkp_cache[i].kpf_seg;
254 251 segkp_cache[i].kpf_list = NULL;
255 252 segkp_cache[i].kpf_count = 0;
256 253 mutex_exit(&segkp_lock);
257 254
258 255 while (kpd != NULL) {
259 256 struct segkp_data *next;
260 257
261 258 next = kpd->kp_next;
262 259 segkp_release_internal(seg, kpd, kpd->kp_len);
263 260 kpd = next;
264 261 }
265 262 mutex_enter(&segkp_lock);
266 263 }
267 264 mutex_exit(&segkp_lock);
268 265 }
269 266
270 267 /*
271 268 * There are 2 entries into segkp_get_internal. The first includes a cookie
272 269 * used to access a pool of cached segkp resources. The second does not
273 270 * use the cache.
274 271 */
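/*
 * Illustrative sketch only (the count and length values are
 * hypothetical): the cached entry point pairs a one-time
 * segkp_cache_init() with later segkp_cache_get() calls, e.g.:
 *
 *	void *cookie;
 *	caddr_t va;
 *
 *	cookie = segkp_cache_init(segkp, 24, stksize,
 *	    KPD_HASREDZONE | KPD_LOCKED);	-- (void *)-1 on failure
 *	...
 *	va = segkp_cache_get(cookie);	-- reuses a freelist entry if any
 *	...
 *	segkp_release(segkp, va);	-- may go back on the freelist
 */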
275 272 caddr_t
276 273 segkp_get(struct seg *seg, size_t len, uint_t flags)
277 274 {
278 275 struct segkp_data *kpd = NULL;
279 276
280 277 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
281 278 kpd->kp_cookie = -1;
282 279 return (stom(kpd->kp_base, flags));
283 280 }
284 281 return (NULL);
285 282 }
286 283
287 284 /*
288 285 * Return a 'cached' segkp address
289 286 */
290 287 caddr_t
291 288 segkp_cache_get(void *cookie)
292 289 {
293 290 struct segkp_cache *freelist = NULL;
294 291 struct segkp_data *kpd = NULL;
295 292 int index = (int)(uintptr_t)cookie;
296 293 struct seg *seg;
297 294 size_t len;
298 295 uint_t flags;
299 296
300 297 if (index < 0 || index >= SEGKP_MAX_CACHE)
301 298 return (NULL);
302 299 freelist = &segkp_cache[index];
303 300
304 301 mutex_enter(&segkp_lock);
305 302 seg = freelist->kpf_seg;
306 303 flags = freelist->kpf_flags;
307 304 if (freelist->kpf_list != NULL) {
308 305 kpd = freelist->kpf_list;
309 306 freelist->kpf_list = kpd->kp_next;
310 307 freelist->kpf_count--;
311 308 mutex_exit(&segkp_lock);
312 309 kpd->kp_next = NULL;
313 310 segkp_insert(seg, kpd);
314 311 return (stom(kpd->kp_base, flags));
315 312 }
316 313 len = freelist->kpf_len;
317 314 mutex_exit(&segkp_lock);
318 315 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
319 316 kpd->kp_cookie = index;
320 317 return (stom(kpd->kp_base, flags));
321 318 }
322 319 return (NULL);
323 320 }
324 321
325 322 caddr_t
326 323 segkp_get_withanonmap(
327 324 struct seg *seg,
328 325 size_t len,
329 326 uint_t flags,
330 327 struct anon_map *amp)
331 328 {
332 329 struct segkp_data *kpd = NULL;
333 330
334 331 ASSERT(amp != NULL);
335 332 flags |= KPD_HASAMP;
336 333 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
337 334 kpd->kp_cookie = -1;
338 335 return (stom(kpd->kp_base, flags));
339 336 }
340 337 return (NULL);
341 338 }
342 339
343 340 /*
344 341 * This does the real work of segkp allocation.
345 342 * Return to client base addr. len must be page-aligned. A null value is
346 343 * returned if there are no more vm resources (e.g. pages, swap). The len
347 344 * and base recorded in the private data structure include the redzone
348 345 * and the redzone length (if applicable). If the user requests a redzone
349 346 * either the first or last page is left unmapped depending whether stacks
350 347 * grow to low or high memory.
351 348 *
352 349 * The client may also specify a no-wait flag. If that is set then the
353 350 * request will choose a non-blocking path when requesting resources.
354 351 * The default is make the client wait.
355 352 */
356 353 static caddr_t
357 354 segkp_get_internal(
358 355 struct seg *seg,
359 356 size_t len,
360 357 uint_t flags,
361 358 struct segkp_data **tkpd,
362 359 struct anon_map *amp)
363 360 {
364 361 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
365 362 struct segkp_data *kpd;
366 363 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
367 364 pgcnt_t np = 0; /* number of pages in the resource */
368 365 pgcnt_t segkpindex;
369 366 long i;
370 367 caddr_t va;
371 368 pgcnt_t pages = 0;
372 369 ulong_t anon_idx = 0;
373 370 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
374 371 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
375 372
376 373 if (len & PAGEOFFSET) {
377 374 panic("segkp_get: len is not page-aligned");
378 375 /*NOTREACHED*/
379 376 }
380 377
381 378 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
382 379
383 380 /* Only allow KPD_NO_ANON if we are going to lock it down */
384 381 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
385 382 return (NULL);
386 383
387 384 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
388 385 return (NULL);
389 386 /*
390 387 * Fix up the len to reflect the REDZONE if applicable
391 388 */
392 389 if (flags & KPD_HASREDZONE)
393 390 len += PAGESIZE;
394 391 np = btop(len);
395 392
396 393 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
397 394 if (vbase == NULL) {
398 395 kmem_free(kpd, sizeof (struct segkp_data));
399 396 return (NULL);
400 397 }
401 398
402 399 /* If locking, reserve physical memory */
403 400 if (flags & KPD_LOCKED) {
404 401 pages = btop(SEGKP_MAPLEN(len, flags));
405 402 if (page_resv(pages, kmflag) == 0) {
406 403 vmem_free(SEGKP_VMEM(seg), vbase, len);
407 404 kmem_free(kpd, sizeof (struct segkp_data));
408 405 return (NULL);
409 406 }
410 407 if ((flags & KPD_NO_ANON) == 0)
411 408 atomic_add_long(&anon_segkp_pages_locked, pages);
412 409 }
413 410
414 411 /*
415 412 * Reserve sufficient swap space for this vm resource. We'll
416 413 * actually allocate it in the loop below, but reserving it
417 414 * here allows us to back out more gracefully than if we
418 415 * had an allocation failure in the body of the loop.
419 416 *
420 417 * Note that we don't need swap space for the red zone page.
421 418 */
422 419 if (amp != NULL) {
423 420 /*
424 421 * The swap reservation has been done, if required, and the
425 422 * anon_hdr is separate.
426 423 */
427 424 anon_idx = 0;
428 425 kpd->kp_anon_idx = anon_idx;
429 426 kpd->kp_anon = amp->ahp;
430 427
431 428 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
432 429 kpd, vbase, len, flags, 1);
433 430
434 431 } else if ((flags & KPD_NO_ANON) == 0) {
435 432 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
436 433 if (flags & KPD_LOCKED) {
437 434 atomic_add_long(&anon_segkp_pages_locked,
438 435 -pages);
439 436 page_unresv(pages);
440 437 }
441 438 vmem_free(SEGKP_VMEM(seg), vbase, len);
442 439 kmem_free(kpd, sizeof (struct segkp_data));
443 440 return (NULL);
444 441 }
445 442 atomic_add_long(&anon_segkp_pages_resv,
446 443 btop(SEGKP_MAPLEN(len, flags)));
447 444 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
448 445 kpd->kp_anon_idx = anon_idx;
449 446 kpd->kp_anon = kpsd->kpsd_anon;
450 447
451 448 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
452 449 kpd, vbase, len, flags, 1);
453 450 } else {
454 451 kpd->kp_anon = NULL;
455 452 kpd->kp_anon_idx = 0;
456 453 }
457 454
458 455 /*
459 456 * Allocate page and anon resources for the virtual address range
460 457 * except the redzone
461 458 */
462 459 if (segkp_fromheap)
463 460 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
464 461 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
465 462 page_t *pl[2];
466 463 struct vnode *vp;
467 464 anoff_t off;
468 465 int err;
469 466 page_t *pp = NULL;
470 467
471 468 /*
472 469 * Mark this page to be a segkp page in the bitmap.
473 470 */
474 471 if (segkp_fromheap) {
475 472 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
476 473 segkpindex++;
477 474 }
478 475
479 476 /*
480 477 * If this page is the red zone page, we don't need swap
481 478 * space for it. Note that we skip over the code that
482 479 * establishes MMU mappings, so that the page remains
483 480 * invalid.
484 481 */
485 482 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
486 483 continue;
487 484
488 485 if (kpd->kp_anon != NULL) {
489 486 struct anon *ap;
490 487
491 488 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
492 489 == NULL);
493 490 /*
494 491 * Determine the "vp" and "off" of the anon slot.
495 492 */
496 493 ap = anon_alloc(NULL, 0);
497 494 if (amp != NULL)
498 495 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
499 496 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
500 497 ap, ANON_SLEEP);
501 498 if (amp != NULL)
502 499 ANON_LOCK_EXIT(&amp->a_rwlock);
503 500 swap_xlate(ap, &vp, &off);
504 501
505 502 /*
506 503 * Create a page with the specified identity. The
507 504 * page is returned with the "shared" lock held.
508 505 */
509 506 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
510 507 NULL, pl, PAGESIZE, seg, va, S_CREATE,
511 508 kcred, NULL);
512 509 if (err) {
513 510 /*
514 511 * XXX - This should not fail.
515 512 */
516 513 panic("segkp_get: no pages");
517 514 /*NOTREACHED*/
518 515 }
519 516 pp = pl[0];
520 517 } else {
521 518 ASSERT(page_exists(&kvp,
522 519 (u_offset_t)(uintptr_t)va) == NULL);
523 520
524 521 if ((pp = page_create_va(&kvp,
525 522 (u_offset_t)(uintptr_t)va, PAGESIZE,
526 523 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
527 524 PG_NORELOC, seg, va)) == NULL) {
528 525 /*
529 526 * Legitimize resource; then destroy it.
530 527 * Easier than trying to unwind here.
531 528 */
532 529 kpd->kp_flags = flags;
533 530 kpd->kp_base = vbase;
534 531 kpd->kp_len = len;
535 532 segkp_release_internal(seg, kpd, va - vbase);
536 533 return (NULL);
537 534 }
538 535 page_io_unlock(pp);
539 536 }
540 537
541 538 if (flags & KPD_ZERO)
542 539 pagezero(pp, 0, PAGESIZE);
543 540
544 541 /*
545 542 * Load and lock an MMU translation for the page.
546 543 */
547 544 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
548 545 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
549 546
550 547 /*
551 548 * Now, release lock on the page.
552 549 */
553 550 if (flags & KPD_LOCKED) {
554 551 /*
555 552 * Indicate to page_retire framework that this
556 553 * page can only be retired when it is freed.
557 554 */
558 555 PP_SETRAF(pp);
559 556 page_downgrade(pp);
560 557 } else
561 558 page_unlock(pp);
562 559 }
563 560
564 561 kpd->kp_flags = flags;
565 562 kpd->kp_base = vbase;
566 563 kpd->kp_len = len;
567 564 segkp_insert(seg, kpd);
568 565 *tkpd = kpd;
569 566 return (stom(kpd->kp_base, flags));
570 567 }
571 568
572 569 /*
573 570 * Release the resource to cache if the pool (designated by the cookie)
574 571 * has less than the maximum allowable. If inserted in cache,
575 572 * segkp_delete insures element is taken off of active list.
576 573 */
577 574 void
578 575 segkp_release(struct seg *seg, caddr_t vaddr)
579 576 {
580 577 struct segkp_cache *freelist;
581 578 struct segkp_data *kpd = NULL;
582 579
583 580 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
584 581 panic("segkp_release: null kpd");
585 582 /*NOTREACHED*/
586 583 }
587 584
588 585 if (kpd->kp_cookie != -1) {
589 586 freelist = &segkp_cache[kpd->kp_cookie];
590 587 mutex_enter(&segkp_lock);
591 588 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
592 589 segkp_delete(seg, kpd);
593 590 kpd->kp_next = freelist->kpf_list;
594 591 freelist->kpf_list = kpd;
595 592 freelist->kpf_count++;
596 593 mutex_exit(&segkp_lock);
597 594 return;
598 595 } else {
599 596 mutex_exit(&segkp_lock);
600 597 kpd->kp_cookie = -1;
601 598 }
602 599 }
603 600 segkp_release_internal(seg, kpd, kpd->kp_len);
604 601 }
605 602
606 603 /*
607 604 * Free the entire resource. segkp_unlock gets called with the start of the
608 605 * mapped portion of the resource. The length is the size of the mapped
609 606 * portion
610 607 */
611 608 static void
612 609 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
613 610 {
614 611 caddr_t va;
615 612 long i;
616 613 long redzone;
617 614 size_t np;
618 615 page_t *pp;
619 616 struct vnode *vp;
620 617 anoff_t off;
621 618 struct anon *ap;
622 619 pgcnt_t segkpindex;
623 620
624 621 ASSERT(kpd != NULL);
625 622 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
626 623 np = btop(len);
627 624
628 625 /* Remove from active hash list */
629 626 if (kpd->kp_cookie == -1) {
630 627 mutex_enter(&segkp_lock);
631 628 segkp_delete(seg, kpd);
632 629 mutex_exit(&segkp_lock);
633 630 }
634 631
635 632 /*
636 633 * Precompute redzone page index.
637 634 */
638 635 redzone = -1;
639 636 if (kpd->kp_flags & KPD_HASREDZONE)
640 637 redzone = KPD_REDZONE(kpd);
641 638
642 639
643 640 va = kpd->kp_base;
644 641
645 642 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
646 643 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
647 644 /*
648 645 * Free up those anon resources that are quiescent.
649 646 */
650 647 if (segkp_fromheap)
651 648 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
652 649 for (i = 0; i < np; i++, va += PAGESIZE) {
653 650
654 651 /*
655 652 * Clear the bit for this page from the bitmap.
656 653 */
657 654 if (segkp_fromheap) {
658 655 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
659 656 segkpindex++;
660 657 }
661 658
662 659 if (i == redzone)
663 660 continue;
664 661 if (kpd->kp_anon) {
665 662 /*
666 663 * Free up anon resources and destroy the
667 664 * associated pages.
668 665 *
669 666 * Release the lock if there is one. Have to get the
670 667 * page to do this, unfortunately.
671 668 */
672 669 if (kpd->kp_flags & KPD_LOCKED) {
673 670 ap = anon_get_ptr(kpd->kp_anon,
674 671 kpd->kp_anon_idx + i);
675 672 swap_xlate(ap, &vp, &off);
676 673 /* Find the shared-locked page. */
677 674 pp = page_find(vp, (u_offset_t)off);
678 675 if (pp == NULL) {
679 676 panic("segkp_release: "
680 677 "kp_anon: no page to unlock ");
681 678 /*NOTREACHED*/
682 679 }
683 680 if (PP_ISRAF(pp))
684 681 PP_CLRRAF(pp);
685 682
686 683 page_unlock(pp);
687 684 }
688 685 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
689 686 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
690 687 PAGESIZE);
691 688 anon_unresv_zone(PAGESIZE, NULL);
692 689 atomic_dec_ulong(&anon_segkp_pages_resv);
693 690 }
694 691 TRACE_5(TR_FAC_VM,
695 692 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
696 693 kpd, va, PAGESIZE, 0, 0);
697 694 } else {
698 695 if (kpd->kp_flags & KPD_LOCKED) {
699 696 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
700 697 if (pp == NULL) {
701 698 panic("segkp_release: "
702 699 "no page to unlock");
703 700 /*NOTREACHED*/
704 701 }
705 702 if (PP_ISRAF(pp))
706 703 PP_CLRRAF(pp);
707 704 /*
708 705 * We should just upgrade the lock here
709 706 * but there is no upgrade that waits.
710 707 */
711 708 page_unlock(pp);
712 709 }
713 710 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
714 711 SE_EXCL);
715 712 if (pp != NULL)
716 713 page_destroy(pp, 0);
717 714 }
718 715 }
719 716
720 717 /* If locked, release physical memory reservation */
721 718 if (kpd->kp_flags & KPD_LOCKED) {
722 719 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
723 720 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
724 721 atomic_add_long(&anon_segkp_pages_locked, -pages);
725 722 page_unresv(pages);
726 723 }
727 724
728 725 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
729 726 kmem_free(kpd, sizeof (struct segkp_data));
730 727 }
731 728
732 729 /*
733 730 * segkp_map_red() will check the current frame pointer against the
734 731 * stack base. If the amount of stack remaining is questionable
735 732 * (less than red_minavail), then segkp_map_red() will map in the redzone
736 733 * and return 1. Otherwise, it will return 0. segkp_map_red() can
737 734 * _only_ be called when it is safe to sleep on page_create_va().
738 735 *
739 736 * It is up to the caller to remember whether segkp_map_red() successfully
740 737 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
741 738 * time.
742 739 *
743 740 * Currently, this routine is only called from pagefault() (which necessarily
744 741 * satisfies the above conditions).
745 742 */
746 743 #if defined(STACK_GROWTH_DOWN)
747 744 int
748 745 segkp_map_red(void)
749 746 {
750 747 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
751 748 #ifndef _LP64
752 749 caddr_t stkbase;
753 750 #endif
754 751
755 752 /*
756 753 * Optimize for the common case where we simply return.
757 754 */
758 755 if ((curthread->t_red_pp == NULL) &&
759 756 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
760 757 return (0);
761 758
762 759 #if defined(_LP64)
763 760 /*
764 761 * XXX We probably need something better than this.
765 762 */
766 763 panic("kernel stack overflow");
767 764 /*NOTREACHED*/
768 765 #else /* _LP64 */
769 766 if (curthread->t_red_pp == NULL) {
770 767 page_t *red_pp;
771 768 struct seg kseg;
772 769
773 770 caddr_t red_va = (caddr_t)
774 771 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
775 772 PAGESIZE);
776 773
777 774 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
778 775 NULL);
779 776
780 777 /*
781 778 * Allocate the physical for the red page.
782 779 */
783 780 /*
784 781 * No PG_NORELOC here to avoid waits. Unlikely to get
785 782 * a relocate happening in the short time the page exists
786 783 * and it will be OK anyway.
787 784 */
788 785
789 786 kseg.s_as = &kas;
790 787 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
791 788 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
792 789 ASSERT(red_pp != NULL);
793 790
794 791 /*
795 792 * So we now have a page to jam into the redzone...
796 793 */
797 794 page_io_unlock(red_pp);
798 795
799 796 hat_memload(kas.a_hat, red_va, red_pp,
800 797 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
801 798 page_downgrade(red_pp);
802 799
803 800 /*
804 801 * The page is left SE_SHARED locked so we can hold on to
805 802 * the page_t pointer.
806 803 */
807 804 curthread->t_red_pp = red_pp;
808 805
809 806 atomic_inc_32(&red_nmapped);
810 807 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
811 808 (void) atomic_cas_32(&red_closest, red_closest,
812 809 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
813 810 }
814 811 return (1);
815 812 }
816 813
817 814 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
818 815 (uintptr_t)PAGEMASK) - PAGESIZE);
819 816
820 817 atomic_inc_32(&red_ndoubles);
821 818
822 819 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
823 820 /*
824 821 * Oh boy. We're already deep within the mapped-in
825 822 * redzone page, and the caller is trying to prepare
826 823 * for a deep stack run. We're running without a
827 824 * redzone right now: if the caller plows off the
828 825 * end of the stack, it'll plow another thread or
829 826 * LWP structure. That situation could result in
830 827 * a very hard-to-debug panic, so, in the spirit of
831 828 * recording the name of one's killer in one's own
832 829 * blood, we're going to record hrestime and the calling
833 830 * thread.
834 831 */
835 832 red_deep_hires = hrestime.tv_nsec;
836 833 red_deep_thread = curthread;
837 834 }
838 835
839 836 /*
840 837 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
841 838 */
842 839 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
843 840 return (0);
844 841 #endif /* _LP64 */
845 842 }
846 843
847 844 void
848 845 segkp_unmap_red(void)
849 846 {
850 847 page_t *pp;
851 848 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
852 849 (uintptr_t)PAGEMASK) - PAGESIZE);
853 850
854 851 ASSERT(curthread->t_red_pp != NULL);
855 852
856 853 /*
857 854 * Because we locked the mapping down, we can't simply rely
858 855 * on page_destroy() to clean everything up; we need to call
859 856 * hat_unload() to explicitly unlock the mapping resources.
860 857 */
861 858 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
862 859
863 860 pp = curthread->t_red_pp;
864 861
865 862 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
866 863
867 864 /*
868 865 * Need to upgrade the SE_SHARED lock to SE_EXCL.
869 866 */
870 867 if (!page_tryupgrade(pp)) {
871 868 /*
872 869 * As there is no wait for upgrade, release the
873 870 * SE_SHARED lock and wait for SE_EXCL.
874 871 */
875 872 page_unlock(pp);
876 873 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
877 874 /* pp may be NULL here, hence the test below */
878 875 }
879 876
880 877 /*
881 878 * Destroy the page, with dontfree set to zero (i.e. free it).
882 879 */
883 880 if (pp != NULL)
884 881 page_destroy(pp, 0);
885 882 curthread->t_red_pp = NULL;
886 883 }
887 884 #else
888 885 #error Red stacks only supported with downwards stack growth.
889 886 #endif
890 887
891 888 /*
892 889 * Handle a fault on an address corresponding to one of the
893 890 * resources in the segkp segment.
894 891 */
895 892 faultcode_t
896 893 segkp_fault(
897 894 struct hat *hat,
898 895 struct seg *seg,
899 896 caddr_t vaddr,
900 897 size_t len,
901 898 enum fault_type type,
902 899 enum seg_rw rw)
903 900 {
904 901 struct segkp_data *kpd = NULL;
905 902 int err;
906 903
907 904 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
908 905
909 906 /*
910 907 * Sanity checks.
911 908 */
912 909 if (type == F_PROT) {
913 910 panic("segkp_fault: unexpected F_PROT fault");
914 911 /*NOTREACHED*/
915 912 }
916 913
917 914 if ((kpd = segkp_find(seg, vaddr)) == NULL)
918 915 return (FC_NOMAP);
919 916
920 917 mutex_enter(&kpd->kp_lock);
921 918
922 919 if (type == F_SOFTLOCK) {
923 920 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
924 921 /*
925 922 * The F_SOFTLOCK case has more stringent
926 923 * range requirements: the given range must exactly coincide
927 924 * with the resource's mapped portion. Note reference to
928 925 * redzone is handled since vaddr would not equal base
929 926 */
930 927 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
931 928 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
932 929 mutex_exit(&kpd->kp_lock);
933 930 return (FC_MAKE_ERR(EFAULT));
934 931 }
935 932
936 933 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
937 934 mutex_exit(&kpd->kp_lock);
938 935 return (FC_MAKE_ERR(err));
939 936 }
940 937 kpd->kp_flags |= KPD_LOCKED;
941 938 mutex_exit(&kpd->kp_lock);
942 939 return (0);
943 940 }
944 941
945 942 if (type == F_INVAL) {
946 943 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
947 944
948 945 /*
949 946 * Check if we touched the redzone. Somewhat optimistic
950 947 * here if we are touching the redzone of our own stack
951 948 * since we wouldn't have a stack to get this far...
952 949 */
953 950 if ((kpd->kp_flags & KPD_HASREDZONE) &&
954 951 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
955 952 panic("segkp_fault: accessing redzone");
956 953
957 954 /*
958 955 * This fault may occur while the page is being F_SOFTLOCK'ed.
959 956 * Return since a 2nd segkp_load is unnecessary and also would
960 957 * result in the page being locked twice and eventually
961 958 * hang the thread_reaper thread.
962 959 */
963 960 if (kpd->kp_flags & KPD_LOCKED) {
964 961 mutex_exit(&kpd->kp_lock);
965 962 return (0);
966 963 }
967 964
968 965 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
969 966 mutex_exit(&kpd->kp_lock);
970 967 return (err ? FC_MAKE_ERR(err) : 0);
971 968 }
972 969
973 970 if (type == F_SOFTUNLOCK) {
974 971 uint_t flags;
975 972
976 973 /*
977 974 * Make sure the addr is LOCKED and it has anon backing
978 975 * before unlocking
979 976 */
980 977 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
981 978 panic("segkp_fault: bad unlock");
982 979 /*NOTREACHED*/
983 980 }
984 981
985 982 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
986 983 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
987 984 panic("segkp_fault: bad range");
988 985 /*NOTREACHED*/
989 986 }
990 987
991 988 if (rw == S_WRITE)
992 989 flags = kpd->kp_flags | KPD_WRITEDIRTY;
993 990 else
994 991 flags = kpd->kp_flags;
995 992 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
996 993 kpd->kp_flags &= ~KPD_LOCKED;
997 994 mutex_exit(&kpd->kp_lock);
998 995 return (err ? FC_MAKE_ERR(err) : 0);
999 996 }
1000 997 mutex_exit(&kpd->kp_lock);
1001 998 panic("segkp_fault: bogus fault type: %d\n", type);
1002 999 /*NOTREACHED*/
1003 1000 }
1004 1001
1005 1002 /*
1006 1003 * Check that the given protections suffice over the range specified by
1007 1004 * vaddr and len. For this segment type, the only issue is whether or
1008 1005 * not the range lies completely within the mapped part of an allocated
1009 1006 * resource.
1010 1007 */
1011 1008 /* ARGSUSED */
1012 1009 static int
1013 1010 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1014 1011 {
1015 1012 struct segkp_data *kpd = NULL;
1016 1013 caddr_t mbase;
1017 1014 size_t mlen;
1018 1015
1019 1016 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1020 1017 return (EACCES);
1021 1018
1022 1019 mutex_enter(&kpd->kp_lock);
1023 1020 mbase = stom(kpd->kp_base, kpd->kp_flags);
1024 1021 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1025 1022 if (len > mlen || vaddr < mbase ||
1026 1023 ((vaddr + len) > (mbase + mlen))) {
1027 1024 mutex_exit(&kpd->kp_lock);
1028 1025 return (EACCES);
1029 1026 }
1030 1027 mutex_exit(&kpd->kp_lock);
1031 1028 return (0);
1032 1029 }
1033 1030
1034 1031
1035 1032 /*
1036 1033 * Check to see if it makes sense to do kluster/read ahead to
1037 1034 * addr + delta relative to the mapping at addr. We assume here
1038 1035 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1039 1036 *
1040 1037 * For seg_u we always "approve" of this action from our standpoint.
1041 1038 */
1042 1039 /*ARGSUSED*/
1043 1040 static int
1044 1041 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1045 1042 {
1046 1043 return (0);
1047 1044 }
1048 1045
1049 1046 /*
1050 1047 * Load and possibly lock intra-slot resources in the range given by
1051 1048 * vaddr and len.
1052 1049 */
1053 1050 static int
1054 1051 segkp_load(
1055 1052 struct hat *hat,
1056 1053 struct seg *seg,
1057 1054 caddr_t vaddr,
1058 1055 size_t len,
1059 1056 struct segkp_data *kpd,
1060 1057 uint_t flags)
1061 1058 {
1062 1059 caddr_t va;
1063 1060 caddr_t vlim;
1064 1061 ulong_t i;
1065 1062 uint_t lock;
1066 1063
1067 1064 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1068 1065
1069 1066 len = P2ROUNDUP(len, PAGESIZE);
1070 1067
1071 1068 /* If locking, reserve physical memory */
1072 1069 if (flags & KPD_LOCKED) {
1073 1070 pgcnt_t pages = btop(len);
1074 1071 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1075 1072 atomic_add_long(&anon_segkp_pages_locked, pages);
1076 1073 (void) page_resv(pages, KM_SLEEP);
1077 1074 }
1078 1075
1079 1076 /*
1080 1077 * Loop through the pages in the given range.
1081 1078 */
1082 1079 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1083 1080 vaddr = va;
1084 1081 vlim = va + len;
1085 1082 lock = flags & KPD_LOCKED;
1086 1083 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1087 1084 for (; va < vlim; va += PAGESIZE, i++) {
1088 1085 page_t *pl[2]; /* second element NULL terminator */
1089 1086 struct vnode *vp;
1090 1087 anoff_t off;
1091 1088 int err;
1092 1089 struct anon *ap;
1093 1090
1094 1091 /*
1095 1092 * Summon the page. If it's not resident, arrange
1096 1093 * for synchronous i/o to pull it in.
1097 1094 */
1098 1095 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1099 1096 swap_xlate(ap, &vp, &off);
1100 1097
1101 1098 /*
1102 1099 * The returned page list will have exactly one entry,
1103 1100 * which is returned to us already kept.
1104 1101 */
1105 1102 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1106 1103 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1107 1104
1108 1105 if (err) {
1109 1106 /*
1110 1107 * Back out of what we've done so far.
1111 1108 */
1112 1109 (void) segkp_unlock(hat, seg, vaddr,
1113 1110 (va - vaddr), kpd, flags);
1114 1111 return (err);
1115 1112 }
1116 1113
1117 1114 /*
1118 1115 * Load an MMU translation for the page.
1119 1116 */
1120 1117 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1121 1118 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1122 1119
1123 1120 if (!lock) {
1124 1121 /*
1125 1122 * Now, release "shared" lock on the page.
1126 1123 */
1127 1124 page_unlock(pl[0]);
1128 1125 }
1129 1126 }
1130 1127 return (0);
1131 1128 }
1132 1129
1133 1130 /*
1134 1131 * At the very least unload the mmu-translations and unlock the range if locked
1135 1132 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
1136 1133 * any dirty pages should be written to disk.
1137 1134 */
1138 1135 static int
1139 1136 segkp_unlock(
1140 1137 struct hat *hat,
1141 1138 struct seg *seg,
1142 1139 caddr_t vaddr,
1143 1140 size_t len,
1144 1141 struct segkp_data *kpd,
1145 1142 uint_t flags)
1146 1143 {
1147 1144 caddr_t va;
1148 1145 caddr_t vlim;
1149 1146 ulong_t i;
1150 1147 struct page *pp;
1151 1148 struct vnode *vp;
1152 1149 anoff_t off;
1153 1150 struct anon *ap;
1154 1151
1155 1152 #ifdef lint
1156 1153 seg = seg;
1157 1154 #endif /* lint */
1158 1155
1159 1156 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1160 1157
1161 1158 /*
1162 1159 * Loop through the pages in the given range. It is assumed
1163 1160 * segkp_unlock is called with page aligned base
1164 1161 */
1165 1162 va = vaddr;
1166 1163 vlim = va + len;
1167 1164 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1168 1165 hat_unload(hat, va, len,
1169 1166 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1170 1167 for (; va < vlim; va += PAGESIZE, i++) {
1171 1168 /*
1172 1169 * Find the page associated with this part of the
1173 1170 * slot, tracking it down through its associated swap
1174 1171 * space.
1175 1172 */
1176 1173 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1177 1174 swap_xlate(ap, &vp, &off);
1178 1175
1179 1176 if (flags & KPD_LOCKED) {
1180 1177 if ((pp = page_find(vp, off)) == NULL) {
1181 1178 if (flags & KPD_LOCKED) {
1182 1179 panic("segkp_softunlock: missing page");
1183 1180 /*NOTREACHED*/
1184 1181 }
1185 1182 }
1186 1183 } else {
1187 1184 /*
1188 1185 * Nothing to do if the slot is not locked and the
1189 1186 * page doesn't exist.
1190 1187 */
1191 1188 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1192 1189 continue;
1193 1190 }
1194 1191
1195 1192 /*
1196 1193 * If the page doesn't have any translations, is
1197 1194 * dirty and not being shared, then push it out
1198 1195 * asynchronously and avoid waiting for the
1199 1196 * pageout daemon to do it for us.
1200 1197 *
1201 1198 * XXX - Do we really need to get the "exclusive"
1202 1199 * lock via an upgrade?
1203 1200 */
1204 1201 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1205 1202 hat_ismod(pp) && page_tryupgrade(pp)) {
1206 1203 /*
1207 1204 * Hold the vnode before releasing the page lock to
1208 1205 * prevent it from being freed and re-used by some
1209 1206 * other thread.
1210 1207 */
1211 1208 VN_HOLD(vp);
1212 1209 page_unlock(pp);
1213 1210
1214 1211 /*
1215 1212 * Want most powerful credentials we can get so
1216 1213 * use kcred.
1217 1214 */
1218 1215 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1219 1216 B_ASYNC | B_FREE, kcred, NULL);
1220 1217 VN_RELE(vp);
1221 1218 } else {
1222 1219 page_unlock(pp);
1223 1220 }
1224 1221 }
1225 1222
1226 1223 /* If unlocking, release physical memory */
1227 1224 if (flags & KPD_LOCKED) {
1228 1225 pgcnt_t pages = btopr(len);
1229 1226 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1230 1227 atomic_add_long(&anon_segkp_pages_locked, -pages);
1231 1228 page_unresv(pages);
1232 1229 }
1233 1230 return (0);
1234 1231 }
1235 1232
1236 1233 /*
1237 1234 * Insert the kpd in the hash table.
1238 1235 */
1239 1236 static void
1240 1237 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1241 1238 {
1242 1239 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1243 1240 int index;
1244 1241
1245 1242 /*
1246 1243 * Insert the kpd based on the address that will be returned
1247 1244 * via segkp_release.
1248 1245 */
1249 1246 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1250 1247 mutex_enter(&segkp_lock);
1251 1248 kpd->kp_next = kpsd->kpsd_hash[index];
1252 1249 kpsd->kpsd_hash[index] = kpd;
1253 1250 mutex_exit(&segkp_lock);
1254 1251 }
1255 1252
1256 1253 /*
1257 1254 * Remove kpd from the hash table.
1258 1255 */
1259 1256 static void
1260 1257 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1261 1258 {
1262 1259 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1263 1260 struct segkp_data **kpp;
1264 1261 int index;
1265 1262
1266 1263 ASSERT(MUTEX_HELD(&segkp_lock));
1267 1264
1268 1265 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1269 1266 for (kpp = &kpsd->kpsd_hash[index];
1270 1267 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1271 1268 if (*kpp == kpd) {
1272 1269 *kpp = kpd->kp_next;
1273 1270 return;
1274 1271 }
1275 1272 }
1276 1273 panic("segkp_delete: unable to find element to delete");
1277 1274 /*NOTREACHED*/
1278 1275 }
1279 1276
1280 1277 /*
1281 1278 * Find the kpd associated with a vaddr.
1282 1279 *
1283 1280 * Most of the callers of segkp_find will pass the vaddr that
1284 1281 * hashes to the desired index, but there are cases where
1285 1282 * this is not true in which case we have to (potentially) scan
1286 1283 * the whole table looking for it. This should be very rare
1287 1284 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1288 1285 * middle of the segkp_data region).
1289 1286 */
1290 1287 static struct segkp_data *
1291 1288 segkp_find(struct seg *seg, caddr_t vaddr)
1292 1289 {
1293 1290 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1294 1291 struct segkp_data *kpd;
1295 1292 int i;
1296 1293 int stop;
1297 1294
1298 1295 i = stop = SEGKP_HASH(vaddr);
1299 1296 mutex_enter(&segkp_lock);
1300 1297 do {
1301 1298 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1302 1299 kpd = kpd->kp_next) {
1303 1300 if (vaddr >= kpd->kp_base &&
1304 1301 vaddr < kpd->kp_base + kpd->kp_len) {
1305 1302 mutex_exit(&segkp_lock);
1306 1303 return (kpd);
1307 1304 }
1308 1305 }
1309 1306 if (--i < 0)
1310 1307 i = SEGKP_HASHSZ - 1; /* Wrap */
1311 1308 } while (i != stop);
1312 1309 mutex_exit(&segkp_lock);
1313 1310 return (NULL); /* Not found */
1314 1311 }
1315 1312
1316 1313 /*
1317 1314 * returns size of swappable area.
1318 1315 */
1319 1316 size_t
1320 1317 swapsize(caddr_t v)
1321 1318 {
1322 1319 struct segkp_data *kpd;
1323 1320
1324 1321 if ((kpd = segkp_find(segkp, v)) != NULL)
1325 1322 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1326 1323 else
1327 1324 return (NULL);
1328 1325 }
1329 1326
1330 1327 /*
1331 1328 * Dump out all the active segkp pages
1332 1329 */
1333 1330 static void
1334 1331 segkp_dump(struct seg *seg)
1335 1332 {
1336 1333 int i;
1337 1334 struct segkp_data *kpd;
1338 1335 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1339 1336
1340 1337 for (i = 0; i < SEGKP_HASHSZ; i++) {
1341 1338 for (kpd = kpsd->kpsd_hash[i];
1342 1339 kpd != NULL; kpd = kpd->kp_next) {
1343 1340 pfn_t pfn;
1344 1341 caddr_t addr;
1345 1342 caddr_t eaddr;
1346 1343
1347 1344 addr = kpd->kp_base;
1348 1345 eaddr = addr + kpd->kp_len;
1349 1346 while (addr < eaddr) {
1350 1347 ASSERT(seg->s_as == &kas);
1351 1348 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1352 1349 if (pfn != PFN_INVALID)
1353 1350 dump_addpage(seg->s_as, addr, pfn);
1354 1351 addr += PAGESIZE;
1355 1352 dump_timeleft = dump_timeout;
1356 1353 }
1357 1354 }
1358 1355 }
1359 1356 }
1360 1357
1361 1358 /*ARGSUSED*/
1362 1359 static int
1363 1360 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1364 1361 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1365 1362 {
1366 1363 return (ENOTSUP);
1367 1364 }
1368 1365
1369 1366 /*ARGSUSED*/
1370 1367 static int
1371 1368 segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1372 1369 {
1373 1370 return (ENODEV);
1374 -}
1375 -
1376 -/*ARGSUSED*/
1377 -static lgrp_mem_policy_info_t *
1378 -segkp_getpolicy(struct seg *seg, caddr_t addr)
1379 -{
1380 - return (NULL);
1381 1371 }
1382 1372
1383 1373 /*ARGSUSED*/
1384 1374 static int
1385 1375 segkp_capable(struct seg *seg, segcapability_t capability)
1386 1376 {
1387 1377 return (0);
1388 1378 }
1389 1379
1390 1380 #include <sys/mem_config.h>
1391 1381
1392 1382 /*ARGSUSED*/
1393 1383 static void
1394 1384 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1395 1385 {}
1396 1386
1397 1387 /*
1398 1388 * During memory delete, turn off caches so that pages are not held.
1399 1389 * A better solution may be to unlock the pages while they are
1400 1390 * in the cache so that they may be collected naturally.
1401 1391 */
1402 1392
1403 1393 /*ARGSUSED*/
1404 1394 static int
1405 1395 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1406 1396 {
1407 1397 atomic_inc_32(&segkp_indel);
1408 1398 segkp_cache_free();
1409 1399 return (0);
1410 1400 }
1411 1401
1412 1402 /*ARGSUSED*/
1413 1403 static void
1414 1404 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1415 1405 {
1416 1406 atomic_dec_32(&segkp_indel);
1417 1407 }
1418 1408
1419 1409 static kphysm_setup_vector_t segkp_mem_config_vec = {
1420 1410 KPHYSM_SETUP_VECTOR_VERSION,
1421 1411 segkp_mem_config_post_add,
1422 1412 segkp_mem_config_pre_del,
1423 1413 segkp_mem_config_post_del,
1424 1414 };
1425 1415
1426 1416 static void
1427 1417 segkpinit_mem_config(struct seg *seg)
1428 1418 {
1429 1419 int ret;
1430 1420
1431 1421 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1432 1422 ASSERT(ret == 0);
1433 1423 }