6147 segop_getpolicy already checks for a NULL op
Reviewed by: Garrett D'Amore <garrett@damore.org>
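This change removes segkp's private ->getpolicy stub. The stub did nothing
but return NULL, and — as the synopsis above says — the generic segment-op
dispatch already checks for a NULL getpolicy op, so a segment driver may
simply omit the entry. A minimal sketch of that dispatch-side check
(illustrative only; the real check lives in the common seg-ops code, not in
this file):

	/* Hypothetical sketch of the generic dispatcher's NULL-op check. */
	lgrp_mem_policy_info_t *
	segop_getpolicy(struct seg *seg, caddr_t addr)
	{
		/* No driver-supplied op means no memory placement policy. */
		if (seg->s_ops->getpolicy == NULL)
			return (NULL);

		return (seg->s_ops->getpolicy(seg, addr));
	}

With that check in place, deleting segkp_getpolicy() and its seg_ops entry
below is behavior-preserving: callers see NULL either way.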
--- old/usr/src/uts/common/vm/seg_kp.c
+++ new/usr/src/uts/common/vm/seg_kp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
26 26 /* All Rights Reserved */
27 27
28 28 /*
29 29 * Portions of this source code were derived from Berkeley 4.3 BSD
30 30 * under license from the Regents of the University of California.
31 31 */
32 32
33 33 /*
34 34 * segkp is a segment driver that administers the allocation and deallocation
35 35 * of pageable variable size chunks of kernel virtual address space. Each
36 36 * allocated resource is page-aligned.
37 37 *
38 38 * The user may specify whether the resource should be initialized to 0,
39 39 * include a redzone, or locked in memory.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/thread.h>
45 45 #include <sys/param.h>
46 46 #include <sys/errno.h>
47 47 #include <sys/sysmacros.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/mman.h>
51 51 #include <sys/vnode.h>
52 52 #include <sys/cmn_err.h>
53 53 #include <sys/swap.h>
54 54 #include <sys/tuneable.h>
55 55 #include <sys/kmem.h>
56 56 #include <sys/vmem.h>
57 57 #include <sys/cred.h>
58 58 #include <sys/dumphdr.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/vtrace.h>
61 61 #include <sys/stack.h>
62 62 #include <sys/atomic.h>
63 63 #include <sys/archsystm.h>
64 64 #include <sys/lgrp.h>
65 65
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_kp.h>
69 69 #include <vm/seg_kmem.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/hat.h>
73 73 #include <sys/bitmap.h>
74 74
75 75 /*
76 76 * Private seg op routines
77 77 */
78 78 static void segkp_badop(void);
79 79 static void segkp_dump(struct seg *seg);
80 80 static int segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
81 81 uint_t prot);
82 82 static int segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
83 83 static int segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
84 84 struct page ***page, enum lock_type type,
85 85 enum seg_rw rw);
86 86 static void segkp_insert(struct seg *seg, struct segkp_data *kpd);
87 87 static void segkp_delete(struct seg *seg, struct segkp_data *kpd);
88 88 static caddr_t segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
89 89 struct segkp_data **tkpd, struct anon_map *amp);
90 90 static void segkp_release_internal(struct seg *seg,
91 91 struct segkp_data *kpd, size_t len);
92 92 static int segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
93 93 size_t len, struct segkp_data *kpd, uint_t flags);
94 94 static int segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
95 95 size_t len, struct segkp_data *kpd, uint_t flags);
96 96 static struct segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
97 97 static int segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
98 -static lgrp_mem_policy_info_t *segkp_getpolicy(struct seg *seg,
99 - caddr_t addr);
100 98 static int segkp_capable(struct seg *seg, segcapability_t capability);
101 99
102 100 /*
103 101 * Lock used to protect the hash table(s) and caches.
104 102 */
105 103 static kmutex_t segkp_lock;
106 104
107 105 /*
108 106 * The segkp caches
109 107 */
110 108 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
111 109
112 110 #define SEGKP_BADOP(t) (t(*)())segkp_badop
113 111
114 112 /*
115 113 * When there are fewer than red_minavail bytes left on the stack,
116 114 * segkp_map_red() will map in the redzone (if called). 5000 seems
117 115 * to work reasonably well...
118 116 */
119 117 long red_minavail = 5000;
120 118
121 119 /*
122 120 * will be set to 1 for 32 bit x86 systems only, in startup.c
123 121 */
124 122 int segkp_fromheap = 0;
125 123 ulong_t *segkp_bitmap;
126 124
127 125 /*
128 126 * If segkp_map_red() is called with the redzone already mapped and
129 127 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
130 128 * then the stack situation has become quite serious; if much more stack
131 129 * is consumed, we have the potential of scrogging the next thread/LWP
132 130 * structure. To help debug the "can't happen" panics which may
133 131 * result from this condition, we record hrestime and the calling thread
134 132 * in red_deep_hires and red_deep_thread respectively.
135 133 */
136 134 #define RED_DEEP_THRESHOLD 2000
137 135
138 136 hrtime_t red_deep_hires;
139 137 kthread_t *red_deep_thread;
140 138
141 139 uint32_t red_nmapped;
142 140 uint32_t red_closest = UINT_MAX;
143 141 uint32_t red_ndoubles;
144 142
145 143 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
146 144 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
147 145
148 146 static struct seg_ops segkp_ops = {
149 147 .dup = SEGKP_BADOP(int),
150 148 .unmap = SEGKP_BADOP(int),
151 149 .free = SEGKP_BADOP(void),
152 150 .fault = segkp_fault,
153 151 .faulta = SEGKP_BADOP(faultcode_t),
154 152 .setprot = SEGKP_BADOP(int),
155 153 .checkprot = segkp_checkprot,
156 154 .kluster = segkp_kluster,
157 155 .swapout = SEGKP_BADOP(size_t),
158 156 .sync = SEGKP_BADOP(int),
159 157 .incore = SEGKP_BADOP(size_t),
160 158 .lockop = SEGKP_BADOP(int),
161 159 .getprot = SEGKP_BADOP(int),
162 160 .getoffset = SEGKP_BADOP(u_offset_t),
163 161 .gettype = SEGKP_BADOP(int),
164 162 .getvp = SEGKP_BADOP(int),
165 163 .advise = SEGKP_BADOP(int),
166 164 .dump = segkp_dump,
167 165 .pagelock = segkp_pagelock,
168 166 .setpagesize = SEGKP_BADOP(int),
169 167 .getmemid = segkp_getmemid,
170 - .getpolicy = segkp_getpolicy,
171 168 .capable = segkp_capable,
172 169 };
173 170
174 171
175 172 static void
176 173 segkp_badop(void)
177 174 {
178 175 panic("segkp_badop");
179 176 /*NOTREACHED*/
180 177 }
181 178
182 179 static void segkpinit_mem_config(struct seg *);
183 180
184 181 static uint32_t segkp_indel;
185 182
186 183 /*
187 184 * Allocate the segment specific private data struct and fill it in
188 185 * with the per kp segment mutex, anon ptr. array and hash table.
189 186 */
190 187 int
191 188 segkp_create(struct seg *seg)
192 189 {
193 190 struct segkp_segdata *kpsd;
194 191 size_t np;
195 192
196 193 ASSERT(seg != NULL && seg->s_as == &kas);
197 194 ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
198 195
199 196 if (seg->s_size & PAGEOFFSET) {
200 197 panic("Bad segkp size");
201 198 /*NOTREACHED*/
202 199 }
203 200
204 201 kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);
205 202
206 203 /*
207 204 * Allocate the virtual memory for segkp and initialize it
208 205 */
209 206 if (segkp_fromheap) {
210 207 np = btop(kvseg.s_size);
211 208 segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
212 209 kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
213 210 vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
214 211 } else {
215 212 segkp_bitmap = NULL;
216 213 np = btop(seg->s_size);
217 214 kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
218 215 seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
219 216 VM_SLEEP);
220 217 }
221 218
222 219 kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);
223 220
224 221 kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
225 222 KM_SLEEP);
226 223 seg->s_data = (void *)kpsd;
227 224 seg->s_ops = &segkp_ops;
228 225 segkpinit_mem_config(seg);
229 226 return (0);
230 227 }
231 228
232 229
233 230 /*
234 231 * Find a free 'freelist' and initialize it with the appropriate attributes
235 232 */
236 233 void *
237 234 segkp_cache_init(struct seg *seg, int maxsize, size_t len, uint_t flags)
238 235 {
239 236 int i;
240 237
241 238 if ((flags & KPD_NO_ANON) && !(flags & KPD_LOCKED))
242 239 return ((void *)-1);
243 240
244 241 mutex_enter(&segkp_lock);
245 242 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
246 243 if (segkp_cache[i].kpf_inuse)
247 244 continue;
248 245 segkp_cache[i].kpf_inuse = 1;
249 246 segkp_cache[i].kpf_max = maxsize;
250 247 segkp_cache[i].kpf_flags = flags;
251 248 segkp_cache[i].kpf_seg = seg;
252 249 segkp_cache[i].kpf_len = len;
253 250 mutex_exit(&segkp_lock);
254 251 return ((void *)(uintptr_t)i);
255 252 }
256 253 mutex_exit(&segkp_lock);
257 254 return ((void *)-1);
258 255 }
259 256
260 257 /*
261 258 * Free all the cache resources.
262 259 */
263 260 void
264 261 segkp_cache_free(void)
265 262 {
266 263 struct segkp_data *kpd;
267 264 struct seg *seg;
268 265 int i;
269 266
270 267 mutex_enter(&segkp_lock);
271 268 for (i = 0; i < SEGKP_MAX_CACHE; i++) {
272 269 if (!segkp_cache[i].kpf_inuse)
273 270 continue;
274 271 /*
275 272 * Disconnect the freelist and process each element
276 273 */
277 274 kpd = segkp_cache[i].kpf_list;
278 275 seg = segkp_cache[i].kpf_seg;
279 276 segkp_cache[i].kpf_list = NULL;
280 277 segkp_cache[i].kpf_count = 0;
281 278 mutex_exit(&segkp_lock);
282 279
283 280 while (kpd != NULL) {
284 281 struct segkp_data *next;
285 282
286 283 next = kpd->kp_next;
287 284 segkp_release_internal(seg, kpd, kpd->kp_len);
288 285 kpd = next;
289 286 }
290 287 mutex_enter(&segkp_lock);
291 288 }
292 289 mutex_exit(&segkp_lock);
293 290 }
294 291
295 292 /*
296 293 * There are two entry points into segkp_get_internal. The first includes a cookie
297 294 * used to access a pool of cached segkp resources. The second does not
298 295 * use the cache.
299 296 */
300 297 caddr_t
301 298 segkp_get(struct seg *seg, size_t len, uint_t flags)
302 299 {
303 300 struct segkp_data *kpd = NULL;
304 301
305 302 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
306 303 kpd->kp_cookie = -1;
307 304 return (stom(kpd->kp_base, flags));
308 305 }
309 306 return (NULL);
310 307 }
311 308
312 309 /*
313 310 * Return a 'cached' segkp address
314 311 */
315 312 caddr_t
316 313 segkp_cache_get(void *cookie)
317 314 {
318 315 struct segkp_cache *freelist = NULL;
319 316 struct segkp_data *kpd = NULL;
320 317 int index = (int)(uintptr_t)cookie;
321 318 struct seg *seg;
322 319 size_t len;
323 320 uint_t flags;
324 321
325 322 if (index < 0 || index >= SEGKP_MAX_CACHE)
326 323 return (NULL);
327 324 freelist = &segkp_cache[index];
328 325
329 326 mutex_enter(&segkp_lock);
330 327 seg = freelist->kpf_seg;
331 328 flags = freelist->kpf_flags;
332 329 if (freelist->kpf_list != NULL) {
333 330 kpd = freelist->kpf_list;
334 331 freelist->kpf_list = kpd->kp_next;
335 332 freelist->kpf_count--;
336 333 mutex_exit(&segkp_lock);
337 334 kpd->kp_next = NULL;
338 335 segkp_insert(seg, kpd);
339 336 return (stom(kpd->kp_base, flags));
340 337 }
341 338 len = freelist->kpf_len;
342 339 mutex_exit(&segkp_lock);
343 340 if (segkp_get_internal(seg, len, flags, &kpd, NULL) != NULL) {
344 341 kpd->kp_cookie = index;
345 342 return (stom(kpd->kp_base, flags));
346 343 }
347 344 return (NULL);
348 345 }
349 346
350 347 caddr_t
351 348 segkp_get_withanonmap(
352 349 struct seg *seg,
353 350 size_t len,
354 351 uint_t flags,
355 352 struct anon_map *amp)
356 353 {
357 354 struct segkp_data *kpd = NULL;
358 355
359 356 ASSERT(amp != NULL);
360 357 flags |= KPD_HASAMP;
361 358 if (segkp_get_internal(seg, len, flags, &kpd, amp) != NULL) {
362 359 kpd->kp_cookie = -1;
363 360 return (stom(kpd->kp_base, flags));
364 361 }
365 362 return (NULL);
366 363 }
367 364
368 365 /*
369 366 * This does the real work of segkp allocation.
370 367 * Return to client base addr. len must be page-aligned. A null value is
371 368 * returned if there are no more vm resources (e.g. pages, swap). The len
372 369 * and base recorded in the private data structure include the redzone
373 370 * and the redzone length (if applicable). If the user requests a redzone
374 371 * either the first or last page is left unmapped depending whether stacks
375 372 * grow to low or high memory.
376 373 *
377 374 * The client may also specify a no-wait flag. If that is set then the
378 375 * request will choose a non-blocking path when requesting resources.
379 376 * The default is make the client wait.
380 377 */
381 378 static caddr_t
382 379 segkp_get_internal(
383 380 struct seg *seg,
384 381 size_t len,
385 382 uint_t flags,
386 383 struct segkp_data **tkpd,
387 384 struct anon_map *amp)
388 385 {
389 386 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
390 387 struct segkp_data *kpd;
391 388 caddr_t vbase = NULL; /* always first virtual, may not be mapped */
392 389 pgcnt_t np = 0; /* number of pages in the resource */
393 390 pgcnt_t segkpindex;
394 391 long i;
395 392 caddr_t va;
396 393 pgcnt_t pages = 0;
397 394 ulong_t anon_idx = 0;
398 395 int kmflag = (flags & KPD_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
399 396 caddr_t s_base = (segkp_fromheap) ? kvseg.s_base : seg->s_base;
400 397
401 398 if (len & PAGEOFFSET) {
402 399 panic("segkp_get: len is not page-aligned");
403 400 /*NOTREACHED*/
404 401 }
405 402
406 403 ASSERT(((flags & KPD_HASAMP) == 0) == (amp == NULL));
407 404
408 405 /* Only allow KPD_NO_ANON if we are going to lock it down */
409 406 if ((flags & (KPD_LOCKED|KPD_NO_ANON)) == KPD_NO_ANON)
410 407 return (NULL);
411 408
412 409 if ((kpd = kmem_zalloc(sizeof (struct segkp_data), kmflag)) == NULL)
413 410 return (NULL);
414 411 /*
415 412 * Fix up the len to reflect the REDZONE if applicable
416 413 */
417 414 if (flags & KPD_HASREDZONE)
418 415 len += PAGESIZE;
419 416 np = btop(len);
420 417
421 418 vbase = vmem_alloc(SEGKP_VMEM(seg), len, kmflag | VM_BESTFIT);
422 419 if (vbase == NULL) {
423 420 kmem_free(kpd, sizeof (struct segkp_data));
424 421 return (NULL);
425 422 }
426 423
427 424 /* If locking, reserve physical memory */
428 425 if (flags & KPD_LOCKED) {
429 426 pages = btop(SEGKP_MAPLEN(len, flags));
430 427 if (page_resv(pages, kmflag) == 0) {
431 428 vmem_free(SEGKP_VMEM(seg), vbase, len);
432 429 kmem_free(kpd, sizeof (struct segkp_data));
433 430 return (NULL);
434 431 }
435 432 if ((flags & KPD_NO_ANON) == 0)
436 433 atomic_add_long(&anon_segkp_pages_locked, pages);
437 434 }
438 435
439 436 /*
440 437 * Reserve sufficient swap space for this vm resource. We'll
441 438 * actually allocate it in the loop below, but reserving it
442 439 * here allows us to back out more gracefully than if we
443 440 * had an allocation failure in the body of the loop.
444 441 *
445 442 * Note that we don't need swap space for the red zone page.
446 443 */
447 444 if (amp != NULL) {
448 445 /*
449 446 * The swap reservation has been done, if required, and the
450 447 * anon_hdr is separate.
451 448 */
452 449 anon_idx = 0;
453 450 kpd->kp_anon_idx = anon_idx;
454 451 kpd->kp_anon = amp->ahp;
455 452
456 453 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
457 454 kpd, vbase, len, flags, 1);
458 455
459 456 } else if ((flags & KPD_NO_ANON) == 0) {
460 457 if (anon_resv_zone(SEGKP_MAPLEN(len, flags), NULL) == 0) {
461 458 if (flags & KPD_LOCKED) {
462 459 atomic_add_long(&anon_segkp_pages_locked,
463 460 -pages);
464 461 page_unresv(pages);
465 462 }
466 463 vmem_free(SEGKP_VMEM(seg), vbase, len);
467 464 kmem_free(kpd, sizeof (struct segkp_data));
468 465 return (NULL);
469 466 }
470 467 atomic_add_long(&anon_segkp_pages_resv,
471 468 btop(SEGKP_MAPLEN(len, flags)));
472 469 anon_idx = ((uintptr_t)(vbase - s_base)) >> PAGESHIFT;
473 470 kpd->kp_anon_idx = anon_idx;
474 471 kpd->kp_anon = kpsd->kpsd_anon;
475 472
476 473 TRACE_5(TR_FAC_VM, TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
477 474 kpd, vbase, len, flags, 1);
478 475 } else {
479 476 kpd->kp_anon = NULL;
480 477 kpd->kp_anon_idx = 0;
481 478 }
482 479
483 480 /*
484 481 * Allocate page and anon resources for the virtual address range
485 482 * except the redzone
486 483 */
487 484 if (segkp_fromheap)
488 485 segkpindex = btop((uintptr_t)(vbase - kvseg.s_base));
489 486 for (i = 0, va = vbase; i < np; i++, va += PAGESIZE) {
490 487 page_t *pl[2];
491 488 struct vnode *vp;
492 489 anoff_t off;
493 490 int err;
494 491 page_t *pp = NULL;
495 492
496 493 /*
497 494 * Mark this page to be a segkp page in the bitmap.
498 495 */
499 496 if (segkp_fromheap) {
500 497 BT_ATOMIC_SET(segkp_bitmap, segkpindex);
501 498 segkpindex++;
502 499 }
503 500
504 501 /*
505 502 * If this page is the red zone page, we don't need swap
506 503 * space for it. Note that we skip over the code that
507 504 * establishes MMU mappings, so that the page remains
508 505 * invalid.
509 506 */
510 507 if ((flags & KPD_HASREDZONE) && KPD_REDZONE(kpd) == i)
511 508 continue;
512 509
513 510 if (kpd->kp_anon != NULL) {
514 511 struct anon *ap;
515 512
516 513 ASSERT(anon_get_ptr(kpd->kp_anon, anon_idx + i)
517 514 == NULL);
518 515 /*
519 516 * Determine the "vp" and "off" of the anon slot.
520 517 */
521 518 ap = anon_alloc(NULL, 0);
522 519 if (amp != NULL)
523 520 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
524 521 (void) anon_set_ptr(kpd->kp_anon, anon_idx + i,
525 522 ap, ANON_SLEEP);
526 523 if (amp != NULL)
527 524 ANON_LOCK_EXIT(&amp->a_rwlock);
528 525 swap_xlate(ap, &vp, &off);
529 526
530 527 /*
531 528 * Create a page with the specified identity. The
532 529 * page is returned with the "shared" lock held.
533 530 */
534 531 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
535 532 NULL, pl, PAGESIZE, seg, va, S_CREATE,
536 533 kcred, NULL);
537 534 if (err) {
538 535 /*
539 536 * XXX - This should not fail.
540 537 */
541 538 panic("segkp_get: no pages");
542 539 /*NOTREACHED*/
543 540 }
544 541 pp = pl[0];
545 542 } else {
546 543 ASSERT(page_exists(&kvp,
547 544 (u_offset_t)(uintptr_t)va) == NULL);
548 545
549 546 if ((pp = page_create_va(&kvp,
550 547 (u_offset_t)(uintptr_t)va, PAGESIZE,
551 548 (flags & KPD_NOWAIT ? 0 : PG_WAIT) | PG_EXCL |
552 549 PG_NORELOC, seg, va)) == NULL) {
553 550 /*
554 551 * Legitimize resource; then destroy it.
555 552 * Easier than trying to unwind here.
556 553 */
557 554 kpd->kp_flags = flags;
558 555 kpd->kp_base = vbase;
559 556 kpd->kp_len = len;
560 557 segkp_release_internal(seg, kpd, va - vbase);
561 558 return (NULL);
562 559 }
563 560 page_io_unlock(pp);
564 561 }
565 562
566 563 if (flags & KPD_ZERO)
567 564 pagezero(pp, 0, PAGESIZE);
568 565
569 566 /*
570 567 * Load and lock an MMU translation for the page.
571 568 */
572 569 hat_memload(seg->s_as->a_hat, va, pp, (PROT_READ|PROT_WRITE),
573 570 ((flags & KPD_LOCKED) ? HAT_LOAD_LOCK : HAT_LOAD));
574 571
575 572 /*
576 573 * Now, release lock on the page.
577 574 */
578 575 if (flags & KPD_LOCKED) {
579 576 /*
580 577 * Indicate to page_retire framework that this
581 578 * page can only be retired when it is freed.
582 579 */
583 580 PP_SETRAF(pp);
584 581 page_downgrade(pp);
585 582 } else
586 583 page_unlock(pp);
587 584 }
588 585
589 586 kpd->kp_flags = flags;
590 587 kpd->kp_base = vbase;
591 588 kpd->kp_len = len;
592 589 segkp_insert(seg, kpd);
593 590 *tkpd = kpd;
594 591 return (stom(kpd->kp_base, flags));
595 592 }
596 593
597 594 /*
598 595 * Release the resource to the cache if the pool (designated by the cookie)
599 596 * has less than the maximum allowable. If inserted in cache,
600 597 * segkp_delete ensures the element is taken off of the active list.
601 598 */
602 599 void
603 600 segkp_release(struct seg *seg, caddr_t vaddr)
604 601 {
605 602 struct segkp_cache *freelist;
606 603 struct segkp_data *kpd = NULL;
607 604
608 605 if ((kpd = segkp_find(seg, vaddr)) == NULL) {
609 606 panic("segkp_release: null kpd");
610 607 /*NOTREACHED*/
611 608 }
612 609
613 610 if (kpd->kp_cookie != -1) {
614 611 freelist = &segkp_cache[kpd->kp_cookie];
615 612 mutex_enter(&segkp_lock);
616 613 if (!segkp_indel && freelist->kpf_count < freelist->kpf_max) {
617 614 segkp_delete(seg, kpd);
618 615 kpd->kp_next = freelist->kpf_list;
619 616 freelist->kpf_list = kpd;
620 617 freelist->kpf_count++;
621 618 mutex_exit(&segkp_lock);
622 619 return;
623 620 } else {
624 621 mutex_exit(&segkp_lock);
625 622 kpd->kp_cookie = -1;
626 623 }
627 624 }
628 625 segkp_release_internal(seg, kpd, kpd->kp_len);
629 626 }
630 627
631 628 /*
632 629 * Free the entire resource. segkp_unlock gets called with the start of the
633 630 * mapped portion of the resource. The length is the size of the mapped
634 631 * portion.
635 632 */
636 633 static void
637 634 segkp_release_internal(struct seg *seg, struct segkp_data *kpd, size_t len)
638 635 {
639 636 caddr_t va;
640 637 long i;
641 638 long redzone;
642 639 size_t np;
643 640 page_t *pp;
644 641 struct vnode *vp;
645 642 anoff_t off;
646 643 struct anon *ap;
647 644 pgcnt_t segkpindex;
648 645
649 646 ASSERT(kpd != NULL);
650 647 ASSERT((kpd->kp_flags & KPD_HASAMP) == 0 || kpd->kp_cookie == -1);
651 648 np = btop(len);
652 649
653 650 /* Remove from active hash list */
654 651 if (kpd->kp_cookie == -1) {
655 652 mutex_enter(&segkp_lock);
656 653 segkp_delete(seg, kpd);
657 654 mutex_exit(&segkp_lock);
658 655 }
659 656
660 657 /*
661 658 * Precompute redzone page index.
662 659 */
663 660 redzone = -1;
664 661 if (kpd->kp_flags & KPD_HASREDZONE)
665 662 redzone = KPD_REDZONE(kpd);
666 663
667 664
668 665 va = kpd->kp_base;
669 666
670 667 hat_unload(seg->s_as->a_hat, va, (np << PAGESHIFT),
671 668 ((kpd->kp_flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
672 669 /*
673 670 * Free up those anon resources that are quiescent.
674 671 */
675 672 if (segkp_fromheap)
676 673 segkpindex = btop((uintptr_t)(va - kvseg.s_base));
677 674 for (i = 0; i < np; i++, va += PAGESIZE) {
678 675
679 676 /*
680 677 * Clear the bit for this page from the bitmap.
681 678 */
682 679 if (segkp_fromheap) {
683 680 BT_ATOMIC_CLEAR(segkp_bitmap, segkpindex);
684 681 segkpindex++;
685 682 }
686 683
687 684 if (i == redzone)
688 685 continue;
689 686 if (kpd->kp_anon) {
690 687 /*
691 688 * Free up anon resources and destroy the
692 689 * associated pages.
693 690 *
694 691 * Release the lock if there is one. Have to get the
695 692 * page to do this, unfortunately.
696 693 */
697 694 if (kpd->kp_flags & KPD_LOCKED) {
698 695 ap = anon_get_ptr(kpd->kp_anon,
699 696 kpd->kp_anon_idx + i);
700 697 swap_xlate(ap, &vp, &off);
701 698 /* Find the shared-locked page. */
702 699 pp = page_find(vp, (u_offset_t)off);
703 700 if (pp == NULL) {
704 701 panic("segkp_release: "
705 702 "kp_anon: no page to unlock ");
706 703 /*NOTREACHED*/
707 704 }
708 705 if (PP_ISRAF(pp))
709 706 PP_CLRRAF(pp);
710 707
711 708 page_unlock(pp);
712 709 }
713 710 if ((kpd->kp_flags & KPD_HASAMP) == 0) {
714 711 anon_free(kpd->kp_anon, kpd->kp_anon_idx + i,
715 712 PAGESIZE);
716 713 anon_unresv_zone(PAGESIZE, NULL);
717 714 atomic_dec_ulong(&anon_segkp_pages_resv);
718 715 }
719 716 TRACE_5(TR_FAC_VM,
720 717 TR_ANON_SEGKP, "anon segkp:%p %p %lu %u %u",
721 718 kpd, va, PAGESIZE, 0, 0);
722 719 } else {
723 720 if (kpd->kp_flags & KPD_LOCKED) {
724 721 pp = page_find(&kvp, (u_offset_t)(uintptr_t)va);
725 722 if (pp == NULL) {
726 723 panic("segkp_release: "
727 724 "no page to unlock");
728 725 /*NOTREACHED*/
729 726 }
730 727 if (PP_ISRAF(pp))
731 728 PP_CLRRAF(pp);
732 729 /*
733 730 * We should just upgrade the lock here
734 731 * but there is no upgrade that waits.
735 732 */
736 733 page_unlock(pp);
737 734 }
738 735 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)va,
739 736 SE_EXCL);
740 737 if (pp != NULL)
741 738 page_destroy(pp, 0);
742 739 }
743 740 }
744 741
745 742 /* If locked, release physical memory reservation */
746 743 if (kpd->kp_flags & KPD_LOCKED) {
747 744 pgcnt_t pages = btop(SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
748 745 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
749 746 atomic_add_long(&anon_segkp_pages_locked, -pages);
750 747 page_unresv(pages);
751 748 }
752 749
753 750 vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
754 751 kmem_free(kpd, sizeof (struct segkp_data));
755 752 }
756 753
757 754 /*
758 755 * segkp_map_red() will check the current frame pointer against the
759 756 * stack base. If the amount of stack remaining is questionable
760 757 * (less than red_minavail), then segkp_map_red() will map in the redzone
761 758 * and return 1. Otherwise, it will return 0. segkp_map_red() can
762 759 * _only_ be called when:
763 760 *
764 761 * - it is safe to sleep on page_create_va().
765 762 * - the caller is non-swappable.
766 763 *
767 764 * It is up to the caller to remember whether segkp_map_red() successfully
768 765 * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
769 766 * time. Note that the caller must _remain_ non-swappable until after
770 767 * calling segkp_unmap_red().
771 768 *
772 769 * Currently, this routine is only called from pagefault() (which necessarily
773 770 * satisfies the above conditions).
774 771 */
775 772 #if defined(STACK_GROWTH_DOWN)
776 773 int
777 774 segkp_map_red(void)
778 775 {
779 776 uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
780 777 #ifndef _LP64
781 778 caddr_t stkbase;
782 779 #endif
783 780
784 781 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
785 782
786 783 /*
787 784 * Optimize for the common case where we simply return.
788 785 */
789 786 if ((curthread->t_red_pp == NULL) &&
790 787 (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
791 788 return (0);
792 789
793 790 #if defined(_LP64)
794 791 /*
795 792 * XXX We probably need something better than this.
796 793 */
797 794 panic("kernel stack overflow");
798 795 /*NOTREACHED*/
799 796 #else /* _LP64 */
800 797 if (curthread->t_red_pp == NULL) {
801 798 page_t *red_pp;
802 799 struct seg kseg;
803 800
804 801 caddr_t red_va = (caddr_t)
805 802 (((uintptr_t)curthread->t_stkbase & (uintptr_t)PAGEMASK) -
806 803 PAGESIZE);
807 804
808 805 ASSERT(page_exists(&kvp, (u_offset_t)(uintptr_t)red_va) ==
809 806 NULL);
810 807
811 808 /*
812 809 * Allocate the physical memory for the red page.
813 810 */
814 811 /*
815 812 * No PG_NORELOC here to avoid waits. Unlikely to get
816 813 * a relocate happening in the short time the page exists
817 814 * and it will be OK anyway.
818 815 */
819 816
820 817 kseg.s_as = &kas;
821 818 red_pp = page_create_va(&kvp, (u_offset_t)(uintptr_t)red_va,
822 819 PAGESIZE, PG_WAIT | PG_EXCL, &kseg, red_va);
823 820 ASSERT(red_pp != NULL);
824 821
825 822 /*
826 823 * So we now have a page to jam into the redzone...
827 824 */
828 825 page_io_unlock(red_pp);
829 826
830 827 hat_memload(kas.a_hat, red_va, red_pp,
831 828 (PROT_READ|PROT_WRITE), HAT_LOAD_LOCK);
832 829 page_downgrade(red_pp);
833 830
834 831 /*
835 832 * The page is left SE_SHARED locked so we can hold on to
836 833 * the page_t pointer.
837 834 */
838 835 curthread->t_red_pp = red_pp;
839 836
840 837 atomic_inc_32(&red_nmapped);
841 838 while (fp - (uintptr_t)curthread->t_stkbase < red_closest) {
842 839 (void) atomic_cas_32(&red_closest, red_closest,
843 840 (uint32_t)(fp - (uintptr_t)curthread->t_stkbase));
844 841 }
845 842 return (1);
846 843 }
847 844
848 845 stkbase = (caddr_t)(((uintptr_t)curthread->t_stkbase &
849 846 (uintptr_t)PAGEMASK) - PAGESIZE);
850 847
851 848 atomic_inc_32(&red_ndoubles);
852 849
853 850 if (fp - (uintptr_t)stkbase < RED_DEEP_THRESHOLD) {
854 851 /*
855 852 * Oh boy. We're already deep within the mapped-in
856 853 * redzone page, and the caller is trying to prepare
857 854 * for a deep stack run. We're running without a
858 855 * redzone right now: if the caller plows off the
859 856 * end of the stack, it'll plow another thread or
860 857 * LWP structure. That situation could result in
861 858 * a very hard-to-debug panic, so, in the spirit of
862 859 * recording the name of one's killer in one's own
863 860 * blood, we're going to record hrestime and the calling
864 861 * thread.
865 862 */
866 863 red_deep_hires = hrestime.tv_nsec;
867 864 red_deep_thread = curthread;
868 865 }
869 866
870 867 /*
871 868 * If this is a DEBUG kernel, and we've run too deep for comfort, toss.
872 869 */
873 870 ASSERT(fp - (uintptr_t)stkbase >= RED_DEEP_THRESHOLD);
874 871 return (0);
875 872 #endif /* _LP64 */
876 873 }
877 874
878 875 void
879 876 segkp_unmap_red(void)
880 877 {
881 878 page_t *pp;
882 879 caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
883 880 (uintptr_t)PAGEMASK) - PAGESIZE);
884 881
885 882 ASSERT(curthread->t_red_pp != NULL);
886 883 ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
887 884
888 885 /*
889 886 * Because we locked the mapping down, we can't simply rely
890 887 * on page_destroy() to clean everything up; we need to call
891 888 * hat_unload() to explicitly unlock the mapping resources.
892 889 */
893 890 hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
894 891
895 892 pp = curthread->t_red_pp;
896 893
897 894 ASSERT(pp == page_find(&kvp, (u_offset_t)(uintptr_t)red_va));
898 895
899 896 /*
900 897 * Need to upgrade the SE_SHARED lock to SE_EXCL.
901 898 */
902 899 if (!page_tryupgrade(pp)) {
903 900 /*
904 901 * As there is no wait for upgrade, release the
905 902 * SE_SHARED lock and wait for SE_EXCL.
906 903 */
907 904 page_unlock(pp);
908 905 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)red_va, SE_EXCL);
909 906 /* pp may be NULL here, hence the test below */
910 907 }
911 908
912 909 /*
913 910 * Destroy the page, with dontfree set to zero (i.e. free it).
914 911 */
915 912 if (pp != NULL)
916 913 page_destroy(pp, 0);
917 914 curthread->t_red_pp = NULL;
918 915 }
919 916 #else
920 917 #error Red stacks only supported with downwards stack growth.
921 918 #endif
922 919
923 920 /*
924 921 * Handle a fault on an address corresponding to one of the
925 922 * resources in the segkp segment.
926 923 */
927 924 faultcode_t
928 925 segkp_fault(
929 926 struct hat *hat,
930 927 struct seg *seg,
931 928 caddr_t vaddr,
932 929 size_t len,
933 930 enum fault_type type,
934 931 enum seg_rw rw)
935 932 {
936 933 struct segkp_data *kpd = NULL;
937 934 int err;
938 935
939 936 ASSERT(seg->s_as == &kas && RW_READ_HELD(&seg->s_as->a_lock));
940 937
941 938 /*
942 939 * Sanity checks.
943 940 */
944 941 if (type == F_PROT) {
945 942 panic("segkp_fault: unexpected F_PROT fault");
946 943 /*NOTREACHED*/
947 944 }
948 945
949 946 if ((kpd = segkp_find(seg, vaddr)) == NULL)
950 947 return (FC_NOMAP);
951 948
952 949 mutex_enter(&kpd->kp_lock);
953 950
954 951 if (type == F_SOFTLOCK) {
955 952 ASSERT(!(kpd->kp_flags & KPD_LOCKED));
956 953 /*
957 954 * The F_SOFTLOCK case has more stringent
958 955 * range requirements: the given range must exactly coincide
959 956 * with the resource's mapped portion. Note that a reference to the
960 957 * redzone is handled, since vaddr would not equal base.
961 958 */
962 959 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
963 960 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
964 961 mutex_exit(&kpd->kp_lock);
965 962 return (FC_MAKE_ERR(EFAULT));
966 963 }
967 964
968 965 if ((err = segkp_load(hat, seg, vaddr, len, kpd, KPD_LOCKED))) {
969 966 mutex_exit(&kpd->kp_lock);
970 967 return (FC_MAKE_ERR(err));
971 968 }
972 969 kpd->kp_flags |= KPD_LOCKED;
973 970 mutex_exit(&kpd->kp_lock);
974 971 return (0);
975 972 }
976 973
977 974 if (type == F_INVAL) {
978 975 ASSERT(!(kpd->kp_flags & KPD_NO_ANON));
979 976
980 977 /*
981 978 * Check if we touched the redzone. Somewhat optimistic
982 979 * here if we are touching the redzone of our own stack
983 980 * since we wouldn't have a stack to get this far...
984 981 */
985 982 if ((kpd->kp_flags & KPD_HASREDZONE) &&
986 983 btop((uintptr_t)(vaddr - kpd->kp_base)) == KPD_REDZONE(kpd))
987 984 panic("segkp_fault: accessing redzone");
988 985
989 986 /*
990 987 * This fault may occur while the page is being F_SOFTLOCK'ed.
991 988 * Return since a 2nd segkp_load is unnecessary and also would
992 989 * result in the page being locked twice and eventually
993 990 * hang the thread_reaper thread.
994 991 */
995 992 if (kpd->kp_flags & KPD_LOCKED) {
996 993 mutex_exit(&kpd->kp_lock);
997 994 return (0);
998 995 }
999 996
1000 997 err = segkp_load(hat, seg, vaddr, len, kpd, kpd->kp_flags);
1001 998 mutex_exit(&kpd->kp_lock);
1002 999 return (err ? FC_MAKE_ERR(err) : 0);
1003 1000 }
1004 1001
1005 1002 if (type == F_SOFTUNLOCK) {
1006 1003 uint_t flags;
1007 1004
1008 1005 /*
1009 1006 * Make sure the addr is LOCKED and it has anon backing
1010 1007 * before unlocking
1011 1008 */
1012 1009 if ((kpd->kp_flags & (KPD_LOCKED|KPD_NO_ANON)) != KPD_LOCKED) {
1013 1010 panic("segkp_fault: bad unlock");
1014 1011 /*NOTREACHED*/
1015 1012 }
1016 1013
1017 1014 if (vaddr != stom(kpd->kp_base, kpd->kp_flags) ||
1018 1015 len != SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags)) {
1019 1016 panic("segkp_fault: bad range");
1020 1017 /*NOTREACHED*/
1021 1018 }
1022 1019
1023 1020 if (rw == S_WRITE)
1024 1021 flags = kpd->kp_flags | KPD_WRITEDIRTY;
1025 1022 else
1026 1023 flags = kpd->kp_flags;
1027 1024 err = segkp_unlock(hat, seg, vaddr, len, kpd, flags);
1028 1025 kpd->kp_flags &= ~KPD_LOCKED;
1029 1026 mutex_exit(&kpd->kp_lock);
1030 1027 return (err ? FC_MAKE_ERR(err) : 0);
1031 1028 }
1032 1029 mutex_exit(&kpd->kp_lock);
1033 1030 panic("segkp_fault: bogus fault type: %d\n", type);
1034 1031 /*NOTREACHED*/
1035 1032 }
1036 1033
1037 1034 /*
1038 1035 * Check that the given protections suffice over the range specified by
1039 1036 * vaddr and len. For this segment type, the only issue is whether or
1040 1037 * not the range lies completely within the mapped part of an allocated
1041 1038 * resource.
1042 1039 */
1043 1040 /* ARGSUSED */
1044 1041 static int
1045 1042 segkp_checkprot(struct seg *seg, caddr_t vaddr, size_t len, uint_t prot)
1046 1043 {
1047 1044 struct segkp_data *kpd = NULL;
1048 1045 caddr_t mbase;
1049 1046 size_t mlen;
1050 1047
1051 1048 if ((kpd = segkp_find(seg, vaddr)) == NULL)
1052 1049 return (EACCES);
1053 1050
1054 1051 mutex_enter(&kpd->kp_lock);
1055 1052 mbase = stom(kpd->kp_base, kpd->kp_flags);
1056 1053 mlen = SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags);
1057 1054 if (len > mlen || vaddr < mbase ||
1058 1055 ((vaddr + len) > (mbase + mlen))) {
1059 1056 mutex_exit(&kpd->kp_lock);
1060 1057 return (EACCES);
1061 1058 }
1062 1059 mutex_exit(&kpd->kp_lock);
1063 1060 return (0);
1064 1061 }
1065 1062
1066 1063
1067 1064 /*
1068 1065 * Check to see if it makes sense to do kluster/read ahead to
1069 1066 * addr + delta relative to the mapping at addr. We assume here
1070 1067 * that delta is a signed PAGESIZE'd multiple (which can be negative).
1071 1068 *
1072 1069 * For seg_u we always "approve" of this action from our standpoint.
1073 1070 */
1074 1071 /*ARGSUSED*/
1075 1072 static int
1076 1073 segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
1077 1074 {
1078 1075 return (0);
1079 1076 }
1080 1077
1081 1078 /*
1082 1079 * Load and possibly lock intra-slot resources in the range given by
1083 1080 * vaddr and len.
1084 1081 */
1085 1082 static int
1086 1083 segkp_load(
1087 1084 struct hat *hat,
1088 1085 struct seg *seg,
1089 1086 caddr_t vaddr,
1090 1087 size_t len,
1091 1088 struct segkp_data *kpd,
1092 1089 uint_t flags)
1093 1090 {
1094 1091 caddr_t va;
1095 1092 caddr_t vlim;
1096 1093 ulong_t i;
1097 1094 uint_t lock;
1098 1095
1099 1096 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1100 1097
1101 1098 len = P2ROUNDUP(len, PAGESIZE);
1102 1099
1103 1100 /* If locking, reserve physical memory */
1104 1101 if (flags & KPD_LOCKED) {
1105 1102 pgcnt_t pages = btop(len);
1106 1103 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1107 1104 atomic_add_long(&anon_segkp_pages_locked, pages);
1108 1105 (void) page_resv(pages, KM_SLEEP);
1109 1106 }
1110 1107
1111 1108 /*
1112 1109 * Loop through the pages in the given range.
1113 1110 */
1114 1111 va = (caddr_t)((uintptr_t)vaddr & (uintptr_t)PAGEMASK);
1115 1112 vaddr = va;
1116 1113 vlim = va + len;
1117 1114 lock = flags & KPD_LOCKED;
1118 1115 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1119 1116 for (; va < vlim; va += PAGESIZE, i++) {
1120 1117 page_t *pl[2]; /* second element NULL terminator */
1121 1118 struct vnode *vp;
1122 1119 anoff_t off;
1123 1120 int err;
1124 1121 struct anon *ap;
1125 1122
1126 1123 /*
1127 1124 * Summon the page. If it's not resident, arrange
1128 1125 * for synchronous i/o to pull it in.
1129 1126 */
1130 1127 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1131 1128 swap_xlate(ap, &vp, &off);
1132 1129
1133 1130 /*
1134 1131 * The returned page list will have exactly one entry,
1135 1132 * which is returned to us already kept.
1136 1133 */
1137 1134 err = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, NULL,
1138 1135 pl, PAGESIZE, seg, va, S_READ, kcred, NULL);
1139 1136
1140 1137 if (err) {
1141 1138 /*
1142 1139 * Back out of what we've done so far.
1143 1140 */
1144 1141 (void) segkp_unlock(hat, seg, vaddr,
1145 1142 (va - vaddr), kpd, flags);
1146 1143 return (err);
1147 1144 }
1148 1145
1149 1146 /*
1150 1147 * Load an MMU translation for the page.
1151 1148 */
1152 1149 hat_memload(hat, va, pl[0], (PROT_READ|PROT_WRITE),
1153 1150 lock ? HAT_LOAD_LOCK : HAT_LOAD);
1154 1151
1155 1152 if (!lock) {
1156 1153 /*
1157 1154 * Now, release "shared" lock on the page.
1158 1155 */
1159 1156 page_unlock(pl[0]);
1160 1157 }
1161 1158 }
1162 1159 return (0);
1163 1160 }
1164 1161
1165 1162 /*
1166 1163 * At the very least unload the mmu-translations and unlock the range if locked
1167 1164 * Can be called with the following flag value KPD_WRITEDIRTY which specifies
1168 1165 * any dirty pages should be written to disk.
1169 1166 */
1170 1167 static int
1171 1168 segkp_unlock(
1172 1169 struct hat *hat,
1173 1170 struct seg *seg,
1174 1171 caddr_t vaddr,
1175 1172 size_t len,
1176 1173 struct segkp_data *kpd,
1177 1174 uint_t flags)
1178 1175 {
1179 1176 caddr_t va;
1180 1177 caddr_t vlim;
1181 1178 ulong_t i;
1182 1179 struct page *pp;
1183 1180 struct vnode *vp;
1184 1181 anoff_t off;
1185 1182 struct anon *ap;
1186 1183
1187 1184 #ifdef lint
1188 1185 seg = seg;
1189 1186 #endif /* lint */
1190 1187
1191 1188 ASSERT(MUTEX_HELD(&kpd->kp_lock));
1192 1189
1193 1190 /*
1194 1191 * Loop through the pages in the given range. It is assumed
1195 1192 * segkp_unlock is called with page aligned base
1196 1193 */
1197 1194 va = vaddr;
1198 1195 vlim = va + len;
1199 1196 i = ((uintptr_t)(va - kpd->kp_base)) >> PAGESHIFT;
1200 1197 hat_unload(hat, va, len,
1201 1198 ((flags & KPD_LOCKED) ? HAT_UNLOAD_UNLOCK : HAT_UNLOAD));
1202 1199 for (; va < vlim; va += PAGESIZE, i++) {
1203 1200 /*
1204 1201 * Find the page associated with this part of the
1205 1202 * slot, tracking it down through its associated swap
1206 1203 * space.
1207 1204 */
1208 1205 ap = anon_get_ptr(kpd->kp_anon, kpd->kp_anon_idx + i);
1209 1206 swap_xlate(ap, &vp, &off);
1210 1207
1211 1208 if (flags & KPD_LOCKED) {
1212 1209 if ((pp = page_find(vp, off)) == NULL) {
1213 1210 if (flags & KPD_LOCKED) {
1214 1211 panic("segkp_softunlock: missing page");
1215 1212 /*NOTREACHED*/
1216 1213 }
1217 1214 }
1218 1215 } else {
1219 1216 /*
1220 1217 * Nothing to do if the slot is not locked and the
1221 1218 * page doesn't exist.
1222 1219 */
1223 1220 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL)
1224 1221 continue;
1225 1222 }
1226 1223
1227 1224 /*
1228 1225 * If the page doesn't have any translations, is
1229 1226 * dirty and not being shared, then push it out
1230 1227 * asynchronously and avoid waiting for the
1231 1228 * pageout daemon to do it for us.
1232 1229 *
1233 1230 * XXX - Do we really need to get the "exclusive"
1234 1231 * lock via an upgrade?
1235 1232 */
1236 1233 if ((flags & KPD_WRITEDIRTY) && !hat_page_is_mapped(pp) &&
1237 1234 hat_ismod(pp) && page_tryupgrade(pp)) {
1238 1235 /*
1239 1236 * Hold the vnode before releasing the page lock to
1240 1237 * prevent it from being freed and re-used by some
1241 1238 * other thread.
1242 1239 */
1243 1240 VN_HOLD(vp);
1244 1241 page_unlock(pp);
1245 1242
1246 1243 /*
1247 1244 * Want most powerful credentials we can get so
1248 1245 * use kcred.
1249 1246 */
1250 1247 (void) VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
1251 1248 B_ASYNC | B_FREE, kcred, NULL);
1252 1249 VN_RELE(vp);
1253 1250 } else {
1254 1251 page_unlock(pp);
1255 1252 }
1256 1253 }
1257 1254
1258 1255 /* If unlocking, release physical memory */
1259 1256 if (flags & KPD_LOCKED) {
1260 1257 pgcnt_t pages = btopr(len);
1261 1258 if ((kpd->kp_flags & KPD_NO_ANON) == 0)
1262 1259 atomic_add_long(&anon_segkp_pages_locked, -pages);
1263 1260 page_unresv(pages);
1264 1261 }
1265 1262 return (0);
1266 1263 }
1267 1264
1268 1265 /*
1269 1266 * Insert the kpd in the hash table.
1270 1267 */
1271 1268 static void
1272 1269 segkp_insert(struct seg *seg, struct segkp_data *kpd)
1273 1270 {
1274 1271 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1275 1272 int index;
1276 1273
1277 1274 /*
1278 1275 * Insert the kpd based on the address that will be returned
1279 1276 * via segkp_release.
1280 1277 */
1281 1278 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1282 1279 mutex_enter(&segkp_lock);
1283 1280 kpd->kp_next = kpsd->kpsd_hash[index];
1284 1281 kpsd->kpsd_hash[index] = kpd;
1285 1282 mutex_exit(&segkp_lock);
1286 1283 }
1287 1284
1288 1285 /*
1289 1286 * Remove kpd from the hash table.
1290 1287 */
1291 1288 static void
1292 1289 segkp_delete(struct seg *seg, struct segkp_data *kpd)
1293 1290 {
1294 1291 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1295 1292 struct segkp_data **kpp;
1296 1293 int index;
1297 1294
1298 1295 ASSERT(MUTEX_HELD(&segkp_lock));
1299 1296
1300 1297 index = SEGKP_HASH(stom(kpd->kp_base, kpd->kp_flags));
1301 1298 for (kpp = &kpsd->kpsd_hash[index];
1302 1299 *kpp != NULL; kpp = &((*kpp)->kp_next)) {
1303 1300 if (*kpp == kpd) {
1304 1301 *kpp = kpd->kp_next;
1305 1302 return;
1306 1303 }
1307 1304 }
1308 1305 panic("segkp_delete: unable to find element to delete");
1309 1306 /*NOTREACHED*/
1310 1307 }
1311 1308
1312 1309 /*
1313 1310 * Find the kpd associated with a vaddr.
1314 1311 *
1315 1312 * Most of the callers of segkp_find will pass the vaddr that
1316 1313 * hashes to the desired index, but there are cases where
1317 1314 * this is not true in which case we have to (potentially) scan
1318 1315 * the whole table looking for it. This should be very rare
1319 1316 * (e.g. a segkp_fault(F_INVAL) on an address somewhere in the
1320 1317 * middle of the segkp_data region).
1321 1318 */
1322 1319 static struct segkp_data *
1323 1320 segkp_find(struct seg *seg, caddr_t vaddr)
1324 1321 {
1325 1322 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1326 1323 struct segkp_data *kpd;
1327 1324 int i;
1328 1325 int stop;
1329 1326
1330 1327 i = stop = SEGKP_HASH(vaddr);
1331 1328 mutex_enter(&segkp_lock);
1332 1329 do {
1333 1330 for (kpd = kpsd->kpsd_hash[i]; kpd != NULL;
1334 1331 kpd = kpd->kp_next) {
1335 1332 if (vaddr >= kpd->kp_base &&
1336 1333 vaddr < kpd->kp_base + kpd->kp_len) {
1337 1334 mutex_exit(&segkp_lock);
1338 1335 return (kpd);
1339 1336 }
1340 1337 }
1341 1338 if (--i < 0)
1342 1339 i = SEGKP_HASHSZ - 1; /* Wrap */
1343 1340 } while (i != stop);
1344 1341 mutex_exit(&segkp_lock);
1345 1342 return (NULL); /* Not found */
1346 1343 }
1347 1344
1348 1345 /*
1349 1346 * returns size of swappable area.
1350 1347 */
1351 1348 size_t
1352 1349 swapsize(caddr_t v)
1353 1350 {
1354 1351 struct segkp_data *kpd;
1355 1352
1356 1353 if ((kpd = segkp_find(segkp, v)) != NULL)
1357 1354 return (SEGKP_MAPLEN(kpd->kp_len, kpd->kp_flags));
1358 1355 else
1359 1356 return (NULL);
1360 1357 }
1361 1358
1362 1359 /*
1363 1360 * Dump out all the active segkp pages
1364 1361 */
1365 1362 static void
1366 1363 segkp_dump(struct seg *seg)
1367 1364 {
1368 1365 int i;
1369 1366 struct segkp_data *kpd;
1370 1367 struct segkp_segdata *kpsd = (struct segkp_segdata *)seg->s_data;
1371 1368
1372 1369 for (i = 0; i < SEGKP_HASHSZ; i++) {
1373 1370 for (kpd = kpsd->kpsd_hash[i];
1374 1371 kpd != NULL; kpd = kpd->kp_next) {
1375 1372 pfn_t pfn;
1376 1373 caddr_t addr;
1377 1374 caddr_t eaddr;
1378 1375
1379 1376 addr = kpd->kp_base;
1380 1377 eaddr = addr + kpd->kp_len;
1381 1378 while (addr < eaddr) {
1382 1379 ASSERT(seg->s_as == &kas);
1383 1380 pfn = hat_getpfnum(seg->s_as->a_hat, addr);
1384 1381 if (pfn != PFN_INVALID)
1385 1382 dump_addpage(seg->s_as, addr, pfn);
1386 1383 addr += PAGESIZE;
1387 1384 dump_timeleft = dump_timeout;
1388 1385 }
1389 1386 }
1390 1387 }
1391 1388 }
1392 1389
1393 1390 /*ARGSUSED*/
1394 1391 static int
1395 1392 segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1396 1393 struct page ***ppp, enum lock_type type, enum seg_rw rw)
1397 1394 {
1398 1395 return (ENOTSUP);
1399 1396 }
1400 1397
1401 1398 /*ARGSUSED*/
1402 1399 static int
1403 1400 segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1404 1401 {
1405 1402 return (ENODEV);
1406 -}
1407 -
1408 -/*ARGSUSED*/
1409 -static lgrp_mem_policy_info_t *
1410 -segkp_getpolicy(struct seg *seg, caddr_t addr)
1411 -{
1412 - return (NULL);
1413 1403 }
1414 1404
1415 1405 /*ARGSUSED*/
1416 1406 static int
1417 1407 segkp_capable(struct seg *seg, segcapability_t capability)
1418 1408 {
1419 1409 return (0);
1420 1410 }
1421 1411
1422 1412 #include <sys/mem_config.h>
1423 1413
1424 1414 /*ARGSUSED*/
1425 1415 static void
1426 1416 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1427 1417 {}
1428 1418
1429 1419 /*
1430 1420 * During memory delete, turn off caches so that pages are not held.
1431 1421 * A better solution may be to unlock the pages while they are
1432 1422 * in the cache so that they may be collected naturally.
1433 1423 */
1434 1424
1435 1425 /*ARGSUSED*/
1436 1426 static int
1437 1427 segkp_mem_config_pre_del(void *arg, pgcnt_t delta_pages)
1438 1428 {
1439 1429 atomic_inc_32(&segkp_indel);
1440 1430 segkp_cache_free();
1441 1431 return (0);
1442 1432 }
1443 1433
1444 1434 /*ARGSUSED*/
1445 1435 static void
1446 1436 segkp_mem_config_post_del(void *arg, pgcnt_t delta_pages, int cancelled)
1447 1437 {
1448 1438 atomic_dec_32(&segkp_indel);
1449 1439 }
1450 1440
1451 1441 static kphysm_setup_vector_t segkp_mem_config_vec = {
1452 1442 KPHYSM_SETUP_VECTOR_VERSION,
1453 1443 segkp_mem_config_post_add,
1454 1444 segkp_mem_config_pre_del,
1455 1445 segkp_mem_config_post_del,
1456 1446 };
1457 1447
1458 1448 static void
1459 1449 segkpinit_mem_config(struct seg *seg)
1460 1450 {
1461 1451 int ret;
1462 1452
1463 1453 ret = kphysm_setup_func_register(&segkp_mem_config_vec, (void *)seg);
1464 1454 ASSERT(ret == 0);
1465 1455 }