segop_getpolicy already checks for a NULL op
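Rationale: because the common seg layer's dispatcher treats a missing getpolicy op as "no policy", the segmap driver no longer needs to supply its own stub, so this change deletes segmap_getpolicy() and its .getpolicy entry from segmap_ops. The sketch below illustrates the dispatch-level check the summary refers to; it is an assumption-labeled illustration (signature inferred from the seg_ops entry removed in this diff), not a copy of the wrapper in the common seg code.

	lgrp_mem_policy_info_t *
	segop_getpolicy(struct seg *seg, caddr_t addr)
	{
		/* A NULL op means the segment driver defines no memory policy. */
		if (seg->s_ops->getpolicy == NULL)
			return (NULL);

		return (seg->s_ops->getpolicy(seg, addr));
	}

With that NULL check in the dispatcher, removing the segmap entry is behavior-preserving: callers asking for the policy of a segkmap address still get NULL back.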
--- old/usr/src/uts/common/vm/seg_map.c
+++ new/usr/src/uts/common/vm/seg_map.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29 /*
30 30 * Portions of this source code were derived from Berkeley 4.3 BSD
31 31 * under license from the Regents of the University of California.
32 32 */
33 33
34 34 /*
35 35 * VM - generic vnode mapping segment.
36 36 *
37 37 * The segmap driver is used only by the kernel to get faster (than seg_vn)
38 38 * mappings [lower routine overhead; more persistent cache] to random
39 39 * vnode/offsets. Note that the kernel may (and does) use seg_vn as well.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/param.h>
45 45 #include <sys/sysmacros.h>
46 46 #include <sys/buf.h>
47 47 #include <sys/systm.h>
48 48 #include <sys/vnode.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/cmn_err.h>
55 55 #include <sys/debug.h>
56 56 #include <sys/thread.h>
57 57 #include <sys/dumphdr.h>
58 58 #include <sys/bitmap.h>
59 59 #include <sys/lgrp.h>
60 60
61 61 #include <vm/seg_kmem.h>
62 62 #include <vm/hat.h>
63 63 #include <vm/as.h>
64 64 #include <vm/seg.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/seg_map.h>
67 67 #include <vm/page.h>
68 68 #include <vm/pvn.h>
69 69 #include <vm/rm.h>
70 70
71 71 /*
72 72 * Private seg op routines.
73 73 */
74 74 static void segmap_free(struct seg *seg);
75 75 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
76 76 size_t len, enum fault_type type, enum seg_rw rw);
77 77 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
78 78 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
79 79 uint_t prot);
80 80 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
81 81 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
82 82 uint_t *protv);
83 83 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr);
84 84 static int segmap_gettype(struct seg *seg, caddr_t addr);
85 85 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
86 86 static void segmap_dump(struct seg *seg);
87 87 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
88 88 struct page ***ppp, enum lock_type type,
89 89 enum seg_rw rw);
90 90 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
91 -static lgrp_mem_policy_info_t *segmap_getpolicy(struct seg *seg,
92 - caddr_t addr);
93 91 static int segmap_capable(struct seg *seg, segcapability_t capability);
94 92
95 93 /* segkpm support */
96 94 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
97 95 struct smap *, enum seg_rw);
98 96 struct smap *get_smap_kpm(caddr_t, page_t **);
99 97
100 98 static struct seg_ops segmap_ops = {
101 99 .free = segmap_free,
102 100 .fault = segmap_fault,
103 101 .faulta = segmap_faulta,
104 102 .checkprot = segmap_checkprot,
105 103 .kluster = segmap_kluster,
106 104 .getprot = segmap_getprot,
107 105 .getoffset = segmap_getoffset,
108 106 .gettype = segmap_gettype,
109 107 .getvp = segmap_getvp,
110 108 .dump = segmap_dump,
111 109 .pagelock = segmap_pagelock,
112 110 .getmemid = segmap_getmemid,
113 - .getpolicy = segmap_getpolicy,
114 111 .capable = segmap_capable,
115 112 };
116 113
117 114 /*
118 115 * Private segmap routines.
119 116 */
120 117 static void segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
121 118 size_t len, enum seg_rw rw, struct smap *smp);
122 119 static void segmap_smapadd(struct smap *smp);
123 120 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
124 121 u_offset_t off, int hashid);
125 122 static void segmap_hashout(struct smap *smp);
126 123
127 124
128 125 /*
129 126 * Statistics for segmap operations.
130 127 *
131 128 * No explicit locking to protect these stats.
132 129 */
133 130 struct segmapcnt segmapcnt = {
134 131 { "fault", KSTAT_DATA_ULONG },
135 132 { "faulta", KSTAT_DATA_ULONG },
136 133 { "getmap", KSTAT_DATA_ULONG },
137 134 { "get_use", KSTAT_DATA_ULONG },
138 135 { "get_reclaim", KSTAT_DATA_ULONG },
139 136 { "get_reuse", KSTAT_DATA_ULONG },
140 137 { "get_unused", KSTAT_DATA_ULONG },
141 138 { "get_nofree", KSTAT_DATA_ULONG },
142 139 { "rel_async", KSTAT_DATA_ULONG },
143 140 { "rel_write", KSTAT_DATA_ULONG },
144 141 { "rel_free", KSTAT_DATA_ULONG },
145 142 { "rel_abort", KSTAT_DATA_ULONG },
146 143 { "rel_dontneed", KSTAT_DATA_ULONG },
147 144 { "release", KSTAT_DATA_ULONG },
148 145 { "pagecreate", KSTAT_DATA_ULONG },
149 146 { "free_notfree", KSTAT_DATA_ULONG },
150 147 { "free_dirty", KSTAT_DATA_ULONG },
151 148 { "free", KSTAT_DATA_ULONG },
152 149 { "stolen", KSTAT_DATA_ULONG },
153 150 { "get_nomtx", KSTAT_DATA_ULONG }
154 151 };
155 152
156 153 kstat_named_t *segmapcnt_ptr = (kstat_named_t *)&segmapcnt;
157 154 uint_t segmapcnt_ndata = sizeof (segmapcnt) / sizeof (kstat_named_t);
158 155
159 156 /*
160 157 * Return number of map pages in segment.
161 158 */
162 159 #define MAP_PAGES(seg) ((seg)->s_size >> MAXBSHIFT)
163 160
164 161 /*
165 162 * Translate addr into smap number within segment.
166 163 */
167 164 #define MAP_PAGE(seg, addr) (((addr) - (seg)->s_base) >> MAXBSHIFT)
168 165
169 166 /*
170 167 * Translate addr in seg into struct smap pointer.
171 168 */
172 169 #define GET_SMAP(seg, addr) \
173 170 &(((struct segmap_data *)((seg)->s_data))->smd_sm[MAP_PAGE(seg, addr)])
174 171
175 172 /*
176 173 * Bit in map (16 bit bitmap).
177 174 */
178 175 #define SMAP_BIT_MASK(bitindex) (1 << ((bitindex) & 0xf))
179 176
180 177 static int smd_colormsk = 0;
181 178 static int smd_ncolor = 0;
182 179 static int smd_nfree = 0;
183 180 static int smd_freemsk = 0;
184 181 #ifdef DEBUG
185 182 static int *colors_used;
186 183 #endif
187 184 static struct smap *smd_smap;
188 185 static struct smaphash *smd_hash;
189 186 #ifdef SEGMAP_HASHSTATS
190 187 static unsigned int *smd_hash_len;
191 188 #endif
192 189 static struct smfree *smd_free;
193 190 static ulong_t smd_hashmsk = 0;
194 191
195 192 #define SEGMAP_MAXCOLOR 2
196 193 #define SEGMAP_CACHE_PAD 64
197 194
198 195 union segmap_cpu {
199 196 struct {
200 197 uint32_t scpu_free_ndx[SEGMAP_MAXCOLOR];
201 198 struct smap *scpu_last_smap;
202 199 ulong_t scpu_getmap;
203 200 ulong_t scpu_release;
204 201 ulong_t scpu_get_reclaim;
205 202 ulong_t scpu_fault;
206 203 ulong_t scpu_pagecreate;
207 204 ulong_t scpu_get_reuse;
208 205 } scpu;
209 206 char scpu_pad[SEGMAP_CACHE_PAD];
210 207 };
211 208 static union segmap_cpu *smd_cpu;
212 209
213 210 /*
214 211 * There are three locks in seg_map:
215 212 * - per freelist mutexes
216 213 * - per hashchain mutexes
217 214 * - per smap mutexes
218 215 *
219 216 * The lock ordering is to get the smap mutex to lock down the slot
220 217 * first then the hash lock (for hash in/out (vp, off) list) or the
221 218 * freelist lock to put the slot back on the free list.
222 219 *
223 220 * The hash search is done by only holding the hashchain lock, when a wanted
224 221 * slot is found, we drop the hashchain lock then lock the slot so there
225 222 * is no overlapping of hashchain and smap locks. After the slot is
226 223 * locked, we verify again if the slot is still what we are looking
227 224 * for.
228 225 *
229 226 * Allocation of a free slot is done by holding the freelist lock,
230 227 * then locking the smap slot at the head of the freelist. This is
231 228 * in reversed lock order so mutex_tryenter() is used.
232 229 *
233 230 * The smap lock protects all fields in smap structure except for
234 231 * the link fields for hash/free lists which are protected by
235 232 * hashchain and freelist locks.
236 233 */
237 234
238 235 #define SHASHMTX(hashid) (&smd_hash[hashid].sh_mtx)
239 236
240 237 #define SMP2SMF(smp) (&smd_free[(smp - smd_smap) & smd_freemsk])
241 238 #define SMP2SMF_NDX(smp) (ushort_t)((smp - smd_smap) & smd_freemsk)
242 239
243 240 #define SMAPMTX(smp) (&smp->sm_mtx)
244 241
245 242 #define SMAP_HASHFUNC(vp, off, hashid) \
246 243 { \
247 244 hashid = ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
248 245 ((off) >> MAXBSHIFT)) & smd_hashmsk); \
249 246 }
250 247
251 248 /*
252 249 * The most frequently updated kstat counters are kept in the
253 250 * per cpu array to avoid hot cache blocks. The update function
254 251 * sums the cpu local counters to update the global counters.
255 252 */
256 253
257 254 /* ARGSUSED */
258 255 int
259 256 segmap_kstat_update(kstat_t *ksp, int rw)
260 257 {
261 258 int i;
262 259 ulong_t getmap, release, get_reclaim;
263 260 ulong_t fault, pagecreate, get_reuse;
264 261
265 262 if (rw == KSTAT_WRITE)
266 263 return (EACCES);
267 264 getmap = release = get_reclaim = (ulong_t)0;
268 265 fault = pagecreate = get_reuse = (ulong_t)0;
269 266 for (i = 0; i < max_ncpus; i++) {
270 267 getmap += smd_cpu[i].scpu.scpu_getmap;
271 268 release += smd_cpu[i].scpu.scpu_release;
272 269 get_reclaim += smd_cpu[i].scpu.scpu_get_reclaim;
273 270 fault += smd_cpu[i].scpu.scpu_fault;
274 271 pagecreate += smd_cpu[i].scpu.scpu_pagecreate;
275 272 get_reuse += smd_cpu[i].scpu.scpu_get_reuse;
276 273 }
277 274 segmapcnt.smp_getmap.value.ul = getmap;
278 275 segmapcnt.smp_release.value.ul = release;
279 276 segmapcnt.smp_get_reclaim.value.ul = get_reclaim;
280 277 segmapcnt.smp_fault.value.ul = fault;
281 278 segmapcnt.smp_pagecreate.value.ul = pagecreate;
282 279 segmapcnt.smp_get_reuse.value.ul = get_reuse;
283 280 return (0);
284 281 }
285 282
286 283 int
287 284 segmap_create(struct seg *seg, void *argsp)
288 285 {
289 286 struct segmap_data *smd;
290 287 struct smap *smp;
291 288 struct smfree *sm;
292 289 struct segmap_crargs *a = (struct segmap_crargs *)argsp;
293 290 struct smaphash *shashp;
294 291 union segmap_cpu *scpu;
295 292 long i, npages;
296 293 size_t hashsz;
297 294 uint_t nfreelist;
298 295 extern void prefetch_smap_w(void *);
299 296 extern int max_ncpus;
300 297
301 298 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
302 299
303 300 if (((uintptr_t)seg->s_base | seg->s_size) & MAXBOFFSET) {
304 301 panic("segkmap not MAXBSIZE aligned");
305 302 /*NOTREACHED*/
306 303 }
307 304
308 305 smd = kmem_zalloc(sizeof (struct segmap_data), KM_SLEEP);
309 306
310 307 seg->s_data = (void *)smd;
311 308 seg->s_ops = &segmap_ops;
312 309 smd->smd_prot = a->prot;
313 310
314 311 /*
315 312 * Scale the number of smap freelists to be
316 313 * proportional to max_ncpus * number of virtual colors.
317 314 * The caller can over-ride this scaling by providing
318 315 * a non-zero a->nfreelist argument.
319 316 */
320 317 nfreelist = a->nfreelist;
321 318 if (nfreelist == 0)
322 319 nfreelist = max_ncpus;
323 320 else if (nfreelist < 0 || nfreelist > 4 * max_ncpus) {
324 321 cmn_err(CE_WARN, "segmap_create: nfreelist out of range "
325 322 "%d, using %d", nfreelist, max_ncpus);
326 323 nfreelist = max_ncpus;
327 324 }
328 325 if (!ISP2(nfreelist)) {
329 326 /* round up nfreelist to the next power of two. */
330 327 nfreelist = 1 << (highbit(nfreelist));
331 328 }
332 329
333 330 /*
334 331 * Get the number of virtual colors - must be a power of 2.
335 332 */
336 333 if (a->shmsize)
337 334 smd_ncolor = a->shmsize >> MAXBSHIFT;
338 335 else
339 336 smd_ncolor = 1;
340 337 ASSERT((smd_ncolor & (smd_ncolor - 1)) == 0);
341 338 ASSERT(smd_ncolor <= SEGMAP_MAXCOLOR);
342 339 smd_colormsk = smd_ncolor - 1;
343 340 smd->smd_nfree = smd_nfree = smd_ncolor * nfreelist;
344 341 smd_freemsk = smd_nfree - 1;
345 342
346 343 /*
347 344 * Allocate and initialize the freelist headers.
348 345 * Note that sm_freeq[1] starts out as the release queue. This
349 346 * is known when the smap structures are initialized below.
350 347 */
351 348 smd_free = smd->smd_free =
352 349 kmem_zalloc(smd_nfree * sizeof (struct smfree), KM_SLEEP);
353 350 for (i = 0; i < smd_nfree; i++) {
354 351 sm = &smd->smd_free[i];
355 352 mutex_init(&sm->sm_freeq[0].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
356 353 mutex_init(&sm->sm_freeq[1].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
357 354 sm->sm_allocq = &sm->sm_freeq[0];
358 355 sm->sm_releq = &sm->sm_freeq[1];
359 356 }
360 357
361 358 /*
362 359 * Allocate and initialize the smap hash chain headers.
363 360 * Compute hash size rounding down to the next power of two.
364 361 */
365 362 npages = MAP_PAGES(seg);
366 363 smd->smd_npages = npages;
367 364 hashsz = npages / SMAP_HASHAVELEN;
368 365 hashsz = 1 << (highbit(hashsz)-1);
369 366 smd_hashmsk = hashsz - 1;
370 367 smd_hash = smd->smd_hash =
371 368 kmem_alloc(hashsz * sizeof (struct smaphash), KM_SLEEP);
372 369 #ifdef SEGMAP_HASHSTATS
373 370 smd_hash_len =
374 371 kmem_zalloc(hashsz * sizeof (unsigned int), KM_SLEEP);
375 372 #endif
376 373 for (i = 0, shashp = smd_hash; i < hashsz; i++, shashp++) {
377 374 shashp->sh_hash_list = NULL;
378 375 mutex_init(&shashp->sh_mtx, NULL, MUTEX_DEFAULT, NULL);
379 376 }
380 377
381 378 /*
382 379 * Allocate and initialize the smap structures.
383 380 * Link all slots onto the appropriate freelist.
384 381 * The smap array is large enough to affect boot time
385 382 * on large systems, so use memory prefetching and only
386 383 * go through the array 1 time. Inline an optimized version
387 384 * of segmap_smapadd to add structures to freelists with
388 385 * knowledge that no locks are needed here.
389 386 */
390 387 smd_smap = smd->smd_sm =
391 388 kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);
392 389
393 390 for (smp = &smd->smd_sm[MAP_PAGES(seg) - 1];
394 391 smp >= smd->smd_sm; smp--) {
395 392 struct smap *smpfreelist;
396 393 struct sm_freeq *releq;
397 394
398 395 prefetch_smap_w((char *)smp);
399 396
400 397 smp->sm_vp = NULL;
401 398 smp->sm_hash = NULL;
402 399 smp->sm_off = 0;
403 400 smp->sm_bitmap = 0;
404 401 smp->sm_refcnt = 0;
405 402 mutex_init(&smp->sm_mtx, NULL, MUTEX_DEFAULT, NULL);
406 403 smp->sm_free_ndx = SMP2SMF_NDX(smp);
407 404
408 405 sm = SMP2SMF(smp);
409 406 releq = sm->sm_releq;
410 407
411 408 smpfreelist = releq->smq_free;
412 409 if (smpfreelist == 0) {
413 410 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
414 411 } else {
415 412 smp->sm_next = smpfreelist;
416 413 smp->sm_prev = smpfreelist->sm_prev;
417 414 smpfreelist->sm_prev = smp;
418 415 smp->sm_prev->sm_next = smp;
419 416 releq->smq_free = smp->sm_next;
420 417 }
421 418
422 419 /*
423 420 * sm_flag = 0 (no SM_QNDX_ZERO) implies smap on sm_freeq[1]
424 421 */
425 422 smp->sm_flags = 0;
426 423
427 424 #ifdef SEGKPM_SUPPORT
428 425 /*
429 426 * Due to the fragile prefetch loop no
430 427 * separate function is used here.
431 428 */
432 429 smp->sm_kpme_next = NULL;
433 430 smp->sm_kpme_prev = NULL;
434 431 smp->sm_kpme_page = NULL;
435 432 #endif
436 433 }
437 434
438 435 /*
439 436 * Allocate the per color indices that distribute allocation
440 437 * requests over the free lists. Each cpu will have a private
441 438 * rotor index to spread the allocations even across the available
442 439 * smap freelists. Init the scpu_last_smap field to the first
443 440 * smap element so there is no need to check for NULL.
444 441 */
445 442 smd_cpu =
446 443 kmem_zalloc(sizeof (union segmap_cpu) * max_ncpus, KM_SLEEP);
447 444 for (i = 0, scpu = smd_cpu; i < max_ncpus; i++, scpu++) {
448 445 int j;
449 446 for (j = 0; j < smd_ncolor; j++)
450 447 scpu->scpu.scpu_free_ndx[j] = j;
451 448 scpu->scpu.scpu_last_smap = smd_smap;
452 449 }
453 450
454 451 vpm_init();
455 452
456 453 #ifdef DEBUG
457 454 /*
458 455 * Keep track of which colors are used more often.
459 456 */
460 457 colors_used = kmem_zalloc(smd_nfree * sizeof (int), KM_SLEEP);
461 458 #endif /* DEBUG */
462 459
463 460 return (0);
464 461 }
465 462
466 463 static void
467 464 segmap_free(seg)
468 465 struct seg *seg;
469 466 {
470 467 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
471 468 }
472 469
473 470 /*
474 471 * Do a F_SOFTUNLOCK call over the range requested.
475 472 * The range must have already been F_SOFTLOCK'ed.
476 473 */
477 474 static void
478 475 segmap_unlock(
479 476 struct hat *hat,
480 477 struct seg *seg,
481 478 caddr_t addr,
482 479 size_t len,
483 480 enum seg_rw rw,
484 481 struct smap *smp)
485 482 {
486 483 page_t *pp;
487 484 caddr_t adr;
488 485 u_offset_t off;
489 486 struct vnode *vp;
490 487 kmutex_t *smtx;
491 488
492 489 ASSERT(smp->sm_refcnt > 0);
493 490
494 491 #ifdef lint
495 492 seg = seg;
496 493 #endif
497 494
498 495 if (segmap_kpm && IS_KPM_ADDR(addr)) {
499 496
500 497 /*
501 498 * We're called only from segmap_fault and this was a
502 499 * NOP in case of a kpm based smap, so dangerous things
503 500 * must have happened in the meantime. Pages are prefaulted
504 501 * and locked in segmap_getmapflt and they will not be
505 502 * unlocked until segmap_release.
506 503 */
507 504 panic("segmap_unlock: called with kpm addr %p", (void *)addr);
508 505 /*NOTREACHED*/
509 506 }
510 507
511 508 vp = smp->sm_vp;
512 509 off = smp->sm_off + (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
513 510
514 511 hat_unlock(hat, addr, P2ROUNDUP(len, PAGESIZE));
515 512 for (adr = addr; adr < addr + len; adr += PAGESIZE, off += PAGESIZE) {
516 513 ushort_t bitmask;
517 514
518 515 /*
519 516 * Use page_find() instead of page_lookup() to
520 517 * find the page since we know that it has
521 518 * "shared" lock.
522 519 */
523 520 pp = page_find(vp, off);
524 521 if (pp == NULL) {
525 522 panic("segmap_unlock: page not found");
526 523 /*NOTREACHED*/
527 524 }
528 525
529 526 if (rw == S_WRITE) {
530 527 hat_setrefmod(pp);
531 528 } else if (rw != S_OTHER) {
532 529 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
533 530 "segmap_fault:pp %p vp %p offset %llx", pp, vp, off);
534 531 hat_setref(pp);
535 532 }
536 533
537 534 /*
538 535 * Clear bitmap, if the bit corresponding to "off" is set,
539 536 * since the page and translation are being unlocked.
540 537 */
541 538 bitmask = SMAP_BIT_MASK((off - smp->sm_off) >> PAGESHIFT);
542 539
543 540 /*
544 541 * Large Files: Following assertion is to verify
545 542 * the correctness of the cast to (int) above.
546 543 */
547 544 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
548 545 smtx = SMAPMTX(smp);
549 546 mutex_enter(smtx);
550 547 if (smp->sm_bitmap & bitmask) {
551 548 smp->sm_bitmap &= ~bitmask;
552 549 }
553 550 mutex_exit(smtx);
554 551
555 552 page_unlock(pp);
556 553 }
557 554 }
558 555
559 556 #define MAXPPB (MAXBSIZE/4096) /* assumes minimum page size of 4k */
560 557
561 558 /*
562 559 * This routine is called via a machine specific fault handling
563 560 * routine. It is also called by software routines wishing to
564 561 * lock or unlock a range of addresses.
565 562 *
566 563 * Note that this routine expects a page-aligned "addr".
567 564 */
568 565 faultcode_t
569 566 segmap_fault(
570 567 struct hat *hat,
571 568 struct seg *seg,
572 569 caddr_t addr,
573 570 size_t len,
574 571 enum fault_type type,
575 572 enum seg_rw rw)
576 573 {
577 574 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
578 575 struct smap *smp;
579 576 page_t *pp, **ppp;
580 577 struct vnode *vp;
581 578 u_offset_t off;
582 579 page_t *pl[MAXPPB + 1];
583 580 uint_t prot;
584 581 u_offset_t addroff;
585 582 caddr_t adr;
586 583 int err;
587 584 u_offset_t sm_off;
588 585 int hat_flag;
589 586
590 587 if (segmap_kpm && IS_KPM_ADDR(addr)) {
591 588 int newpage;
592 589 kmutex_t *smtx;
593 590
594 591 /*
595 592 * Pages are successfully prefaulted and locked in
596 593 * segmap_getmapflt and can't be unlocked until
597 594 * segmap_release. No hat mappings have to be locked
598 595 * and they also can't be unlocked as long as the
599 596 * caller owns an active kpm addr.
600 597 */
601 598 #ifndef DEBUG
602 599 if (type != F_SOFTUNLOCK)
603 600 return (0);
604 601 #endif
605 602
606 603 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
607 604 panic("segmap_fault: smap not found "
608 605 "for addr %p", (void *)addr);
609 606 /*NOTREACHED*/
610 607 }
611 608
612 609 smtx = SMAPMTX(smp);
613 610 #ifdef DEBUG
614 611 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
615 612 if (newpage) {
616 613 cmn_err(CE_WARN, "segmap_fault: newpage? smp %p",
617 614 (void *)smp);
618 615 }
619 616
620 617 if (type != F_SOFTUNLOCK) {
621 618 mutex_exit(smtx);
622 619 return (0);
623 620 }
624 621 #endif
625 622 mutex_exit(smtx);
626 623 vp = smp->sm_vp;
627 624 sm_off = smp->sm_off;
628 625
629 626 if (vp == NULL)
630 627 return (FC_MAKE_ERR(EIO));
631 628
632 629 ASSERT(smp->sm_refcnt > 0);
633 630
634 631 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
635 632 if (addroff + len > MAXBSIZE)
636 633 panic("segmap_fault: endaddr %p exceeds MAXBSIZE chunk",
637 634 (void *)(addr + len));
638 635
639 636 off = sm_off + addroff;
640 637
641 638 pp = page_find(vp, off);
642 639
643 640 if (pp == NULL)
644 641 panic("segmap_fault: softunlock page not found");
645 642
646 643 /*
647 644 * Set ref bit also here in case of S_OTHER to avoid the
648 645 * overhead of supporting other cases than F_SOFTUNLOCK
649 646 * with segkpm. We can do this because the underlying
650 647 * pages are locked anyway.
651 648 */
652 649 if (rw == S_WRITE) {
653 650 hat_setrefmod(pp);
654 651 } else {
655 652 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
656 653 "segmap_fault:pp %p vp %p offset %llx",
657 654 pp, vp, off);
658 655 hat_setref(pp);
659 656 }
660 657
661 658 return (0);
662 659 }
663 660
664 661 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
665 662 smp = GET_SMAP(seg, addr);
666 663 vp = smp->sm_vp;
667 664 sm_off = smp->sm_off;
668 665
669 666 if (vp == NULL)
670 667 return (FC_MAKE_ERR(EIO));
671 668
672 669 ASSERT(smp->sm_refcnt > 0);
673 670
674 671 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
675 672 if (addroff + len > MAXBSIZE) {
676 673 panic("segmap_fault: endaddr %p "
677 674 "exceeds MAXBSIZE chunk", (void *)(addr + len));
678 675 /*NOTREACHED*/
679 676 }
680 677 off = sm_off + addroff;
681 678
682 679 /*
683 680 * First handle the easy stuff
684 681 */
685 682 if (type == F_SOFTUNLOCK) {
686 683 segmap_unlock(hat, seg, addr, len, rw, smp);
687 684 return (0);
688 685 }
689 686
690 687 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
691 688 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
692 689 err = VOP_GETPAGE(vp, (offset_t)off, len, &prot, pl, MAXBSIZE,
693 690 seg, addr, rw, CRED(), NULL);
694 691
695 692 if (err)
696 693 return (FC_MAKE_ERR(err));
697 694
698 695 prot &= smd->smd_prot;
699 696
700 697 /*
701 698 * Handle all pages returned in the pl[] array.
702 699 * This loop is coded on the assumption that if
703 700 * there was no error from the VOP_GETPAGE routine,
704 701 * that the page list returned will contain all the
705 702 * needed pages for the vp from [off..off + len].
706 703 */
707 704 ppp = pl;
708 705 while ((pp = *ppp++) != NULL) {
709 706 u_offset_t poff;
710 707 ASSERT(pp->p_vnode == vp);
711 708 hat_flag = HAT_LOAD;
712 709
713 710 /*
714 711 * Verify that the pages returned are within the range
715 712 * of this segmap region. Note that it is theoretically
716 713 * possible for pages outside this range to be returned,
717 714 * but it is not very likely. If we cannot use the
718 715 * page here, just release it and go on to the next one.
719 716 */
720 717 if (pp->p_offset < sm_off ||
721 718 pp->p_offset >= sm_off + MAXBSIZE) {
722 719 (void) page_release(pp, 1);
723 720 continue;
724 721 }
725 722
726 723 ASSERT(hat == kas.a_hat);
727 724 poff = pp->p_offset;
728 725 adr = addr + (poff - off);
729 726 if (adr >= addr && adr < addr + len) {
730 727 hat_setref(pp);
731 728 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
732 729 "segmap_fault:pp %p vp %p offset %llx",
733 730 pp, vp, poff);
734 731 if (type == F_SOFTLOCK)
735 732 hat_flag = HAT_LOAD_LOCK;
736 733 }
737 734
738 735 /*
739 736 * Deal with VMODSORT pages here. If we know this is a write
740 737 * do the setmod now and allow write protection.
741 738 * As long as it's modified or not S_OTHER, remove write
742 739 * protection. With S_OTHER it's up to the FS to deal with this.
743 740 */
744 741 if (IS_VMODSORT(vp)) {
745 742 if (rw == S_WRITE)
746 743 hat_setmod(pp);
747 744 else if (rw != S_OTHER && !hat_ismod(pp))
748 745 prot &= ~PROT_WRITE;
749 746 }
750 747
751 748 hat_memload(hat, adr, pp, prot, hat_flag);
752 749 if (hat_flag != HAT_LOAD_LOCK)
753 750 page_unlock(pp);
754 751 }
755 752 return (0);
756 753 }
757 754
758 755 /*
759 756 * This routine is used to start I/O on pages asynchronously.
760 757 */
761 758 static faultcode_t
762 759 segmap_faulta(struct seg *seg, caddr_t addr)
763 760 {
764 761 struct smap *smp;
765 762 struct vnode *vp;
766 763 u_offset_t off;
767 764 int err;
768 765
769 766 if (segmap_kpm && IS_KPM_ADDR(addr)) {
770 767 int newpage;
771 768 kmutex_t *smtx;
772 769
773 770 /*
774 771 * Pages are successfully prefaulted and locked in
775 772 * segmap_getmapflt and can't be unlocked until
776 773 * segmap_release. No hat mappings have to be locked
777 774 * and they also can't be unlocked as long as the
778 775 * caller owns an active kpm addr.
779 776 */
780 777 #ifdef DEBUG
781 778 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
782 779 panic("segmap_faulta: smap not found "
783 780 "for addr %p", (void *)addr);
784 781 /*NOTREACHED*/
785 782 }
786 783
787 784 smtx = SMAPMTX(smp);
788 785 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
789 786 mutex_exit(smtx);
790 787 if (newpage)
791 788 cmn_err(CE_WARN, "segmap_faulta: newpage? smp %p",
792 789 (void *)smp);
793 790 #endif
794 791 return (0);
795 792 }
796 793
797 794 segmapcnt.smp_faulta.value.ul++;
798 795 smp = GET_SMAP(seg, addr);
799 796
800 797 ASSERT(smp->sm_refcnt > 0);
801 798
802 799 vp = smp->sm_vp;
803 800 off = smp->sm_off;
804 801
805 802 if (vp == NULL) {
806 803 cmn_err(CE_WARN, "segmap_faulta - no vp");
807 804 return (FC_MAKE_ERR(EIO));
808 805 }
809 806
810 807 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
811 808 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
812 809
813 810 err = VOP_GETPAGE(vp, (offset_t)(off + ((offset_t)((uintptr_t)addr
814 811 & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
815 812 seg, addr, S_READ, CRED(), NULL);
816 813
817 814 if (err)
818 815 return (FC_MAKE_ERR(err));
819 816 return (0);
820 817 }
821 818
822 819 /*ARGSUSED*/
823 820 static int
824 821 segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
825 822 {
826 823 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
827 824
828 825 ASSERT(seg->s_as && RW_LOCK_HELD(&seg->s_as->a_lock));
829 826
830 827 /*
831 828 * Need not acquire the segment lock since
832 829 * "smd_prot" is a read-only field.
833 830 */
834 831 return (((smd->smd_prot & prot) != prot) ? EACCES : 0);
835 832 }
836 833
837 834 static int
838 835 segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
839 836 {
840 837 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
841 838 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
842 839
843 840 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
844 841
845 842 if (pgno != 0) {
846 843 do {
847 844 protv[--pgno] = smd->smd_prot;
848 845 } while (pgno != 0);
849 846 }
850 847 return (0);
851 848 }
852 849
853 850 static u_offset_t
854 851 segmap_getoffset(struct seg *seg, caddr_t addr)
855 852 {
856 853 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
857 854
858 855 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
859 856
860 857 return ((u_offset_t)smd->smd_sm->sm_off + (addr - seg->s_base));
861 858 }
862 859
863 860 /*ARGSUSED*/
864 861 static int
865 862 segmap_gettype(struct seg *seg, caddr_t addr)
866 863 {
867 864 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
868 865
869 866 return (MAP_SHARED);
870 867 }
871 868
872 869 /*ARGSUSED*/
873 870 static int
874 871 segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
875 872 {
876 873 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
877 874
878 875 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
879 876
880 877 /* XXX - This doesn't make any sense */
881 878 *vpp = smd->smd_sm->sm_vp;
882 879 return (0);
883 880 }
884 881
885 882 /*
886 883 * Check to see if it makes sense to do kluster/read ahead to
887 884 * addr + delta relative to the mapping at addr. We assume here
888 885 * that delta is a signed PAGESIZE'd multiple (which can be negative).
889 886 *
890 887 * For segmap we always "approve" of this action from our standpoint.
891 888 */
892 889 /*ARGSUSED*/
893 890 static int
894 891 segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
895 892 {
896 893 return (0);
897 894 }
898 895
899 896 /*
900 897 * Special private segmap operations
901 898 */
902 899
903 900 /*
904 901 * Add smap to the appropriate free list.
905 902 */
906 903 static void
907 904 segmap_smapadd(struct smap *smp)
908 905 {
909 906 struct smfree *sm;
910 907 struct smap *smpfreelist;
911 908 struct sm_freeq *releq;
912 909
913 910 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
914 911
915 912 if (smp->sm_refcnt != 0) {
916 913 panic("segmap_smapadd");
917 914 /*NOTREACHED*/
918 915 }
919 916
920 917 sm = &smd_free[smp->sm_free_ndx];
921 918 /*
922 919 * Add to the tail of the release queue
923 920 * Note that sm_releq and sm_allocq could toggle
924 921 * before we get the lock. This does not affect
925 922 * correctness as the 2 queues are only maintained
926 923 * to reduce lock pressure.
927 924 */
928 925 releq = sm->sm_releq;
929 926 if (releq == &sm->sm_freeq[0])
930 927 smp->sm_flags |= SM_QNDX_ZERO;
931 928 else
932 929 smp->sm_flags &= ~SM_QNDX_ZERO;
933 930 mutex_enter(&releq->smq_mtx);
934 931 smpfreelist = releq->smq_free;
935 932 if (smpfreelist == 0) {
936 933 int want;
937 934
938 935 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
939 936 /*
940 937 * Both queue mutexes held to set sm_want;
941 938 * snapshot the value before dropping releq mutex.
942 939 * If sm_want appears after the releq mutex is dropped,
943 940 * then the smap just freed is already gone.
944 941 */
945 942 want = sm->sm_want;
946 943 mutex_exit(&releq->smq_mtx);
947 944 /*
948 945 * See if there was a waiter before dropping the releq mutex
949 946 * then recheck after obtaining sm_freeq[0] mutex as
950 947 * the another thread may have already signaled.
951 948 * another thread may have already signaled.
952 949 if (want) {
953 950 mutex_enter(&sm->sm_freeq[0].smq_mtx);
954 951 if (sm->sm_want)
955 952 cv_signal(&sm->sm_free_cv);
956 953 mutex_exit(&sm->sm_freeq[0].smq_mtx);
957 954 }
958 955 } else {
959 956 smp->sm_next = smpfreelist;
960 957 smp->sm_prev = smpfreelist->sm_prev;
961 958 smpfreelist->sm_prev = smp;
962 959 smp->sm_prev->sm_next = smp;
963 960 mutex_exit(&releq->smq_mtx);
964 961 }
965 962 }
966 963
967 964
968 965 static struct smap *
969 966 segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
970 967 {
971 968 struct smap **hpp;
972 969 struct smap *tmp;
973 970 kmutex_t *hmtx;
974 971
975 972 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
976 973 ASSERT(smp->sm_vp == NULL);
977 974 ASSERT(smp->sm_hash == NULL);
978 975 ASSERT(smp->sm_prev == NULL);
979 976 ASSERT(smp->sm_next == NULL);
980 977 ASSERT(hashid >= 0 && hashid <= smd_hashmsk);
981 978
982 979 hmtx = SHASHMTX(hashid);
983 980
984 981 mutex_enter(hmtx);
985 982 /*
986 983 * First we need to verify that no one has created a smp
987 984 * with (vp,off) as its tag before us.
988 985 */
989 986 for (tmp = smd_hash[hashid].sh_hash_list;
990 987 tmp != NULL; tmp = tmp->sm_hash)
991 988 if (tmp->sm_vp == vp && tmp->sm_off == off)
992 989 break;
993 990
994 991 if (tmp == NULL) {
995 992 /*
996 993 * No one created one yet.
997 994 *
998 995 * Funniness here - we don't increment the ref count on the
999 996 * vnode * even though we have another pointer to it here.
1000 997 * The reason for this is that we don't want the fact that
1001 998 * a seg_map entry somewhere refers to a vnode to prevent the
1002 999 * vnode * itself from going away. This is because this
1003 1000 * reference to the vnode is a "soft one". In the case where
1004 1001 * a mapping is being used by a rdwr [or directory routine?]
1005 1002 * there already has to be a non-zero ref count on the vnode.
1006 1003 * In the case where the vp has been freed and the smap
1007 1004 * structure is on the free list, there are no pages in memory
1008 1005 * that can refer to the vnode. Thus even if we reuse the same
1009 1006 * vnode/smap structure for a vnode which has the same
1010 1007 * address but represents a different object, we are ok.
1011 1008 */
1012 1009 smp->sm_vp = vp;
1013 1010 smp->sm_off = off;
1014 1011
1015 1012 hpp = &smd_hash[hashid].sh_hash_list;
1016 1013 smp->sm_hash = *hpp;
1017 1014 *hpp = smp;
1018 1015 #ifdef SEGMAP_HASHSTATS
1019 1016 smd_hash_len[hashid]++;
1020 1017 #endif
1021 1018 }
1022 1019 mutex_exit(hmtx);
1023 1020
1024 1021 return (tmp);
1025 1022 }
1026 1023
1027 1024 static void
1028 1025 segmap_hashout(struct smap *smp)
1029 1026 {
1030 1027 struct smap **hpp, *hp;
1031 1028 struct vnode *vp;
1032 1029 kmutex_t *mtx;
1033 1030 int hashid;
1034 1031 u_offset_t off;
1035 1032
1036 1033 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1037 1034
1038 1035 vp = smp->sm_vp;
1039 1036 off = smp->sm_off;
1040 1037
1041 1038 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1042 1039 mtx = SHASHMTX(hashid);
1043 1040 mutex_enter(mtx);
1044 1041
1045 1042 hpp = &smd_hash[hashid].sh_hash_list;
1046 1043 for (;;) {
1047 1044 hp = *hpp;
1048 1045 if (hp == NULL) {
1049 1046 panic("segmap_hashout");
1050 1047 /*NOTREACHED*/
1051 1048 }
1052 1049 if (hp == smp)
1053 1050 break;
1054 1051 hpp = &hp->sm_hash;
1055 1052 }
1056 1053
1057 1054 *hpp = smp->sm_hash;
1058 1055 smp->sm_hash = NULL;
1059 1056 #ifdef SEGMAP_HASHSTATS
1060 1057 smd_hash_len[hashid]--;
1061 1058 #endif
1062 1059 mutex_exit(mtx);
1063 1060
1064 1061 smp->sm_vp = NULL;
1065 1062 smp->sm_off = (u_offset_t)0;
1066 1063
1067 1064 }
1068 1065
1069 1066 /*
1070 1067 * Attempt to free unmodified, unmapped, and non locked segmap
1071 1068 * pages.
1072 1069 */
1073 1070 void
1074 1071 segmap_pagefree(struct vnode *vp, u_offset_t off)
1075 1072 {
1076 1073 u_offset_t pgoff;
1077 1074 page_t *pp;
1078 1075
1079 1076 for (pgoff = off; pgoff < off + MAXBSIZE; pgoff += PAGESIZE) {
1080 1077
1081 1078 if ((pp = page_lookup_nowait(vp, pgoff, SE_EXCL)) == NULL)
1082 1079 continue;
1083 1080
1084 1081 switch (page_release(pp, 1)) {
1085 1082 case PGREL_NOTREL:
1086 1083 segmapcnt.smp_free_notfree.value.ul++;
1087 1084 break;
1088 1085 case PGREL_MOD:
1089 1086 segmapcnt.smp_free_dirty.value.ul++;
1090 1087 break;
1091 1088 case PGREL_CLEAN:
1092 1089 segmapcnt.smp_free.value.ul++;
1093 1090 break;
1094 1091 }
1095 1092 }
1096 1093 }
1097 1094
1098 1095 /*
1099 1096 * Locks held on entry: smap lock
1100 1097 * Locks held on exit : smap lock.
1101 1098 */
1102 1099
1103 1100 static void
1104 1101 grab_smp(struct smap *smp, page_t *pp)
1105 1102 {
1106 1103 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1107 1104 ASSERT(smp->sm_refcnt == 0);
1108 1105
1109 1106 if (smp->sm_vp != (struct vnode *)NULL) {
1110 1107 struct vnode *vp = smp->sm_vp;
1111 1108 u_offset_t off = smp->sm_off;
1112 1109 /*
1113 1110 * Destroy old vnode association and
1114 1111 * unload any hardware translations to
1115 1112 * the old object.
1116 1113 */
1117 1114 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reuse++;
1118 1115 segmap_hashout(smp);
1119 1116
1120 1117 /*
1121 1118 * This node is off freelist and hashlist,
1122 1119 * so there is no reason to drop/reacquire sm_mtx
1123 1120 * across calls to hat_unload.
1124 1121 */
1125 1122 if (segmap_kpm) {
1126 1123 caddr_t vaddr;
1127 1124 int hat_unload_needed = 0;
1128 1125
1129 1126 /*
1130 1127 * unload kpm mapping
1131 1128 */
1132 1129 if (pp != NULL) {
1133 1130 vaddr = hat_kpm_page2va(pp, 1);
1134 1131 hat_kpm_mapout(pp, GET_KPME(smp), vaddr);
1135 1132 page_unlock(pp);
1136 1133 }
1137 1134
1138 1135 /*
1139 1136 * Check if we have (also) the rare case of a
1140 1137 * non kpm mapping.
1141 1138 */
1142 1139 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
1143 1140 hat_unload_needed = 1;
1144 1141 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1145 1142 }
1146 1143
1147 1144 if (hat_unload_needed) {
1148 1145 hat_unload(kas.a_hat, segkmap->s_base +
1149 1146 ((smp - smd_smap) * MAXBSIZE),
1150 1147 MAXBSIZE, HAT_UNLOAD);
1151 1148 }
1152 1149
1153 1150 } else {
1154 1151 ASSERT(smp->sm_flags & SM_NOTKPM_RELEASED);
1155 1152 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1156 1153 hat_unload(kas.a_hat, segkmap->s_base +
1157 1154 ((smp - smd_smap) * MAXBSIZE),
1158 1155 MAXBSIZE, HAT_UNLOAD);
1159 1156 }
1160 1157 segmap_pagefree(vp, off);
1161 1158 }
1162 1159 }
1163 1160
1164 1161 static struct smap *
1165 1162 get_free_smp(int free_ndx)
1166 1163 {
1167 1164 struct smfree *sm;
1168 1165 kmutex_t *smtx;
1169 1166 struct smap *smp, *first;
1170 1167 struct sm_freeq *allocq, *releq;
1171 1168 struct kpme *kpme;
1172 1169 page_t *pp = NULL;
1173 1170 int end_ndx, page_locked = 0;
1174 1171
1175 1172 end_ndx = free_ndx;
1176 1173 sm = &smd_free[free_ndx];
1177 1174
1178 1175 retry_queue:
1179 1176 allocq = sm->sm_allocq;
1180 1177 mutex_enter(&allocq->smq_mtx);
1181 1178
1182 1179 if ((smp = allocq->smq_free) == NULL) {
1183 1180
1184 1181 skip_queue:
1185 1182 /*
1186 1183 * The alloc list is empty or this queue is being skipped;
1187 1184 * first see if the allocq toggled.
1188 1185 */
1189 1186 if (sm->sm_allocq != allocq) {
1190 1187 /* queue changed */
1191 1188 mutex_exit(&allocq->smq_mtx);
1192 1189 goto retry_queue;
1193 1190 }
1194 1191 releq = sm->sm_releq;
1195 1192 if (!mutex_tryenter(&releq->smq_mtx)) {
1196 1193 /* cannot get releq; a free smp may be there now */
1197 1194 mutex_exit(&allocq->smq_mtx);
1198 1195
1199 1196 /*
1200 1197 * This loop could spin forever if this thread has
1201 1198 * higher priority than the thread that is holding
1202 1199 * releq->smq_mtx. In order to force the other thread
1203 1200 * to run, we'll lock/unlock the mutex which is safe
1204 1201 * since we just unlocked the allocq mutex.
1205 1202 */
1206 1203 mutex_enter(&releq->smq_mtx);
1207 1204 mutex_exit(&releq->smq_mtx);
1208 1205 goto retry_queue;
1209 1206 }
1210 1207 if (releq->smq_free == NULL) {
1211 1208 /*
1212 1209 * This freelist is empty.
1213 1210 * This should not happen unless clients
1214 1211 * are failing to release the segmap
1215 1212 * window after accessing the data.
1216 1213 * Before resorting to sleeping, try
1217 1214 * the next list of the same color.
1218 1215 */
1219 1216 free_ndx = (free_ndx + smd_ncolor) & smd_freemsk;
1220 1217 if (free_ndx != end_ndx) {
1221 1218 mutex_exit(&releq->smq_mtx);
1222 1219 mutex_exit(&allocq->smq_mtx);
1223 1220 sm = &smd_free[free_ndx];
1224 1221 goto retry_queue;
1225 1222 }
1226 1223 /*
1227 1224 * Tried all freelists of the same color once,
1228 1225 * wait on this list and hope something gets freed.
1229 1226 */
1230 1227 segmapcnt.smp_get_nofree.value.ul++;
1231 1228 sm->sm_want++;
1232 1229 mutex_exit(&sm->sm_freeq[1].smq_mtx);
1233 1230 cv_wait(&sm->sm_free_cv,
1234 1231 &sm->sm_freeq[0].smq_mtx);
1235 1232 sm->sm_want--;
1236 1233 mutex_exit(&sm->sm_freeq[0].smq_mtx);
1237 1234 sm = &smd_free[free_ndx];
1238 1235 goto retry_queue;
1239 1236 } else {
1240 1237 /*
1241 1238 * Something on the rele queue; flip the alloc
1242 1239 * and rele queues and retry.
1243 1240 */
1244 1241 sm->sm_allocq = releq;
1245 1242 sm->sm_releq = allocq;
1246 1243 mutex_exit(&allocq->smq_mtx);
1247 1244 mutex_exit(&releq->smq_mtx);
1248 1245 if (page_locked) {
1249 1246 delay(hz >> 2);
1250 1247 page_locked = 0;
1251 1248 }
1252 1249 goto retry_queue;
1253 1250 }
1254 1251 } else {
1255 1252 /*
1256 1253 * Fastpath the case we get the smap mutex
1257 1254 * on the first try.
1258 1255 */
1259 1256 first = smp;
1260 1257 next_smap:
1261 1258 smtx = SMAPMTX(smp);
1262 1259 if (!mutex_tryenter(smtx)) {
1263 1260 /*
1264 1261 * Another thread is trying to reclaim this slot.
1265 1262 * Skip to the next queue or smap.
1266 1263 */
1267 1264 if ((smp = smp->sm_next) == first) {
1268 1265 goto skip_queue;
1269 1266 } else {
1270 1267 goto next_smap;
1271 1268 }
1272 1269 } else {
1273 1270 /*
1274 1271 * if kpme exists, get shared lock on the page
1275 1272 */
1276 1273 if (segmap_kpm && smp->sm_vp != NULL) {
1277 1274
1278 1275 kpme = GET_KPME(smp);
1279 1276 pp = kpme->kpe_page;
1280 1277
1281 1278 if (pp != NULL) {
1282 1279 if (!page_trylock(pp, SE_SHARED)) {
1283 1280 smp = smp->sm_next;
1284 1281 mutex_exit(smtx);
1285 1282 page_locked = 1;
1286 1283
1287 1284 pp = NULL;
1288 1285
1289 1286 if (smp == first) {
1290 1287 goto skip_queue;
1291 1288 } else {
1292 1289 goto next_smap;
1293 1290 }
1294 1291 } else {
1295 1292 if (kpme->kpe_page == NULL) {
1296 1293 page_unlock(pp);
1297 1294 pp = NULL;
1298 1295 }
1299 1296 }
1300 1297 }
1301 1298 }
1302 1299
1303 1300 /*
1304 1301 * At this point, we've selected smp. Remove smp
1305 1302 * from its freelist. If smp is the first one in
1306 1303 * the freelist, update the head of the freelist.
1307 1304 */
1308 1305 if (first == smp) {
1309 1306 ASSERT(first == allocq->smq_free);
1310 1307 allocq->smq_free = smp->sm_next;
1311 1308 }
1312 1309
1313 1310 /*
1314 1311 * if the head of the freelist still points to smp,
1315 1312 * then there are no more free smaps in that list.
1316 1313 */
1317 1314 if (allocq->smq_free == smp)
1318 1315 /*
1319 1316 * Took the last one
1320 1317 */
1321 1318 allocq->smq_free = NULL;
1322 1319 else {
1323 1320 smp->sm_prev->sm_next = smp->sm_next;
1324 1321 smp->sm_next->sm_prev = smp->sm_prev;
1325 1322 }
1326 1323 mutex_exit(&allocq->smq_mtx);
1327 1324 smp->sm_prev = smp->sm_next = NULL;
1328 1325
1329 1326 /*
1330 1327 * if pp != NULL, pp must have been locked;
1331 1328 * grab_smp() unlocks pp.
1332 1329 */
1333 1330 ASSERT((pp == NULL) || PAGE_LOCKED(pp));
1334 1331 grab_smp(smp, pp);
1335 1332 /* return smp locked. */
1336 1333 ASSERT(SMAPMTX(smp) == smtx);
1337 1334 ASSERT(MUTEX_HELD(smtx));
1338 1335 return (smp);
1339 1336 }
1340 1337 }
1341 1338 }
1342 1339
1343 1340 /*
1344 1341 * Special public segmap operations
1345 1342 */
1346 1343
1347 1344 /*
1348 1345 * Create pages (without using VOP_GETPAGE) and load up translations to them.
1349 1346 * If softlock is TRUE, then set things up so that it looks like a call
1350 1347 * to segmap_fault with F_SOFTLOCK.
1351 1348 *
1352 1349 * Returns 1, if a page is created by calling page_create_va(), or 0 otherwise.
1353 1350 *
1354 1351 * All fields in the generic segment (struct seg) are considered to be
1355 1352 * read-only for "segmap" even though the kernel address space (kas) may
1356 1353 * not be locked, hence no lock is needed to access them.
1357 1354 */
1358 1355 int
1359 1356 segmap_pagecreate(struct seg *seg, caddr_t addr, size_t len, int softlock)
1360 1357 {
1361 1358 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
1362 1359 page_t *pp;
1363 1360 u_offset_t off;
1364 1361 struct smap *smp;
1365 1362 struct vnode *vp;
1366 1363 caddr_t eaddr;
1367 1364 int newpage = 0;
1368 1365 uint_t prot;
1369 1366 kmutex_t *smtx;
1370 1367 int hat_flag;
1371 1368
1372 1369 ASSERT(seg->s_as == &kas);
1373 1370
1374 1371 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1375 1372 /*
1376 1373 * Pages are successfully prefaulted and locked in
1377 1374 * segmap_getmapflt and can't be unlocked until
1378 1375 * segmap_release. The SM_KPM_NEWPAGE flag is set
1379 1376 * in segmap_pagecreate_kpm when new pages are created.
1380 1377 * in segmap_pagecreate_kpm when new pages are created,
1381 1378 */
1382 1379 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1383 1380 panic("segmap_pagecreate: smap not found "
1384 1381 "for addr %p", (void *)addr);
1385 1382 /*NOTREACHED*/
1386 1383 }
1387 1384
1388 1385 smtx = SMAPMTX(smp);
1389 1386 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
1390 1387 smp->sm_flags &= ~SM_KPM_NEWPAGE;
1391 1388 mutex_exit(smtx);
1392 1389
1393 1390 return (newpage);
1394 1391 }
1395 1392
1396 1393 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
1397 1394
1398 1395 eaddr = addr + len;
1399 1396 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1400 1397
1401 1398 smp = GET_SMAP(seg, addr);
1402 1399
1403 1400 /*
1404 1401 * We don't grab smp mutex here since we assume the smp
1405 1402 * has a refcnt set already which prevents the slot from
1406 1403 * changing its id.
1407 1404 */
1408 1405 ASSERT(smp->sm_refcnt > 0);
1409 1406
1410 1407 vp = smp->sm_vp;
1411 1408 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1412 1409 prot = smd->smd_prot;
1413 1410
1414 1411 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1415 1412 hat_flag = HAT_LOAD;
1416 1413 pp = page_lookup(vp, off, SE_SHARED);
1417 1414 if (pp == NULL) {
1418 1415 ushort_t bitindex;
1419 1416
1420 1417 if ((pp = page_create_va(vp, off,
1421 1418 PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
1422 1419 panic("segmap_pagecreate: page_create failed");
1423 1420 /*NOTREACHED*/
1424 1421 }
1425 1422 newpage = 1;
1426 1423 page_io_unlock(pp);
1427 1424
1428 1425 /*
1429 1426 * Since pages created here do not contain valid
1430 1427 * data until the caller writes into them, the
1431 1428 * "exclusive" lock will not be dropped to prevent
1432 1429 * other users from accessing the page. We also
1433 1430 * have to lock the translation to prevent a fault
1434 1431 * from occurring when the virtual address mapped by
1435 1432 * this page is written into. This is necessary to
1436 1433 * avoid a deadlock since we haven't dropped the
1437 1434 * "exclusive" lock.
1438 1435 */
1439 1436 bitindex = (ushort_t)((off - smp->sm_off) >> PAGESHIFT);
1440 1437
1441 1438 /*
1442 1439 * Large Files: The following assertion is to
1443 1440 * verify the cast above.
1444 1441 */
1445 1442 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1446 1443 smtx = SMAPMTX(smp);
1447 1444 mutex_enter(smtx);
1448 1445 smp->sm_bitmap |= SMAP_BIT_MASK(bitindex);
1449 1446 mutex_exit(smtx);
1450 1447
1451 1448 hat_flag = HAT_LOAD_LOCK;
1452 1449 } else if (softlock) {
1453 1450 hat_flag = HAT_LOAD_LOCK;
1454 1451 }
1455 1452
1456 1453 if (IS_VMODSORT(pp->p_vnode) && (prot & PROT_WRITE))
1457 1454 hat_setmod(pp);
1458 1455
1459 1456 hat_memload(kas.a_hat, addr, pp, prot, hat_flag);
1460 1457
1461 1458 if (hat_flag != HAT_LOAD_LOCK)
1462 1459 page_unlock(pp);
1463 1460
1464 1461 TRACE_5(TR_FAC_VM, TR_SEGMAP_PAGECREATE,
1465 1462 "segmap_pagecreate:seg %p addr %p pp %p vp %p offset %llx",
1466 1463 seg, addr, pp, vp, off);
1467 1464 }
1468 1465
1469 1466 return (newpage);
1470 1467 }
1471 1468
1472 1469 void
1473 1470 segmap_pageunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
1474 1471 {
1475 1472 struct smap *smp;
1476 1473 ushort_t bitmask;
1477 1474 page_t *pp;
1478 1475 struct vnode *vp;
1479 1476 u_offset_t off;
1480 1477 caddr_t eaddr;
1481 1478 kmutex_t *smtx;
1482 1479
1483 1480 ASSERT(seg->s_as == &kas);
1484 1481
1485 1482 eaddr = addr + len;
1486 1483 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1487 1484
1488 1485 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1489 1486 /*
1490 1487 * Pages are successfully prefaulted and locked in
1491 1488 * segmap_getmapflt and can't be unlocked until
1492 1489 * segmap_release, so no pages or hat mappings have
1493 1490 * to be unlocked at this point.
1494 1491 */
1495 1492 #ifdef DEBUG
1496 1493 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1497 1494 panic("segmap_pageunlock: smap not found "
1498 1495 "for addr %p", (void *)addr);
1499 1496 /*NOTREACHED*/
1500 1497 }
1501 1498
1502 1499 ASSERT(smp->sm_refcnt > 0);
1503 1500 mutex_exit(SMAPMTX(smp));
1504 1501 #endif
1505 1502 return;
1506 1503 }
1507 1504
1508 1505 smp = GET_SMAP(seg, addr);
1509 1506 smtx = SMAPMTX(smp);
1510 1507
1511 1508 ASSERT(smp->sm_refcnt > 0);
1512 1509
1513 1510 vp = smp->sm_vp;
1514 1511 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1515 1512
1516 1513 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1517 1514 bitmask = SMAP_BIT_MASK((int)(off - smp->sm_off) >> PAGESHIFT);
1518 1515
1519 1516 /*
1520 1517 * Large Files: Following assertion is to verify
1521 1518 * the correctness of the cast to (int) above.
1522 1519 */
1523 1520 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1524 1521
1525 1522 /*
1526 1523 * If the bit corresponding to "off" is set,
1527 1524 * clear this bit in the bitmap, unlock translations,
1528 1525 * and release the "exclusive" lock on the page.
1529 1526 */
1530 1527 if (smp->sm_bitmap & bitmask) {
1531 1528 mutex_enter(smtx);
1532 1529 smp->sm_bitmap &= ~bitmask;
1533 1530 mutex_exit(smtx);
1534 1531
1535 1532 hat_unlock(kas.a_hat, addr, PAGESIZE);
1536 1533
1537 1534 /*
1538 1535 * Use page_find() instead of page_lookup() to
1539 1536 * find the page since we know that it has
1540 1537 * "exclusive" lock.
1541 1538 */
1542 1539 pp = page_find(vp, off);
1543 1540 if (pp == NULL) {
1544 1541 panic("segmap_pageunlock: page not found");
1545 1542 /*NOTREACHED*/
1546 1543 }
1547 1544 if (rw == S_WRITE) {
1548 1545 hat_setrefmod(pp);
1549 1546 } else if (rw != S_OTHER) {
1550 1547 hat_setref(pp);
1551 1548 }
1552 1549
1553 1550 page_unlock(pp);
1554 1551 }
1555 1552 }
1556 1553 }
1557 1554
1558 1555 caddr_t
1559 1556 segmap_getmap(struct seg *seg, struct vnode *vp, u_offset_t off)
1560 1557 {
1561 1558 return (segmap_getmapflt(seg, vp, off, MAXBSIZE, 0, S_OTHER));
1562 1559 }
1563 1560
1564 1561 /*
1565 1562 * This is the magic virtual address that offset 0 of an ELF
1566 1563 * file gets mapped to in user space. This is used to pick
1567 1564 * the vac color on the freelist.
1568 1565 */
1569 1566 #define ELF_OFFZERO_VA (0x10000)
1570 1567 /*
1571 1568 * segmap_getmap allocates a MAXBSIZE big slot to map the vnode vp
1572 1569 * in the range <off, off + len). off doesn't need to be MAXBSIZE aligned.
1573 1570 * The return address is always MAXBSIZE aligned.
1574 1571 *
1575 1572 * If forcefault is nonzero and the MMU translations haven't yet been created,
1576 1573 * segmap_getmap will call segmap_fault(..., F_INVAL, rw) to create them.
1577 1574 */
1578 1575 caddr_t
1579 1576 segmap_getmapflt(
1580 1577 struct seg *seg,
1581 1578 struct vnode *vp,
1582 1579 u_offset_t off,
1583 1580 size_t len,
1584 1581 int forcefault,
1585 1582 enum seg_rw rw)
1586 1583 {
1587 1584 struct smap *smp, *nsmp;
1588 1585 extern struct vnode *common_specvp();
1589 1586 caddr_t baseaddr; /* MAXBSIZE aligned */
1590 1587 u_offset_t baseoff;
1591 1588 int newslot;
1592 1589 caddr_t vaddr;
1593 1590 int color, hashid;
1594 1591 kmutex_t *hashmtx, *smapmtx;
1595 1592 struct smfree *sm;
1596 1593 page_t *pp;
1597 1594 struct kpme *kpme;
1598 1595 uint_t prot;
1599 1596 caddr_t base;
1600 1597 page_t *pl[MAXPPB + 1];
1601 1598 int error;
1602 1599 int is_kpm = 1;
1603 1600
1604 1601 ASSERT(seg->s_as == &kas);
1605 1602 ASSERT(seg == segkmap);
1606 1603
1607 1604 baseoff = off & (offset_t)MAXBMASK;
1608 1605 if (off + len > baseoff + MAXBSIZE) {
1609 1606 panic("segmap_getmap bad len");
1610 1607 /*NOTREACHED*/
1611 1608 }
1612 1609
1613 1610 /*
1614 1611 * If this is a block device we have to be sure to use the
1615 1612 * "common" block device vnode for the mapping.
1616 1613 */
1617 1614 if (vp->v_type == VBLK)
1618 1615 vp = common_specvp(vp);
1619 1616
1620 1617 smd_cpu[CPU->cpu_seqid].scpu.scpu_getmap++;
1621 1618
1622 1619 if (segmap_kpm == 0 ||
1623 1620 (forcefault == SM_PAGECREATE && rw != S_WRITE)) {
1624 1621 is_kpm = 0;
1625 1622 }
1626 1623
1627 1624 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1628 1625 hashmtx = SHASHMTX(hashid);
1629 1626
1630 1627 retry_hash:
1631 1628 mutex_enter(hashmtx);
1632 1629 for (smp = smd_hash[hashid].sh_hash_list;
1633 1630 smp != NULL; smp = smp->sm_hash)
1634 1631 if (smp->sm_vp == vp && smp->sm_off == baseoff)
1635 1632 break;
1636 1633 mutex_exit(hashmtx);
1637 1634
1638 1635 vrfy_smp:
1639 1636 if (smp != NULL) {
1640 1637
1641 1638 ASSERT(vp->v_count != 0);
1642 1639
1643 1640 /*
1644 1641 * Get smap lock and recheck its tag. The hash lock
1645 1642 * is dropped since the hash is based on (vp, off)
1646 1643 * and (vp, off) won't change when we have smap mtx.
1647 1644 */
1648 1645 smapmtx = SMAPMTX(smp);
1649 1646 mutex_enter(smapmtx);
1650 1647 if (smp->sm_vp != vp || smp->sm_off != baseoff) {
1651 1648 mutex_exit(smapmtx);
1652 1649 goto retry_hash;
1653 1650 }
1654 1651
1655 1652 if (smp->sm_refcnt == 0) {
1656 1653
1657 1654 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reclaim++;
1658 1655
1659 1656 /*
1660 1657 * Could still be on the free list. However, this
1661 1658 * could also be an smp that is transitioning from
1662 1659 * the free list when we have too much contention
1663 1660 * for the smapmtx's. In this case, we have an
1664 1661 * unlocked smp that is not on the free list any
1665 1662 * longer, but still has a 0 refcnt. The only way
1666 1663 * to be sure is to check the freelist pointers.
1667 1664 * Since we now have the smapmtx, we are guaranteed
1668 1665 * that the (vp, off) won't change, so we are safe
1669 1666 * to reclaim it. get_free_smp() knows that this
1670 1667 * can happen, and it will check the refcnt.
1671 1668 */
1672 1669
1673 1670 if ((smp->sm_next != NULL)) {
1674 1671 struct sm_freeq *freeq;
1675 1672
1676 1673 ASSERT(smp->sm_prev != NULL);
1677 1674 sm = &smd_free[smp->sm_free_ndx];
1678 1675
1679 1676 if (smp->sm_flags & SM_QNDX_ZERO)
1680 1677 freeq = &sm->sm_freeq[0];
1681 1678 else
1682 1679 freeq = &sm->sm_freeq[1];
1683 1680
1684 1681 mutex_enter(&freeq->smq_mtx);
1685 1682 if (freeq->smq_free != smp) {
1686 1683 /*
1687 1684 * fastpath normal case
1688 1685 */
1689 1686 smp->sm_prev->sm_next = smp->sm_next;
1690 1687 smp->sm_next->sm_prev = smp->sm_prev;
1691 1688 } else if (smp == smp->sm_next) {
1692 1689 /*
1693 1690 * Taking the last smap on freelist
1694 1691 */
1695 1692 freeq->smq_free = NULL;
1696 1693 } else {
1697 1694 /*
1698 1695 * Reclaiming 1st smap on list
1699 1696 */
1700 1697 freeq->smq_free = smp->sm_next;
1701 1698 smp->sm_prev->sm_next = smp->sm_next;
1702 1699 smp->sm_next->sm_prev = smp->sm_prev;
1703 1700 }
1704 1701 mutex_exit(&freeq->smq_mtx);
1705 1702 smp->sm_prev = smp->sm_next = NULL;
1706 1703 } else {
1707 1704 ASSERT(smp->sm_prev == NULL);
1708 1705 segmapcnt.smp_stolen.value.ul++;
1709 1706 }
1710 1707
1711 1708 } else {
1712 1709 segmapcnt.smp_get_use.value.ul++;
1713 1710 }
1714 1711 smp->sm_refcnt++; /* another user */
1715 1712
1716 1713 /*
1717 1714 * We don't invoke segmap_fault via TLB miss, so we set ref
1718 1715 * and mod bits in advance. For S_OTHER we set them in
1719 1716 * segmap_fault F_SOFTUNLOCK.
1720 1717 */
1721 1718 if (is_kpm) {
1722 1719 if (rw == S_WRITE) {
1723 1720 smp->sm_flags |= SM_WRITE_DATA;
1724 1721 } else if (rw == S_READ) {
1725 1722 smp->sm_flags |= SM_READ_DATA;
1726 1723 }
1727 1724 }
1728 1725 mutex_exit(smapmtx);
1729 1726
1730 1727 newslot = 0;
1731 1728 } else {
1732 1729
1733 1730 uint32_t free_ndx, *free_ndxp;
1734 1731 union segmap_cpu *scpu;
1735 1732
1736 1733 /*
1737 1734 * On a PAC machine or a machine with anti-alias
1738 1735 * hardware, smd_colormsk will be zero.
1739 1736 *
1740 1737 * On a VAC machine- pick color by offset in the file
1741 1738 * so we won't get VAC conflicts on elf files.
1742 1739 * On data files, color does not matter but we
1743 1740 * don't know what kind of file it is so we always
1744 1741 * pick color by offset. This causes color
1745 1742 * corresponding to file offset zero to be used more
1746 1743 * heavily.
1747 1744 */
1748 1745 color = (baseoff >> MAXBSHIFT) & smd_colormsk;
1749 1746 scpu = smd_cpu+CPU->cpu_seqid;
1750 1747 free_ndxp = &scpu->scpu.scpu_free_ndx[color];
1751 1748 free_ndx = (*free_ndxp += smd_ncolor) & smd_freemsk;
1752 1749 #ifdef DEBUG
1753 1750 colors_used[free_ndx]++;
1754 1751 #endif /* DEBUG */
1755 1752
1756 1753 /*
1757 1754 * Get a locked smp slot from the free list.
1758 1755 */
1759 1756 smp = get_free_smp(free_ndx);
1760 1757 smapmtx = SMAPMTX(smp);
1761 1758
1762 1759 ASSERT(smp->sm_vp == NULL);
1763 1760
1764 1761 if ((nsmp = segmap_hashin(smp, vp, baseoff, hashid)) != NULL) {
1765 1762 /*
1766 1763 * Failed to hashin, there exists one now.
1767 1764 * Return the smp we just allocated.
1768 1765 */
1769 1766 segmap_smapadd(smp);
1770 1767 mutex_exit(smapmtx);
1771 1768
1772 1769 smp = nsmp;
1773 1770 goto vrfy_smp;
1774 1771 }
1775 1772 smp->sm_refcnt++; /* another user */
1776 1773
1777 1774 /*
1778 1775 * We don't invoke segmap_fault via TLB miss, so we set ref
1779 1776 * and mod bits in advance. For S_OTHER we set them in
1780 1777 * segmap_fault F_SOFTUNLOCK.
1781 1778 */
1782 1779 if (is_kpm) {
1783 1780 if (rw == S_WRITE) {
1784 1781 smp->sm_flags |= SM_WRITE_DATA;
1785 1782 } else if (rw == S_READ) {
1786 1783 smp->sm_flags |= SM_READ_DATA;
1787 1784 }
1788 1785 }
1789 1786 mutex_exit(smapmtx);
1790 1787
1791 1788 newslot = 1;
1792 1789 }
1793 1790
1794 1791 if (!is_kpm)
1795 1792 goto use_segmap_range;
1796 1793
1797 1794 /*
1798 1795 * Use segkpm
1799 1796 */
1800 1797 /* Lint directive required until 6746211 is fixed */
1801 1798 /*CONSTCOND*/
1802 1799 ASSERT(PAGESIZE == MAXBSIZE);
1803 1800
1804 1801 /*
1805 1802 * remember the last smp faulted on this cpu.
1806 1803 */
1807 1804 (smd_cpu+CPU->cpu_seqid)->scpu.scpu_last_smap = smp;
1808 1805
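	/*
	 * SM_PAGECREATE callers intend to initialize the whole block
	 * themselves, so create the kpm mapping without reading the
	 * page in from the filesystem.
	 */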
1809 1806 if (forcefault == SM_PAGECREATE) {
1810 1807 baseaddr = segmap_pagecreate_kpm(seg, vp, baseoff, smp, rw);
1811 1808 return (baseaddr);
1812 1809 }
1813 1810
1814 1811 if (newslot == 0 &&
1815 1812 (pp = GET_KPME(smp)->kpe_page) != NULL) {
1816 1813
1817 1814 /* fastpath */
1818 1815 switch (rw) {
1819 1816 case S_READ:
1820 1817 case S_WRITE:
1821 1818 if (page_trylock(pp, SE_SHARED)) {
1822 1819 if (PP_ISFREE(pp) ||
1823 1820 !(pp->p_vnode == vp &&
1824 1821 pp->p_offset == baseoff)) {
1825 1822 page_unlock(pp);
1826 1823 pp = page_lookup(vp, baseoff,
1827 1824 SE_SHARED);
1828 1825 }
1829 1826 } else {
1830 1827 pp = page_lookup(vp, baseoff, SE_SHARED);
1831 1828 }
1832 1829
1833 1830 if (pp == NULL) {
1834 1831 ASSERT(GET_KPME(smp)->kpe_page == NULL);
1835 1832 break;
1836 1833 }
1837 1834
1838 1835 if (rw == S_WRITE &&
1839 1836 hat_page_getattr(pp, P_MOD | P_REF) !=
1840 1837 (P_MOD | P_REF)) {
1841 1838 page_unlock(pp);
1842 1839 break;
1843 1840 }
1844 1841
1845 1842 /*
1846 1843 * We have the p_selock as reader, grab_smp
1847 1844 * can't hit us, we have bumped the smap
1848 1845 * refcnt and hat_pageunload needs the
1849 1846 * p_selock exclusive.
1850 1847 */
1851 1848 kpme = GET_KPME(smp);
1852 1849 if (kpme->kpe_page == pp) {
1853 1850 baseaddr = hat_kpm_page2va(pp, 0);
1854 1851 } else if (kpme->kpe_page == NULL) {
1855 1852 baseaddr = hat_kpm_mapin(pp, kpme);
1856 1853 } else {
1857 1854 panic("segmap_getmapflt: stale "
1858 1855 "kpme page, kpme %p", (void *)kpme);
1859 1856 /*NOTREACHED*/
1860 1857 }
1861 1858
1862 1859 /*
1863 1860 * We don't invoke segmap_fault via TLB miss,
1864 1861 * so we set ref and mod bits in advance.
1865 1862 			 * For S_OTHER we set them in segmap_fault
1866 1863 * F_SOFTUNLOCK.
1867 1864 */
1868 1865 if (rw == S_READ && !hat_isref(pp))
1869 1866 hat_setref(pp);
1870 1867
1871 1868 return (baseaddr);
1872 1869 default:
1873 1870 break;
1874 1871 }
1875 1872 }
1876 1873
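	/*
	 * No usable page was found on the fastpath above; ask the
	 * filesystem for it, passing a segkpm virtual address down
	 * to VOP_GETPAGE.
	 */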
1877 1874 base = segkpm_create_va(baseoff);
1878 1875 error = VOP_GETPAGE(vp, (offset_t)baseoff, len, &prot, pl, MAXBSIZE,
1879 1876 seg, base, rw, CRED(), NULL);
1880 1877
1881 1878 pp = pl[0];
1882 1879 if (error || pp == NULL) {
1883 1880 /*
1884 1881 * Use segmap address slot and let segmap_fault deal
1885 1882 * with the error cases. There is no error return
1886 1883 * possible here.
1887 1884 */
1888 1885 goto use_segmap_range;
1889 1886 }
1890 1887
1891 1888 ASSERT(pl[1] == NULL);
1892 1889
1893 1890 /*
1894 1891 * When prot is not returned w/ PROT_ALL the returned pages
1895 1892 * are not backed by fs blocks. For most of the segmap users
1896 1893 * this is no problem, they don't write to the pages in the
1897 1894 * same request and therefore don't rely on a following
1898 1895 * trap driven segmap_fault. With SM_LOCKPROTO users it
1899 1896 	 * is safer to use segkmap addresses to allow protection
1900 1897 	 * faults to be handled by segmap_fault.
1901 1898 */
1902 1899 if (prot != PROT_ALL && forcefault == SM_LOCKPROTO) {
1903 1900 /*
1904 1901 * Use segmap address slot and let segmap_fault
1905 1902 * do the error return.
1906 1903 */
1907 1904 ASSERT(rw != S_WRITE);
1908 1905 ASSERT(PAGE_LOCKED(pp));
1909 1906 page_unlock(pp);
1910 1907 forcefault = 0;
1911 1908 goto use_segmap_range;
1912 1909 }
1913 1910
1914 1911 /*
1915 1912 * We have the p_selock as reader, grab_smp can't hit us, we
1916 1913 * have bumped the smap refcnt and hat_pageunload needs the
1917 1914 * p_selock exclusive.
1918 1915 */
1919 1916 kpme = GET_KPME(smp);
1920 1917 if (kpme->kpe_page == pp) {
1921 1918 baseaddr = hat_kpm_page2va(pp, 0);
1922 1919 } else if (kpme->kpe_page == NULL) {
1923 1920 baseaddr = hat_kpm_mapin(pp, kpme);
1924 1921 } else {
1925 1922 panic("segmap_getmapflt: stale kpme page after "
1926 1923 "VOP_GETPAGE, kpme %p", (void *)kpme);
1927 1924 /*NOTREACHED*/
1928 1925 }
1929 1926
1930 1927 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
1931 1928
1932 1929 return (baseaddr);
1933 1930
1934 1931
1935 1932 use_segmap_range:
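	/*
	 * Map the request through the slot's window in segkmap; the
	 * slot index (smp - smd_smap) determines its virtual address.
	 */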
1936 1933 baseaddr = seg->s_base + ((smp - smd_smap) * MAXBSIZE);
1937 1934 TRACE_4(TR_FAC_VM, TR_SEGMAP_GETMAP,
1938 1935 "segmap_getmap:seg %p addr %p vp %p offset %llx",
1939 1936 seg, baseaddr, vp, baseoff);
1940 1937
1941 1938 /*
1942 1939 * Prefault the translations
1943 1940 */
1944 1941 vaddr = baseaddr + (off - baseoff);
1945 1942 if (forcefault && (newslot || !hat_probe(kas.a_hat, vaddr))) {
1946 1943
1947 1944 caddr_t pgaddr = (caddr_t)((uintptr_t)vaddr &
1948 1945 (uintptr_t)PAGEMASK);
1949 1946
1950 1947 (void) segmap_fault(kas.a_hat, seg, pgaddr,
1951 1948 (vaddr + len - pgaddr + PAGESIZE - 1) & (uintptr_t)PAGEMASK,
1952 1949 F_INVAL, rw);
1953 1950 }
1954 1951
1955 1952 return (baseaddr);
1956 1953 }
1957 1954
1958 1955 int
1959 1956 segmap_release(struct seg *seg, caddr_t addr, uint_t flags)
1960 1957 {
1961 1958 struct smap *smp;
1962 1959 int error;
1963 1960 int bflags = 0;
1964 1961 struct vnode *vp;
1965 1962 u_offset_t offset;
1966 1963 kmutex_t *smtx;
1967 1964 int is_kpm = 0;
1968 1965 page_t *pp;
1969 1966
1970 1967 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1971 1968
1972 1969 if (((uintptr_t)addr & MAXBOFFSET) != 0) {
1973 1970 panic("segmap_release: addr %p not "
1974 1971 "MAXBSIZE aligned", (void *)addr);
1975 1972 /*NOTREACHED*/
1976 1973 }
1977 1974
1978 1975 if ((smp = get_smap_kpm(addr, &pp)) == NULL) {
1979 1976 panic("segmap_release: smap not found "
1980 1977 "for addr %p", (void *)addr);
1981 1978 /*NOTREACHED*/
1982 1979 }
1983 1980
1984 1981 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
1985 1982 "segmap_relmap:seg %p addr %p smp %p",
1986 1983 seg, addr, smp);
1987 1984
1988 1985 smtx = SMAPMTX(smp);
1989 1986
1990 1987 /*
1991 1988 * For compatibility reasons segmap_pagecreate_kpm sets this
1992 1989 * flag to allow a following segmap_pagecreate to return
1993 1990 		 * this as the "newpage" flag. When segmap_pagecreate is not
1994 1991 * called at all we clear it now.
1995 1992 */
1996 1993 smp->sm_flags &= ~SM_KPM_NEWPAGE;
1997 1994 is_kpm = 1;
1998 1995 if (smp->sm_flags & SM_WRITE_DATA) {
1999 1996 hat_setrefmod(pp);
2000 1997 } else if (smp->sm_flags & SM_READ_DATA) {
2001 1998 hat_setref(pp);
2002 1999 }
2003 2000 } else {
2004 2001 if (addr < seg->s_base || addr >= seg->s_base + seg->s_size ||
2005 2002 ((uintptr_t)addr & MAXBOFFSET) != 0) {
2006 2003 panic("segmap_release: bad addr %p", (void *)addr);
2007 2004 /*NOTREACHED*/
2008 2005 }
2009 2006 smp = GET_SMAP(seg, addr);
2010 2007
2011 2008 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
2012 2009 "segmap_relmap:seg %p addr %p smp %p",
2013 2010 seg, addr, smp);
2014 2011
2015 2012 smtx = SMAPMTX(smp);
2016 2013 mutex_enter(smtx);
2017 2014 smp->sm_flags |= SM_NOTKPM_RELEASED;
2018 2015 }
2019 2016
2020 2017 ASSERT(smp->sm_refcnt > 0);
2021 2018
2022 2019 /*
2023 2020 * Need to call VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2024 2021 * are set.
2025 2022 */
2026 2023 if ((flags & ~SM_DONTNEED) != 0) {
2027 2024 if (flags & SM_WRITE)
2028 2025 segmapcnt.smp_rel_write.value.ul++;
2029 2026 if (flags & SM_ASYNC) {
2030 2027 bflags |= B_ASYNC;
2031 2028 segmapcnt.smp_rel_async.value.ul++;
2032 2029 }
2033 2030 if (flags & SM_INVAL) {
2034 2031 bflags |= B_INVAL;
2035 2032 segmapcnt.smp_rel_abort.value.ul++;
2036 2033 }
2037 2034 if (flags & SM_DESTROY) {
2038 2035 bflags |= (B_INVAL|B_TRUNC);
2039 2036 segmapcnt.smp_rel_abort.value.ul++;
2040 2037 }
2041 2038 if (smp->sm_refcnt == 1) {
2042 2039 /*
2043 2040 * We only bother doing the FREE and DONTNEED flags
2044 2041 * if no one else is still referencing this mapping.
2045 2042 */
2046 2043 if (flags & SM_FREE) {
2047 2044 bflags |= B_FREE;
2048 2045 segmapcnt.smp_rel_free.value.ul++;
2049 2046 }
2050 2047 if (flags & SM_DONTNEED) {
2051 2048 bflags |= B_DONTNEED;
2052 2049 segmapcnt.smp_rel_dontneed.value.ul++;
2053 2050 }
2054 2051 }
2055 2052 } else {
2056 2053 smd_cpu[CPU->cpu_seqid].scpu.scpu_release++;
2057 2054 }
2058 2055
2059 2056 vp = smp->sm_vp;
2060 2057 offset = smp->sm_off;
2061 2058
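	/*
	 * Drop our hold on the slot.  On the last release the slot is
	 * returned to its free list, and its translations are torn
	 * down if SM_INVAL or SM_DESTROY was requested.
	 */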
2062 2059 if (--smp->sm_refcnt == 0) {
2063 2060
2064 2061 smp->sm_flags &= ~(SM_WRITE_DATA | SM_READ_DATA);
2065 2062
2066 2063 if (flags & (SM_INVAL|SM_DESTROY)) {
2067 2064 segmap_hashout(smp); /* remove map info */
2068 2065 if (is_kpm) {
2069 2066 hat_kpm_mapout(pp, GET_KPME(smp), addr);
2070 2067 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
2071 2068 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2072 2069 hat_unload(kas.a_hat, segkmap->s_base +
2073 2070 ((smp - smd_smap) * MAXBSIZE),
2074 2071 MAXBSIZE, HAT_UNLOAD);
2075 2072 }
2076 2073
2077 2074 } else {
2078 2075 if (segmap_kpm)
2079 2076 segkpm_mapout_validkpme(GET_KPME(smp));
2080 2077
2081 2078 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2082 2079 hat_unload(kas.a_hat, addr, MAXBSIZE,
2083 2080 HAT_UNLOAD);
2084 2081 }
2085 2082 }
2086 2083 segmap_smapadd(smp); /* add to free list */
2087 2084 }
2088 2085
2089 2086 mutex_exit(smtx);
2090 2087
2091 2088 if (is_kpm)
2092 2089 page_unlock(pp);
2093 2090 /*
2094 2091 * Now invoke VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2095 2092 * are set.
2096 2093 */
2097 2094 if ((flags & ~SM_DONTNEED) != 0) {
2098 2095 error = VOP_PUTPAGE(vp, offset, MAXBSIZE,
2099 2096 bflags, CRED(), NULL);
2100 2097 } else {
2101 2098 error = 0;
2102 2099 }
2103 2100
2104 2101 return (error);
2105 2102 }
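
/*
 * A rough usage sketch (editorial note, not part of this file): filesystem
 * read paths typically pair segmap_getmapflt() and segmap_release() along
 * these lines, where 'mapon' is the offset within the MAXBSIZE block
 * (off & MAXBOFFSET) and 'n' does not cross the block boundary:
 *
 *	base = segmap_getmapflt(segkmap, vp, off, n, 1, S_READ);
 *	error = uiomove(base + mapon, n, UIO_READ, uio);
 *	error = segmap_release(segkmap, base, 0);
 */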
2106 2103
2107 2104 /*
2108 2105 * Dump the pages belonging to this segmap segment.
2109 2106 */
2110 2107 static void
2111 2108 segmap_dump(struct seg *seg)
2112 2109 {
2113 2110 struct segmap_data *smd;
2114 2111 struct smap *smp, *smp_end;
2115 2112 page_t *pp;
2116 2113 pfn_t pfn;
2117 2114 u_offset_t off;
2118 2115 caddr_t addr;
2119 2116
2120 2117 smd = (struct segmap_data *)seg->s_data;
2121 2118 addr = seg->s_base;
2122 2119 for (smp = smd->smd_sm, smp_end = smp + smd->smd_npages;
2123 2120 smp < smp_end; smp++) {
2124 2121
2125 2122 if (smp->sm_refcnt) {
2126 2123 for (off = 0; off < MAXBSIZE; off += PAGESIZE) {
2127 2124 int we_own_it = 0;
2128 2125
2129 2126 /*
2130 2127 				 * If page_lookup_nowait() fails, the page
2131 2128 				 * either does not exist or is exclusively
2132 2129 				 * locked, so fall back to page_exists()
2133 2130 				 * to find out.
2134 2131 */
2135 2132 if ((pp = page_lookup_nowait(smp->sm_vp,
2136 2133 smp->sm_off + off, SE_SHARED)))
2137 2134 we_own_it = 1;
2138 2135 else
2139 2136 pp = page_exists(smp->sm_vp,
2140 2137 smp->sm_off + off);
2141 2138
2142 2139 if (pp) {
2143 2140 pfn = page_pptonum(pp);
2144 2141 dump_addpage(seg->s_as,
2145 2142 addr + off, pfn);
2146 2143 if (we_own_it)
2147 2144 page_unlock(pp);
2148 2145 }
2149 2146 dump_timeleft = dump_timeout;
2150 2147 }
2151 2148 }
2152 2149 addr += MAXBSIZE;
2153 2150 }
2154 2151 }
2155 2152
2156 2153 /*ARGSUSED*/
2157 2154 static int
2158 2155 segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
2159 2156 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2160 2157 {
2161 2158 return (ENOTSUP);
2162 2159 }
2163 2160
2164 2161 static int
2165 2162 segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2166 2163 {
2167 2164 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
2168 2165
2169 2166 memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
2170 2167 memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
2171 2168 return (0);
2172 -}
2173 -
2174 -/*ARGSUSED*/
2175 -static lgrp_mem_policy_info_t *
2176 -segmap_getpolicy(struct seg *seg, caddr_t addr)
2177 -{
2178 - return (NULL);
2179 2169 }
2180 2170
2181 2171 /*ARGSUSED*/
2182 2172 static int
2183 2173 segmap_capable(struct seg *seg, segcapability_t capability)
2184 2174 {
2185 2175 return (0);
2186 2176 }
2187 2177
2188 2178
2189 2179 #ifdef SEGKPM_SUPPORT
2190 2180
2191 2181 /*
2192 2182 * segkpm support routines
2193 2183 */
2194 2184
2195 2185 static caddr_t
2196 2186 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2197 2187 struct smap *smp, enum seg_rw rw)
2198 2188 {
2199 2189 caddr_t base;
2200 2190 page_t *pp;
2201 2191 int newpage = 0;
2202 2192 struct kpme *kpme;
2203 2193
2204 2194 ASSERT(smp->sm_refcnt > 0);
2205 2195
2206 2196 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
2207 2197 kmutex_t *smtx;
2208 2198
2209 2199 base = segkpm_create_va(off);
2210 2200
2211 2201 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT,
2212 2202 seg, base)) == NULL) {
2213 2203 panic("segmap_pagecreate_kpm: "
2214 2204 "page_create failed");
2215 2205 /*NOTREACHED*/
2216 2206 }
2217 2207
2218 2208 newpage = 1;
2219 2209 page_io_unlock(pp);
2220 2210 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
2221 2211
2222 2212 /*
2223 2213 * Mark this here until the following segmap_pagecreate
2224 2214 * or segmap_release.
2225 2215 */
2226 2216 smtx = SMAPMTX(smp);
2227 2217 mutex_enter(smtx);
2228 2218 smp->sm_flags |= SM_KPM_NEWPAGE;
2229 2219 mutex_exit(smtx);
2230 2220 }
2231 2221
2232 2222 kpme = GET_KPME(smp);
2233 2223 if (!newpage && kpme->kpe_page == pp)
2234 2224 base = hat_kpm_page2va(pp, 0);
2235 2225 else
2236 2226 base = hat_kpm_mapin(pp, kpme);
2237 2227
2238 2228 /*
2239 2229 * FS code may decide not to call segmap_pagecreate and we
2240 2230 * don't invoke segmap_fault via TLB miss, so we have to set
2241 2231 * ref and mod bits in advance.
2242 2232 */
2243 2233 if (rw == S_WRITE) {
2244 2234 hat_setrefmod(pp);
2245 2235 } else {
2246 2236 ASSERT(rw == S_READ);
2247 2237 hat_setref(pp);
2248 2238 }
2249 2239
2250 2240 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
2251 2241
2252 2242 return (base);
2253 2243 }
2254 2244
2255 2245 /*
2256 2246 * Find the smap structure corresponding to the
2257 2247 * KPM addr and return it locked.
2258 2248 */
2259 2249 struct smap *
2260 2250 get_smap_kpm(caddr_t addr, page_t **ppp)
2261 2251 {
2262 2252 struct smap *smp;
2263 2253 struct vnode *vp;
2264 2254 u_offset_t offset;
2265 2255 caddr_t baseaddr = (caddr_t)((uintptr_t)addr & MAXBMASK);
2266 2256 int hashid;
2267 2257 kmutex_t *hashmtx;
2268 2258 page_t *pp;
2269 2259 union segmap_cpu *scpu;
2270 2260
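	/*
	 * A segkpm address identifies its physical page directly;
	 * recover the page and, from it, the (vp, offset) identity
	 * of the mapping.
	 */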
2271 2261 pp = hat_kpm_vaddr2page(baseaddr);
2272 2262
2273 2263 ASSERT(pp && !PP_ISFREE(pp));
2274 2264 ASSERT(PAGE_LOCKED(pp));
2275 2265 ASSERT(((uintptr_t)pp->p_offset & MAXBOFFSET) == 0);
2276 2266
2277 2267 vp = pp->p_vnode;
2278 2268 offset = pp->p_offset;
2279 2269 ASSERT(vp != NULL);
2280 2270
2281 2271 /*
2282 2272 * Assume the last smap used on this cpu is the one needed.
2283 2273 */
2284 2274 scpu = smd_cpu+CPU->cpu_seqid;
2285 2275 smp = scpu->scpu.scpu_last_smap;
2286 2276 mutex_enter(&smp->sm_mtx);
2287 2277 if (smp->sm_vp == vp && smp->sm_off == offset) {
2288 2278 ASSERT(smp->sm_refcnt > 0);
2289 2279 } else {
2290 2280 /*
2291 2281 * Assumption wrong, find the smap on the hash chain.
2292 2282 */
2293 2283 mutex_exit(&smp->sm_mtx);
2294 2284 SMAP_HASHFUNC(vp, offset, hashid); /* macro assigns hashid */
2295 2285 hashmtx = SHASHMTX(hashid);
2296 2286
2297 2287 mutex_enter(hashmtx);
2298 2288 smp = smd_hash[hashid].sh_hash_list;
2299 2289 for (; smp != NULL; smp = smp->sm_hash) {
2300 2290 if (smp->sm_vp == vp && smp->sm_off == offset)
2301 2291 break;
2302 2292 }
2303 2293 mutex_exit(hashmtx);
2304 2294 if (smp) {
2305 2295 mutex_enter(&smp->sm_mtx);
2306 2296 ASSERT(smp->sm_vp == vp && smp->sm_off == offset);
2307 2297 }
2308 2298 }
2309 2299
2310 2300 if (ppp)
2311 2301 *ppp = smp ? pp : NULL;
2312 2302
2313 2303 return (smp);
2314 2304 }
2315 2305
2316 2306 #else /* SEGKPM_SUPPORT */
2317 2307
2318 2308 /* segkpm stubs */
2319 2309
2320 2310 /*ARGSUSED*/
2321 2311 static caddr_t
2322 2312 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2323 2313 struct smap *smp, enum seg_rw rw)
2324 2314 {
2325 2315 return (NULL);
2326 2316 }
2327 2317
2328 2318 /*ARGSUSED*/
2329 2319 struct smap *
2330 2320 get_smap_kpm(caddr_t addr, page_t **ppp)
2331 2321 {
2332 2322 return (NULL);
2333 2323 }
2334 2324
2335 2325 #endif /* SEGKPM_SUPPORT */