seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
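For context, the NULL check referenced above lives in the generic dispatch routine. A minimal sketch (assuming the wrapper in vm/seg.c looks roughly like this; see that file for the actual code):

    int
    segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
    {
            if (seg->s_ops->inherit == NULL)
                    return (ENOTSUP);

            return (seg->s_ops->inherit(seg, addr, len, op));
    }

With that guard in place, leaving .inherit out of segmap_ops behaves exactly like pointing it at seg_inherit_notsup, which is what this change relies on.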
--- old/usr/src/uts/common/vm/seg_map.c
+++ new/usr/src/uts/common/vm/seg_map.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29 /*
30 30 * Portions of this source code were derived from Berkeley 4.3 BSD
31 31 * under license from the Regents of the University of California.
32 32 */
33 33
34 34 /*
35 35 * VM - generic vnode mapping segment.
36 36 *
37 37 * The segmap driver is used only by the kernel to get faster (than seg_vn)
38 38 * mappings [lower routine overhead; more persistent cache] to random
37 37 * vnode/offsets. Note that the kernel may (and does) use seg_vn as well.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/param.h>
45 45 #include <sys/sysmacros.h>
46 46 #include <sys/buf.h>
47 47 #include <sys/systm.h>
48 48 #include <sys/vnode.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/cmn_err.h>
55 55 #include <sys/debug.h>
56 56 #include <sys/thread.h>
57 57 #include <sys/dumphdr.h>
58 58 #include <sys/bitmap.h>
59 59 #include <sys/lgrp.h>
60 60
61 61 #include <vm/seg_kmem.h>
62 62 #include <vm/hat.h>
63 63 #include <vm/as.h>
64 64 #include <vm/seg.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/seg_map.h>
67 67 #include <vm/page.h>
68 68 #include <vm/pvn.h>
69 69 #include <vm/rm.h>
70 70
71 71 /*
72 72 * Private seg op routines.
73 73 */
74 74 static void segmap_free(struct seg *seg);
75 75 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
76 76 size_t len, enum fault_type type, enum seg_rw rw);
77 77 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
78 78 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
79 79 uint_t prot);
80 80 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
81 81 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
82 82 uint_t *protv);
83 83 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr);
84 84 static int segmap_gettype(struct seg *seg, caddr_t addr);
85 85 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
86 86 static void segmap_dump(struct seg *seg);
87 87 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
88 88 struct page ***ppp, enum lock_type type,
89 89 enum seg_rw rw);
90 90 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
91 91 static lgrp_mem_policy_info_t *segmap_getpolicy(struct seg *seg,
92 92 caddr_t addr);
93 93 static int segmap_capable(struct seg *seg, segcapability_t capability);
94 94
95 95 /* segkpm support */
96 96 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
97 97 struct smap *, enum seg_rw);
98 98 struct smap *get_smap_kpm(caddr_t, page_t **);
99 99
100 100 static struct seg_ops segmap_ops = {
101 101 .free = segmap_free,
102 102 .fault = segmap_fault,
103 103 .faulta = segmap_faulta,
104 104 .checkprot = segmap_checkprot,
105 105 .kluster = segmap_kluster,
106 106 .getprot = segmap_getprot,
107 107 .getoffset = segmap_getoffset,
108 108 .gettype = segmap_gettype,
109 109 .getvp = segmap_getvp,
110 110 .dump = segmap_dump,
111 111 .pagelock = segmap_pagelock,
112 112 .getmemid = segmap_getmemid,
113 113 .getpolicy = segmap_getpolicy,
114 114 .capable = segmap_capable,
115 - .inherit = seg_inherit_notsup,
116 115 };
117 116
118 117 /*
119 118 * Private segmap routines.
120 119 */
121 120 static void segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
122 121 size_t len, enum seg_rw rw, struct smap *smp);
123 122 static void segmap_smapadd(struct smap *smp);
124 123 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
125 124 u_offset_t off, int hashid);
126 125 static void segmap_hashout(struct smap *smp);
127 126
128 127
129 128 /*
130 129 * Statistics for segmap operations.
131 130 *
132 131 * No explicit locking to protect these stats.
133 132 */
134 133 struct segmapcnt segmapcnt = {
135 134 { "fault", KSTAT_DATA_ULONG },
136 135 { "faulta", KSTAT_DATA_ULONG },
137 136 { "getmap", KSTAT_DATA_ULONG },
138 137 { "get_use", KSTAT_DATA_ULONG },
139 138 { "get_reclaim", KSTAT_DATA_ULONG },
140 139 { "get_reuse", KSTAT_DATA_ULONG },
141 140 { "get_unused", KSTAT_DATA_ULONG },
142 141 { "get_nofree", KSTAT_DATA_ULONG },
143 142 { "rel_async", KSTAT_DATA_ULONG },
144 143 { "rel_write", KSTAT_DATA_ULONG },
145 144 { "rel_free", KSTAT_DATA_ULONG },
146 145 { "rel_abort", KSTAT_DATA_ULONG },
147 146 { "rel_dontneed", KSTAT_DATA_ULONG },
148 147 { "release", KSTAT_DATA_ULONG },
149 148 { "pagecreate", KSTAT_DATA_ULONG },
150 149 { "free_notfree", KSTAT_DATA_ULONG },
151 150 { "free_dirty", KSTAT_DATA_ULONG },
152 151 { "free", KSTAT_DATA_ULONG },
153 152 { "stolen", KSTAT_DATA_ULONG },
154 153 { "get_nomtx", KSTAT_DATA_ULONG }
155 154 };
156 155
157 156 kstat_named_t *segmapcnt_ptr = (kstat_named_t *)&segmapcnt;
158 157 uint_t segmapcnt_ndata = sizeof (segmapcnt) / sizeof (kstat_named_t);
159 158
160 159 /*
161 160 * Return number of map pages in segment.
162 161 */
163 162 #define MAP_PAGES(seg) ((seg)->s_size >> MAXBSHIFT)
164 163
165 164 /*
166 165 * Translate addr into smap number within segment.
167 166 */
168 167 #define MAP_PAGE(seg, addr) (((addr) - (seg)->s_base) >> MAXBSHIFT)
169 168
170 169 /*
171 170 * Translate addr in seg into struct smap pointer.
172 171 */
173 172 #define GET_SMAP(seg, addr) \
174 173 &(((struct segmap_data *)((seg)->s_data))->smd_sm[MAP_PAGE(seg, addr)])
175 174
176 175 /*
177 176 * Bit in map (16 bit bitmap).
178 177 */
179 178 #define SMAP_BIT_MASK(bitindex) (1 << ((bitindex) & 0xf))
180 179
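A worked example of the translation macros above (assuming the usual MAXBSIZE of 8192, i.e. MAXBSHIFT == 13, and 4K pages; both values are platform-defined, so treat the numbers as illustrative):

    #include <stdio.h>
    #include <stddef.h>

    #define MAXBSHIFT   13                  /* assumed: MAXBSIZE == 8192 */
    #define PAGESHIFT   12                  /* assumed: PAGESIZE == 4096 */
    #define MAXBOFFSET  ((1 << MAXBSHIFT) - 1)

    int
    main(void)
    {
            size_t seg_size = 0x10000000UL;  /* a 256MB segkmap */
            size_t addr_off = 0x5000;        /* addr - s_base */

            /* MAP_PAGES: 0x10000000 >> 13 == 32768 smap slots */
            printf("%zu slots\n", seg_size >> MAXBSHIFT);
            /* MAP_PAGE: 0x5000 >> 13 == slot 2 */
            printf("slot %zu\n", addr_off >> MAXBSHIFT);
            /* 2nd 4K page inside the 8K slot: SMAP_BIT_MASK(1) == 0x2 */
            printf("bit 0x%x\n",
                1 << (((addr_off & MAXBOFFSET) >> PAGESHIFT) & 0xf));
            return (0);
    }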
181 180 static int smd_colormsk = 0;
182 181 static int smd_ncolor = 0;
183 182 static int smd_nfree = 0;
184 183 static int smd_freemsk = 0;
185 184 #ifdef DEBUG
186 185 static int *colors_used;
187 186 #endif
188 187 static struct smap *smd_smap;
189 188 static struct smaphash *smd_hash;
190 189 #ifdef SEGMAP_HASHSTATS
191 190 static unsigned int *smd_hash_len;
192 191 #endif
193 192 static struct smfree *smd_free;
194 193 static ulong_t smd_hashmsk = 0;
195 194
196 195 #define SEGMAP_MAXCOLOR 2
197 196 #define SEGMAP_CACHE_PAD 64
198 197
199 198 union segmap_cpu {
200 199 struct {
201 200 uint32_t scpu_free_ndx[SEGMAP_MAXCOLOR];
202 201 struct smap *scpu_last_smap;
203 202 ulong_t scpu_getmap;
204 203 ulong_t scpu_release;
205 204 ulong_t scpu_get_reclaim;
206 205 ulong_t scpu_fault;
207 206 ulong_t scpu_pagecreate;
208 207 ulong_t scpu_get_reuse;
209 208 } scpu;
210 209 char scpu_pad[SEGMAP_CACHE_PAD];
211 210 };
212 211 static union segmap_cpu *smd_cpu;
213 212
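The union above pads each CPU's counter block to SEGMAP_CACHE_PAD bytes so that counters updated on different CPUs never share a cache line. A user-space analogue of the same trick (hypothetical code, not from this file; the reader-side sum mirrors segmap_kstat_update() below):

    #define CACHE_PAD   64                  /* assumed cache-line size */

    union percpu_ctr {
            struct {
                    unsigned long getmap;
                    unsigned long release;
            } c;
            char pad[CACHE_PAD];    /* one cache line per CPU's block */
    };

    static union percpu_ctr ctrs[64];       /* one entry per CPU */

    static unsigned long
    sum_getmap(int ncpus)
    {
            unsigned long total = 0;
            int i;

            for (i = 0; i < ncpus; i++)
                    total += ctrs[i].c.getmap;  /* unlocked is fine for stats */
            return (total);
    }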
214 213 /*
215 214 * There are three locks in seg_map:
216 215 * - per freelist mutexes
217 216 * - per hashchain mutexes
218 217 * - per smap mutexes
219 218 *
220 219 * The lock ordering is to get the smap mutex to lock down the slot
221 220 * first then the hash lock (for hash in/out (vp, off) list) or the
222 221 * freelist lock to put the slot back on the free list.
223 222 *
224 223 * The hash search is done by only holding the hashchain lock, when a wanted
225 224 * slot is found, we drop the hashchain lock then lock the slot so there
226 225 * is no overlapping of hashchain and smap locks. After the slot is
227 226 * locked, we verify again if the slot is still what we are looking
228 227 * for.
229 228 *
230 229 * Allocation of a free slot is done by holding the freelist lock,
231 230 * then locking the smap slot at the head of the freelist. This is
232 231 * in reversed lock order so mutex_tryenter() is used.
233 232 *
234 233 * The smap lock protects all fields in smap structure except for
235 234 * the link fields for hash/free lists which are protected by
236 235 * hashchain and freelist locks.
237 236 */
238 237
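The reversed-order case in the comment above is the interesting one; a user-space analogue with pthreads (hypothetical names, sketch only):

    #include <pthread.h>

    static pthread_mutex_t freelist_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t slot_mtx = PTHREAD_MUTEX_INITIALIZER;

    /* Returns 0 with both locks held, -1 if the slot must be skipped. */
    static int
    grab_slot_reversed(void)
    {
            pthread_mutex_lock(&freelist_mtx);      /* reversed: list first */
            if (pthread_mutex_trylock(&slot_mtx) != 0) {
                    /* a holder following slot-then-list order; back off */
                    pthread_mutex_unlock(&freelist_mtx);
                    return (-1);
            }
            return (0);
    }

Because the trylock never blocks, the reversed acquisition cannot deadlock against threads taking the smap lock first.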
239 238 #define SHASHMTX(hashid) (&smd_hash[hashid].sh_mtx)
240 239
241 240 #define SMP2SMF(smp) (&smd_free[(smp - smd_smap) & smd_freemsk])
242 241 #define SMP2SMF_NDX(smp) (ushort_t)((smp - smd_smap) & smd_freemsk)
243 242
244 243 #define SMAPMTX(smp) (&smp->sm_mtx)
245 244
246 245 #define SMAP_HASHFUNC(vp, off, hashid) \
247 246 { \
248 247 hashid = ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
249 248 ((off) >> MAXBSHIFT)) & smd_hashmsk); \
250 249 }
251 250
252 251 /*
253 252 * The most frequently updated kstat counters are kept in the
254 253 * per cpu array to avoid hot cache blocks. The update function
255 254 * sums the cpu local counters to update the global counters.
256 255 */
257 256
258 257 /* ARGSUSED */
259 258 int
260 259 segmap_kstat_update(kstat_t *ksp, int rw)
261 260 {
262 261 int i;
263 262 ulong_t getmap, release, get_reclaim;
264 263 ulong_t fault, pagecreate, get_reuse;
265 264
266 265 if (rw == KSTAT_WRITE)
267 266 return (EACCES);
268 267 getmap = release = get_reclaim = (ulong_t)0;
269 268 fault = pagecreate = get_reuse = (ulong_t)0;
270 269 for (i = 0; i < max_ncpus; i++) {
271 270 getmap += smd_cpu[i].scpu.scpu_getmap;
272 271 release += smd_cpu[i].scpu.scpu_release;
273 272 get_reclaim += smd_cpu[i].scpu.scpu_get_reclaim;
274 273 fault += smd_cpu[i].scpu.scpu_fault;
275 274 pagecreate += smd_cpu[i].scpu.scpu_pagecreate;
276 275 get_reuse += smd_cpu[i].scpu.scpu_get_reuse;
277 276 }
278 277 segmapcnt.smp_getmap.value.ul = getmap;
279 278 segmapcnt.smp_release.value.ul = release;
280 279 segmapcnt.smp_get_reclaim.value.ul = get_reclaim;
281 280 segmapcnt.smp_fault.value.ul = fault;
282 281 segmapcnt.smp_pagecreate.value.ul = pagecreate;
283 282 segmapcnt.smp_get_reuse.value.ul = get_reuse;
284 283 return (0);
285 284 }
286 285
287 286 int
288 287 segmap_create(struct seg *seg, void *argsp)
289 288 {
290 289 struct segmap_data *smd;
291 290 struct smap *smp;
292 291 struct smfree *sm;
293 292 struct segmap_crargs *a = (struct segmap_crargs *)argsp;
294 293 struct smaphash *shashp;
295 294 union segmap_cpu *scpu;
296 295 long i, npages;
297 296 size_t hashsz;
298 297 uint_t nfreelist;
299 298 extern void prefetch_smap_w(void *);
300 299 extern int max_ncpus;
301 300
302 301 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
303 302
304 303 if (((uintptr_t)seg->s_base | seg->s_size) & MAXBOFFSET) {
305 304 panic("segkmap not MAXBSIZE aligned");
306 305 /*NOTREACHED*/
307 306 }
308 307
309 308 smd = kmem_zalloc(sizeof (struct segmap_data), KM_SLEEP);
310 309
311 310 seg->s_data = (void *)smd;
312 311 seg->s_ops = &segmap_ops;
313 312 smd->smd_prot = a->prot;
314 313
315 314 /*
316 315 * Scale the number of smap freelists to be
317 316 * proportional to max_ncpus * number of virtual colors.
318 317 * The caller can over-ride this scaling by providing
319 318 * a non-zero a->nfreelist argument.
320 319 */
321 320 nfreelist = a->nfreelist;
322 321 if (nfreelist == 0)
323 322 nfreelist = max_ncpus;
324 323 else if (nfreelist < 0 || nfreelist > 4 * max_ncpus) {
325 324 cmn_err(CE_WARN, "segmap_create: nfreelist out of range "
326 325 "%d, using %d", nfreelist, max_ncpus);
327 326 nfreelist = max_ncpus;
328 327 }
329 328 if (!ISP2(nfreelist)) {
330 329 /* round up nfreelist to the next power of two. */
331 330 nfreelist = 1 << (highbit(nfreelist));
332 331 }
333 332
334 333 /*
335 334 * Get the number of virtual colors - must be a power of 2.
336 335 */
337 336 if (a->shmsize)
338 337 smd_ncolor = a->shmsize >> MAXBSHIFT;
339 338 else
340 339 smd_ncolor = 1;
341 340 ASSERT((smd_ncolor & (smd_ncolor - 1)) == 0);
342 341 ASSERT(smd_ncolor <= SEGMAP_MAXCOLOR);
343 342 smd_colormsk = smd_ncolor - 1;
344 343 smd->smd_nfree = smd_nfree = smd_ncolor * nfreelist;
345 344 smd_freemsk = smd_nfree - 1;
346 345
347 346 /*
348 347 * Allocate and initialize the freelist headers.
349 348 * Note that sm_freeq[1] starts out as the release queue. This
350 349 * is known when the smap structures are initialized below.
351 350 */
352 351 smd_free = smd->smd_free =
353 352 kmem_zalloc(smd_nfree * sizeof (struct smfree), KM_SLEEP);
354 353 for (i = 0; i < smd_nfree; i++) {
355 354 sm = &smd->smd_free[i];
356 355 mutex_init(&sm->sm_freeq[0].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
357 356 mutex_init(&sm->sm_freeq[1].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
358 357 sm->sm_allocq = &sm->sm_freeq[0];
359 358 sm->sm_releq = &sm->sm_freeq[1];
360 359 }
361 360
362 361 /*
363 362 * Allocate and initialize the smap hash chain headers.
364 363 * Compute hash size rounding down to the next power of two.
365 364 */
366 365 npages = MAP_PAGES(seg);
367 366 smd->smd_npages = npages;
368 367 hashsz = npages / SMAP_HASHAVELEN;
369 368 hashsz = 1 << (highbit(hashsz)-1);
370 369 smd_hashmsk = hashsz - 1;
371 370 smd_hash = smd->smd_hash =
372 371 kmem_alloc(hashsz * sizeof (struct smaphash), KM_SLEEP);
373 372 #ifdef SEGMAP_HASHSTATS
374 373 smd_hash_len =
375 374 kmem_zalloc(hashsz * sizeof (unsigned int), KM_SLEEP);
376 375 #endif
377 376 for (i = 0, shashp = smd_hash; i < hashsz; i++, shashp++) {
378 377 shashp->sh_hash_list = NULL;
379 378 mutex_init(&shashp->sh_mtx, NULL, MUTEX_DEFAULT, NULL);
380 379 }
381 380
382 381 /*
383 382 * Allocate and initialize the smap structures.
384 383 * Link all slots onto the appropriate freelist.
385 384 * The smap array is large enough to affect boot time
386 385 * on large systems, so use memory prefetching and only
387 386 * go through the array 1 time. Inline an optimized version
388 387 * of segmap_smapadd to add structures to freelists with
389 388 * knowledge that no locks are needed here.
390 389 */
391 390 smd_smap = smd->smd_sm =
392 391 kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);
393 392
394 393 for (smp = &smd->smd_sm[MAP_PAGES(seg) - 1];
395 394 smp >= smd->smd_sm; smp--) {
396 395 struct smap *smpfreelist;
397 396 struct sm_freeq *releq;
398 397
399 398 prefetch_smap_w((char *)smp);
400 399
401 400 smp->sm_vp = NULL;
402 401 smp->sm_hash = NULL;
403 402 smp->sm_off = 0;
404 403 smp->sm_bitmap = 0;
405 404 smp->sm_refcnt = 0;
406 405 mutex_init(&smp->sm_mtx, NULL, MUTEX_DEFAULT, NULL);
407 406 smp->sm_free_ndx = SMP2SMF_NDX(smp);
408 407
409 408 sm = SMP2SMF(smp);
410 409 releq = sm->sm_releq;
411 410
412 411 smpfreelist = releq->smq_free;
413 412 if (smpfreelist == 0) {
414 413 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
415 414 } else {
416 415 smp->sm_next = smpfreelist;
417 416 smp->sm_prev = smpfreelist->sm_prev;
418 417 smpfreelist->sm_prev = smp;
419 418 smp->sm_prev->sm_next = smp;
420 419 releq->smq_free = smp->sm_next;
421 420 }
422 421
423 422 /*
424 423 * sm_flag = 0 (no SM_QNDX_ZERO) implies smap on sm_freeq[1]
425 424 */
426 425 smp->sm_flags = 0;
427 426
428 427 #ifdef SEGKPM_SUPPORT
429 428 /*
430 429 * Due to the fragile prefetch loop no
431 430 * separate function is used here.
432 431 */
433 432 smp->sm_kpme_next = NULL;
434 433 smp->sm_kpme_prev = NULL;
435 434 smp->sm_kpme_page = NULL;
436 435 #endif
437 436 }
438 437
439 438 /*
440 439 * Allocate the per color indices that distribute allocation
441 440 * requests over the free lists. Each cpu will have a private
442 441 * rotor index to spread the allocations even across the available
443 442 * smap freelists. Init the scpu_last_smap field to the first
444 443 * smap element so there is no need to check for NULL.
445 444 */
446 445 smd_cpu =
447 446 kmem_zalloc(sizeof (union segmap_cpu) * max_ncpus, KM_SLEEP);
448 447 for (i = 0, scpu = smd_cpu; i < max_ncpus; i++, scpu++) {
449 448 int j;
450 449 for (j = 0; j < smd_ncolor; j++)
451 450 scpu->scpu.scpu_free_ndx[j] = j;
452 451 scpu->scpu.scpu_last_smap = smd_smap;
453 452 }
454 453
455 454 vpm_init();
456 455
457 456 #ifdef DEBUG
458 457 /*
459 458 * Keep track of which colors are used more often.
460 459 */
461 460 colors_used = kmem_zalloc(smd_nfree * sizeof (int), KM_SLEEP);
462 461 #endif /* DEBUG */
463 462
464 463 return (0);
465 464 }
466 465
467 466 static void
468 467 segmap_free(seg)
469 468 struct seg *seg;
470 469 {
471 470 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
472 471 }
473 472
474 473 /*
475 474 * Do a F_SOFTUNLOCK call over the range requested.
476 475 * The range must have already been F_SOFTLOCK'ed.
477 476 */
478 477 static void
479 478 segmap_unlock(
480 479 struct hat *hat,
481 480 struct seg *seg,
482 481 caddr_t addr,
483 482 size_t len,
484 483 enum seg_rw rw,
485 484 struct smap *smp)
486 485 {
487 486 page_t *pp;
488 487 caddr_t adr;
489 488 u_offset_t off;
490 489 struct vnode *vp;
491 490 kmutex_t *smtx;
492 491
493 492 ASSERT(smp->sm_refcnt > 0);
494 493
495 494 #ifdef lint
496 495 seg = seg;
497 496 #endif
498 497
499 498 if (segmap_kpm && IS_KPM_ADDR(addr)) {
500 499
501 500 /*
502 501 * We're called only from segmap_fault and this was a
503 502 * NOP in case of a kpm based smap, so dangerous things
504 503 * must have happened in the meantime. Pages are prefaulted
505 504 * and locked in segmap_getmapflt and they will not be
506 505 * unlocked until segmap_release.
507 506 */
508 507 panic("segmap_unlock: called with kpm addr %p", (void *)addr);
509 508 /*NOTREACHED*/
510 509 }
511 510
512 511 vp = smp->sm_vp;
513 512 off = smp->sm_off + (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
514 513
515 514 hat_unlock(hat, addr, P2ROUNDUP(len, PAGESIZE));
516 515 for (adr = addr; adr < addr + len; adr += PAGESIZE, off += PAGESIZE) {
517 516 ushort_t bitmask;
518 517
519 518 /*
520 519 * Use page_find() instead of page_lookup() to
521 520 * find the page since we know that it has
522 521 * "shared" lock.
523 522 */
524 523 pp = page_find(vp, off);
525 524 if (pp == NULL) {
526 525 panic("segmap_unlock: page not found");
527 526 /*NOTREACHED*/
528 527 }
529 528
530 529 if (rw == S_WRITE) {
531 530 hat_setrefmod(pp);
532 531 } else if (rw != S_OTHER) {
533 532 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
534 533 "segmap_fault:pp %p vp %p offset %llx", pp, vp, off);
535 534 hat_setref(pp);
536 535 }
537 536
538 537 /*
539 538 * Clear bitmap, if the bit corresponding to "off" is set,
540 539 * since the page and translation are being unlocked.
541 540 */
542 541 bitmask = SMAP_BIT_MASK((off - smp->sm_off) >> PAGESHIFT);
543 542
544 543 /*
545 544 * Large Files: Following assertion is to verify
546 545 * the correctness of the cast to (int) above.
547 546 */
548 547 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
549 548 smtx = SMAPMTX(smp);
550 549 mutex_enter(smtx);
551 550 if (smp->sm_bitmap & bitmask) {
552 551 smp->sm_bitmap &= ~bitmask;
553 552 }
554 553 mutex_exit(smtx);
555 554
556 555 page_unlock(pp);
557 556 }
558 557 }
559 558
560 559 #define MAXPPB (MAXBSIZE/4096) /* assumes minimum page size of 4k */
561 560
562 561 /*
563 562 * This routine is called via a machine specific fault handling
564 563 * routine. It is also called by software routines wishing to
565 564 * lock or unlock a range of addresses.
566 565 *
567 566 * Note that this routine expects a page-aligned "addr".
568 567 */
569 568 faultcode_t
570 569 segmap_fault(
571 570 struct hat *hat,
572 571 struct seg *seg,
573 572 caddr_t addr,
574 573 size_t len,
575 574 enum fault_type type,
576 575 enum seg_rw rw)
577 576 {
578 577 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
579 578 struct smap *smp;
580 579 page_t *pp, **ppp;
581 580 struct vnode *vp;
582 581 u_offset_t off;
583 582 page_t *pl[MAXPPB + 1];
584 583 uint_t prot;
585 584 u_offset_t addroff;
586 585 caddr_t adr;
587 586 int err;
588 587 u_offset_t sm_off;
589 588 int hat_flag;
590 589
591 590 if (segmap_kpm && IS_KPM_ADDR(addr)) {
592 591 int newpage;
593 592 kmutex_t *smtx;
594 593
595 594 /*
596 595 * Pages are successfully prefaulted and locked in
597 596 * segmap_getmapflt and can't be unlocked until
598 597 * segmap_release. No hat mappings have to be locked
599 598 * and they also can't be unlocked as long as the
600 599 * caller owns an active kpm addr.
601 600 */
602 601 #ifndef DEBUG
603 602 if (type != F_SOFTUNLOCK)
604 603 return (0);
605 604 #endif
606 605
607 606 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
608 607 panic("segmap_fault: smap not found "
609 608 "for addr %p", (void *)addr);
610 609 /*NOTREACHED*/
611 610 }
612 611
613 612 smtx = SMAPMTX(smp);
614 613 #ifdef DEBUG
615 614 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
616 615 if (newpage) {
617 616 cmn_err(CE_WARN, "segmap_fault: newpage? smp %p",
618 617 (void *)smp);
619 618 }
620 619
621 620 if (type != F_SOFTUNLOCK) {
622 621 mutex_exit(smtx);
623 622 return (0);
624 623 }
625 624 #endif
626 625 mutex_exit(smtx);
627 626 vp = smp->sm_vp;
628 627 sm_off = smp->sm_off;
629 628
630 629 if (vp == NULL)
631 630 return (FC_MAKE_ERR(EIO));
632 631
633 632 ASSERT(smp->sm_refcnt > 0);
634 633
635 634 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
636 635 if (addroff + len > MAXBSIZE)
637 636 panic("segmap_fault: endaddr %p exceeds MAXBSIZE chunk",
638 637 (void *)(addr + len));
639 638
640 639 off = sm_off + addroff;
641 640
642 641 pp = page_find(vp, off);
643 642
644 643 if (pp == NULL)
645 644 panic("segmap_fault: softunlock page not found");
646 645
647 646 /*
648 647 * Set ref bit also here in case of S_OTHER to avoid the
649 648 * overhead of supporting other cases than F_SOFTUNLOCK
650 649 * with segkpm. We can do this because the underlying
651 650 * pages are locked anyway.
652 651 */
653 652 if (rw == S_WRITE) {
654 653 hat_setrefmod(pp);
655 654 } else {
656 655 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
657 656 "segmap_fault:pp %p vp %p offset %llx",
658 657 pp, vp, off);
659 658 hat_setref(pp);
660 659 }
661 660
662 661 return (0);
663 662 }
664 663
665 664 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
666 665 smp = GET_SMAP(seg, addr);
667 666 vp = smp->sm_vp;
668 667 sm_off = smp->sm_off;
669 668
670 669 if (vp == NULL)
671 670 return (FC_MAKE_ERR(EIO));
672 671
673 672 ASSERT(smp->sm_refcnt > 0);
674 673
675 674 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
676 675 if (addroff + len > MAXBSIZE) {
677 676 panic("segmap_fault: endaddr %p "
678 677 "exceeds MAXBSIZE chunk", (void *)(addr + len));
679 678 /*NOTREACHED*/
680 679 }
681 680 off = sm_off + addroff;
682 681
683 682 /*
684 683 * First handle the easy stuff
685 684 */
686 685 if (type == F_SOFTUNLOCK) {
687 686 segmap_unlock(hat, seg, addr, len, rw, smp);
688 687 return (0);
689 688 }
690 689
691 690 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
692 691 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
693 692 err = VOP_GETPAGE(vp, (offset_t)off, len, &prot, pl, MAXBSIZE,
694 693 seg, addr, rw, CRED(), NULL);
695 694
696 695 if (err)
697 696 return (FC_MAKE_ERR(err));
698 697
699 698 prot &= smd->smd_prot;
700 699
701 700 /*
702 701 * Handle all pages returned in the pl[] array.
703 702 * This loop is coded on the assumption that if
704 703 * there was no error from the VOP_GETPAGE routine,
705 704 * that the page list returned will contain all the
706 705 * needed pages for the vp from [off..off + len].
707 706 */
708 707 ppp = pl;
709 708 while ((pp = *ppp++) != NULL) {
710 709 u_offset_t poff;
711 710 ASSERT(pp->p_vnode == vp);
712 711 hat_flag = HAT_LOAD;
713 712
714 713 /*
715 714 * Verify that the pages returned are within the range
716 715 * of this segmap region. Note that it is theoretically
717 716 * possible for pages outside this range to be returned,
718 717 * but it is not very likely. If we cannot use the
719 718 * page here, just release it and go on to the next one.
720 719 */
721 720 if (pp->p_offset < sm_off ||
722 721 pp->p_offset >= sm_off + MAXBSIZE) {
723 722 (void) page_release(pp, 1);
724 723 continue;
725 724 }
726 725
727 726 ASSERT(hat == kas.a_hat);
728 727 poff = pp->p_offset;
729 728 adr = addr + (poff - off);
730 729 if (adr >= addr && adr < addr + len) {
731 730 hat_setref(pp);
732 731 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
733 732 "segmap_fault:pp %p vp %p offset %llx",
734 733 pp, vp, poff);
735 734 if (type == F_SOFTLOCK)
736 735 hat_flag = HAT_LOAD_LOCK;
737 736 }
738 737
739 738 /*
740 739 * Deal with VMODSORT pages here. If we know this is a write
741 740 * do the setmod now and allow write protection.
742 741 * As long as it's modified or not S_OTHER, remove write
743 742 * protection. With S_OTHER it's up to the FS to deal with this.
744 743 */
745 744 if (IS_VMODSORT(vp)) {
746 745 if (rw == S_WRITE)
747 746 hat_setmod(pp);
748 747 else if (rw != S_OTHER && !hat_ismod(pp))
749 748 prot &= ~PROT_WRITE;
750 749 }
751 750
752 751 hat_memload(hat, adr, pp, prot, hat_flag);
753 752 if (hat_flag != HAT_LOAD_LOCK)
754 753 page_unlock(pp);
755 754 }
756 755 return (0);
757 756 }
758 757
759 758 /*
760 759 * This routine is used to start I/O on pages asynchronously.
761 760 */
762 761 static faultcode_t
763 762 segmap_faulta(struct seg *seg, caddr_t addr)
764 763 {
765 764 struct smap *smp;
766 765 struct vnode *vp;
767 766 u_offset_t off;
768 767 int err;
769 768
770 769 if (segmap_kpm && IS_KPM_ADDR(addr)) {
771 770 int newpage;
772 771 kmutex_t *smtx;
773 772
774 773 /*
775 774 * Pages are successfully prefaulted and locked in
776 775 * segmap_getmapflt and can't be unlocked until
777 776 * segmap_release. No hat mappings have to be locked
778 777 * and they also can't be unlocked as long as the
779 778 * caller owns an active kpm addr.
780 779 */
781 780 #ifdef DEBUG
782 781 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
783 782 panic("segmap_faulta: smap not found "
784 783 "for addr %p", (void *)addr);
785 784 /*NOTREACHED*/
786 785 }
787 786
788 787 smtx = SMAPMTX(smp);
789 788 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
790 789 mutex_exit(smtx);
791 790 if (newpage)
792 791 cmn_err(CE_WARN, "segmap_faulta: newpage? smp %p",
793 792 (void *)smp);
794 793 #endif
795 794 return (0);
796 795 }
797 796
798 797 segmapcnt.smp_faulta.value.ul++;
799 798 smp = GET_SMAP(seg, addr);
800 799
801 800 ASSERT(smp->sm_refcnt > 0);
802 801
803 802 vp = smp->sm_vp;
804 803 off = smp->sm_off;
805 804
806 805 if (vp == NULL) {
807 806 cmn_err(CE_WARN, "segmap_faulta - no vp");
808 807 return (FC_MAKE_ERR(EIO));
809 808 }
810 809
811 810 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
812 811 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
813 812
814 813 err = VOP_GETPAGE(vp, (offset_t)(off + ((offset_t)((uintptr_t)addr
815 814 & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
816 815 seg, addr, S_READ, CRED(), NULL);
817 816
818 817 if (err)
819 818 return (FC_MAKE_ERR(err));
820 819 return (0);
821 820 }
822 821
823 822 /*ARGSUSED*/
824 823 static int
825 824 segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
826 825 {
827 826 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
828 827
829 828 ASSERT(seg->s_as && RW_LOCK_HELD(&seg->s_as->a_lock));
830 829
831 830 /*
832 831 * Need not acquire the segment lock since
833 832 * "smd_prot" is a read-only field.
834 833 */
835 834 return (((smd->smd_prot & prot) != prot) ? EACCES : 0);
836 835 }
837 836
838 837 static int
839 838 segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
840 839 {
841 840 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
842 841 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
843 842
844 843 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
845 844
846 845 if (pgno != 0) {
847 846 do {
848 847 protv[--pgno] = smd->smd_prot;
849 848 } while (pgno != 0);
850 849 }
851 850 return (0);
852 851 }
853 852
854 853 static u_offset_t
855 854 segmap_getoffset(struct seg *seg, caddr_t addr)
856 855 {
857 856 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
858 857
859 858 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
860 859
861 860 return ((u_offset_t)smd->smd_sm->sm_off + (addr - seg->s_base));
862 861 }
863 862
864 863 /*ARGSUSED*/
865 864 static int
866 865 segmap_gettype(struct seg *seg, caddr_t addr)
867 866 {
868 867 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
869 868
870 869 return (MAP_SHARED);
871 870 }
872 871
873 872 /*ARGSUSED*/
874 873 static int
875 874 segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
876 875 {
877 876 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
878 877
879 878 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
880 879
881 880 /* XXX - This doesn't make any sense */
882 881 *vpp = smd->smd_sm->sm_vp;
883 882 return (0);
884 883 }
885 884
886 885 /*
887 886 * Check to see if it makes sense to do kluster/read ahead to
888 887 * addr + delta relative to the mapping at addr. We assume here
889 888 * that delta is a signed PAGESIZE'd multiple (which can be negative).
890 889 *
891 890 * For segmap we always "approve" of this action from our standpoint.
892 891 */
893 892 /*ARGSUSED*/
894 893 static int
895 894 segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
896 895 {
897 896 return (0);
898 897 }
899 898
900 899 /*
901 900 * Special private segmap operations
902 901 */
903 902
904 903 /*
905 904 * Add smap to the appropriate free list.
906 905 */
907 906 static void
908 907 segmap_smapadd(struct smap *smp)
909 908 {
910 909 struct smfree *sm;
911 910 struct smap *smpfreelist;
912 911 struct sm_freeq *releq;
913 912
914 913 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
915 914
916 915 if (smp->sm_refcnt != 0) {
917 916 panic("segmap_smapadd");
918 917 /*NOTREACHED*/
919 918 }
920 919
921 920 sm = &smd_free[smp->sm_free_ndx];
922 921 /*
923 922 * Add to the tail of the release queue
924 923 * Note that sm_releq and sm_allocq could toggle
925 924 * before we get the lock. This does not affect
926 925 * correctness as the 2 queues are only maintained
927 926 * to reduce lock pressure.
928 927 */
929 928 releq = sm->sm_releq;
930 929 if (releq == &sm->sm_freeq[0])
931 930 smp->sm_flags |= SM_QNDX_ZERO;
932 931 else
933 932 smp->sm_flags &= ~SM_QNDX_ZERO;
934 933 mutex_enter(&releq->smq_mtx);
935 934 smpfreelist = releq->smq_free;
936 935 if (smpfreelist == 0) {
937 936 int want;
938 937
939 938 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
940 939 /*
941 940 * Both queue mutexes held to set sm_want;
942 941 * snapshot the value before dropping releq mutex.
943 942 * If sm_want appears after the releq mutex is dropped,
944 943 * then the smap just freed is already gone.
945 944 */
946 945 want = sm->sm_want;
947 946 mutex_exit(&releq->smq_mtx);
948 947 /*
949 948 * See if there was a waiter before dropping the releq mutex,
950 949 * then recheck after obtaining the sm_freeq[0] mutex, as
951 950 * another thread may have already signaled.
952 951 */
953 952 if (want) {
954 953 mutex_enter(&sm->sm_freeq[0].smq_mtx);
955 954 if (sm->sm_want)
956 955 cv_signal(&sm->sm_free_cv);
957 956 mutex_exit(&sm->sm_freeq[0].smq_mtx);
958 957 }
959 958 } else {
960 959 smp->sm_next = smpfreelist;
961 960 smp->sm_prev = smpfreelist->sm_prev;
962 961 smpfreelist->sm_prev = smp;
963 962 smp->sm_prev->sm_next = smp;
964 963 mutex_exit(&releq->smq_mtx);
965 964 }
966 965 }
967 966
968 967
969 968 static struct smap *
970 969 segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
971 970 {
972 971 struct smap **hpp;
973 972 struct smap *tmp;
974 973 kmutex_t *hmtx;
975 974
976 975 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
977 976 ASSERT(smp->sm_vp == NULL);
978 977 ASSERT(smp->sm_hash == NULL);
979 978 ASSERT(smp->sm_prev == NULL);
980 979 ASSERT(smp->sm_next == NULL);
981 980 ASSERT(hashid >= 0 && hashid <= smd_hashmsk);
982 981
983 982 hmtx = SHASHMTX(hashid);
984 983
985 984 mutex_enter(hmtx);
986 985 /*
987 986 * First we need to verify that no one has created a smp
988 987 * with (vp,off) as its tag before us.
989 988 */
990 989 for (tmp = smd_hash[hashid].sh_hash_list;
991 990 tmp != NULL; tmp = tmp->sm_hash)
992 991 if (tmp->sm_vp == vp && tmp->sm_off == off)
993 992 break;
994 993
995 994 if (tmp == NULL) {
996 995 /*
997 996 * No one created one yet.
998 997 *
999 998 * Funniness here - we don't increment the ref count on the
1000 999 * vnode * even though we have another pointer to it here.
1001 1000 * The reason for this is that we don't want the fact that
1002 1001 * a seg_map entry somewhere refers to a vnode to prevent the
1003 1002 * vnode * itself from going away. This is because this
1004 1003 * reference to the vnode is a "soft one". In the case where
1005 1004 * a mapping is being used by a rdwr [or directory routine?]
1006 1005 * there already has to be a non-zero ref count on the vnode.
1007 1006 * In the case where the vp has been freed and the smap
1008 1007 * structure is on the free list, there are no pages in memory
1009 1008 * that can refer to the vnode. Thus even if we reuse the same
1010 1009 * vnode/smap structure for a vnode which has the same
1011 1010 * address but represents a different object, we are ok.
1012 1011 */
1013 1012 smp->sm_vp = vp;
1014 1013 smp->sm_off = off;
1015 1014
1016 1015 hpp = &smd_hash[hashid].sh_hash_list;
1017 1016 smp->sm_hash = *hpp;
1018 1017 *hpp = smp;
1019 1018 #ifdef SEGMAP_HASHSTATS
1020 1019 smd_hash_len[hashid]++;
1021 1020 #endif
1022 1021 }
1023 1022 mutex_exit(hmtx);
1024 1023
1025 1024 return (tmp);
1026 1025 }
1027 1026
1028 1027 static void
1029 1028 segmap_hashout(struct smap *smp)
1030 1029 {
1031 1030 struct smap **hpp, *hp;
1032 1031 struct vnode *vp;
1033 1032 kmutex_t *mtx;
1034 1033 int hashid;
1035 1034 u_offset_t off;
1036 1035
1037 1036 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1038 1037
1039 1038 vp = smp->sm_vp;
1040 1039 off = smp->sm_off;
1041 1040
1042 1041 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1043 1042 mtx = SHASHMTX(hashid);
1044 1043 mutex_enter(mtx);
1045 1044
1046 1045 hpp = &smd_hash[hashid].sh_hash_list;
1047 1046 for (;;) {
1048 1047 hp = *hpp;
1049 1048 if (hp == NULL) {
1050 1049 panic("segmap_hashout");
1051 1050 /*NOTREACHED*/
1052 1051 }
1053 1052 if (hp == smp)
1054 1053 break;
1055 1054 hpp = &hp->sm_hash;
1056 1055 }
1057 1056
1058 1057 *hpp = smp->sm_hash;
1059 1058 smp->sm_hash = NULL;
1060 1059 #ifdef SEGMAP_HASHSTATS
1061 1060 smd_hash_len[hashid]--;
1062 1061 #endif
1063 1062 mutex_exit(mtx);
1064 1063
1065 1064 smp->sm_vp = NULL;
1066 1065 smp->sm_off = (u_offset_t)0;
1067 1066
1068 1067 }
1069 1068
1070 1069 /*
1071 1070 * Attempt to free unmodified, unmapped, and non locked segmap
1072 1071 * pages.
1073 1072 */
1074 1073 void
1075 1074 segmap_pagefree(struct vnode *vp, u_offset_t off)
1076 1075 {
1077 1076 u_offset_t pgoff;
1078 1077 page_t *pp;
1079 1078
1080 1079 for (pgoff = off; pgoff < off + MAXBSIZE; pgoff += PAGESIZE) {
1081 1080
1082 1081 if ((pp = page_lookup_nowait(vp, pgoff, SE_EXCL)) == NULL)
1083 1082 continue;
1084 1083
1085 1084 switch (page_release(pp, 1)) {
1086 1085 case PGREL_NOTREL:
1087 1086 segmapcnt.smp_free_notfree.value.ul++;
1088 1087 break;
1089 1088 case PGREL_MOD:
1090 1089 segmapcnt.smp_free_dirty.value.ul++;
1091 1090 break;
1092 1091 case PGREL_CLEAN:
1093 1092 segmapcnt.smp_free.value.ul++;
1094 1093 break;
1095 1094 }
1096 1095 }
1097 1096 }
1098 1097
1099 1098 /*
1100 1099 * Locks held on entry: smap lock
1101 1100 * Locks held on exit : smap lock.
1102 1101 */
1103 1102
1104 1103 static void
1105 1104 grab_smp(struct smap *smp, page_t *pp)
1106 1105 {
1107 1106 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1108 1107 ASSERT(smp->sm_refcnt == 0);
1109 1108
1110 1109 if (smp->sm_vp != (struct vnode *)NULL) {
1111 1110 struct vnode *vp = smp->sm_vp;
1112 1111 u_offset_t off = smp->sm_off;
1113 1112 /*
1114 1113 * Destroy old vnode association and
1115 1114 * unload any hardware translations to
1116 1115 * the old object.
1117 1116 */
1118 1117 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reuse++;
1119 1118 segmap_hashout(smp);
1120 1119
1121 1120 /*
1122 1121 * This node is off freelist and hashlist,
1123 1122 * so there is no reason to drop/reacquire sm_mtx
1124 1123 * across calls to hat_unload.
1125 1124 */
1126 1125 if (segmap_kpm) {
1127 1126 caddr_t vaddr;
1128 1127 int hat_unload_needed = 0;
1129 1128
1130 1129 /*
1131 1130 * unload kpm mapping
1132 1131 */
1133 1132 if (pp != NULL) {
1134 1133 vaddr = hat_kpm_page2va(pp, 1);
1135 1134 hat_kpm_mapout(pp, GET_KPME(smp), vaddr);
1136 1135 page_unlock(pp);
1137 1136 }
1138 1137
1139 1138 /*
1140 1139 * Check if we have (also) the rare case of a
1141 1140 * non kpm mapping.
1142 1141 */
1143 1142 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
1144 1143 hat_unload_needed = 1;
1145 1144 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1146 1145 }
1147 1146
1148 1147 if (hat_unload_needed) {
1149 1148 hat_unload(kas.a_hat, segkmap->s_base +
1150 1149 ((smp - smd_smap) * MAXBSIZE),
1151 1150 MAXBSIZE, HAT_UNLOAD);
1152 1151 }
1153 1152
1154 1153 } else {
1155 1154 ASSERT(smp->sm_flags & SM_NOTKPM_RELEASED);
1156 1155 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1157 1156 hat_unload(kas.a_hat, segkmap->s_base +
1158 1157 ((smp - smd_smap) * MAXBSIZE),
1159 1158 MAXBSIZE, HAT_UNLOAD);
1160 1159 }
1161 1160 segmap_pagefree(vp, off);
1162 1161 }
1163 1162 }
1164 1163
1165 1164 static struct smap *
1166 1165 get_free_smp(int free_ndx)
1167 1166 {
1168 1167 struct smfree *sm;
1169 1168 kmutex_t *smtx;
1170 1169 struct smap *smp, *first;
1171 1170 struct sm_freeq *allocq, *releq;
1172 1171 struct kpme *kpme;
1173 1172 page_t *pp = NULL;
1174 1173 int end_ndx, page_locked = 0;
1175 1174
1176 1175 end_ndx = free_ndx;
1177 1176 sm = &smd_free[free_ndx];
1178 1177
1179 1178 retry_queue:
1180 1179 allocq = sm->sm_allocq;
1181 1180 mutex_enter(&allocq->smq_mtx);
1182 1181
1183 1182 if ((smp = allocq->smq_free) == NULL) {
1184 1183
1185 1184 skip_queue:
1186 1185 /*
1187 1186 * The alloc list is empty or this queue is being skipped;
1188 1187 * first see if the allocq toggled.
1189 1188 */
1190 1189 if (sm->sm_allocq != allocq) {
1191 1190 /* queue changed */
1192 1191 mutex_exit(&allocq->smq_mtx);
1193 1192 goto retry_queue;
1194 1193 }
1195 1194 releq = sm->sm_releq;
1196 1195 if (!mutex_tryenter(&releq->smq_mtx)) {
1197 1196 /* cannot get releq; a free smp may be there now */
1198 1197 mutex_exit(&allocq->smq_mtx);
1199 1198
1200 1199 /*
1201 1200 * This loop could spin forever if this thread has
1202 1201 * higher priority than the thread that is holding
1203 1202 * releq->smq_mtx. In order to force the other thread
1204 1203 * to run, we'll lock/unlock the mutex which is safe
1205 1204 * since we just unlocked the allocq mutex.
1206 1205 */
1207 1206 mutex_enter(&releq->smq_mtx);
1208 1207 mutex_exit(&releq->smq_mtx);
1209 1208 goto retry_queue;
1210 1209 }
1211 1210 if (releq->smq_free == NULL) {
1212 1211 /*
1213 1212 * This freelist is empty.
1214 1213 * This should not happen unless clients
1215 1214 * are failing to release the segmap
1216 1215 * window after accessing the data.
1217 1216 * Before resorting to sleeping, try
1218 1217 * the next list of the same color.
1219 1218 */
1220 1219 free_ndx = (free_ndx + smd_ncolor) & smd_freemsk;
1221 1220 if (free_ndx != end_ndx) {
1222 1221 mutex_exit(&releq->smq_mtx);
1223 1222 mutex_exit(&allocq->smq_mtx);
1224 1223 sm = &smd_free[free_ndx];
1225 1224 goto retry_queue;
1226 1225 }
1227 1226 /*
1228 1227 * Tried all freelists of the same color once,
1229 1228 * wait on this list and hope something gets freed.
1230 1229 */
1231 1230 segmapcnt.smp_get_nofree.value.ul++;
1232 1231 sm->sm_want++;
1233 1232 mutex_exit(&sm->sm_freeq[1].smq_mtx);
1234 1233 cv_wait(&sm->sm_free_cv,
1235 1234 &sm->sm_freeq[0].smq_mtx);
1236 1235 sm->sm_want--;
1237 1236 mutex_exit(&sm->sm_freeq[0].smq_mtx);
1238 1237 sm = &smd_free[free_ndx];
1239 1238 goto retry_queue;
1240 1239 } else {
1241 1240 /*
1242 1241 * Something on the rele queue; flip the alloc
1243 1242 * and rele queues and retry.
1244 1243 */
1245 1244 sm->sm_allocq = releq;
1246 1245 sm->sm_releq = allocq;
1247 1246 mutex_exit(&allocq->smq_mtx);
1248 1247 mutex_exit(&releq->smq_mtx);
1249 1248 if (page_locked) {
1250 1249 delay(hz >> 2);
1251 1250 page_locked = 0;
1252 1251 }
1253 1252 goto retry_queue;
1254 1253 }
1255 1254 } else {
1256 1255 /*
1257 1256 * Fastpath the case we get the smap mutex
1258 1257 * on the first try.
1259 1258 */
1260 1259 first = smp;
1261 1260 next_smap:
1262 1261 smtx = SMAPMTX(smp);
1263 1262 if (!mutex_tryenter(smtx)) {
1264 1263 /*
1265 1264 * Another thread is trying to reclaim this slot.
1266 1265 * Skip to the next queue or smap.
1267 1266 */
1268 1267 if ((smp = smp->sm_next) == first) {
1269 1268 goto skip_queue;
1270 1269 } else {
1271 1270 goto next_smap;
1272 1271 }
1273 1272 } else {
1274 1273 /*
1275 1274 * if kpme exists, get shared lock on the page
1276 1275 */
1277 1276 if (segmap_kpm && smp->sm_vp != NULL) {
1278 1277
1279 1278 kpme = GET_KPME(smp);
1280 1279 pp = kpme->kpe_page;
1281 1280
1282 1281 if (pp != NULL) {
1283 1282 if (!page_trylock(pp, SE_SHARED)) {
1284 1283 smp = smp->sm_next;
1285 1284 mutex_exit(smtx);
1286 1285 page_locked = 1;
1287 1286
1288 1287 pp = NULL;
1289 1288
1290 1289 if (smp == first) {
1291 1290 goto skip_queue;
1292 1291 } else {
1293 1292 goto next_smap;
1294 1293 }
1295 1294 } else {
1296 1295 if (kpme->kpe_page == NULL) {
1297 1296 page_unlock(pp);
1298 1297 pp = NULL;
1299 1298 }
1300 1299 }
1301 1300 }
1302 1301 }
1303 1302
1304 1303 /*
1305 1304 * At this point, we've selected smp. Remove smp
1306 1305 * from its freelist. If smp is the first one in
1307 1306 * the freelist, update the head of the freelist.
1308 1307 */
1309 1308 if (first == smp) {
1310 1309 ASSERT(first == allocq->smq_free);
1311 1310 allocq->smq_free = smp->sm_next;
1312 1311 }
1313 1312
1314 1313 /*
1315 1314 * if the head of the freelist still points to smp,
1316 1315 * then there are no more free smaps in that list.
1317 1316 */
1318 1317 if (allocq->smq_free == smp)
1319 1318 /*
1320 1319 * Took the last one
1321 1320 */
1322 1321 allocq->smq_free = NULL;
1323 1322 else {
1324 1323 smp->sm_prev->sm_next = smp->sm_next;
1325 1324 smp->sm_next->sm_prev = smp->sm_prev;
1326 1325 }
1327 1326 mutex_exit(&allocq->smq_mtx);
1328 1327 smp->sm_prev = smp->sm_next = NULL;
1329 1328
1330 1329 /*
1331 1330 * if pp != NULL, pp must have been locked;
1332 1331 * grab_smp() unlocks pp.
1333 1332 */
1334 1333 ASSERT((pp == NULL) || PAGE_LOCKED(pp));
1335 1334 grab_smp(smp, pp);
1336 1335 /* return smp locked. */
1337 1336 ASSERT(SMAPMTX(smp) == smtx);
1338 1337 ASSERT(MUTEX_HELD(smtx));
1339 1338 return (smp);
1340 1339 }
1341 1340 }
1342 1341 }
1343 1342
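The lock/unlock sequence inside get_free_smp above (where releq->smq_mtx is entered and immediately exited) is a priority-inversion workaround worth calling out. A user-space analogue (hypothetical, sketch only):

    #include <pthread.h>

    static pthread_mutex_t releq_mtx = PTHREAD_MUTEX_INITIALIZER;

    /* Returns with releq_mtx held. */
    static void
    enter_releq(void)
    {
            while (pthread_mutex_trylock(&releq_mtx) != 0) {
                    /*
                     * Block once and release immediately: unlike a
                     * pure trylock spin, this lets the current holder
                     * run even if we have higher priority.
                     */
                    pthread_mutex_lock(&releq_mtx);
                    pthread_mutex_unlock(&releq_mtx);
            }
    }

The kernel version retries the whole queue scan instead of keeping the lock after the blocking acquire, since the alloc and release queues may have toggled in the meantime.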
1344 1343 /*
1345 1344 * Special public segmap operations
1346 1345 */
1347 1346
1348 1347 /*
1349 1348 * Create pages (without using VOP_GETPAGE) and load up translations to them.
1350 1349 * If softlock is TRUE, then set things up so that it looks like a call
1351 1350 * to segmap_fault with F_SOFTLOCK.
1352 1351 *
1353 1352 * Returns 1, if a page is created by calling page_create_va(), or 0 otherwise.
1354 1353 *
1355 1354 * All fields in the generic segment (struct seg) are considered to be
1356 1355 * read-only for "segmap" even though the kernel address space (kas) may
1357 1356 * not be locked, hence no lock is needed to access them.
1358 1357 */
1359 1358 int
1360 1359 segmap_pagecreate(struct seg *seg, caddr_t addr, size_t len, int softlock)
1361 1360 {
1362 1361 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
1363 1362 page_t *pp;
1364 1363 u_offset_t off;
1365 1364 struct smap *smp;
1366 1365 struct vnode *vp;
1367 1366 caddr_t eaddr;
1368 1367 int newpage = 0;
1369 1368 uint_t prot;
1370 1369 kmutex_t *smtx;
1371 1370 int hat_flag;
1372 1371
1373 1372 ASSERT(seg->s_as == &kas);
1374 1373
1375 1374 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1376 1375 /*
1377 1376 * Pages are successfully prefaulted and locked in
1378 1377 * segmap_getmapflt and can't be unlocked until
1379 1378 * segmap_release. The SM_KPM_NEWPAGE flag is set
1380 1379 * in segmap_pagecreate_kpm when new pages are created,
1381 1380 * and it is returned as the "newpage" indication here.
1382 1381 */
1383 1382 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1384 1383 panic("segmap_pagecreate: smap not found "
1385 1384 "for addr %p", (void *)addr);
1386 1385 /*NOTREACHED*/
1387 1386 }
1388 1387
1389 1388 smtx = SMAPMTX(smp);
1390 1389 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
1391 1390 smp->sm_flags &= ~SM_KPM_NEWPAGE;
1392 1391 mutex_exit(smtx);
1393 1392
1394 1393 return (newpage);
1395 1394 }
1396 1395
1397 1396 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
1398 1397
1399 1398 eaddr = addr + len;
1400 1399 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1401 1400
1402 1401 smp = GET_SMAP(seg, addr);
1403 1402
1404 1403 /*
1405 1404 * We don't grab smp mutex here since we assume the smp
1406 1405 * has a refcnt set already which prevents the slot from
1407 1406 * changing its id.
1408 1407 */
1409 1408 ASSERT(smp->sm_refcnt > 0);
1410 1409
1411 1410 vp = smp->sm_vp;
1412 1411 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1413 1412 prot = smd->smd_prot;
1414 1413
1415 1414 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1416 1415 hat_flag = HAT_LOAD;
1417 1416 pp = page_lookup(vp, off, SE_SHARED);
1418 1417 if (pp == NULL) {
1419 1418 ushort_t bitindex;
1420 1419
1421 1420 if ((pp = page_create_va(vp, off,
1422 1421 PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
1423 1422 panic("segmap_pagecreate: page_create failed");
1424 1423 /*NOTREACHED*/
1425 1424 }
1426 1425 newpage = 1;
1427 1426 page_io_unlock(pp);
1428 1427
1429 1428 /*
1430 1429 * Since pages created here do not contain valid
1431 1430 * data until the caller writes into them, the
1432 1431 * "exclusive" lock will not be dropped to prevent
1433 1432 * other users from accessing the page. We also
1434 1433 * have to lock the translation to prevent a fault
1435 1434 * from occurring when the virtual address mapped by
1436 1435 * this page is written into. This is necessary to
1437 1436 * avoid a deadlock since we haven't dropped the
1438 1437 * "exclusive" lock.
1439 1438 */
1440 1439 bitindex = (ushort_t)((off - smp->sm_off) >> PAGESHIFT);
1441 1440
1442 1441 /*
1443 1442 * Large Files: The following assertion is to
1444 1443 * verify the cast above.
1445 1444 */
1446 1445 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1447 1446 smtx = SMAPMTX(smp);
1448 1447 mutex_enter(smtx);
1449 1448 smp->sm_bitmap |= SMAP_BIT_MASK(bitindex);
1450 1449 mutex_exit(smtx);
1451 1450
1452 1451 hat_flag = HAT_LOAD_LOCK;
1453 1452 } else if (softlock) {
1454 1453 hat_flag = HAT_LOAD_LOCK;
1455 1454 }
1456 1455
1457 1456 if (IS_VMODSORT(pp->p_vnode) && (prot & PROT_WRITE))
1458 1457 hat_setmod(pp);
1459 1458
1460 1459 hat_memload(kas.a_hat, addr, pp, prot, hat_flag);
1461 1460
1462 1461 if (hat_flag != HAT_LOAD_LOCK)
1463 1462 page_unlock(pp);
1464 1463
1465 1464 TRACE_5(TR_FAC_VM, TR_SEGMAP_PAGECREATE,
1466 1465 "segmap_pagecreate:seg %p addr %p pp %p vp %p offset %llx",
1467 1466 seg, addr, pp, vp, off);
1468 1467 }
1469 1468
1470 1469 return (newpage);
1471 1470 }
1472 1471
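A hedged sketch of how a write path pairs segmap_pagecreate above with segmap_pageunlock below (modeled on the filesystem rdwr pattern; function and variable names are illustrative, and this is kernel-context pseudocode rather than a standalone program):

    static int
    write_window(struct vnode *vp, u_offset_t off, size_t n, struct uio *uio)
    {
            size_t mapon = off & MAXBOFFSET;        /* offset within window */
            caddr_t base;
            int newpage, error;

            base = segmap_getmapflt(segkmap, vp, off, n, 0, S_WRITE);
            newpage = segmap_pagecreate(segkmap, base + mapon, n, 0);
            error = uiomove(base + mapon, n, UIO_WRITE, uio);
            if (newpage)
                    segmap_pageunlock(segkmap, base + mapon, n, S_WRITE);
            return (segmap_release(segkmap, base, error ? 0 : SM_WRITE));
    }

The pageunlock is only needed when pagecreate reported new pages, since those are the ones left with locked translations and an exclusive page lock.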
1473 1472 void
1474 1473 segmap_pageunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
1475 1474 {
1476 1475 struct smap *smp;
1477 1476 ushort_t bitmask;
1478 1477 page_t *pp;
1479 1478 struct vnode *vp;
1480 1479 u_offset_t off;
1481 1480 caddr_t eaddr;
1482 1481 kmutex_t *smtx;
1483 1482
1484 1483 ASSERT(seg->s_as == &kas);
1485 1484
1486 1485 eaddr = addr + len;
1487 1486 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1488 1487
1489 1488 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1490 1489 /*
1491 1490 * Pages are successfully prefaulted and locked in
1492 1491 * segmap_getmapflt and can't be unlocked until
1493 1492 * segmap_release, so no pages or hat mappings have
1494 1493 * to be unlocked at this point.
1495 1494 */
1496 1495 #ifdef DEBUG
1497 1496 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1498 1497 panic("segmap_pageunlock: smap not found "
1499 1498 "for addr %p", (void *)addr);
1500 1499 /*NOTREACHED*/
1501 1500 }
1502 1501
1503 1502 ASSERT(smp->sm_refcnt > 0);
1504 1503 mutex_exit(SMAPMTX(smp));
1505 1504 #endif
1506 1505 return;
1507 1506 }
1508 1507
1509 1508 smp = GET_SMAP(seg, addr);
1510 1509 smtx = SMAPMTX(smp);
1511 1510
1512 1511 ASSERT(smp->sm_refcnt > 0);
1513 1512
1514 1513 vp = smp->sm_vp;
1515 1514 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1516 1515
1517 1516 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1518 1517 bitmask = SMAP_BIT_MASK((int)(off - smp->sm_off) >> PAGESHIFT);
1519 1518
1520 1519 /*
1521 1520 * Large Files: Following assertion is to verify
1522 1521 * the correctness of the cast to (int) above.
1523 1522 */
1524 1523 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1525 1524
1526 1525 /*
1527 1526 * If the bit corresponding to "off" is set,
1528 1527 * clear this bit in the bitmap, unlock translations,
1529 1528 * and release the "exclusive" lock on the page.
1530 1529 */
1531 1530 if (smp->sm_bitmap & bitmask) {
1532 1531 mutex_enter(smtx);
1533 1532 smp->sm_bitmap &= ~bitmask;
1534 1533 mutex_exit(smtx);
1535 1534
1536 1535 hat_unlock(kas.a_hat, addr, PAGESIZE);
1537 1536
1538 1537 /*
1539 1538 * Use page_find() instead of page_lookup() to
1540 1539 * find the page since we know that it has
1541 1540 * "exclusive" lock.
1542 1541 */
1543 1542 pp = page_find(vp, off);
1544 1543 if (pp == NULL) {
1545 1544 panic("segmap_pageunlock: page not found");
1546 1545 /*NOTREACHED*/
1547 1546 }
1548 1547 if (rw == S_WRITE) {
1549 1548 hat_setrefmod(pp);
1550 1549 } else if (rw != S_OTHER) {
1551 1550 hat_setref(pp);
1552 1551 }
1553 1552
1554 1553 page_unlock(pp);
1555 1554 }
1556 1555 }
1557 1556 }
1558 1557
1559 1558 caddr_t
1560 1559 segmap_getmap(struct seg *seg, struct vnode *vp, u_offset_t off)
1561 1560 {
1562 1561 return (segmap_getmapflt(seg, vp, off, MAXBSIZE, 0, S_OTHER));
1563 1562 }
1564 1563
1565 1564 /*
1566 1565 * This is the magic virtual address that offset 0 of an ELF
1567 1566 * file gets mapped to in user space. This is used to pick
1568 1567 * the vac color on the freelist.
1569 1568 */
1570 1569 #define ELF_OFFZERO_VA (0x10000)
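A quick worked example of the color pick that segmap_getmapflt makes below (assuming MAXBSHIFT == 13 and two VAC colors, so smd_colormsk == 1): baseoff 0x0000 gives color (0x0000 >> 13) & 1 == 0, while baseoff 0x2000 gives (0x2000 >> 13) & 1 == 1, so consecutive MAXBSIZE windows of a file alternate colors and offset 0 of every file consistently lands on color 0, which is what the ELF_OFFZERO_VA assumption above relies on.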
1571 1570 /*
1572 1571 * segmap_getmap allocates a MAXBSIZE big slot to map the vnode vp
1573 1572 * in the range <off, off + len). off doesn't need to be MAXBSIZE aligned.
1574 1573 * The return address is always MAXBSIZE aligned.
1575 1574 *
1576 1575 * If forcefault is nonzero and the MMU translations haven't yet been created,
1577 1576 * segmap_getmap will call segmap_fault(..., F_INVAL, rw) to create them.
1578 1577 */
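A hedged sketch of the canonical read-side caller (modeled on the filesystem read paths; names are illustrative, kernel-context pseudocode):

    static int
    read_window(struct vnode *vp, u_offset_t off, size_t n, struct uio *uio)
    {
            caddr_t base;
            int error;

            /* n must stay inside one MAXBSIZE window, or the call panics */
            base = segmap_getmapflt(segkmap, vp, off, n, 1, S_READ);
            error = uiomove(base + (off & MAXBOFFSET), n, UIO_READ, uio);
            (void) segmap_release(segkmap, base, 0);
            return (error);
    }

With forcefault nonzero the translations exist before uiomove touches the window, so the copy normally proceeds without an intervening trip through segmap_fault.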
1579 1578 caddr_t
1580 1579 segmap_getmapflt(
1581 1580 struct seg *seg,
1582 1581 struct vnode *vp,
1583 1582 u_offset_t off,
1584 1583 size_t len,
1585 1584 int forcefault,
1586 1585 enum seg_rw rw)
1587 1586 {
1588 1587 struct smap *smp, *nsmp;
1589 1588 extern struct vnode *common_specvp();
1590 1589 caddr_t baseaddr; /* MAXBSIZE aligned */
1591 1590 u_offset_t baseoff;
1592 1591 int newslot;
1593 1592 caddr_t vaddr;
1594 1593 int color, hashid;
1595 1594 kmutex_t *hashmtx, *smapmtx;
1596 1595 struct smfree *sm;
1597 1596 page_t *pp;
1598 1597 struct kpme *kpme;
1599 1598 uint_t prot;
1600 1599 caddr_t base;
1601 1600 page_t *pl[MAXPPB + 1];
1602 1601 int error;
1603 1602 int is_kpm = 1;
1604 1603
1605 1604 ASSERT(seg->s_as == &kas);
1606 1605 ASSERT(seg == segkmap);
1607 1606
1608 1607 baseoff = off & (offset_t)MAXBMASK;
1609 1608 if (off + len > baseoff + MAXBSIZE) {
1610 1609 panic("segmap_getmap bad len");
1611 1610 /*NOTREACHED*/
1612 1611 }
1613 1612
1614 1613 /*
1615 1614 * If this is a block device we have to be sure to use the
1616 1615 * "common" block device vnode for the mapping.
1617 1616 */
1618 1617 if (vp->v_type == VBLK)
1619 1618 vp = common_specvp(vp);
1620 1619
1621 1620 smd_cpu[CPU->cpu_seqid].scpu.scpu_getmap++;
1622 1621
1623 1622 if (segmap_kpm == 0 ||
1624 1623 (forcefault == SM_PAGECREATE && rw != S_WRITE)) {
1625 1624 is_kpm = 0;
1626 1625 }
1627 1626
1628 1627 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1629 1628 hashmtx = SHASHMTX(hashid);
1630 1629
1631 1630 retry_hash:
1632 1631 mutex_enter(hashmtx);
1633 1632 for (smp = smd_hash[hashid].sh_hash_list;
1634 1633 smp != NULL; smp = smp->sm_hash)
1635 1634 if (smp->sm_vp == vp && smp->sm_off == baseoff)
1636 1635 break;
1637 1636 mutex_exit(hashmtx);
1638 1637
1639 1638 vrfy_smp:
1640 1639 if (smp != NULL) {
1641 1640
1642 1641 ASSERT(vp->v_count != 0);
1643 1642
1644 1643 /*
1645 1644 * Get smap lock and recheck its tag. The hash lock
1646 1645 * is dropped since the hash is based on (vp, off)
1647 1646 * and (vp, off) won't change when we have smap mtx.
1648 1647 */
1649 1648 smapmtx = SMAPMTX(smp);
1650 1649 mutex_enter(smapmtx);
1651 1650 if (smp->sm_vp != vp || smp->sm_off != baseoff) {
1652 1651 mutex_exit(smapmtx);
1653 1652 goto retry_hash;
1654 1653 }
1655 1654
1656 1655 if (smp->sm_refcnt == 0) {
1657 1656
1658 1657 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reclaim++;
1659 1658
1660 1659 /*
1661 1660 * Could still be on the free list. However, this
1662 1661 * could also be an smp that is transitioning from
1663 1662 * the free list when we have too much contention
1664 1663 * for the smapmtx's. In this case, we have an
1665 1664 * unlocked smp that is not on the free list any
1666 1665 * longer, but still has a 0 refcnt. The only way
1667 1666 * to be sure is to check the freelist pointers.
1668 1667 * Since we now have the smapmtx, we are guaranteed
1669 1668 * that the (vp, off) won't change, so we are safe
1670 1669 * to reclaim it. get_free_smp() knows that this
1671 1670 * can happen, and it will check the refcnt.
1672 1671 */
1673 1672
1674 1673 if ((smp->sm_next != NULL)) {
1675 1674 struct sm_freeq *freeq;
1676 1675
1677 1676 ASSERT(smp->sm_prev != NULL);
1678 1677 sm = &smd_free[smp->sm_free_ndx];
1679 1678
1680 1679 if (smp->sm_flags & SM_QNDX_ZERO)
1681 1680 freeq = &sm->sm_freeq[0];
1682 1681 else
1683 1682 freeq = &sm->sm_freeq[1];
1684 1683
1685 1684 mutex_enter(&freeq->smq_mtx);
1686 1685 if (freeq->smq_free != smp) {
1687 1686 /*
1688 1687 * fastpath normal case
1689 1688 */
1690 1689 smp->sm_prev->sm_next = smp->sm_next;
1691 1690 smp->sm_next->sm_prev = smp->sm_prev;
1692 1691 } else if (smp == smp->sm_next) {
1693 1692 /*
1694 1693 * Taking the last smap on freelist
1695 1694 */
1696 1695 freeq->smq_free = NULL;
1697 1696 } else {
1698 1697 /*
1699 1698 * Reclaiming 1st smap on list
1700 1699 */
1701 1700 freeq->smq_free = smp->sm_next;
1702 1701 smp->sm_prev->sm_next = smp->sm_next;
1703 1702 smp->sm_next->sm_prev = smp->sm_prev;
1704 1703 }
1705 1704 mutex_exit(&freeq->smq_mtx);
1706 1705 smp->sm_prev = smp->sm_next = NULL;
1707 1706 } else {
1708 1707 ASSERT(smp->sm_prev == NULL);
1709 1708 segmapcnt.smp_stolen.value.ul++;
1710 1709 }
1711 1710
1712 1711 } else {
1713 1712 segmapcnt.smp_get_use.value.ul++;
1714 1713 }
1715 1714 smp->sm_refcnt++; /* another user */
1716 1715
1717 1716 /*
1718 1717 * We don't invoke segmap_fault via TLB miss, so we set ref
1719 1718 * and mod bits in advance. For S_OTHER we set them in
1720 1719 * segmap_fault F_SOFTUNLOCK.
1721 1720 */
1722 1721 if (is_kpm) {
1723 1722 if (rw == S_WRITE) {
1724 1723 smp->sm_flags |= SM_WRITE_DATA;
1725 1724 } else if (rw == S_READ) {
1726 1725 smp->sm_flags |= SM_READ_DATA;
1727 1726 }
1728 1727 }
1729 1728 mutex_exit(smapmtx);
1730 1729
1731 1730 newslot = 0;
1732 1731 } else {
1733 1732
1734 1733 uint32_t free_ndx, *free_ndxp;
1735 1734 union segmap_cpu *scpu;
1736 1735
1737 1736 /*
1738 1737 * On a PAC machine or a machine with anti-alias
1739 1738 * hardware, smd_colormsk will be zero.
1740 1739 *
1741 1740 		 * On a VAC machine, pick color by offset in the file
1742 1741 		 * so we won't get VAC conflicts on elf files.
1743 1742 		 * On data files, color does not matter, but we
1744 1743 		 * don't know what kind of file it is, so we always
1745 1744 		 * pick color by offset. This causes the color
1746 1745 		 * corresponding to file offset zero to be used more
1747 1746 		 * heavily.
1748 1747 */
1749 1748 color = (baseoff >> MAXBSHIFT) & smd_colormsk;
1750 1749 scpu = smd_cpu+CPU->cpu_seqid;
1751 1750 free_ndxp = &scpu->scpu.scpu_free_ndx[color];
1752 1751 free_ndx = (*free_ndxp += smd_ncolor) & smd_freemsk;
1753 1752 #ifdef DEBUG
1754 1753 colors_used[free_ndx]++;
1755 1754 #endif /* DEBUG */
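		/*
		 * Editor's note (illustrative, not part of the original
		 * source): with MAXBSIZE == 8192, i.e. MAXBSHIFT == 13,
		 * an offset of 0x6000 yields color (0x6000 >> 13) == 3,
		 * assuming smd_colormsk covers at least four colors;
		 * bumping the per-CPU cursor by smd_ncolor then spreads
		 * repeated picks of the same color across the free lists.
		 */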
1756 1755
1757 1756 /*
1758 1757 * Get a locked smp slot from the free list.
1759 1758 */
1760 1759 smp = get_free_smp(free_ndx);
1761 1760 smapmtx = SMAPMTX(smp);
1762 1761
1763 1762 ASSERT(smp->sm_vp == NULL);
1764 1763
1765 1764 if ((nsmp = segmap_hashin(smp, vp, baseoff, hashid)) != NULL) {
1766 1765 /*
1767 1766 * Failed to hashin, there exists one now.
1768 1767 * Return the smp we just allocated.
1769 1768 */
1770 1769 segmap_smapadd(smp);
1771 1770 mutex_exit(smapmtx);
1772 1771
1773 1772 smp = nsmp;
1774 1773 goto vrfy_smp;
1775 1774 }
1776 1775 smp->sm_refcnt++; /* another user */
1777 1776
1778 1777 /*
1779 1778 * We don't invoke segmap_fault via TLB miss, so we set ref
1780 1779 * and mod bits in advance. For S_OTHER we set them in
1781 1780 * segmap_fault F_SOFTUNLOCK.
1782 1781 */
1783 1782 if (is_kpm) {
1784 1783 if (rw == S_WRITE) {
1785 1784 smp->sm_flags |= SM_WRITE_DATA;
1786 1785 } else if (rw == S_READ) {
1787 1786 smp->sm_flags |= SM_READ_DATA;
1788 1787 }
1789 1788 }
1790 1789 mutex_exit(smapmtx);
1791 1790
1792 1791 newslot = 1;
1793 1792 }
1794 1793
1795 1794 if (!is_kpm)
1796 1795 goto use_segmap_range;
1797 1796
1798 1797 /*
1799 1798 * Use segkpm
1800 1799 */
1801 1800 /* Lint directive required until 6746211 is fixed */
1802 1801 /*CONSTCOND*/
1803 1802 ASSERT(PAGESIZE == MAXBSIZE);
1804 1803
1805 1804 /*
1806 1805 * remember the last smp faulted on this cpu.
1807 1806 */
1808 1807 (smd_cpu+CPU->cpu_seqid)->scpu.scpu_last_smap = smp;
1809 1808
1810 1809 if (forcefault == SM_PAGECREATE) {
1811 1810 baseaddr = segmap_pagecreate_kpm(seg, vp, baseoff, smp, rw);
1812 1811 return (baseaddr);
1813 1812 }
1814 1813
1815 1814 if (newslot == 0 &&
1816 1815 (pp = GET_KPME(smp)->kpe_page) != NULL) {
1817 1816
1818 1817 /* fastpath */
1819 1818 switch (rw) {
1820 1819 case S_READ:
1821 1820 case S_WRITE:
1822 1821 if (page_trylock(pp, SE_SHARED)) {
1823 1822 if (PP_ISFREE(pp) ||
1824 1823 !(pp->p_vnode == vp &&
1825 1824 pp->p_offset == baseoff)) {
1826 1825 page_unlock(pp);
1827 1826 pp = page_lookup(vp, baseoff,
1828 1827 SE_SHARED);
1829 1828 }
1830 1829 } else {
1831 1830 pp = page_lookup(vp, baseoff, SE_SHARED);
1832 1831 }
1833 1832
1834 1833 if (pp == NULL) {
1835 1834 ASSERT(GET_KPME(smp)->kpe_page == NULL);
1836 1835 break;
1837 1836 }
1838 1837
1839 1838 if (rw == S_WRITE &&
1840 1839 hat_page_getattr(pp, P_MOD | P_REF) !=
1841 1840 (P_MOD | P_REF)) {
1842 1841 page_unlock(pp);
1843 1842 break;
1844 1843 }
1845 1844
1846 1845 /*
1847 1846 			 * We hold the p_selock as reader, so grab_smp
1848 1847 			 * can't hit us: we have bumped the smap
1849 1848 			 * refcnt, and hat_pageunload needs the
1850 1849 			 * p_selock held exclusively.
1851 1850 */
1852 1851 kpme = GET_KPME(smp);
1853 1852 if (kpme->kpe_page == pp) {
1854 1853 baseaddr = hat_kpm_page2va(pp, 0);
1855 1854 } else if (kpme->kpe_page == NULL) {
1856 1855 baseaddr = hat_kpm_mapin(pp, kpme);
1857 1856 } else {
1858 1857 panic("segmap_getmapflt: stale "
1859 1858 "kpme page, kpme %p", (void *)kpme);
1860 1859 /*NOTREACHED*/
1861 1860 }
1862 1861
1863 1862 /*
1864 1863 * We don't invoke segmap_fault via TLB miss,
1865 1864 * so we set ref and mod bits in advance.
1866 1865 			 * For S_OTHER we set them in segmap_fault
1867 1866 * F_SOFTUNLOCK.
1868 1867 */
1869 1868 if (rw == S_READ && !hat_isref(pp))
1870 1869 hat_setref(pp);
1871 1870
1872 1871 return (baseaddr);
1873 1872 default:
1874 1873 break;
1875 1874 }
1876 1875 }
1877 1876
1878 1877 base = segkpm_create_va(baseoff);
1879 1878 error = VOP_GETPAGE(vp, (offset_t)baseoff, len, &prot, pl, MAXBSIZE,
1880 1879 seg, base, rw, CRED(), NULL);
1881 1880
1882 1881 pp = pl[0];
1883 1882 if (error || pp == NULL) {
1884 1883 /*
1885 1884 * Use segmap address slot and let segmap_fault deal
1886 1885 * with the error cases. There is no error return
1887 1886 * possible here.
1888 1887 */
1889 1888 goto use_segmap_range;
1890 1889 }
1891 1890
1892 1891 ASSERT(pl[1] == NULL);
1893 1892
1894 1893 /*
1895 1894 	 * When prot is not returned with PROT_ALL, the returned pages
1896 1895 	 * are not backed by fs blocks. For most segmap users this is
1897 1896 	 * no problem: they don't write to the pages in the same
1898 1897 	 * request and therefore don't rely on a following
1899 1898 	 * trap-driven segmap_fault. With SM_LOCKPROTO users it
1900 1899 	 * is safer to use segkmap addresses so that protection
1901 1900 	 * faults can be handled by segmap_fault.
1902 1901 */
1903 1902 if (prot != PROT_ALL && forcefault == SM_LOCKPROTO) {
1904 1903 /*
1905 1904 * Use segmap address slot and let segmap_fault
1906 1905 * do the error return.
1907 1906 */
1908 1907 ASSERT(rw != S_WRITE);
1909 1908 ASSERT(PAGE_LOCKED(pp));
1910 1909 page_unlock(pp);
1911 1910 forcefault = 0;
1912 1911 goto use_segmap_range;
1913 1912 }
1914 1913
1915 1914 /*
1916 1915 	 * We hold the p_selock as reader, so grab_smp can't hit us: we
1917 1916 	 * have bumped the smap refcnt, and hat_pageunload needs the
1918 1917 	 * p_selock held exclusively.
1919 1918 */
1920 1919 kpme = GET_KPME(smp);
1921 1920 if (kpme->kpe_page == pp) {
1922 1921 baseaddr = hat_kpm_page2va(pp, 0);
1923 1922 } else if (kpme->kpe_page == NULL) {
1924 1923 baseaddr = hat_kpm_mapin(pp, kpme);
1925 1924 } else {
1926 1925 panic("segmap_getmapflt: stale kpme page after "
1927 1926 "VOP_GETPAGE, kpme %p", (void *)kpme);
1928 1927 /*NOTREACHED*/
1929 1928 }
1930 1929
1931 1930 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
1932 1931
1933 1932 return (baseaddr);
1934 1933
1935 1934
1936 1935 use_segmap_range:
1937 1936 baseaddr = seg->s_base + ((smp - smd_smap) * MAXBSIZE);
1938 1937 TRACE_4(TR_FAC_VM, TR_SEGMAP_GETMAP,
1939 1938 "segmap_getmap:seg %p addr %p vp %p offset %llx",
1940 1939 seg, baseaddr, vp, baseoff);
1941 1940
1942 1941 /*
1943 1942 * Prefault the translations
1944 1943 */
1945 1944 vaddr = baseaddr + (off - baseoff);
1946 1945 if (forcefault && (newslot || !hat_probe(kas.a_hat, vaddr))) {
1947 1946
1948 1947 caddr_t pgaddr = (caddr_t)((uintptr_t)vaddr &
1949 1948 (uintptr_t)PAGEMASK);
1950 1949
1951 1950 (void) segmap_fault(kas.a_hat, seg, pgaddr,
1952 1951 (vaddr + len - pgaddr + PAGESIZE - 1) & (uintptr_t)PAGEMASK,
1953 1952 F_INVAL, rw);
1954 1953 }
1955 1954
1956 1955 return (baseaddr);
1957 1956 }
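/*
 * Editor's note: the reclaim path at the top of segmap_getmapflt() above
 * unlinks an smp from a circular, doubly-linked free queue and must
 * distinguish three cases: not at the queue head, sole element on the
 * list, and head of a longer list.  A minimal standalone sketch of that
 * unlink, using hypothetical node/queue types rather than the real smap
 * structures:
 */
struct node {
	struct node *next;
	struct node *prev;
};
struct cqueue {
	struct node *head;		/* NULL when the queue is empty */
};

static void
cqueue_unlink(struct cqueue *q, struct node *n)
{
	if (q->head != n) {
		/* fastpath normal case: splice out of the ring */
		n->prev->next = n->next;
		n->next->prev = n->prev;
	} else if (n == n->next) {
		/* taking the last node: the ring becomes empty */
		q->head = NULL;
	} else {
		/* removing the head: advance it, then splice out */
		q->head = n->next;
		n->prev->next = n->next;
		n->next->prev = n->prev;
	}
	n->next = n->prev = NULL;
}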
1958 1957
1959 1958 int
1960 1959 segmap_release(struct seg *seg, caddr_t addr, uint_t flags)
1961 1960 {
1962 1961 struct smap *smp;
1963 1962 int error;
1964 1963 int bflags = 0;
1965 1964 struct vnode *vp;
1966 1965 u_offset_t offset;
1967 1966 kmutex_t *smtx;
1968 1967 int is_kpm = 0;
1969 1968 page_t *pp;
1970 1969
1971 1970 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1972 1971
1973 1972 if (((uintptr_t)addr & MAXBOFFSET) != 0) {
1974 1973 panic("segmap_release: addr %p not "
1975 1974 "MAXBSIZE aligned", (void *)addr);
1976 1975 /*NOTREACHED*/
1977 1976 }
1978 1977
1979 1978 if ((smp = get_smap_kpm(addr, &pp)) == NULL) {
1980 1979 panic("segmap_release: smap not found "
1981 1980 "for addr %p", (void *)addr);
1982 1981 /*NOTREACHED*/
1983 1982 }
1984 1983
1985 1984 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
1986 1985 "segmap_relmap:seg %p addr %p smp %p",
1987 1986 seg, addr, smp);
1988 1987
1989 1988 smtx = SMAPMTX(smp);
1990 1989
1991 1990 /*
1992 1991 * For compatibility reasons segmap_pagecreate_kpm sets this
1993 1992 * flag to allow a following segmap_pagecreate to return
1994 1993 		 * this as the "newpage" flag. When segmap_pagecreate is not
1995 1994 		 * called at all, we clear it now.
1996 1995 */
1997 1996 smp->sm_flags &= ~SM_KPM_NEWPAGE;
1998 1997 is_kpm = 1;
1999 1998 if (smp->sm_flags & SM_WRITE_DATA) {
2000 1999 hat_setrefmod(pp);
2001 2000 } else if (smp->sm_flags & SM_READ_DATA) {
2002 2001 hat_setref(pp);
2003 2002 }
2004 2003 } else {
2005 2004 if (addr < seg->s_base || addr >= seg->s_base + seg->s_size ||
2006 2005 ((uintptr_t)addr & MAXBOFFSET) != 0) {
2007 2006 panic("segmap_release: bad addr %p", (void *)addr);
2008 2007 /*NOTREACHED*/
2009 2008 }
2010 2009 smp = GET_SMAP(seg, addr);
2011 2010
2012 2011 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
2013 2012 "segmap_relmap:seg %p addr %p smp %p",
2014 2013 seg, addr, smp);
2015 2014
2016 2015 smtx = SMAPMTX(smp);
2017 2016 mutex_enter(smtx);
2018 2017 smp->sm_flags |= SM_NOTKPM_RELEASED;
2019 2018 }
2020 2019
2021 2020 ASSERT(smp->sm_refcnt > 0);
2022 2021
2023 2022 /*
2024 2023 * Need to call VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2025 2024 * are set.
2026 2025 */
2027 2026 if ((flags & ~SM_DONTNEED) != 0) {
2028 2027 if (flags & SM_WRITE)
2029 2028 segmapcnt.smp_rel_write.value.ul++;
2030 2029 if (flags & SM_ASYNC) {
2031 2030 bflags |= B_ASYNC;
2032 2031 segmapcnt.smp_rel_async.value.ul++;
2033 2032 }
2034 2033 if (flags & SM_INVAL) {
2035 2034 bflags |= B_INVAL;
2036 2035 segmapcnt.smp_rel_abort.value.ul++;
2037 2036 }
2038 2037 if (flags & SM_DESTROY) {
2039 2038 bflags |= (B_INVAL|B_TRUNC);
2040 2039 segmapcnt.smp_rel_abort.value.ul++;
2041 2040 }
2042 2041 if (smp->sm_refcnt == 1) {
2043 2042 /*
2044 2043 * We only bother doing the FREE and DONTNEED flags
2045 2044 * if no one else is still referencing this mapping.
2046 2045 */
2047 2046 if (flags & SM_FREE) {
2048 2047 bflags |= B_FREE;
2049 2048 segmapcnt.smp_rel_free.value.ul++;
2050 2049 }
2051 2050 if (flags & SM_DONTNEED) {
2052 2051 bflags |= B_DONTNEED;
2053 2052 segmapcnt.smp_rel_dontneed.value.ul++;
2054 2053 }
2055 2054 }
2056 2055 } else {
2057 2056 smd_cpu[CPU->cpu_seqid].scpu.scpu_release++;
2058 2057 }
2059 2058
2060 2059 vp = smp->sm_vp;
2061 2060 offset = smp->sm_off;
2062 2061
2063 2062 if (--smp->sm_refcnt == 0) {
2064 2063
2065 2064 smp->sm_flags &= ~(SM_WRITE_DATA | SM_READ_DATA);
2066 2065
2067 2066 if (flags & (SM_INVAL|SM_DESTROY)) {
2068 2067 segmap_hashout(smp); /* remove map info */
2069 2068 if (is_kpm) {
2070 2069 hat_kpm_mapout(pp, GET_KPME(smp), addr);
2071 2070 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
2072 2071 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2073 2072 hat_unload(kas.a_hat, segkmap->s_base +
2074 2073 ((smp - smd_smap) * MAXBSIZE),
2075 2074 MAXBSIZE, HAT_UNLOAD);
2076 2075 }
2077 2076
2078 2077 } else {
2079 2078 if (segmap_kpm)
2080 2079 segkpm_mapout_validkpme(GET_KPME(smp));
2081 2080
2082 2081 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2083 2082 hat_unload(kas.a_hat, addr, MAXBSIZE,
2084 2083 HAT_UNLOAD);
2085 2084 }
2086 2085 }
2087 2086 segmap_smapadd(smp); /* add to free list */
2088 2087 }
2089 2088
2090 2089 mutex_exit(smtx);
2091 2090
2092 2091 if (is_kpm)
2093 2092 page_unlock(pp);
2094 2093 /*
2095 2094 * Now invoke VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2096 2095 * are set.
2097 2096 */
2098 2097 if ((flags & ~SM_DONTNEED) != 0) {
2099 2098 error = VOP_PUTPAGE(vp, offset, MAXBSIZE,
2100 2099 bflags, CRED(), NULL);
2101 2100 } else {
2102 2101 error = 0;
2103 2102 }
2104 2103
2105 2104 return (error);
2106 2105 }
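/*
 * Editor's note: a minimal sketch of how a file system typically consumes
 * the pair of interfaces above (segmap_getmapflt() and segmap_release()).
 * The function and variable names here are illustrative, not taken from a
 * real consumer; looping over a larger request and fine-grained error
 * handling are elided.
 */
static int
rw_through_segmap(vnode_t *vp, u_offset_t off, size_t n, uio_t *uiop,
    enum uio_rw dir)
{
	caddr_t base;
	size_t mapon = off & MAXBOFFSET;	/* offset within the window */
	size_t cnt = MIN(n, MAXBSIZE - mapon);
	int error;

	/* Map the MAXBSIZE window covering (vp, off), prefaulting it. */
	base = segmap_getmapflt(segkmap, vp, off & (u_offset_t)MAXBMASK,
	    cnt, 1, (dir == UIO_READ) ? S_READ : S_WRITE);

	error = uiomove(base + mapon, cnt, dir, uiop);

	if (dir == UIO_READ) {
		/* Done with the data; hint that the slot may be recycled. */
		(void) segmap_release(segkmap, base, SM_DONTNEED);
	} else if (error != 0) {
		/* Toss the possibly half-written block. */
		(void) segmap_release(segkmap, base, SM_DESTROY);
	} else {
		/* Kick off an asynchronous write-back of the window. */
		error = segmap_release(segkmap, base, SM_WRITE | SM_ASYNC);
	}
	return (error);
}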
2107 2106
2108 2107 /*
2109 2108 * Dump the pages belonging to this segmap segment.
2110 2109 */
2111 2110 static void
2112 2111 segmap_dump(struct seg *seg)
2113 2112 {
2114 2113 struct segmap_data *smd;
2115 2114 struct smap *smp, *smp_end;
2116 2115 page_t *pp;
2117 2116 pfn_t pfn;
2118 2117 u_offset_t off;
2119 2118 caddr_t addr;
2120 2119
2121 2120 smd = (struct segmap_data *)seg->s_data;
2122 2121 addr = seg->s_base;
2123 2122 for (smp = smd->smd_sm, smp_end = smp + smd->smd_npages;
2124 2123 smp < smp_end; smp++) {
2125 2124
2126 2125 if (smp->sm_refcnt) {
2127 2126 for (off = 0; off < MAXBSIZE; off += PAGESIZE) {
2128 2127 int we_own_it = 0;
2129 2128
2130 2129 				/*
2131 2130 				 * page_lookup_nowait returns NULL when
2132 2131 				 * the page does not exist or is
2133 2132 				 * exclusively locked, so fall back to
2134 2133 				 * page_exists to tell those cases apart.
2135 2134 				 */
2136 2135 if ((pp = page_lookup_nowait(smp->sm_vp,
2137 2136 smp->sm_off + off, SE_SHARED)))
2138 2137 we_own_it = 1;
2139 2138 else
2140 2139 pp = page_exists(smp->sm_vp,
2141 2140 smp->sm_off + off);
2142 2141
2143 2142 if (pp) {
2144 2143 pfn = page_pptonum(pp);
2145 2144 dump_addpage(seg->s_as,
2146 2145 addr + off, pfn);
2147 2146 if (we_own_it)
2148 2147 page_unlock(pp);
2149 2148 }
2150 2149 dump_timeleft = dump_timeout;
2151 2150 }
2152 2151 }
2153 2152 addr += MAXBSIZE;
2154 2153 }
2155 2154 }
2156 2155
2157 2156 /*ARGSUSED*/
2158 2157 static int
2159 2158 segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
2160 2159 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2161 2160 {
2162 2161 return (ENOTSUP);
2163 2162 }
2164 2163
2165 2164 static int
2166 2165 segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2167 2166 {
2168 2167 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
2169 2168
2170 2169 memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
2171 2170 memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
2172 2171 return (0);
2173 2172 }
2174 2173
2175 2174 /*ARGSUSED*/
2176 2175 static lgrp_mem_policy_info_t *
2177 2176 segmap_getpolicy(struct seg *seg, caddr_t addr)
2178 2177 {
2179 2178 return (NULL);
2180 2179 }
2181 2180
2182 2181 /*ARGSUSED*/
2183 2182 static int
2184 2183 segmap_capable(struct seg *seg, segcapability_t capability)
2185 2184 {
2186 2185 return (0);
2187 2186 }
2188 2187
2189 2188
2190 2189 #ifdef SEGKPM_SUPPORT
2191 2190
2192 2191 /*
2193 2192 * segkpm support routines
2194 2193 */
2195 2194
2196 2195 static caddr_t
2197 2196 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2198 2197 struct smap *smp, enum seg_rw rw)
2199 2198 {
2200 2199 caddr_t base;
2201 2200 page_t *pp;
2202 2201 int newpage = 0;
2203 2202 struct kpme *kpme;
2204 2203
2205 2204 ASSERT(smp->sm_refcnt > 0);
2206 2205
2207 2206 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
2208 2207 kmutex_t *smtx;
2209 2208
2210 2209 base = segkpm_create_va(off);
2211 2210
2212 2211 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT,
2213 2212 seg, base)) == NULL) {
2214 2213 panic("segmap_pagecreate_kpm: "
2215 2214 "page_create failed");
2216 2215 /*NOTREACHED*/
2217 2216 }
2218 2217
2219 2218 newpage = 1;
2220 2219 page_io_unlock(pp);
2221 2220 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
2222 2221
2223 2222 /*
2224 2223 * Mark this here until the following segmap_pagecreate
2225 2224 * or segmap_release.
2226 2225 */
2227 2226 smtx = SMAPMTX(smp);
2228 2227 mutex_enter(smtx);
2229 2228 smp->sm_flags |= SM_KPM_NEWPAGE;
2230 2229 mutex_exit(smtx);
2231 2230 }
2232 2231
2233 2232 kpme = GET_KPME(smp);
2234 2233 if (!newpage && kpme->kpe_page == pp)
2235 2234 base = hat_kpm_page2va(pp, 0);
2236 2235 else
2237 2236 base = hat_kpm_mapin(pp, kpme);
2238 2237
2239 2238 /*
2240 2239 * FS code may decide not to call segmap_pagecreate and we
2241 2240 * don't invoke segmap_fault via TLB miss, so we have to set
2242 2241 * ref and mod bits in advance.
2243 2242 */
2244 2243 if (rw == S_WRITE) {
2245 2244 hat_setrefmod(pp);
2246 2245 } else {
2247 2246 ASSERT(rw == S_READ);
2248 2247 hat_setref(pp);
2249 2248 }
2250 2249
2251 2250 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
2252 2251
2253 2252 return (base);
2254 2253 }
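/*
 * Editor's note: segmap_pagecreate_kpm() is reached from
 * segmap_getmapflt() when the caller passes SM_PAGECREATE as the
 * forcefault argument (see the check earlier in this file), typically
 * because the caller intends to overwrite the whole block, so the
 * existing data need not be read in via VOP_GETPAGE first.
 */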
2255 2254
2256 2255 /*
2257 2256 * Find the smap structure corresponding to the
2258 2257 * KPM addr and return it locked.
2259 2258 */
2260 2259 struct smap *
2261 2260 get_smap_kpm(caddr_t addr, page_t **ppp)
2262 2261 {
2263 2262 struct smap *smp;
2264 2263 struct vnode *vp;
2265 2264 u_offset_t offset;
2266 2265 caddr_t baseaddr = (caddr_t)((uintptr_t)addr & MAXBMASK);
2267 2266 int hashid;
2268 2267 kmutex_t *hashmtx;
2269 2268 page_t *pp;
2270 2269 union segmap_cpu *scpu;
2271 2270
2272 2271 pp = hat_kpm_vaddr2page(baseaddr);
2273 2272
2274 2273 ASSERT(pp && !PP_ISFREE(pp));
2275 2274 ASSERT(PAGE_LOCKED(pp));
2276 2275 ASSERT(((uintptr_t)pp->p_offset & MAXBOFFSET) == 0);
2277 2276
2278 2277 vp = pp->p_vnode;
2279 2278 offset = pp->p_offset;
2280 2279 ASSERT(vp != NULL);
2281 2280
2282 2281 /*
2283 2282 * Assume the last smap used on this cpu is the one needed.
2284 2283 */
2285 2284 scpu = smd_cpu+CPU->cpu_seqid;
2286 2285 smp = scpu->scpu.scpu_last_smap;
2287 2286 mutex_enter(&smp->sm_mtx);
2288 2287 if (smp->sm_vp == vp && smp->sm_off == offset) {
2289 2288 ASSERT(smp->sm_refcnt > 0);
2290 2289 } else {
2291 2290 /*
2292 2291 * Assumption wrong, find the smap on the hash chain.
2293 2292 */
2294 2293 mutex_exit(&smp->sm_mtx);
2295 2294 SMAP_HASHFUNC(vp, offset, hashid); /* macro assigns hashid */
2296 2295 hashmtx = SHASHMTX(hashid);
2297 2296
2298 2297 mutex_enter(hashmtx);
2299 2298 smp = smd_hash[hashid].sh_hash_list;
2300 2299 for (; smp != NULL; smp = smp->sm_hash) {
2301 2300 if (smp->sm_vp == vp && smp->sm_off == offset)
2302 2301 break;
2303 2302 }
2304 2303 mutex_exit(hashmtx);
2305 2304 if (smp) {
2306 2305 mutex_enter(&smp->sm_mtx);
2307 2306 ASSERT(smp->sm_vp == vp && smp->sm_off == offset);
2308 2307 }
2309 2308 }
2310 2309
2311 2310 if (ppp)
2312 2311 *ppp = smp ? pp : NULL;
2313 2312
2314 2313 return (smp);
2315 2314 }
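/*
 * Editor's note: the lookup strategy above, distilled -- optimistically
 * lock the per-CPU "last used" hint and verify it, falling back to a
 * hash-chain walk on a miss.  Types and names below are hypothetical,
 * and the hash-chain locking of the real code is elided for brevity.
 */
struct centry {
	kmutex_t ce_mtx;
	void *ce_key;
	struct centry *ce_hash;
};

static struct centry *
hint_lookup(struct centry *hint, struct centry *chain, void *key)
{
	struct centry *ce = hint;

	mutex_enter(&ce->ce_mtx);	/* guess: last hit on this CPU */
	if (ce->ce_key == key)
		return (ce);		/* hit; returned locked */
	mutex_exit(&ce->ce_mtx);

	for (ce = chain; ce != NULL; ce = ce->ce_hash) {
		if (ce->ce_key == key)
			break;
	}
	if (ce != NULL)
		mutex_enter(&ce->ce_mtx);
	return (ce);
}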
2316 2315
2317 2316 #else /* SEGKPM_SUPPORT */
2318 2317
2319 2318 /* segkpm stubs */
2320 2319
2321 2320 /*ARGSUSED*/
2322 2321 static caddr_t
2323 2322 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2324 2323 struct smap *smp, enum seg_rw rw)
2325 2324 {
2326 2325 return (NULL);
2327 2326 }
2328 2327
2329 2328 /*ARGSUSED*/
2330 2329 struct smap *
2331 2330 get_smap_kpm(caddr_t addr, page_t **ppp)
2332 2331 {
2333 2332 return (NULL);
2334 2333 }
2335 2334
2336 2335 #endif /* SEGKPM_SUPPORT */