no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Leaving the function pointer NULL accomplishes the same thing in most
cases; in the remaining cases, a NULL function pointer results in the
proper error code being returned instead.
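
For illustration only, a minimal sketch of what dispatch through a NULL
seg_ops entry can look like. The wrapper names (segop_setprot, segop_dup)
and the specific error code are assumptions for this example, not the
actual illumos dispatch code; the point is that a NULL entry can either
panic, matching the old bad-op behavior, or return an error, depending on
the operation.

	/*
	 * Hypothetical sketch, not the actual illumos wrappers.  A NULL
	 * entry in struct seg_ops is handled by the generic dispatch
	 * layer: either panic (same effect as the old segmap_badop())
	 * or return an error code the caller already expects.
	 */
	static int
	segop_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
	{
		/* No driver routine: report the operation as unsupported. */
		if (seg->s_ops->setprot == NULL)
			return (ENOTSUP);

		return (seg->s_ops->setprot(seg, addr, len, prot));
	}

	static int
	segop_dup(struct seg *seg, struct seg *newseg)
	{
		/* An op that must never be reached on this segment type. */
		if (seg->s_ops->dup == NULL)
			panic("segop_dup: no dup routine for seg %p",
			    (void *)seg);

		return (seg->s_ops->dup(seg, newseg));
	}
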
--- old/usr/src/uts/common/vm/seg_map.c
+++ new/usr/src/uts/common/vm/seg_map.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29 /*
30 30 * Portions of this source code were derived from Berkeley 4.3 BSD
31 31 * under license from the Regents of the University of California.
32 32 */
33 33
34 34 /*
35 35 * VM - generic vnode mapping segment.
36 36 *
37 37 * The segmap driver is used only by the kernel to get faster (than seg_vn)
38 38 * mappings [lower routine overhead; more persistent cache] to random
39 39 * vnode/offsets. Note than the kernel may (and does) use seg_vn as well.
40 40 */
41 41
42 42 #include <sys/types.h>
43 43 #include <sys/t_lock.h>
44 44 #include <sys/param.h>
45 45 #include <sys/sysmacros.h>
46 46 #include <sys/buf.h>
47 47 #include <sys/systm.h>
48 48 #include <sys/vnode.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/errno.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/vtrace.h>
54 54 #include <sys/cmn_err.h>
55 55 #include <sys/debug.h>
56 56 #include <sys/thread.h>
57 57 #include <sys/dumphdr.h>
58 58 #include <sys/bitmap.h>
59 59 #include <sys/lgrp.h>
60 60
61 61 #include <vm/seg_kmem.h>
62 62 #include <vm/hat.h>
63 63 #include <vm/as.h>
64 64 #include <vm/seg.h>
65 65 #include <vm/seg_kpm.h>
66 66 #include <vm/seg_map.h>
67 67 #include <vm/page.h>
68 68 #include <vm/pvn.h>
69 69 #include <vm/rm.h>
70 70
71 71 /*
72 72 * Private seg op routines.
73 73 */
74 74 static void segmap_free(struct seg *seg);
75 75 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
76 76 size_t len, enum fault_type type, enum seg_rw rw);
77 77 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
78 78 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
79 79 uint_t prot);
80 80 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
81 81 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
82 82 uint_t *protv);
83 83 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr);
84 84 static int segmap_gettype(struct seg *seg, caddr_t addr);
85 85 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
86 86 static void segmap_dump(struct seg *seg);
87 87 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
88 88 struct page ***ppp, enum lock_type type,
89 89 enum seg_rw rw);
90 -static void segmap_badop(void);
91 90 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
92 91 static lgrp_mem_policy_info_t *segmap_getpolicy(struct seg *seg,
93 92 caddr_t addr);
94 93 static int segmap_capable(struct seg *seg, segcapability_t capability);
95 94
96 95 /* segkpm support */
97 96 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
98 97 struct smap *, enum seg_rw);
99 98 struct smap *get_smap_kpm(caddr_t, page_t **);
100 99
101 -#define SEGMAP_BADOP(t) (t(*)())segmap_badop
102 -
103 100 static struct seg_ops segmap_ops = {
104 - .dup = SEGMAP_BADOP(int),
105 - .unmap = SEGMAP_BADOP(int),
106 101 .free = segmap_free,
107 102 .fault = segmap_fault,
108 103 .faulta = segmap_faulta,
109 - .setprot = SEGMAP_BADOP(int),
110 104 .checkprot = segmap_checkprot,
111 105 .kluster = segmap_kluster,
112 - .sync = SEGMAP_BADOP(int),
113 - .incore = SEGMAP_BADOP(size_t),
114 - .lockop = SEGMAP_BADOP(int),
115 106 .getprot = segmap_getprot,
116 107 .getoffset = segmap_getoffset,
117 108 .gettype = segmap_gettype,
118 109 .getvp = segmap_getvp,
119 - .advise = SEGMAP_BADOP(int),
120 110 .dump = segmap_dump,
121 111 .pagelock = segmap_pagelock,
122 - .setpagesize = SEGMAP_BADOP(int),
123 112 .getmemid = segmap_getmemid,
124 113 .getpolicy = segmap_getpolicy,
125 114 .capable = segmap_capable,
126 115 .inherit = seg_inherit_notsup,
127 116 };
128 117
129 118 /*
130 119 * Private segmap routines.
131 120 */
132 121 static void segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
133 122 size_t len, enum seg_rw rw, struct smap *smp);
134 123 static void segmap_smapadd(struct smap *smp);
135 124 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
136 125 u_offset_t off, int hashid);
137 126 static void segmap_hashout(struct smap *smp);
138 127
139 128
140 129 /*
141 130 * Statistics for segmap operations.
142 131 *
143 132 * No explicit locking to protect these stats.
144 133 */
145 134 struct segmapcnt segmapcnt = {
146 135 { "fault", KSTAT_DATA_ULONG },
147 136 { "faulta", KSTAT_DATA_ULONG },
148 137 { "getmap", KSTAT_DATA_ULONG },
149 138 { "get_use", KSTAT_DATA_ULONG },
150 139 { "get_reclaim", KSTAT_DATA_ULONG },
151 140 { "get_reuse", KSTAT_DATA_ULONG },
152 141 { "get_unused", KSTAT_DATA_ULONG },
153 142 { "get_nofree", KSTAT_DATA_ULONG },
154 143 { "rel_async", KSTAT_DATA_ULONG },
155 144 { "rel_write", KSTAT_DATA_ULONG },
156 145 { "rel_free", KSTAT_DATA_ULONG },
157 146 { "rel_abort", KSTAT_DATA_ULONG },
158 147 { "rel_dontneed", KSTAT_DATA_ULONG },
159 148 { "release", KSTAT_DATA_ULONG },
160 149 { "pagecreate", KSTAT_DATA_ULONG },
161 150 { "free_notfree", KSTAT_DATA_ULONG },
162 151 { "free_dirty", KSTAT_DATA_ULONG },
163 152 { "free", KSTAT_DATA_ULONG },
164 153 { "stolen", KSTAT_DATA_ULONG },
165 154 { "get_nomtx", KSTAT_DATA_ULONG }
166 155 };
167 156
168 157 kstat_named_t *segmapcnt_ptr = (kstat_named_t *)&segmapcnt;
169 158 uint_t segmapcnt_ndata = sizeof (segmapcnt) / sizeof (kstat_named_t);
170 159
171 160 /*
172 161 * Return number of map pages in segment.
173 162 */
174 163 #define MAP_PAGES(seg) ((seg)->s_size >> MAXBSHIFT)
175 164
176 165 /*
177 166 * Translate addr into smap number within segment.
178 167 */
179 168 #define MAP_PAGE(seg, addr) (((addr) - (seg)->s_base) >> MAXBSHIFT)
180 169
181 170 /*
182 171 * Translate addr in seg into struct smap pointer.
183 172 */
184 173 #define GET_SMAP(seg, addr) \
185 174 &(((struct segmap_data *)((seg)->s_data))->smd_sm[MAP_PAGE(seg, addr)])
186 175
187 176 /*
188 177 * Bit in map (16 bit bitmap).
189 178 */
190 179 #define SMAP_BIT_MASK(bitindex) (1 << ((bitindex) & 0xf))
191 180
192 181 static int smd_colormsk = 0;
193 182 static int smd_ncolor = 0;
194 183 static int smd_nfree = 0;
195 184 static int smd_freemsk = 0;
196 185 #ifdef DEBUG
197 186 static int *colors_used;
198 187 #endif
199 188 static struct smap *smd_smap;
200 189 static struct smaphash *smd_hash;
201 190 #ifdef SEGMAP_HASHSTATS
202 191 static unsigned int *smd_hash_len;
203 192 #endif
204 193 static struct smfree *smd_free;
205 194 static ulong_t smd_hashmsk = 0;
206 195
207 196 #define SEGMAP_MAXCOLOR 2
208 197 #define SEGMAP_CACHE_PAD 64
209 198
210 199 union segmap_cpu {
211 200 struct {
212 201 uint32_t scpu_free_ndx[SEGMAP_MAXCOLOR];
213 202 struct smap *scpu_last_smap;
214 203 ulong_t scpu_getmap;
215 204 ulong_t scpu_release;
216 205 ulong_t scpu_get_reclaim;
217 206 ulong_t scpu_fault;
218 207 ulong_t scpu_pagecreate;
219 208 ulong_t scpu_get_reuse;
220 209 } scpu;
221 210 char scpu_pad[SEGMAP_CACHE_PAD];
222 211 };
223 212 static union segmap_cpu *smd_cpu;
224 213
225 214 /*
226 215 * There are three locks in seg_map:
227 216 * - per freelist mutexes
228 217 * - per hashchain mutexes
229 218 * - per smap mutexes
230 219 *
231 220 * The lock ordering is to get the smap mutex to lock down the slot
232 221 * first then the hash lock (for hash in/out (vp, off) list) or the
233 222 * freelist lock to put the slot back on the free list.
234 223 *
235 224 * The hash search is done by only holding the hashchain lock, when a wanted
236 225 * slot is found, we drop the hashchain lock then lock the slot so there
237 226 * is no overlapping of hashchain and smap locks. After the slot is
238 227 * locked, we verify again if the slot is still what we are looking
239 228 * for.
240 229 *
241 230 * Allocation of a free slot is done by holding the freelist lock,
242 231 * then locking the smap slot at the head of the freelist. This is
243 232 * in reversed lock order so mutex_tryenter() is used.
244 233 *
245 234 * The smap lock protects all fields in smap structure except for
246 235 * the link fields for hash/free lists which are protected by
247 236 * hashchain and freelist locks.
248 237 */
249 238
250 239 #define SHASHMTX(hashid) (&smd_hash[hashid].sh_mtx)
251 240
252 241 #define SMP2SMF(smp) (&smd_free[(smp - smd_smap) & smd_freemsk])
253 242 #define SMP2SMF_NDX(smp) (ushort_t)((smp - smd_smap) & smd_freemsk)
254 243
255 244 #define SMAPMTX(smp) (&smp->sm_mtx)
256 245
257 246 #define SMAP_HASHFUNC(vp, off, hashid) \
258 247 { \
259 248 hashid = ((((uintptr_t)(vp) >> 6) + ((uintptr_t)(vp) >> 3) + \
260 249 ((off) >> MAXBSHIFT)) & smd_hashmsk); \
261 250 }
262 251
263 252 /*
264 253 * The most frequently updated kstat counters are kept in the
265 254 * per cpu array to avoid hot cache blocks. The update function
266 255 * sums the cpu local counters to update the global counters.
267 256 */
268 257
269 258 /* ARGSUSED */
270 259 int
271 260 segmap_kstat_update(kstat_t *ksp, int rw)
272 261 {
273 262 int i;
274 263 ulong_t getmap, release, get_reclaim;
275 264 ulong_t fault, pagecreate, get_reuse;
276 265
277 266 if (rw == KSTAT_WRITE)
278 267 return (EACCES);
279 268 getmap = release = get_reclaim = (ulong_t)0;
280 269 fault = pagecreate = get_reuse = (ulong_t)0;
281 270 for (i = 0; i < max_ncpus; i++) {
282 271 getmap += smd_cpu[i].scpu.scpu_getmap;
283 272 release += smd_cpu[i].scpu.scpu_release;
284 273 get_reclaim += smd_cpu[i].scpu.scpu_get_reclaim;
285 274 fault += smd_cpu[i].scpu.scpu_fault;
286 275 pagecreate += smd_cpu[i].scpu.scpu_pagecreate;
287 276 get_reuse += smd_cpu[i].scpu.scpu_get_reuse;
288 277 }
289 278 segmapcnt.smp_getmap.value.ul = getmap;
290 279 segmapcnt.smp_release.value.ul = release;
291 280 segmapcnt.smp_get_reclaim.value.ul = get_reclaim;
292 281 segmapcnt.smp_fault.value.ul = fault;
293 282 segmapcnt.smp_pagecreate.value.ul = pagecreate;
294 283 segmapcnt.smp_get_reuse.value.ul = get_reuse;
295 284 return (0);
296 285 }
297 286
298 287 int
299 288 segmap_create(struct seg *seg, void *argsp)
300 289 {
301 290 struct segmap_data *smd;
302 291 struct smap *smp;
303 292 struct smfree *sm;
304 293 struct segmap_crargs *a = (struct segmap_crargs *)argsp;
305 294 struct smaphash *shashp;
306 295 union segmap_cpu *scpu;
307 296 long i, npages;
308 297 size_t hashsz;
309 298 uint_t nfreelist;
310 299 extern void prefetch_smap_w(void *);
311 300 extern int max_ncpus;
312 301
313 302 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
314 303
315 304 if (((uintptr_t)seg->s_base | seg->s_size) & MAXBOFFSET) {
316 305 panic("segkmap not MAXBSIZE aligned");
317 306 /*NOTREACHED*/
318 307 }
319 308
320 309 smd = kmem_zalloc(sizeof (struct segmap_data), KM_SLEEP);
321 310
322 311 seg->s_data = (void *)smd;
323 312 seg->s_ops = &segmap_ops;
324 313 smd->smd_prot = a->prot;
325 314
326 315 /*
327 316 * Scale the number of smap freelists to be
328 317 * proportional to max_ncpus * number of virtual colors.
329 318 * The caller can over-ride this scaling by providing
330 319 * a non-zero a->nfreelist argument.
331 320 */
332 321 nfreelist = a->nfreelist;
333 322 if (nfreelist == 0)
334 323 nfreelist = max_ncpus;
335 324 else if (nfreelist < 0 || nfreelist > 4 * max_ncpus) {
336 325 cmn_err(CE_WARN, "segmap_create: nfreelist out of range "
337 326 "%d, using %d", nfreelist, max_ncpus);
338 327 nfreelist = max_ncpus;
339 328 }
340 329 if (!ISP2(nfreelist)) {
341 330 /* round up nfreelist to the next power of two. */
342 331 nfreelist = 1 << (highbit(nfreelist));
343 332 }
344 333
345 334 /*
346 335 * Get the number of virtual colors - must be a power of 2.
347 336 */
348 337 if (a->shmsize)
349 338 smd_ncolor = a->shmsize >> MAXBSHIFT;
350 339 else
351 340 smd_ncolor = 1;
352 341 ASSERT((smd_ncolor & (smd_ncolor - 1)) == 0);
353 342 ASSERT(smd_ncolor <= SEGMAP_MAXCOLOR);
354 343 smd_colormsk = smd_ncolor - 1;
355 344 smd->smd_nfree = smd_nfree = smd_ncolor * nfreelist;
356 345 smd_freemsk = smd_nfree - 1;
357 346
358 347 /*
359 348 * Allocate and initialize the freelist headers.
360 349 * Note that sm_freeq[1] starts out as the release queue. This
361 350 * is known when the smap structures are initialized below.
362 351 */
363 352 smd_free = smd->smd_free =
364 353 kmem_zalloc(smd_nfree * sizeof (struct smfree), KM_SLEEP);
365 354 for (i = 0; i < smd_nfree; i++) {
366 355 sm = &smd->smd_free[i];
367 356 mutex_init(&sm->sm_freeq[0].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
368 357 mutex_init(&sm->sm_freeq[1].smq_mtx, NULL, MUTEX_DEFAULT, NULL);
369 358 sm->sm_allocq = &sm->sm_freeq[0];
370 359 sm->sm_releq = &sm->sm_freeq[1];
371 360 }
372 361
373 362 /*
374 363 * Allocate and initialize the smap hash chain headers.
375 364 * Compute hash size rounding down to the next power of two.
376 365 */
377 366 npages = MAP_PAGES(seg);
378 367 smd->smd_npages = npages;
379 368 hashsz = npages / SMAP_HASHAVELEN;
380 369 hashsz = 1 << (highbit(hashsz)-1);
381 370 smd_hashmsk = hashsz - 1;
382 371 smd_hash = smd->smd_hash =
383 372 kmem_alloc(hashsz * sizeof (struct smaphash), KM_SLEEP);
384 373 #ifdef SEGMAP_HASHSTATS
385 374 smd_hash_len =
386 375 kmem_zalloc(hashsz * sizeof (unsigned int), KM_SLEEP);
387 376 #endif
388 377 for (i = 0, shashp = smd_hash; i < hashsz; i++, shashp++) {
389 378 shashp->sh_hash_list = NULL;
390 379 mutex_init(&shashp->sh_mtx, NULL, MUTEX_DEFAULT, NULL);
391 380 }
392 381
393 382 /*
394 383 * Allocate and initialize the smap structures.
395 384 * Link all slots onto the appropriate freelist.
396 385 * The smap array is large enough to affect boot time
397 386 * on large systems, so use memory prefetching and only
398 387 * go through the array 1 time. Inline a optimized version
399 388 * of segmap_smapadd to add structures to freelists with
400 389 * knowledge that no locks are needed here.
401 390 */
402 391 smd_smap = smd->smd_sm =
403 392 kmem_alloc(sizeof (struct smap) * npages, KM_SLEEP);
404 393
405 394 for (smp = &smd->smd_sm[MAP_PAGES(seg) - 1];
406 395 smp >= smd->smd_sm; smp--) {
407 396 struct smap *smpfreelist;
408 397 struct sm_freeq *releq;
409 398
410 399 prefetch_smap_w((char *)smp);
411 400
412 401 smp->sm_vp = NULL;
413 402 smp->sm_hash = NULL;
414 403 smp->sm_off = 0;
415 404 smp->sm_bitmap = 0;
416 405 smp->sm_refcnt = 0;
417 406 mutex_init(&smp->sm_mtx, NULL, MUTEX_DEFAULT, NULL);
418 407 smp->sm_free_ndx = SMP2SMF_NDX(smp);
419 408
420 409 sm = SMP2SMF(smp);
421 410 releq = sm->sm_releq;
422 411
423 412 smpfreelist = releq->smq_free;
424 413 if (smpfreelist == 0) {
425 414 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
426 415 } else {
427 416 smp->sm_next = smpfreelist;
428 417 smp->sm_prev = smpfreelist->sm_prev;
429 418 smpfreelist->sm_prev = smp;
430 419 smp->sm_prev->sm_next = smp;
431 420 releq->smq_free = smp->sm_next;
432 421 }
433 422
434 423 /*
435 424 * sm_flag = 0 (no SM_QNDX_ZERO) implies smap on sm_freeq[1]
436 425 */
437 426 smp->sm_flags = 0;
438 427
439 428 #ifdef SEGKPM_SUPPORT
440 429 /*
441 430 * Due to the fragile prefetch loop no
442 431 * separate function is used here.
443 432 */
444 433 smp->sm_kpme_next = NULL;
445 434 smp->sm_kpme_prev = NULL;
446 435 smp->sm_kpme_page = NULL;
447 436 #endif
448 437 }
449 438
450 439 /*
451 440 * Allocate the per color indices that distribute allocation
452 441 * requests over the free lists. Each cpu will have a private
453 442 * rotor index to spread the allocations even across the available
454 443 * smap freelists. Init the scpu_last_smap field to the first
455 444 * smap element so there is no need to check for NULL.
456 445 */
457 446 smd_cpu =
458 447 kmem_zalloc(sizeof (union segmap_cpu) * max_ncpus, KM_SLEEP);
459 448 for (i = 0, scpu = smd_cpu; i < max_ncpus; i++, scpu++) {
460 449 int j;
461 450 for (j = 0; j < smd_ncolor; j++)
462 451 scpu->scpu.scpu_free_ndx[j] = j;
463 452 scpu->scpu.scpu_last_smap = smd_smap;
464 453 }
465 454
466 455 vpm_init();
467 456
468 457 #ifdef DEBUG
469 458 /*
470 459 * Keep track of which colors are used more often.
471 460 */
472 461 colors_used = kmem_zalloc(smd_nfree * sizeof (int), KM_SLEEP);
473 462 #endif /* DEBUG */
474 463
475 464 return (0);
476 465 }
477 466
478 467 static void
479 468 segmap_free(seg)
480 469 struct seg *seg;
481 470 {
482 471 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
483 472 }
484 473
485 474 /*
486 475 * Do a F_SOFTUNLOCK call over the range requested.
487 476 * The range must have already been F_SOFTLOCK'ed.
488 477 */
489 478 static void
490 479 segmap_unlock(
491 480 struct hat *hat,
492 481 struct seg *seg,
493 482 caddr_t addr,
494 483 size_t len,
495 484 enum seg_rw rw,
496 485 struct smap *smp)
497 486 {
498 487 page_t *pp;
499 488 caddr_t adr;
500 489 u_offset_t off;
501 490 struct vnode *vp;
502 491 kmutex_t *smtx;
503 492
504 493 ASSERT(smp->sm_refcnt > 0);
505 494
506 495 #ifdef lint
507 496 seg = seg;
508 497 #endif
509 498
510 499 if (segmap_kpm && IS_KPM_ADDR(addr)) {
511 500
512 501 /*
513 502 * We're called only from segmap_fault and this was a
514 503 * NOP in case of a kpm based smap, so dangerous things
515 504 * must have happened in the meantime. Pages are prefaulted
516 505 * and locked in segmap_getmapflt and they will not be
517 506 * unlocked until segmap_release.
518 507 */
519 508 panic("segmap_unlock: called with kpm addr %p", (void *)addr);
520 509 /*NOTREACHED*/
521 510 }
522 511
523 512 vp = smp->sm_vp;
524 513 off = smp->sm_off + (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
525 514
526 515 hat_unlock(hat, addr, P2ROUNDUP(len, PAGESIZE));
527 516 for (adr = addr; adr < addr + len; adr += PAGESIZE, off += PAGESIZE) {
528 517 ushort_t bitmask;
529 518
530 519 /*
531 520 * Use page_find() instead of page_lookup() to
532 521 * find the page since we know that it has
533 522 * "shared" lock.
534 523 */
535 524 pp = page_find(vp, off);
536 525 if (pp == NULL) {
537 526 panic("segmap_unlock: page not found");
538 527 /*NOTREACHED*/
539 528 }
540 529
541 530 if (rw == S_WRITE) {
542 531 hat_setrefmod(pp);
543 532 } else if (rw != S_OTHER) {
544 533 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
545 534 "segmap_fault:pp %p vp %p offset %llx", pp, vp, off);
546 535 hat_setref(pp);
547 536 }
548 537
549 538 /*
550 539 * Clear bitmap, if the bit corresponding to "off" is set,
551 540 * since the page and translation are being unlocked.
552 541 */
553 542 bitmask = SMAP_BIT_MASK((off - smp->sm_off) >> PAGESHIFT);
554 543
555 544 /*
556 545 * Large Files: Following assertion is to verify
557 546 * the correctness of the cast to (int) above.
558 547 */
559 548 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
560 549 smtx = SMAPMTX(smp);
561 550 mutex_enter(smtx);
562 551 if (smp->sm_bitmap & bitmask) {
563 552 smp->sm_bitmap &= ~bitmask;
564 553 }
565 554 mutex_exit(smtx);
566 555
567 556 page_unlock(pp);
568 557 }
569 558 }
570 559
571 560 #define MAXPPB (MAXBSIZE/4096) /* assumes minimum page size of 4k */
572 561
573 562 /*
574 563 * This routine is called via a machine specific fault handling
575 564 * routine. It is also called by software routines wishing to
576 565 * lock or unlock a range of addresses.
577 566 *
578 567 * Note that this routine expects a page-aligned "addr".
579 568 */
580 569 faultcode_t
581 570 segmap_fault(
582 571 struct hat *hat,
583 572 struct seg *seg,
584 573 caddr_t addr,
585 574 size_t len,
586 575 enum fault_type type,
587 576 enum seg_rw rw)
588 577 {
589 578 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
590 579 struct smap *smp;
591 580 page_t *pp, **ppp;
592 581 struct vnode *vp;
593 582 u_offset_t off;
594 583 page_t *pl[MAXPPB + 1];
595 584 uint_t prot;
596 585 u_offset_t addroff;
597 586 caddr_t adr;
598 587 int err;
599 588 u_offset_t sm_off;
600 589 int hat_flag;
601 590
602 591 if (segmap_kpm && IS_KPM_ADDR(addr)) {
603 592 int newpage;
604 593 kmutex_t *smtx;
605 594
606 595 /*
607 596 * Pages are successfully prefaulted and locked in
608 597 * segmap_getmapflt and can't be unlocked until
609 598 * segmap_release. No hat mappings have to be locked
610 599 * and they also can't be unlocked as long as the
611 600 * caller owns an active kpm addr.
612 601 */
613 602 #ifndef DEBUG
614 603 if (type != F_SOFTUNLOCK)
615 604 return (0);
616 605 #endif
617 606
618 607 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
619 608 panic("segmap_fault: smap not found "
620 609 "for addr %p", (void *)addr);
621 610 /*NOTREACHED*/
622 611 }
623 612
624 613 smtx = SMAPMTX(smp);
625 614 #ifdef DEBUG
626 615 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
627 616 if (newpage) {
628 617 cmn_err(CE_WARN, "segmap_fault: newpage? smp %p",
629 618 (void *)smp);
630 619 }
631 620
632 621 if (type != F_SOFTUNLOCK) {
633 622 mutex_exit(smtx);
634 623 return (0);
635 624 }
636 625 #endif
637 626 mutex_exit(smtx);
638 627 vp = smp->sm_vp;
639 628 sm_off = smp->sm_off;
640 629
641 630 if (vp == NULL)
642 631 return (FC_MAKE_ERR(EIO));
643 632
644 633 ASSERT(smp->sm_refcnt > 0);
645 634
646 635 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
647 636 if (addroff + len > MAXBSIZE)
648 637 panic("segmap_fault: endaddr %p exceeds MAXBSIZE chunk",
649 638 (void *)(addr + len));
650 639
651 640 off = sm_off + addroff;
652 641
653 642 pp = page_find(vp, off);
654 643
655 644 if (pp == NULL)
656 645 panic("segmap_fault: softunlock page not found");
657 646
658 647 /*
659 648 * Set ref bit also here in case of S_OTHER to avoid the
660 649 * overhead of supporting other cases than F_SOFTUNLOCK
661 650 * with segkpm. We can do this because the underlying
662 651 * pages are locked anyway.
663 652 */
664 653 if (rw == S_WRITE) {
665 654 hat_setrefmod(pp);
666 655 } else {
667 656 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
668 657 "segmap_fault:pp %p vp %p offset %llx",
669 658 pp, vp, off);
670 659 hat_setref(pp);
671 660 }
672 661
673 662 return (0);
674 663 }
675 664
676 665 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
677 666 smp = GET_SMAP(seg, addr);
678 667 vp = smp->sm_vp;
679 668 sm_off = smp->sm_off;
680 669
681 670 if (vp == NULL)
682 671 return (FC_MAKE_ERR(EIO));
683 672
684 673 ASSERT(smp->sm_refcnt > 0);
685 674
686 675 addroff = (u_offset_t)((uintptr_t)addr & MAXBOFFSET);
687 676 if (addroff + len > MAXBSIZE) {
688 677 panic("segmap_fault: endaddr %p "
689 678 "exceeds MAXBSIZE chunk", (void *)(addr + len));
690 679 /*NOTREACHED*/
691 680 }
692 681 off = sm_off + addroff;
693 682
694 683 /*
695 684 * First handle the easy stuff
696 685 */
697 686 if (type == F_SOFTUNLOCK) {
698 687 segmap_unlock(hat, seg, addr, len, rw, smp);
699 688 return (0);
700 689 }
701 690
702 691 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
703 692 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
704 693 err = VOP_GETPAGE(vp, (offset_t)off, len, &prot, pl, MAXBSIZE,
705 694 seg, addr, rw, CRED(), NULL);
706 695
707 696 if (err)
708 697 return (FC_MAKE_ERR(err));
709 698
710 699 prot &= smd->smd_prot;
711 700
712 701 /*
713 702 * Handle all pages returned in the pl[] array.
714 703 * This loop is coded on the assumption that if
715 704 * there was no error from the VOP_GETPAGE routine,
716 705 * that the page list returned will contain all the
717 706 * needed pages for the vp from [off..off + len].
718 707 */
719 708 ppp = pl;
720 709 while ((pp = *ppp++) != NULL) {
721 710 u_offset_t poff;
722 711 ASSERT(pp->p_vnode == vp);
723 712 hat_flag = HAT_LOAD;
724 713
725 714 /*
726 715 * Verify that the pages returned are within the range
727 716 * of this segmap region. Note that it is theoretically
728 717 * possible for pages outside this range to be returned,
729 718 * but it is not very likely. If we cannot use the
730 719 * page here, just release it and go on to the next one.
731 720 */
732 721 if (pp->p_offset < sm_off ||
733 722 pp->p_offset >= sm_off + MAXBSIZE) {
734 723 (void) page_release(pp, 1);
735 724 continue;
736 725 }
737 726
738 727 ASSERT(hat == kas.a_hat);
739 728 poff = pp->p_offset;
740 729 adr = addr + (poff - off);
741 730 if (adr >= addr && adr < addr + len) {
742 731 hat_setref(pp);
743 732 TRACE_3(TR_FAC_VM, TR_SEGMAP_FAULT,
744 733 "segmap_fault:pp %p vp %p offset %llx",
745 734 pp, vp, poff);
746 735 if (type == F_SOFTLOCK)
747 736 hat_flag = HAT_LOAD_LOCK;
748 737 }
749 738
750 739 /*
751 740 * Deal with VMODSORT pages here. If we know this is a write
752 741 * do the setmod now and allow write protection.
753 742 * As long as it's modified or not S_OTHER, remove write
754 743 * protection. With S_OTHER it's up to the FS to deal with this.
755 744 */
756 745 if (IS_VMODSORT(vp)) {
757 746 if (rw == S_WRITE)
758 747 hat_setmod(pp);
759 748 else if (rw != S_OTHER && !hat_ismod(pp))
760 749 prot &= ~PROT_WRITE;
761 750 }
762 751
763 752 hat_memload(hat, adr, pp, prot, hat_flag);
764 753 if (hat_flag != HAT_LOAD_LOCK)
765 754 page_unlock(pp);
766 755 }
767 756 return (0);
768 757 }
769 758
770 759 /*
771 760 * This routine is used to start I/O on pages asynchronously.
772 761 */
773 762 static faultcode_t
774 763 segmap_faulta(struct seg *seg, caddr_t addr)
775 764 {
776 765 struct smap *smp;
777 766 struct vnode *vp;
778 767 u_offset_t off;
779 768 int err;
780 769
781 770 if (segmap_kpm && IS_KPM_ADDR(addr)) {
782 771 int newpage;
783 772 kmutex_t *smtx;
784 773
785 774 /*
786 775 * Pages are successfully prefaulted and locked in
787 776 * segmap_getmapflt and can't be unlocked until
788 777 * segmap_release. No hat mappings have to be locked
789 778 * and they also can't be unlocked as long as the
790 779 * caller owns an active kpm addr.
791 780 */
792 781 #ifdef DEBUG
793 782 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
794 783 panic("segmap_faulta: smap not found "
795 784 "for addr %p", (void *)addr);
796 785 /*NOTREACHED*/
797 786 }
798 787
799 788 smtx = SMAPMTX(smp);
800 789 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
801 790 mutex_exit(smtx);
802 791 if (newpage)
803 792 cmn_err(CE_WARN, "segmap_faulta: newpage? smp %p",
804 793 (void *)smp);
805 794 #endif
806 795 return (0);
807 796 }
808 797
809 798 segmapcnt.smp_faulta.value.ul++;
810 799 smp = GET_SMAP(seg, addr);
811 800
812 801 ASSERT(smp->sm_refcnt > 0);
813 802
814 803 vp = smp->sm_vp;
815 804 off = smp->sm_off;
816 805
817 806 if (vp == NULL) {
818 807 cmn_err(CE_WARN, "segmap_faulta - no vp");
819 808 return (FC_MAKE_ERR(EIO));
820 809 }
821 810
822 811 TRACE_3(TR_FAC_VM, TR_SEGMAP_GETPAGE,
823 812 "segmap_getpage:seg %p addr %p vp %p", seg, addr, vp);
824 813
825 814 err = VOP_GETPAGE(vp, (offset_t)(off + ((offset_t)((uintptr_t)addr
826 815 & MAXBOFFSET))), PAGESIZE, (uint_t *)NULL, (page_t **)NULL, 0,
827 816 seg, addr, S_READ, CRED(), NULL);
828 817
829 818 if (err)
830 819 return (FC_MAKE_ERR(err));
831 820 return (0);
832 821 }
833 822
834 823 /*ARGSUSED*/
835 824 static int
836 825 segmap_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
837 826 {
838 827 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
839 828
840 829 ASSERT(seg->s_as && RW_LOCK_HELD(&seg->s_as->a_lock));
841 830
842 831 /*
843 832 * Need not acquire the segment lock since
844 833 * "smd_prot" is a read-only field.
845 834 */
846 835 return (((smd->smd_prot & prot) != prot) ? EACCES : 0);
847 836 }
848 837
849 838 static int
850 839 segmap_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
851 840 {
852 841 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
853 842 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
854 843
855 844 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
856 845
857 846 if (pgno != 0) {
858 847 do {
859 848 protv[--pgno] = smd->smd_prot;
860 849 } while (pgno != 0);
861 850 }
862 851 return (0);
863 852 }
864 853
865 854 static u_offset_t
866 855 segmap_getoffset(struct seg *seg, caddr_t addr)
867 856 {
868 857 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
869 858
870 859 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
871 860
872 861 return ((u_offset_t)smd->smd_sm->sm_off + (addr - seg->s_base));
873 862 }
874 863
875 864 /*ARGSUSED*/
876 865 static int
877 866 segmap_gettype(struct seg *seg, caddr_t addr)
878 867 {
879 868 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
880 869
881 870 return (MAP_SHARED);
882 871 }
883 872
884 873 /*ARGSUSED*/
885 874 static int
886 875 segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
887 876 {
888 877 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
889 878
890 879 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
891 880
892 881 /* XXX - This doesn't make any sense */
893 882 *vpp = smd->smd_sm->sm_vp;
894 883 return (0);
895 884 }
896 885
897 886 /*
898 887 * Check to see if it makes sense to do kluster/read ahead to
899 888 * addr + delta relative to the mapping at addr. We assume here
900 889 * that delta is a signed PAGESIZE'd multiple (which can be negative).
901 890 *
902 891 * For segmap we always "approve" of this action from our standpoint.
903 892 */
904 893 /*ARGSUSED*/
905 894 static int
906 895 segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
907 896 {
908 897 return (0);
909 -}
910 -
911 -static void
912 -segmap_badop()
913 -{
914 - panic("segmap_badop");
915 - /*NOTREACHED*/
916 898 }
917 899
918 900 /*
919 901 * Special private segmap operations
920 902 */
921 903
922 904 /*
923 905 * Add smap to the appropriate free list.
924 906 */
925 907 static void
926 908 segmap_smapadd(struct smap *smp)
927 909 {
928 910 struct smfree *sm;
929 911 struct smap *smpfreelist;
930 912 struct sm_freeq *releq;
931 913
932 914 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
933 915
934 916 if (smp->sm_refcnt != 0) {
935 917 panic("segmap_smapadd");
936 918 /*NOTREACHED*/
937 919 }
938 920
939 921 sm = &smd_free[smp->sm_free_ndx];
940 922 /*
941 923 * Add to the tail of the release queue
942 924 * Note that sm_releq and sm_allocq could toggle
943 925 * before we get the lock. This does not affect
944 926 * correctness as the 2 queues are only maintained
945 927 * to reduce lock pressure.
946 928 */
947 929 releq = sm->sm_releq;
948 930 if (releq == &sm->sm_freeq[0])
949 931 smp->sm_flags |= SM_QNDX_ZERO;
950 932 else
951 933 smp->sm_flags &= ~SM_QNDX_ZERO;
952 934 mutex_enter(&releq->smq_mtx);
953 935 smpfreelist = releq->smq_free;
954 936 if (smpfreelist == 0) {
955 937 int want;
956 938
957 939 releq->smq_free = smp->sm_next = smp->sm_prev = smp;
958 940 /*
959 941 * Both queue mutexes held to set sm_want;
960 942 * snapshot the value before dropping releq mutex.
961 943 * If sm_want appears after the releq mutex is dropped,
962 944 * then the smap just freed is already gone.
963 945 */
964 946 want = sm->sm_want;
965 947 mutex_exit(&releq->smq_mtx);
966 948 /*
967 949 * See if there was a waiter before dropping the releq mutex
968 950 * then recheck after obtaining sm_freeq[0] mutex as
969 951 * the another thread may have already signaled.
970 952 */
971 953 if (want) {
972 954 mutex_enter(&sm->sm_freeq[0].smq_mtx);
973 955 if (sm->sm_want)
974 956 cv_signal(&sm->sm_free_cv);
975 957 mutex_exit(&sm->sm_freeq[0].smq_mtx);
976 958 }
977 959 } else {
978 960 smp->sm_next = smpfreelist;
979 961 smp->sm_prev = smpfreelist->sm_prev;
980 962 smpfreelist->sm_prev = smp;
981 963 smp->sm_prev->sm_next = smp;
982 964 mutex_exit(&releq->smq_mtx);
983 965 }
984 966 }
985 967
986 968
987 969 static struct smap *
988 970 segmap_hashin(struct smap *smp, struct vnode *vp, u_offset_t off, int hashid)
989 971 {
990 972 struct smap **hpp;
991 973 struct smap *tmp;
992 974 kmutex_t *hmtx;
993 975
994 976 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
995 977 ASSERT(smp->sm_vp == NULL);
996 978 ASSERT(smp->sm_hash == NULL);
997 979 ASSERT(smp->sm_prev == NULL);
998 980 ASSERT(smp->sm_next == NULL);
999 981 ASSERT(hashid >= 0 && hashid <= smd_hashmsk);
1000 982
1001 983 hmtx = SHASHMTX(hashid);
1002 984
1003 985 mutex_enter(hmtx);
1004 986 /*
1005 987 * First we need to verify that no one has created a smp
1006 988 * with (vp,off) as its tag before we us.
1007 989 */
1008 990 for (tmp = smd_hash[hashid].sh_hash_list;
1009 991 tmp != NULL; tmp = tmp->sm_hash)
1010 992 if (tmp->sm_vp == vp && tmp->sm_off == off)
1011 993 break;
1012 994
1013 995 if (tmp == NULL) {
1014 996 /*
1015 997 * No one created one yet.
1016 998 *
1017 999 * Funniness here - we don't increment the ref count on the
1018 1000 * vnode * even though we have another pointer to it here.
1019 1001 * The reason for this is that we don't want the fact that
1020 1002 * a seg_map entry somewhere refers to a vnode to prevent the
1021 1003 * vnode * itself from going away. This is because this
1022 1004 * reference to the vnode is a "soft one". In the case where
1023 1005 * a mapping is being used by a rdwr [or directory routine?]
1024 1006 * there already has to be a non-zero ref count on the vnode.
1025 1007 * In the case where the vp has been freed and the the smap
1026 1008 * structure is on the free list, there are no pages in memory
1027 1009 * that can refer to the vnode. Thus even if we reuse the same
1028 1010 * vnode/smap structure for a vnode which has the same
1029 1011 * address but represents a different object, we are ok.
1030 1012 */
1031 1013 smp->sm_vp = vp;
1032 1014 smp->sm_off = off;
1033 1015
1034 1016 hpp = &smd_hash[hashid].sh_hash_list;
1035 1017 smp->sm_hash = *hpp;
1036 1018 *hpp = smp;
1037 1019 #ifdef SEGMAP_HASHSTATS
1038 1020 smd_hash_len[hashid]++;
1039 1021 #endif
1040 1022 }
1041 1023 mutex_exit(hmtx);
1042 1024
1043 1025 return (tmp);
1044 1026 }
1045 1027
1046 1028 static void
1047 1029 segmap_hashout(struct smap *smp)
1048 1030 {
1049 1031 struct smap **hpp, *hp;
1050 1032 struct vnode *vp;
1051 1033 kmutex_t *mtx;
1052 1034 int hashid;
1053 1035 u_offset_t off;
1054 1036
1055 1037 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1056 1038
1057 1039 vp = smp->sm_vp;
1058 1040 off = smp->sm_off;
1059 1041
1060 1042 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1061 1043 mtx = SHASHMTX(hashid);
1062 1044 mutex_enter(mtx);
1063 1045
1064 1046 hpp = &smd_hash[hashid].sh_hash_list;
1065 1047 for (;;) {
1066 1048 hp = *hpp;
1067 1049 if (hp == NULL) {
1068 1050 panic("segmap_hashout");
1069 1051 /*NOTREACHED*/
1070 1052 }
1071 1053 if (hp == smp)
1072 1054 break;
1073 1055 hpp = &hp->sm_hash;
1074 1056 }
1075 1057
1076 1058 *hpp = smp->sm_hash;
1077 1059 smp->sm_hash = NULL;
1078 1060 #ifdef SEGMAP_HASHSTATS
1079 1061 smd_hash_len[hashid]--;
1080 1062 #endif
1081 1063 mutex_exit(mtx);
1082 1064
1083 1065 smp->sm_vp = NULL;
1084 1066 smp->sm_off = (u_offset_t)0;
1085 1067
1086 1068 }
1087 1069
1088 1070 /*
1089 1071 * Attempt to free unmodified, unmapped, and non locked segmap
1090 1072 * pages.
1091 1073 */
1092 1074 void
1093 1075 segmap_pagefree(struct vnode *vp, u_offset_t off)
1094 1076 {
1095 1077 u_offset_t pgoff;
1096 1078 page_t *pp;
1097 1079
1098 1080 for (pgoff = off; pgoff < off + MAXBSIZE; pgoff += PAGESIZE) {
1099 1081
1100 1082 if ((pp = page_lookup_nowait(vp, pgoff, SE_EXCL)) == NULL)
1101 1083 continue;
1102 1084
1103 1085 switch (page_release(pp, 1)) {
1104 1086 case PGREL_NOTREL:
1105 1087 segmapcnt.smp_free_notfree.value.ul++;
1106 1088 break;
1107 1089 case PGREL_MOD:
1108 1090 segmapcnt.smp_free_dirty.value.ul++;
1109 1091 break;
1110 1092 case PGREL_CLEAN:
1111 1093 segmapcnt.smp_free.value.ul++;
1112 1094 break;
1113 1095 }
1114 1096 }
1115 1097 }
1116 1098
1117 1099 /*
1118 1100 * Locks held on entry: smap lock
1119 1101 * Locks held on exit : smap lock.
1120 1102 */
1121 1103
1122 1104 static void
1123 1105 grab_smp(struct smap *smp, page_t *pp)
1124 1106 {
1125 1107 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
1126 1108 ASSERT(smp->sm_refcnt == 0);
1127 1109
1128 1110 if (smp->sm_vp != (struct vnode *)NULL) {
1129 1111 struct vnode *vp = smp->sm_vp;
1130 1112 u_offset_t off = smp->sm_off;
1131 1113 /*
1132 1114 * Destroy old vnode association and
1133 1115 * unload any hardware translations to
1134 1116 * the old object.
1135 1117 */
1136 1118 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reuse++;
1137 1119 segmap_hashout(smp);
1138 1120
1139 1121 /*
1140 1122 * This node is off freelist and hashlist,
1141 1123 * so there is no reason to drop/reacquire sm_mtx
1142 1124 * across calls to hat_unload.
1143 1125 */
1144 1126 if (segmap_kpm) {
1145 1127 caddr_t vaddr;
1146 1128 int hat_unload_needed = 0;
1147 1129
1148 1130 /*
1149 1131 * unload kpm mapping
1150 1132 */
1151 1133 if (pp != NULL) {
1152 1134 vaddr = hat_kpm_page2va(pp, 1);
1153 1135 hat_kpm_mapout(pp, GET_KPME(smp), vaddr);
1154 1136 page_unlock(pp);
1155 1137 }
1156 1138
1157 1139 /*
1158 1140 * Check if we have (also) the rare case of a
1159 1141 * non kpm mapping.
1160 1142 */
1161 1143 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
1162 1144 hat_unload_needed = 1;
1163 1145 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1164 1146 }
1165 1147
1166 1148 if (hat_unload_needed) {
1167 1149 hat_unload(kas.a_hat, segkmap->s_base +
1168 1150 ((smp - smd_smap) * MAXBSIZE),
1169 1151 MAXBSIZE, HAT_UNLOAD);
1170 1152 }
1171 1153
1172 1154 } else {
1173 1155 ASSERT(smp->sm_flags & SM_NOTKPM_RELEASED);
1174 1156 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
1175 1157 hat_unload(kas.a_hat, segkmap->s_base +
1176 1158 ((smp - smd_smap) * MAXBSIZE),
1177 1159 MAXBSIZE, HAT_UNLOAD);
1178 1160 }
1179 1161 segmap_pagefree(vp, off);
1180 1162 }
1181 1163 }
1182 1164
1183 1165 static struct smap *
1184 1166 get_free_smp(int free_ndx)
1185 1167 {
1186 1168 struct smfree *sm;
1187 1169 kmutex_t *smtx;
1188 1170 struct smap *smp, *first;
1189 1171 struct sm_freeq *allocq, *releq;
1190 1172 struct kpme *kpme;
1191 1173 page_t *pp = NULL;
1192 1174 int end_ndx, page_locked = 0;
1193 1175
1194 1176 end_ndx = free_ndx;
1195 1177 sm = &smd_free[free_ndx];
1196 1178
1197 1179 retry_queue:
1198 1180 allocq = sm->sm_allocq;
1199 1181 mutex_enter(&allocq->smq_mtx);
1200 1182
1201 1183 if ((smp = allocq->smq_free) == NULL) {
1202 1184
1203 1185 skip_queue:
1204 1186 /*
1205 1187 * The alloc list is empty or this queue is being skipped;
1206 1188 * first see if the allocq toggled.
1207 1189 */
1208 1190 if (sm->sm_allocq != allocq) {
1209 1191 /* queue changed */
1210 1192 mutex_exit(&allocq->smq_mtx);
1211 1193 goto retry_queue;
1212 1194 }
1213 1195 releq = sm->sm_releq;
1214 1196 if (!mutex_tryenter(&releq->smq_mtx)) {
1215 1197 /* cannot get releq; a free smp may be there now */
1216 1198 mutex_exit(&allocq->smq_mtx);
1217 1199
1218 1200 /*
1219 1201 * This loop could spin forever if this thread has
1220 1202 * higher priority than the thread that is holding
1221 1203 * releq->smq_mtx. In order to force the other thread
1222 1204 * to run, we'll lock/unlock the mutex which is safe
1223 1205 * since we just unlocked the allocq mutex.
1224 1206 */
1225 1207 mutex_enter(&releq->smq_mtx);
1226 1208 mutex_exit(&releq->smq_mtx);
1227 1209 goto retry_queue;
1228 1210 }
1229 1211 if (releq->smq_free == NULL) {
1230 1212 /*
1231 1213 * This freelist is empty.
1232 1214 * This should not happen unless clients
1233 1215 * are failing to release the segmap
1234 1216 * window after accessing the data.
1235 1217 * Before resorting to sleeping, try
1236 1218 * the next list of the same color.
1237 1219 */
1238 1220 free_ndx = (free_ndx + smd_ncolor) & smd_freemsk;
1239 1221 if (free_ndx != end_ndx) {
1240 1222 mutex_exit(&releq->smq_mtx);
1241 1223 mutex_exit(&allocq->smq_mtx);
1242 1224 sm = &smd_free[free_ndx];
1243 1225 goto retry_queue;
1244 1226 }
1245 1227 /*
1246 1228 * Tried all freelists of the same color once,
1247 1229 * wait on this list and hope something gets freed.
1248 1230 */
1249 1231 segmapcnt.smp_get_nofree.value.ul++;
1250 1232 sm->sm_want++;
1251 1233 mutex_exit(&sm->sm_freeq[1].smq_mtx);
1252 1234 cv_wait(&sm->sm_free_cv,
1253 1235 &sm->sm_freeq[0].smq_mtx);
1254 1236 sm->sm_want--;
1255 1237 mutex_exit(&sm->sm_freeq[0].smq_mtx);
1256 1238 sm = &smd_free[free_ndx];
1257 1239 goto retry_queue;
1258 1240 } else {
1259 1241 /*
1260 1242 * Something on the rele queue; flip the alloc
1261 1243 * and rele queues and retry.
1262 1244 */
1263 1245 sm->sm_allocq = releq;
1264 1246 sm->sm_releq = allocq;
1265 1247 mutex_exit(&allocq->smq_mtx);
1266 1248 mutex_exit(&releq->smq_mtx);
1267 1249 if (page_locked) {
1268 1250 delay(hz >> 2);
1269 1251 page_locked = 0;
1270 1252 }
1271 1253 goto retry_queue;
1272 1254 }
1273 1255 } else {
1274 1256 /*
1275 1257 * Fastpath the case we get the smap mutex
1276 1258 * on the first try.
1277 1259 */
1278 1260 first = smp;
1279 1261 next_smap:
1280 1262 smtx = SMAPMTX(smp);
1281 1263 if (!mutex_tryenter(smtx)) {
1282 1264 /*
1283 1265 * Another thread is trying to reclaim this slot.
1284 1266 * Skip to the next queue or smap.
1285 1267 */
1286 1268 if ((smp = smp->sm_next) == first) {
1287 1269 goto skip_queue;
1288 1270 } else {
1289 1271 goto next_smap;
1290 1272 }
1291 1273 } else {
1292 1274 /*
1293 1275 * if kpme exists, get shared lock on the page
1294 1276 */
1295 1277 if (segmap_kpm && smp->sm_vp != NULL) {
1296 1278
1297 1279 kpme = GET_KPME(smp);
1298 1280 pp = kpme->kpe_page;
1299 1281
1300 1282 if (pp != NULL) {
1301 1283 if (!page_trylock(pp, SE_SHARED)) {
1302 1284 smp = smp->sm_next;
1303 1285 mutex_exit(smtx);
1304 1286 page_locked = 1;
1305 1287
1306 1288 pp = NULL;
1307 1289
1308 1290 if (smp == first) {
1309 1291 goto skip_queue;
1310 1292 } else {
1311 1293 goto next_smap;
1312 1294 }
1313 1295 } else {
1314 1296 if (kpme->kpe_page == NULL) {
1315 1297 page_unlock(pp);
1316 1298 pp = NULL;
1317 1299 }
1318 1300 }
1319 1301 }
1320 1302 }
1321 1303
1322 1304 /*
1323 1305 * At this point, we've selected smp. Remove smp
1324 1306 * from its freelist. If smp is the first one in
1325 1307 * the freelist, update the head of the freelist.
1326 1308 */
1327 1309 if (first == smp) {
1328 1310 ASSERT(first == allocq->smq_free);
1329 1311 allocq->smq_free = smp->sm_next;
1330 1312 }
1331 1313
1332 1314 /*
1333 1315 * if the head of the freelist still points to smp,
1334 1316 * then there are no more free smaps in that list.
1335 1317 */
1336 1318 if (allocq->smq_free == smp)
1337 1319 /*
1338 1320 * Took the last one
1339 1321 */
1340 1322 allocq->smq_free = NULL;
1341 1323 else {
1342 1324 smp->sm_prev->sm_next = smp->sm_next;
1343 1325 smp->sm_next->sm_prev = smp->sm_prev;
1344 1326 }
1345 1327 mutex_exit(&allocq->smq_mtx);
1346 1328 smp->sm_prev = smp->sm_next = NULL;
1347 1329
1348 1330 /*
1349 1331 * if pp != NULL, pp must have been locked;
1350 1332 * grab_smp() unlocks pp.
1351 1333 */
1352 1334 ASSERT((pp == NULL) || PAGE_LOCKED(pp));
1353 1335 grab_smp(smp, pp);
1354 1336 /* return smp locked. */
1355 1337 ASSERT(SMAPMTX(smp) == smtx);
1356 1338 ASSERT(MUTEX_HELD(smtx));
1357 1339 return (smp);
1358 1340 }
1359 1341 }
1360 1342 }
1361 1343
1362 1344 /*
1363 1345 * Special public segmap operations
1364 1346 */
1365 1347
1366 1348 /*
1367 1349 * Create pages (without using VOP_GETPAGE) and load up translations to them.
1368 1350 * If softlock is TRUE, then set things up so that it looks like a call
1369 1351 * to segmap_fault with F_SOFTLOCK.
1370 1352 *
1371 1353 * Returns 1, if a page is created by calling page_create_va(), or 0 otherwise.
1372 1354 *
1373 1355 * All fields in the generic segment (struct seg) are considered to be
1374 1356 * read-only for "segmap" even though the kernel address space (kas) may
1375 1357 * not be locked, hence no lock is needed to access them.
1376 1358 */
1377 1359 int
1378 1360 segmap_pagecreate(struct seg *seg, caddr_t addr, size_t len, int softlock)
1379 1361 {
1380 1362 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
1381 1363 page_t *pp;
1382 1364 u_offset_t off;
1383 1365 struct smap *smp;
1384 1366 struct vnode *vp;
1385 1367 caddr_t eaddr;
1386 1368 int newpage = 0;
1387 1369 uint_t prot;
1388 1370 kmutex_t *smtx;
1389 1371 int hat_flag;
1390 1372
1391 1373 ASSERT(seg->s_as == &kas);
1392 1374
1393 1375 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1394 1376 /*
1395 1377 * Pages are successfully prefaulted and locked in
1396 1378 * segmap_getmapflt and can't be unlocked until
1397 1379 * segmap_release. The SM_KPM_NEWPAGE flag is set
1398 1380 * in segmap_pagecreate_kpm when new pages are created.
1399 1381 * and it is returned as "newpage" indication here.
1400 1382 */
1401 1383 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1402 1384 panic("segmap_pagecreate: smap not found "
1403 1385 "for addr %p", (void *)addr);
1404 1386 /*NOTREACHED*/
1405 1387 }
1406 1388
1407 1389 smtx = SMAPMTX(smp);
1408 1390 newpage = smp->sm_flags & SM_KPM_NEWPAGE;
1409 1391 smp->sm_flags &= ~SM_KPM_NEWPAGE;
1410 1392 mutex_exit(smtx);
1411 1393
1412 1394 return (newpage);
1413 1395 }
1414 1396
1415 1397 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
1416 1398
1417 1399 eaddr = addr + len;
1418 1400 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1419 1401
1420 1402 smp = GET_SMAP(seg, addr);
1421 1403
1422 1404 /*
1423 1405 * We don't grab smp mutex here since we assume the smp
1424 1406 * has a refcnt set already which prevents the slot from
1425 1407 * changing its id.
1426 1408 */
1427 1409 ASSERT(smp->sm_refcnt > 0);
1428 1410
1429 1411 vp = smp->sm_vp;
1430 1412 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1431 1413 prot = smd->smd_prot;
1432 1414
1433 1415 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1434 1416 hat_flag = HAT_LOAD;
1435 1417 pp = page_lookup(vp, off, SE_SHARED);
1436 1418 if (pp == NULL) {
1437 1419 ushort_t bitindex;
1438 1420
1439 1421 if ((pp = page_create_va(vp, off,
1440 1422 PAGESIZE, PG_WAIT, seg, addr)) == NULL) {
1441 1423 panic("segmap_pagecreate: page_create failed");
1442 1424 /*NOTREACHED*/
1443 1425 }
1444 1426 newpage = 1;
1445 1427 page_io_unlock(pp);
1446 1428
1447 1429 /*
1448 1430 * Since pages created here do not contain valid
1449 1431 * data until the caller writes into them, the
1450 1432 * "exclusive" lock will not be dropped to prevent
1451 1433 * other users from accessing the page. We also
1452 1434 * have to lock the translation to prevent a fault
1453 1435 * from occurring when the virtual address mapped by
1454 1436 * this page is written into. This is necessary to
1455 1437 * avoid a deadlock since we haven't dropped the
1456 1438 * "exclusive" lock.
1457 1439 */
1458 1440 bitindex = (ushort_t)((off - smp->sm_off) >> PAGESHIFT);
1459 1441
1460 1442 /*
1461 1443 * Large Files: The following assertion is to
1462 1444 * verify the cast above.
1463 1445 */
1464 1446 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1465 1447 smtx = SMAPMTX(smp);
1466 1448 mutex_enter(smtx);
1467 1449 smp->sm_bitmap |= SMAP_BIT_MASK(bitindex);
1468 1450 mutex_exit(smtx);
1469 1451
1470 1452 hat_flag = HAT_LOAD_LOCK;
1471 1453 } else if (softlock) {
1472 1454 hat_flag = HAT_LOAD_LOCK;
1473 1455 }
1474 1456
1475 1457 if (IS_VMODSORT(pp->p_vnode) && (prot & PROT_WRITE))
1476 1458 hat_setmod(pp);
1477 1459
1478 1460 hat_memload(kas.a_hat, addr, pp, prot, hat_flag);
1479 1461
1480 1462 if (hat_flag != HAT_LOAD_LOCK)
1481 1463 page_unlock(pp);
1482 1464
1483 1465 TRACE_5(TR_FAC_VM, TR_SEGMAP_PAGECREATE,
1484 1466 "segmap_pagecreate:seg %p addr %p pp %p vp %p offset %llx",
1485 1467 seg, addr, pp, vp, off);
1486 1468 }
1487 1469
1488 1470 return (newpage);
1489 1471 }
1490 1472
1491 1473 void
1492 1474 segmap_pageunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
1493 1475 {
1494 1476 struct smap *smp;
1495 1477 ushort_t bitmask;
1496 1478 page_t *pp;
1497 1479 struct vnode *vp;
1498 1480 u_offset_t off;
1499 1481 caddr_t eaddr;
1500 1482 kmutex_t *smtx;
1501 1483
1502 1484 ASSERT(seg->s_as == &kas);
1503 1485
1504 1486 eaddr = addr + len;
1505 1487 addr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
1506 1488
1507 1489 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1508 1490 /*
1509 1491 * Pages are successfully prefaulted and locked in
1510 1492 * segmap_getmapflt and can't be unlocked until
1511 1493 * segmap_release, so no pages or hat mappings have
1512 1494 * to be unlocked at this point.
1513 1495 */
1514 1496 #ifdef DEBUG
1515 1497 if ((smp = get_smap_kpm(addr, NULL)) == NULL) {
1516 1498 panic("segmap_pageunlock: smap not found "
1517 1499 "for addr %p", (void *)addr);
1518 1500 /*NOTREACHED*/
1519 1501 }
1520 1502
1521 1503 ASSERT(smp->sm_refcnt > 0);
1522 1504 mutex_exit(SMAPMTX(smp));
1523 1505 #endif
1524 1506 return;
1525 1507 }
1526 1508
1527 1509 smp = GET_SMAP(seg, addr);
1528 1510 smtx = SMAPMTX(smp);
1529 1511
1530 1512 ASSERT(smp->sm_refcnt > 0);
1531 1513
1532 1514 vp = smp->sm_vp;
1533 1515 off = smp->sm_off + ((u_offset_t)((uintptr_t)addr & MAXBOFFSET));
1534 1516
1535 1517 for (; addr < eaddr; addr += PAGESIZE, off += PAGESIZE) {
1536 1518 bitmask = SMAP_BIT_MASK((int)(off - smp->sm_off) >> PAGESHIFT);
1537 1519
1538 1520 /*
1539 1521 * Large Files: Following assertion is to verify
1540 1522 * the correctness of the cast to (int) above.
1541 1523 */
1542 1524 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
1543 1525
1544 1526 /*
1545 1527 * If the bit corresponding to "off" is set,
1546 1528 * clear this bit in the bitmap, unlock translations,
1547 1529 * and release the "exclusive" lock on the page.
1548 1530 */
1549 1531 if (smp->sm_bitmap & bitmask) {
1550 1532 mutex_enter(smtx);
1551 1533 smp->sm_bitmap &= ~bitmask;
1552 1534 mutex_exit(smtx);
1553 1535
1554 1536 hat_unlock(kas.a_hat, addr, PAGESIZE);
1555 1537
1556 1538 /*
1557 1539 * Use page_find() instead of page_lookup() to
1558 1540 * find the page since we know that it has
1559 1541 * "exclusive" lock.
1560 1542 */
1561 1543 pp = page_find(vp, off);
1562 1544 if (pp == NULL) {
1563 1545 panic("segmap_pageunlock: page not found");
1564 1546 /*NOTREACHED*/
1565 1547 }
1566 1548 if (rw == S_WRITE) {
1567 1549 hat_setrefmod(pp);
1568 1550 } else if (rw != S_OTHER) {
1569 1551 hat_setref(pp);
1570 1552 }
1571 1553
1572 1554 page_unlock(pp);
1573 1555 }
1574 1556 }
1575 1557 }
1576 1558
1577 1559 caddr_t
1578 1560 segmap_getmap(struct seg *seg, struct vnode *vp, u_offset_t off)
1579 1561 {
1580 1562 return (segmap_getmapflt(seg, vp, off, MAXBSIZE, 0, S_OTHER));
1581 1563 }
1582 1564
1583 1565 /*
1584 1566 * This is the magic virtual address that offset 0 of an ELF
1585 1567 * file gets mapped to in user space. This is used to pick
1586 1568 * the vac color on the freelist.
1587 1569 */
1588 1570 #define ELF_OFFZERO_VA (0x10000)
1589 1571 /*
1590 1572 * segmap_getmap allocates a MAXBSIZE big slot to map the vnode vp
1591 1573 * in the range <off, off + len). off doesn't need to be MAXBSIZE aligned.
1592 1574 * The return address is always MAXBSIZE aligned.
1593 1575 *
1594 1576 * If forcefault is nonzero and the MMU translations haven't yet been created,
1595 1577 * segmap_getmap will call segmap_fault(..., F_INVAL, rw) to create them.
1596 1578 */
1597 1579 caddr_t
1598 1580 segmap_getmapflt(
1599 1581 struct seg *seg,
1600 1582 struct vnode *vp,
1601 1583 u_offset_t off,
1602 1584 size_t len,
1603 1585 int forcefault,
1604 1586 enum seg_rw rw)
1605 1587 {
1606 1588 struct smap *smp, *nsmp;
1607 1589 extern struct vnode *common_specvp();
1608 1590 caddr_t baseaddr; /* MAXBSIZE aligned */
1609 1591 u_offset_t baseoff;
1610 1592 int newslot;
1611 1593 caddr_t vaddr;
1612 1594 int color, hashid;
1613 1595 kmutex_t *hashmtx, *smapmtx;
1614 1596 struct smfree *sm;
1615 1597 page_t *pp;
1616 1598 struct kpme *kpme;
1617 1599 uint_t prot;
1618 1600 caddr_t base;
1619 1601 page_t *pl[MAXPPB + 1];
1620 1602 int error;
1621 1603 int is_kpm = 1;
1622 1604
1623 1605 ASSERT(seg->s_as == &kas);
1624 1606 ASSERT(seg == segkmap);
1625 1607
1626 1608 baseoff = off & (offset_t)MAXBMASK;
1627 1609 if (off + len > baseoff + MAXBSIZE) {
1628 1610 panic("segmap_getmap bad len");
1629 1611 /*NOTREACHED*/
1630 1612 }
1631 1613
1632 1614 /*
1633 1615 * If this is a block device we have to be sure to use the
1634 1616 * "common" block device vnode for the mapping.
1635 1617 */
1636 1618 if (vp->v_type == VBLK)
1637 1619 vp = common_specvp(vp);
1638 1620
1639 1621 smd_cpu[CPU->cpu_seqid].scpu.scpu_getmap++;
1640 1622
1641 1623 if (segmap_kpm == 0 ||
1642 1624 (forcefault == SM_PAGECREATE && rw != S_WRITE)) {
1643 1625 is_kpm = 0;
1644 1626 }
1645 1627
1646 1628 SMAP_HASHFUNC(vp, off, hashid); /* macro assigns hashid */
1647 1629 hashmtx = SHASHMTX(hashid);
1648 1630
1649 1631 retry_hash:
1650 1632 mutex_enter(hashmtx);
1651 1633 for (smp = smd_hash[hashid].sh_hash_list;
1652 1634 smp != NULL; smp = smp->sm_hash)
1653 1635 if (smp->sm_vp == vp && smp->sm_off == baseoff)
1654 1636 break;
1655 1637 mutex_exit(hashmtx);
1656 1638
1657 1639 vrfy_smp:
1658 1640 if (smp != NULL) {
1659 1641
1660 1642 ASSERT(vp->v_count != 0);
1661 1643
1662 1644 /*
1663 1645 * Get smap lock and recheck its tag. The hash lock
1664 1646 * is dropped since the hash is based on (vp, off)
1665 1647 * and (vp, off) won't change when we have smap mtx.
1666 1648 */
1667 1649 smapmtx = SMAPMTX(smp);
1668 1650 mutex_enter(smapmtx);
1669 1651 if (smp->sm_vp != vp || smp->sm_off != baseoff) {
1670 1652 mutex_exit(smapmtx);
1671 1653 goto retry_hash;
1672 1654 }
1673 1655
1674 1656 if (smp->sm_refcnt == 0) {
1675 1657
1676 1658 smd_cpu[CPU->cpu_seqid].scpu.scpu_get_reclaim++;
1677 1659
1678 1660 /*
1679 1661 * Could still be on the free list. However, this
1680 1662 * could also be an smp that is transitioning from
1681 1663 * the free list when we have too much contention
1682 1664 * for the smapmtx's. In this case, we have an
1683 1665 * unlocked smp that is not on the free list any
1684 1666 * longer, but still has a 0 refcnt. The only way
1685 1667 * to be sure is to check the freelist pointers.
1686 1668 * Since we now have the smapmtx, we are guaranteed
1687 1669 * that the (vp, off) won't change, so we are safe
1688 1670 * to reclaim it. get_free_smp() knows that this
1689 1671 * can happen, and it will check the refcnt.
1690 1672 */
1691 1673
1692 1674 if ((smp->sm_next != NULL)) {
1693 1675 struct sm_freeq *freeq;
1694 1676
1695 1677 ASSERT(smp->sm_prev != NULL);
1696 1678 sm = &smd_free[smp->sm_free_ndx];
1697 1679
1698 1680 if (smp->sm_flags & SM_QNDX_ZERO)
1699 1681 freeq = &sm->sm_freeq[0];
1700 1682 else
1701 1683 freeq = &sm->sm_freeq[1];
1702 1684
1703 1685 mutex_enter(&freeq->smq_mtx);
1704 1686 if (freeq->smq_free != smp) {
1705 1687 /*
1706 1688 * fastpath normal case
1707 1689 */
1708 1690 smp->sm_prev->sm_next = smp->sm_next;
1709 1691 smp->sm_next->sm_prev = smp->sm_prev;
1710 1692 } else if (smp == smp->sm_next) {
1711 1693 /*
1712 1694 * Taking the last smap on freelist
1713 1695 */
1714 1696 freeq->smq_free = NULL;
1715 1697 } else {
1716 1698 /*
1717 1699 * Reclaiming 1st smap on list
1718 1700 */
1719 1701 freeq->smq_free = smp->sm_next;
1720 1702 smp->sm_prev->sm_next = smp->sm_next;
1721 1703 smp->sm_next->sm_prev = smp->sm_prev;
1722 1704 }
1723 1705 mutex_exit(&freeq->smq_mtx);
1724 1706 smp->sm_prev = smp->sm_next = NULL;
1725 1707 } else {
1726 1708 ASSERT(smp->sm_prev == NULL);
1727 1709 segmapcnt.smp_stolen.value.ul++;
1728 1710 }
1729 1711
1730 1712 } else {
1731 1713 segmapcnt.smp_get_use.value.ul++;
1732 1714 }
1733 1715 smp->sm_refcnt++; /* another user */
1734 1716
1735 1717 /*
1736 1718 * We don't invoke segmap_fault via TLB miss, so we set ref
1737 1719 * and mod bits in advance. For S_OTHER we set them in
1738 1720 * segmap_fault F_SOFTUNLOCK.
1739 1721 */
1740 1722 if (is_kpm) {
1741 1723 if (rw == S_WRITE) {
1742 1724 smp->sm_flags |= SM_WRITE_DATA;
1743 1725 } else if (rw == S_READ) {
1744 1726 smp->sm_flags |= SM_READ_DATA;
1745 1727 }
1746 1728 }
1747 1729 mutex_exit(smapmtx);
1748 1730
1749 1731 newslot = 0;
1750 1732 } else {
1751 1733
1752 1734 uint32_t free_ndx, *free_ndxp;
1753 1735 union segmap_cpu *scpu;
1754 1736
1755 1737 /*
1756 1738 * On a PAC machine or a machine with anti-alias
1757 1739 * hardware, smd_colormsk will be zero.
1758 1740 *
1759 1741 * On a VAC machine- pick color by offset in the file
1760 1742 * so we won't get VAC conflicts on elf files.
1761 1743 * On data files, color does not matter but we
1762 1744 * don't know what kind of file it is so we always
1763 1745 * pick color by offset. This causes color
1764 1746  * pick color by offset. This causes the color
1765 1747 * heavily.
1766 1748 */
1767 1749 color = (baseoff >> MAXBSHIFT) & smd_colormsk;
1768 1750 scpu = smd_cpu+CPU->cpu_seqid;
1769 1751 free_ndxp = &scpu->scpu.scpu_free_ndx[color];
1770 1752 free_ndx = (*free_ndxp += smd_ncolor) & smd_freemsk;
1771 1753 #ifdef DEBUG
1772 1754 colors_used[free_ndx]++;
1773 1755 #endif /* DEBUG */
1774 1756
1775 1757 /*
1776 1758 * Get a locked smp slot from the free list.
1777 1759 */
1778 1760 smp = get_free_smp(free_ndx);
1779 1761 smapmtx = SMAPMTX(smp);
1780 1762
1781 1763 ASSERT(smp->sm_vp == NULL);
1782 1764
1783 1765 if ((nsmp = segmap_hashin(smp, vp, baseoff, hashid)) != NULL) {
1784 1766 /*
1785 1767 * Failed to hashin, there exists one now.
1786 1768 * Return the smp we just allocated.
1787 1769 */
1788 1770 segmap_smapadd(smp);
1789 1771 mutex_exit(smapmtx);
1790 1772
1791 1773 smp = nsmp;
1792 1774 goto vrfy_smp;
1793 1775 }
1794 1776 smp->sm_refcnt++; /* another user */
1795 1777
1796 1778 /*
1797 1779 * We don't invoke segmap_fault via TLB miss, so we set ref
1798 1780 * and mod bits in advance. For S_OTHER we set them in
1799 1781 * segmap_fault F_SOFTUNLOCK.
1800 1782 */
1801 1783 if (is_kpm) {
1802 1784 if (rw == S_WRITE) {
1803 1785 smp->sm_flags |= SM_WRITE_DATA;
1804 1786 } else if (rw == S_READ) {
1805 1787 smp->sm_flags |= SM_READ_DATA;
1806 1788 }
1807 1789 }
1808 1790 mutex_exit(smapmtx);
1809 1791
1810 1792 newslot = 1;
1811 1793 }
1812 1794
1813 1795 if (!is_kpm)
1814 1796 goto use_segmap_range;
1815 1797
1816 1798 /*
1817 1799 * Use segkpm
1818 1800 */
1819 1801 /* Lint directive required until 6746211 is fixed */
1820 1802 /*CONSTCOND*/
1821 1803 ASSERT(PAGESIZE == MAXBSIZE);
1822 1804
1823 1805 /*
1824 1806 * remember the last smp faulted on this cpu.
1825 1807 */
1826 1808 (smd_cpu+CPU->cpu_seqid)->scpu.scpu_last_smap = smp;
1827 1809
1828 1810 if (forcefault == SM_PAGECREATE) {
1829 1811 baseaddr = segmap_pagecreate_kpm(seg, vp, baseoff, smp, rw);
1830 1812 return (baseaddr);
1831 1813 }
1832 1814
1833 1815 if (newslot == 0 &&
1834 1816 (pp = GET_KPME(smp)->kpe_page) != NULL) {
1835 1817
1836 1818 /* fastpath */
1837 1819 switch (rw) {
1838 1820 case S_READ:
1839 1821 case S_WRITE:
1840 1822 if (page_trylock(pp, SE_SHARED)) {
1841 1823 if (PP_ISFREE(pp) ||
1842 1824 !(pp->p_vnode == vp &&
1843 1825 pp->p_offset == baseoff)) {
1844 1826 page_unlock(pp);
1845 1827 pp = page_lookup(vp, baseoff,
1846 1828 SE_SHARED);
1847 1829 }
1848 1830 } else {
1849 1831 pp = page_lookup(vp, baseoff, SE_SHARED);
1850 1832 }
1851 1833
1852 1834 if (pp == NULL) {
1853 1835 ASSERT(GET_KPME(smp)->kpe_page == NULL);
1854 1836 break;
1855 1837 }
1856 1838
1857 1839 if (rw == S_WRITE &&
1858 1840 hat_page_getattr(pp, P_MOD | P_REF) !=
1859 1841 (P_MOD | P_REF)) {
1860 1842 page_unlock(pp);
1861 1843 break;
1862 1844 }
1863 1845
1864 1846 /*
1865 1847 * We have the p_selock as reader, grab_smp
1866 1848 * can't hit us, we have bumped the smap
1867 1849 * refcnt and hat_pageunload needs the
1868 1850 * p_selock exclusive.
1869 1851 */
1870 1852 kpme = GET_KPME(smp);
1871 1853 if (kpme->kpe_page == pp) {
1872 1854 baseaddr = hat_kpm_page2va(pp, 0);
1873 1855 } else if (kpme->kpe_page == NULL) {
1874 1856 baseaddr = hat_kpm_mapin(pp, kpme);
1875 1857 } else {
1876 1858 panic("segmap_getmapflt: stale "
1877 1859 "kpme page, kpme %p", (void *)kpme);
1878 1860 /*NOTREACHED*/
1879 1861 }
1880 1862
1881 1863 /*
1882 1864 * We don't invoke segmap_fault via TLB miss,
1883 1865 * so we set ref and mod bits in advance.
1884 1866  * For S_OTHER we set them in segmap_fault
1885 1867 * F_SOFTUNLOCK.
1886 1868 */
1887 1869 if (rw == S_READ && !hat_isref(pp))
1888 1870 hat_setref(pp);
1889 1871
1890 1872 return (baseaddr);
1891 1873 default:
1892 1874 break;
1893 1875 }
1894 1876 }
1895 1877
1896 1878 base = segkpm_create_va(baseoff);
1897 1879 error = VOP_GETPAGE(vp, (offset_t)baseoff, len, &prot, pl, MAXBSIZE,
1898 1880 seg, base, rw, CRED(), NULL);
1899 1881
1900 1882 pp = pl[0];
1901 1883 if (error || pp == NULL) {
1902 1884 /*
1903 1885 * Use segmap address slot and let segmap_fault deal
1904 1886 * with the error cases. There is no error return
1905 1887 * possible here.
1906 1888 */
1907 1889 goto use_segmap_range;
1908 1890 }
1909 1891
1910 1892 ASSERT(pl[1] == NULL);
1911 1893
1912 1894 /*
1913 1895 * When prot is not returned w/ PROT_ALL the returned pages
1914 1896 * are not backed by fs blocks. For most of the segmap users
1915 1897 * this is no problem, they don't write to the pages in the
1916 1898 * same request and therefore don't rely on a following
1917 1899 * trap driven segmap_fault. With SM_LOCKPROTO users it
1918 1900  * is more secure to use segkmap addresses to allow
1919 1901  * protection segmap_faults.
1920 1902 */
1921 1903 if (prot != PROT_ALL && forcefault == SM_LOCKPROTO) {
1922 1904 /*
1923 1905 * Use segmap address slot and let segmap_fault
1924 1906 * do the error return.
1925 1907 */
1926 1908 ASSERT(rw != S_WRITE);
1927 1909 ASSERT(PAGE_LOCKED(pp));
1928 1910 page_unlock(pp);
1929 1911 forcefault = 0;
1930 1912 goto use_segmap_range;
1931 1913 }
1932 1914
1933 1915 /*
1934 1916 * We have the p_selock as reader, grab_smp can't hit us, we
1935 1917 * have bumped the smap refcnt and hat_pageunload needs the
1936 1918 * p_selock exclusive.
1937 1919 */
1938 1920 kpme = GET_KPME(smp);
1939 1921 if (kpme->kpe_page == pp) {
1940 1922 baseaddr = hat_kpm_page2va(pp, 0);
1941 1923 } else if (kpme->kpe_page == NULL) {
1942 1924 baseaddr = hat_kpm_mapin(pp, kpme);
1943 1925 } else {
1944 1926 panic("segmap_getmapflt: stale kpme page after "
1945 1927 "VOP_GETPAGE, kpme %p", (void *)kpme);
1946 1928 /*NOTREACHED*/
1947 1929 }
1948 1930
1949 1931 smd_cpu[CPU->cpu_seqid].scpu.scpu_fault++;
1950 1932
1951 1933 return (baseaddr);
1952 1934
1953 1935
1954 1936 use_segmap_range:
1955 1937 baseaddr = seg->s_base + ((smp - smd_smap) * MAXBSIZE);
1956 1938 TRACE_4(TR_FAC_VM, TR_SEGMAP_GETMAP,
1957 1939 "segmap_getmap:seg %p addr %p vp %p offset %llx",
1958 1940 seg, baseaddr, vp, baseoff);
1959 1941
1960 1942 /*
1961 1943 * Prefault the translations
1962 1944 */
1963 1945 vaddr = baseaddr + (off - baseoff);
1964 1946 if (forcefault && (newslot || !hat_probe(kas.a_hat, vaddr))) {
1965 1947
1966 1948 caddr_t pgaddr = (caddr_t)((uintptr_t)vaddr &
1967 1949 (uintptr_t)PAGEMASK);
1968 1950
1969 1951 (void) segmap_fault(kas.a_hat, seg, pgaddr,
1970 1952 (vaddr + len - pgaddr + PAGESIZE - 1) & (uintptr_t)PAGEMASK,
1971 1953 F_INVAL, rw);
1972 1954 }
1973 1955
1974 1956 return (baseaddr);
1975 1957 }
1976 1958
1977 1959 int
1978 1960 segmap_release(struct seg *seg, caddr_t addr, uint_t flags)
1979 1961 {
1980 1962 struct smap *smp;
1981 1963 int error;
1982 1964 int bflags = 0;
1983 1965 struct vnode *vp;
1984 1966 u_offset_t offset;
1985 1967 kmutex_t *smtx;
1986 1968 int is_kpm = 0;
1987 1969 page_t *pp;
1988 1970
1989 1971 if (segmap_kpm && IS_KPM_ADDR(addr)) {
1990 1972
1991 1973 if (((uintptr_t)addr & MAXBOFFSET) != 0) {
1992 1974 panic("segmap_release: addr %p not "
1993 1975 "MAXBSIZE aligned", (void *)addr);
1994 1976 /*NOTREACHED*/
1995 1977 }
1996 1978
1997 1979 if ((smp = get_smap_kpm(addr, &pp)) == NULL) {
1998 1980 panic("segmap_release: smap not found "
1999 1981 "for addr %p", (void *)addr);
2000 1982 /*NOTREACHED*/
2001 1983 }
2002 1984
2003 1985 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
2004 1986 "segmap_relmap:seg %p addr %p smp %p",
2005 1987 seg, addr, smp);
2006 1988
2007 1989 smtx = SMAPMTX(smp);
2008 1990
2009 1991 /*
2010 1992 * For compatibility reasons segmap_pagecreate_kpm sets this
2011 1993 * flag to allow a following segmap_pagecreate to return
2012 1994 * this as "newpage" flag. When segmap_pagecreate is not
2013 1995 * called at all we clear it now.
2014 1996 */
2015 1997 smp->sm_flags &= ~SM_KPM_NEWPAGE;
2016 1998 is_kpm = 1;
2017 1999 if (smp->sm_flags & SM_WRITE_DATA) {
2018 2000 hat_setrefmod(pp);
2019 2001 } else if (smp->sm_flags & SM_READ_DATA) {
2020 2002 hat_setref(pp);
2021 2003 }
2022 2004 } else {
2023 2005 if (addr < seg->s_base || addr >= seg->s_base + seg->s_size ||
2024 2006 ((uintptr_t)addr & MAXBOFFSET) != 0) {
2025 2007 panic("segmap_release: bad addr %p", (void *)addr);
2026 2008 /*NOTREACHED*/
2027 2009 }
2028 2010 smp = GET_SMAP(seg, addr);
2029 2011
2030 2012 TRACE_3(TR_FAC_VM, TR_SEGMAP_RELMAP,
2031 2013 "segmap_relmap:seg %p addr %p smp %p",
2032 2014 seg, addr, smp);
2033 2015
2034 2016 smtx = SMAPMTX(smp);
2035 2017 mutex_enter(smtx);
2036 2018 smp->sm_flags |= SM_NOTKPM_RELEASED;
2037 2019 }
2038 2020
2039 2021 ASSERT(smp->sm_refcnt > 0);
2040 2022
2041 2023 /*
2042 2024 * Need to call VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2043 2025 * are set.
2044 2026 */
2045 2027 if ((flags & ~SM_DONTNEED) != 0) {
2046 2028 if (flags & SM_WRITE)
2047 2029 segmapcnt.smp_rel_write.value.ul++;
2048 2030 if (flags & SM_ASYNC) {
2049 2031 bflags |= B_ASYNC;
2050 2032 segmapcnt.smp_rel_async.value.ul++;
2051 2033 }
2052 2034 if (flags & SM_INVAL) {
2053 2035 bflags |= B_INVAL;
2054 2036 segmapcnt.smp_rel_abort.value.ul++;
2055 2037 }
2056 2038 if (flags & SM_DESTROY) {
2057 2039 bflags |= (B_INVAL|B_TRUNC);
2058 2040 segmapcnt.smp_rel_abort.value.ul++;
2059 2041 }
2060 2042 if (smp->sm_refcnt == 1) {
2061 2043 /*
2062 2044 * We only bother doing the FREE and DONTNEED flags
2063 2045 * if no one else is still referencing this mapping.
2064 2046 */
2065 2047 if (flags & SM_FREE) {
2066 2048 bflags |= B_FREE;
2067 2049 segmapcnt.smp_rel_free.value.ul++;
2068 2050 }
2069 2051 if (flags & SM_DONTNEED) {
2070 2052 bflags |= B_DONTNEED;
2071 2053 segmapcnt.smp_rel_dontneed.value.ul++;
2072 2054 }
2073 2055 }
2074 2056 } else {
2075 2057 smd_cpu[CPU->cpu_seqid].scpu.scpu_release++;
2076 2058 }
2077 2059
2078 2060 vp = smp->sm_vp;
2079 2061 offset = smp->sm_off;
2080 2062
2081 2063 if (--smp->sm_refcnt == 0) {
2082 2064
2083 2065 smp->sm_flags &= ~(SM_WRITE_DATA | SM_READ_DATA);
2084 2066
2085 2067 if (flags & (SM_INVAL|SM_DESTROY)) {
2086 2068 segmap_hashout(smp); /* remove map info */
2087 2069 if (is_kpm) {
2088 2070 hat_kpm_mapout(pp, GET_KPME(smp), addr);
2089 2071 if (smp->sm_flags & SM_NOTKPM_RELEASED) {
2090 2072 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2091 2073 hat_unload(kas.a_hat, segkmap->s_base +
2092 2074 ((smp - smd_smap) * MAXBSIZE),
2093 2075 MAXBSIZE, HAT_UNLOAD);
2094 2076 }
2095 2077
2096 2078 } else {
2097 2079 if (segmap_kpm)
2098 2080 segkpm_mapout_validkpme(GET_KPME(smp));
2099 2081
2100 2082 smp->sm_flags &= ~SM_NOTKPM_RELEASED;
2101 2083 hat_unload(kas.a_hat, addr, MAXBSIZE,
2102 2084 HAT_UNLOAD);
2103 2085 }
2104 2086 }
2105 2087 segmap_smapadd(smp); /* add to free list */
2106 2088 }
2107 2089
2108 2090 mutex_exit(smtx);
2109 2091
2110 2092 if (is_kpm)
2111 2093 page_unlock(pp);
2112 2094 /*
2113 2095 * Now invoke VOP_PUTPAGE() if any flags (except SM_DONTNEED)
2114 2096 * are set.
2115 2097 */
2116 2098 if ((flags & ~SM_DONTNEED) != 0) {
2117 2099 error = VOP_PUTPAGE(vp, offset, MAXBSIZE,
2118 2100 bflags, CRED(), NULL);
2119 2101 } else {
2120 2102 error = 0;
2121 2103 }
2122 2104
2123 2105 return (error);
2124 2106 }
2125 2107
2126 2108 /*
2127 2109 * Dump the pages belonging to this segmap segment.
2128 2110 */
2129 2111 static void
2130 2112 segmap_dump(struct seg *seg)
2131 2113 {
2132 2114 struct segmap_data *smd;
2133 2115 struct smap *smp, *smp_end;
2134 2116 page_t *pp;
2135 2117 pfn_t pfn;
2136 2118 u_offset_t off;
2137 2119 caddr_t addr;
2138 2120
2139 2121 smd = (struct segmap_data *)seg->s_data;
2140 2122 addr = seg->s_base;
2141 2123 for (smp = smd->smd_sm, smp_end = smp + smd->smd_npages;
2142 2124 smp < smp_end; smp++) {
2143 2125
2144 2126 if (smp->sm_refcnt) {
2145 2127 for (off = 0; off < MAXBSIZE; off += PAGESIZE) {
2146 2128 int we_own_it = 0;
2147 2129
2148 2130 /*
2149 2131 * If pp == NULL, the page either does
2150 2132 * not exist or is exclusively locked.
2151 2133 * So determine if it exists before
2152 2134 * searching for it.
2153 2135 */
2154 2136 if ((pp = page_lookup_nowait(smp->sm_vp,
2155 2137 smp->sm_off + off, SE_SHARED)))
2156 2138 we_own_it = 1;
2157 2139 else
2158 2140 pp = page_exists(smp->sm_vp,
2159 2141 smp->sm_off + off);
2160 2142
2161 2143 if (pp) {
2162 2144 pfn = page_pptonum(pp);
2163 2145 dump_addpage(seg->s_as,
2164 2146 addr + off, pfn);
2165 2147 if (we_own_it)
2166 2148 page_unlock(pp);
2167 2149 }
2168 2150 dump_timeleft = dump_timeout;
2169 2151 }
2170 2152 }
2171 2153 addr += MAXBSIZE;
2172 2154 }
2173 2155 }
2174 2156
2175 2157 /*ARGSUSED*/
2176 2158 static int
2177 2159 segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
2178 2160 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2179 2161 {
2180 2162 return (ENOTSUP);
2181 2163 }
2182 2164
2183 2165 static int
2184 2166 segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
2185 2167 {
2186 2168 struct segmap_data *smd = (struct segmap_data *)seg->s_data;
2187 2169
2188 2170 memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
2189 2171 memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
2190 2172 return (0);
2191 2173 }
2192 2174
2193 2175 /*ARGSUSED*/
2194 2176 static lgrp_mem_policy_info_t *
2195 2177 segmap_getpolicy(struct seg *seg, caddr_t addr)
2196 2178 {
2197 2179 return (NULL);
2198 2180 }
2199 2181
2200 2182 /*ARGSUSED*/
2201 2183 static int
2202 2184 segmap_capable(struct seg *seg, segcapability_t capability)
2203 2185 {
2204 2186 return (0);
2205 2187 }
2206 2188
2207 2189
2208 2190 #ifdef SEGKPM_SUPPORT
2209 2191
2210 2192 /*
2211 2193 * segkpm support routines
2212 2194 */
2213 2195
2214 2196 static caddr_t
2215 2197 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2216 2198 struct smap *smp, enum seg_rw rw)
2217 2199 {
2218 2200 caddr_t base;
2219 2201 page_t *pp;
2220 2202 int newpage = 0;
2221 2203 struct kpme *kpme;
2222 2204
2223 2205 ASSERT(smp->sm_refcnt > 0);
2224 2206
2225 2207 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
2226 2208 kmutex_t *smtx;
2227 2209
2228 2210 base = segkpm_create_va(off);
2229 2211
2230 2212 if ((pp = page_create_va(vp, off, PAGESIZE, PG_WAIT,
2231 2213 seg, base)) == NULL) {
2232 2214 panic("segmap_pagecreate_kpm: "
2233 2215 "page_create failed");
2234 2216 /*NOTREACHED*/
2235 2217 }
2236 2218
2237 2219 newpage = 1;
2238 2220 page_io_unlock(pp);
2239 2221 ASSERT((u_offset_t)(off - smp->sm_off) <= INT_MAX);
2240 2222
2241 2223 /*
2242 2224 * Mark this here until the following segmap_pagecreate
2243 2225 * or segmap_release.
2244 2226 */
2245 2227 smtx = SMAPMTX(smp);
2246 2228 mutex_enter(smtx);
2247 2229 smp->sm_flags |= SM_KPM_NEWPAGE;
2248 2230 mutex_exit(smtx);
2249 2231 }
2250 2232
2251 2233 kpme = GET_KPME(smp);
2252 2234 if (!newpage && kpme->kpe_page == pp)
2253 2235 base = hat_kpm_page2va(pp, 0);
2254 2236 else
2255 2237 base = hat_kpm_mapin(pp, kpme);
2256 2238
2257 2239 /*
2258 2240 * FS code may decide not to call segmap_pagecreate and we
2259 2241 * don't invoke segmap_fault via TLB miss, so we have to set
2260 2242 * ref and mod bits in advance.
2261 2243 */
2262 2244 if (rw == S_WRITE) {
2263 2245 hat_setrefmod(pp);
2264 2246 } else {
2265 2247 ASSERT(rw == S_READ);
2266 2248 hat_setref(pp);
2267 2249 }
2268 2250
2269 2251 smd_cpu[CPU->cpu_seqid].scpu.scpu_pagecreate++;
2270 2252
2271 2253 return (base);
2272 2254 }
2273 2255
2274 2256 /*
2275 2257 * Find the smap structure corresponding to the
2276 2258 * KPM addr and return it locked.
2277 2259 */
2278 2260 struct smap *
2279 2261 get_smap_kpm(caddr_t addr, page_t **ppp)
2280 2262 {
2281 2263 struct smap *smp;
2282 2264 struct vnode *vp;
2283 2265 u_offset_t offset;
2284 2266 caddr_t baseaddr = (caddr_t)((uintptr_t)addr & MAXBMASK);
2285 2267 int hashid;
2286 2268 kmutex_t *hashmtx;
2287 2269 page_t *pp;
2288 2270 union segmap_cpu *scpu;
2289 2271
2290 2272 pp = hat_kpm_vaddr2page(baseaddr);
2291 2273
2292 2274 ASSERT(pp && !PP_ISFREE(pp));
2293 2275 ASSERT(PAGE_LOCKED(pp));
2294 2276 ASSERT(((uintptr_t)pp->p_offset & MAXBOFFSET) == 0);
2295 2277
2296 2278 vp = pp->p_vnode;
2297 2279 offset = pp->p_offset;
2298 2280 ASSERT(vp != NULL);
2299 2281
2300 2282 /*
2301 2283 * Assume the last smap used on this cpu is the one needed.
2302 2284 */
2303 2285 scpu = smd_cpu+CPU->cpu_seqid;
2304 2286 smp = scpu->scpu.scpu_last_smap;
2305 2287 mutex_enter(&smp->sm_mtx);
2306 2288 if (smp->sm_vp == vp && smp->sm_off == offset) {
2307 2289 ASSERT(smp->sm_refcnt > 0);
2308 2290 } else {
2309 2291 /*
2310 2292 * Assumption wrong, find the smap on the hash chain.
2311 2293 */
2312 2294 mutex_exit(&smp->sm_mtx);
2313 2295 SMAP_HASHFUNC(vp, offset, hashid); /* macro assigns hashid */
2314 2296 hashmtx = SHASHMTX(hashid);
2315 2297
2316 2298 mutex_enter(hashmtx);
2317 2299 smp = smd_hash[hashid].sh_hash_list;
2318 2300 for (; smp != NULL; smp = smp->sm_hash) {
2319 2301 if (smp->sm_vp == vp && smp->sm_off == offset)
2320 2302 break;
2321 2303 }
2322 2304 mutex_exit(hashmtx);
2323 2305 if (smp) {
2324 2306 mutex_enter(&smp->sm_mtx);
2325 2307 ASSERT(smp->sm_vp == vp && smp->sm_off == offset);
2326 2308 }
2327 2309 }
2328 2310
2329 2311 if (ppp)
2330 2312 *ppp = smp ? pp : NULL;
2331 2313
2332 2314 return (smp);
2333 2315 }
2334 2316
2335 2317 #else /* SEGKPM_SUPPORT */
2336 2318
2337 2319 /* segkpm stubs */
2338 2320
2339 2321 /*ARGSUSED*/
2340 2322 static caddr_t
2341 2323 segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
2342 2324 struct smap *smp, enum seg_rw rw)
2343 2325 {
2344 2326 return (NULL);
2345 2327 }
2346 2328
2347 2329 /*ARGSUSED*/
2348 2330 struct smap *
2349 2331 get_smap_kpm(caddr_t addr, page_t **ppp)
2350 2332 {
2351 2333 return (NULL);
2352 2334 }
2353 2335
2354 2336 #endif /* SEGKPM_SUPPORT */
1429 lines elided
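For context, a minimal sketch of how a ufs/tmpfs-style read path typically drives segmap_getmapflt() and segmap_release(); the helper name example_segmap_read, the simplified bounds checks, and the abbreviated error handling are illustrative assumptions, not code from seg_map.c or from this change.

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>	/* segkmap */

/*
 * Illustrative only: copy out up to fsize bytes of vp through uiop,
 * one MAXBSIZE window at a time.
 */
static int
example_segmap_read(vnode_t *vp, uio_t *uiop, u_offset_t fsize)
{
	int error = 0;

	while (error == 0 && uiop->uio_resid > 0 &&
	    (u_offset_t)uiop->uio_loffset < fsize) {
		u_offset_t off = uiop->uio_loffset;
		size_t mapon = off & (u_offset_t)MAXBOFFSET;
		size_t n = MIN(MAXBSIZE - mapon, (size_t)uiop->uio_resid);
		caddr_t base;

		n = MIN(n, (size_t)(fsize - off));

		/*
		 * Returns a kernel address for the MAXBSIZE window containing
		 * off (a segkpm address when is_kpm, otherwise a segkmap
		 * slot); forcefault != 0 prefaults the translations so the
		 * uiomove() below will not fault.
		 */
		base = segmap_getmapflt(segkmap, vp, off, n, 1, S_READ);

		error = uiomove(base + mapon, n, UIO_READ, uiop);

		/*
		 * Any flag other than SM_DONTNEED makes segmap_release()
		 * call VOP_PUTPAGE(); a write path would pass SM_WRITE
		 * (and perhaps SM_ASYNC) instead of 0 here.
		 */
		if (error == 0)
			error = segmap_release(segkmap, base, 0);
		else
			(void) segmap_release(segkmap, base, 0);
	}
	return (error);
}

On a segkpm-capable machine the address handed back is a direct mapping of the page itself, so the fast paths above avoid loading segkmap translations; on other machines the same caller code works unchanged against a segkmap slot.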