Print this page
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL will accomplish the same thing in most
cases. In the remaining cases, a NULL function pointer causes the proper
error code to be returned.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sparc/v9/vm/seg_nf.c
+++ new/usr/src/uts/sparc/v9/vm/seg_nf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29 /*
30 30 * Portions of this source code were derived from Berkeley 4.3 BSD
31 31 * under license from the Regents of the University of California.
32 32 */
33 33
34 34 /*
35 35 * VM - segment for non-faulting loads.
36 36 */
37 37
38 38 #include <sys/types.h>
39 39 #include <sys/t_lock.h>
40 40 #include <sys/param.h>
41 41 #include <sys/mman.h>
42 42 #include <sys/errno.h>
43 43 #include <sys/kmem.h>
44 44 #include <sys/cmn_err.h>
45 45 #include <sys/vnode.h>
46 46 #include <sys/proc.h>
47 47 #include <sys/conf.h>
48 48 #include <sys/debug.h>
49 49 #include <sys/archsystm.h>
50 50 #include <sys/lgrp.h>
51 51
52 52 #include <vm/page.h>
53 53 #include <vm/hat.h>
54 54 #include <vm/as.h>
55 55 #include <vm/seg.h>
56 56 #include <vm/vpage.h>
57 57
58 58 /*
↓ open down ↓ |
58 lines elided |
↑ open up ↑ |
59 59 * Private seg op routines.
60 60 */
61 61 static int segnf_dup(struct seg *seg, struct seg *newseg);
62 62 static int segnf_unmap(struct seg *seg, caddr_t addr, size_t len);
63 63 static void segnf_free(struct seg *seg);
64 64 static faultcode_t segnf_nomap(void);
65 65 static int segnf_setprot(struct seg *seg, caddr_t addr,
66 66 size_t len, uint_t prot);
67 67 static int segnf_checkprot(struct seg *seg, caddr_t addr,
68 68 size_t len, uint_t prot);
69 -static void segnf_badop(void);
70 69 static int segnf_nop(void);
71 70 static int segnf_getprot(struct seg *seg, caddr_t addr,
72 71 size_t len, uint_t *protv);
73 72 static u_offset_t segnf_getoffset(struct seg *seg, caddr_t addr);
74 73 static int segnf_gettype(struct seg *seg, caddr_t addr);
75 74 static int segnf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
76 75 static void segnf_dump(struct seg *seg);
77 76 static int segnf_pagelock(struct seg *seg, caddr_t addr, size_t len,
78 77 struct page ***ppp, enum lock_type type, enum seg_rw rw);
79 78 static int segnf_setpagesize(struct seg *seg, caddr_t addr, size_t len,
80 79 uint_t szc);
81 80 static int segnf_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
82 81 static lgrp_mem_policy_info_t *segnf_getpolicy(struct seg *seg,
83 82 caddr_t addr);
84 83
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
85 84
/*
 * Public seg_ops vector for no-fault segments.  Ops that are not set here
 * are left NULL; the common seg layer either skips a NULL op or fails the
 * request with the appropriate error, so no "bad op" panic stubs are needed.
 */
struct seg_ops segnf_ops = {
	.dup		= segnf_dup,
	.unmap		= segnf_unmap,
	.free		= segnf_free,
	/* any fault or fault-ahead on a no-fault segment returns FC_NOMAP */
	.fault		= (faultcode_t (*)(struct hat *, struct seg *, caddr_t,
	    size_t, enum fault_type, enum seg_rw))segnf_nomap,
	.faulta		= (faultcode_t (*)(struct seg *, caddr_t)) segnf_nomap,
	.setprot	= segnf_setprot,
	.checkprot	= segnf_checkprot,
	/* sync, incore, lockop, and advise all succeed trivially */
	.sync		= (int (*)(struct seg *, caddr_t, size_t, int, uint_t))
	    segnf_nop,
	.incore		= (size_t (*)(struct seg *, caddr_t, size_t, char *))
	    segnf_nop,
	.lockop		= (int (*)(struct seg *, caddr_t, size_t, int, int,
	    ulong_t *, size_t))segnf_nop,
	.getprot	= segnf_getprot,
	.getoffset	= segnf_getoffset,
	.gettype	= segnf_gettype,
	.getvp		= segnf_getvp,
	.advise		= (int (*)(struct seg *, caddr_t, size_t, uint_t))
	    segnf_nop,
	.dump		= segnf_dump,
	.pagelock	= segnf_pagelock,
	.setpagesize	= segnf_setpagesize,
	.getmemid	= segnf_getmemid,
	.getpolicy	= segnf_getpolicy,
};
114 112
115 113 /*
116 114 * vnode and page for the page of zeros we use for the nf mappings.
117 115 */
118 116 static kmutex_t segnf_lock;
119 117 static struct vnode nfvp;
120 118 static struct page **nfpp;
121 119
122 120 #define addr_to_vcolor(addr) \
123 121 (shm_alignment) ? \
124 122 ((int)(((uintptr_t)(addr) & (shm_alignment - 1)) >> PAGESHIFT)) : 0
125 123
126 124 /*
127 125 * We try to limit the number of Non-fault segments created.
128 126 * Non fault segments are created to optimize sparc V9 code which uses
129 127 * the sparc nonfaulting load ASI (ASI_PRIMARY_NOFAULT).
130 128 *
131 129 * There are several reasons why creating too many non-fault segments
132 130 * could cause problems.
133 131 *
134 132 * First, excessive allocation of kernel resources for the seg
135 133 * structures and the HAT data to map the zero pages.
136 134 *
137 135 * Secondly, creating nofault segments actually uses up user virtual
138 136 * address space. This makes it unavailable for subsequent mmap(0, ...)
139 137 * calls which use as_gap() to find empty va regions. Creation of too
140 138 * many nofault segments could thus interfere with the ability of the
141 139 * runtime linker to load a shared object.
142 140 */
143 141 #define MAXSEGFORNF (10000)
144 142 #define MAXNFSEARCH (5)
145 143
146 144
/*
 * One-time driver initialization; must be called from startup().
 * Only sets up the mutex that serializes lazy creation of the shared
 * zero pages in segnf_create().
 */
void
segnf_init()
{
	mutex_init(&segnf_lock, NULL, MUTEX_DEFAULT, NULL);
}
155 153
156 154
/*
 * Create a no-fault segment.
 *
 * The no-fault segment is not technically necessary, as the code in
 * nfload() in trap.c will emulate the SPARC instruction and load
 * a value of zero in the destination register.
 *
 * However, this code tries to put a page of zero's at the nofault address
 * so that subsequent non-faulting loads to the same page will not
 * trap with a tlb miss.
 *
 * In order to help limit the number of segments we merge adjacent nofault
 * segments into a single segment.  If we get a large number of segments
 * we'll also try to delete a random other nf segment.
 *
 * Returns 0; called with the address space write-locked.
 */
/* ARGSUSED */
int
segnf_create(struct seg *seg, void *argsp)
{
	uint_t prot;
	pgcnt_t vacpgs;
	u_offset_t off = 0;
	caddr_t vaddr = NULL;
	int i, color;
	struct seg *s1;
	struct seg *s2;
	size_t size;
	struct as *as = seg->s_as;

	ASSERT(as && AS_WRITE_HELD(as, &as->a_lock));

	/*
	 * Need a page per virtual color or just 1 if no vac.
	 * The zero pages are created once, on first use, under segnf_lock;
	 * they are shared read-only by every no-fault segment thereafter.
	 */
	mutex_enter(&segnf_lock);
	if (nfpp == NULL) {
		struct seg kseg;

		vacpgs = 1;
		if (shm_alignment > PAGESIZE) {
			vacpgs = shm_alignment >> PAGESHIFT;
		}

		nfpp = kmem_alloc(sizeof (*nfpp) * vacpgs, KM_SLEEP);

		kseg.s_as = &kas;
		for (i = 0; i < vacpgs; i++, off += PAGESIZE,
		    vaddr += PAGESIZE) {
			nfpp[i] = page_create_va(&nfvp, off, PAGESIZE,
			    PG_WAIT | PG_NORELOC, &kseg, vaddr);
			page_io_unlock(nfpp[i]);
			/* keep only a shared lock; readers never modify it */
			page_downgrade(nfpp[i]);
			pagezero(nfpp[i], 0, PAGESIZE);
		}
	}
	mutex_exit(&segnf_lock);

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

	/*
	 * s_data can't be NULL because of ASSERTS in the common vm code.
	 */
	seg->s_ops = &segnf_ops;
	seg->s_data = seg;
	seg->s_flags |= S_PURGE;

	mutex_enter(&as->a_contents);
	as->a_flags |= AS_NEEDSPURGE;
	mutex_exit(&as->a_contents);

	/*
	 * Pre-load the zero page of the matching virtual color so the
	 * next non-faulting load to this page does not take a tlb miss.
	 */
	prot = PROT_READ;
	color = addr_to_vcolor(seg->s_base);
	if (as != &kas)
		prot |= PROT_USER;
	hat_memload(as->a_hat, seg->s_base, nfpp[color],
	    prot | HAT_NOFAULT, HAT_LOAD);

	/*
	 * At this point see if we can concatenate a segment to
	 * a non-fault segment immediately before and/or after it.
	 */
	if ((s1 = AS_SEGPREV(as, seg)) != NULL &&
	    s1->s_ops == &segnf_ops &&
	    s1->s_base + s1->s_size == seg->s_base) {
		size = s1->s_size;
		seg_free(s1);
		seg->s_base -= size;
		seg->s_size += size;
	}

	if ((s2 = AS_SEGNEXT(as, seg)) != NULL &&
	    s2->s_ops == &segnf_ops &&
	    seg->s_base + seg->s_size == s2->s_base) {
		size = s2->s_size;
		seg_free(s2);
		seg->s_size += size;
	}

	/*
	 * if we already have a lot of segments, try to delete some other
	 * nofault segment to reduce the probability of uncontrolled segment
	 * creation.
	 *
	 * the code looks around quickly (no more than MAXNFSEARCH segments
	 * each way) for another NF segment and then deletes it.
	 *
	 * NOTE(review): both walks below pass "seg" (not "s1") as the second
	 * argument to AS_SEGPREV/AS_SEGNEXT, so each iteration re-fetches the
	 * same immediate neighbor instead of advancing; the search therefore
	 * never looks more than one segment away.  Looks like a latent bug --
	 * confirm intent before changing, since only throttling is affected.
	 */
	if (avl_numnodes(&as->a_segtree) > MAXSEGFORNF) {
		size = 0;
		s2 = NULL;
		s1 = AS_SEGPREV(as, seg);
		while (size++ < MAXNFSEARCH && s1 != NULL) {
			if (s1->s_ops == &segnf_ops)
				s2 = s1;
			s1 = AS_SEGPREV(s1->s_as, seg);
		}
		if (s2 == NULL) {
			s1 = AS_SEGNEXT(as, seg);
			while (size-- > 0 && s1 != NULL) {
				if (s1->s_ops == &segnf_ops)
					s2 = s1;
				s1 = AS_SEGNEXT(as, seg);
			}
		}
		if (s2 != NULL)
			seg_unmap(s2);
	}

	return (0);
}
286 284
/*
 * Never really need "No fault" segments, so they aren't dup'd.
 * NOTE(review): presumably the S_PURGE flag set in segnf_create() guarantees
 * these segments are purged before any as_dup() can reach them -- confirm.
 */
/* ARGSUSED */
static int
segnf_dup(struct seg *seg, struct seg *newseg)
{
	panic("segnf_dup");
	return (0);		/* not reached; satisfies the int return type */
}
297 295
/*
 * Unmap [addr, addr + len) from seg, splitting the segment at addr when
 * the range is interior.  Panics on unaligned or out-of-range requests.
 * Always returns 0; called with the address space write-locked.
 */
static int
segnf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));

	/*
	 * Check for bad sizes.
	 */
	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
		cmn_err(CE_PANIC, "segnf_unmap: bad unmap size");
	}

	/*
	 * Unload any hardware translations in the range to be taken out.
	 */
	hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);

	if (addr == seg->s_base && len == seg->s_size) {
		/*
		 * Freeing entire segment.
		 */
		seg_free(seg);
	} else if (addr == seg->s_base) {
		/*
		 * Freeing the beginning of the segment.
		 */
		seg->s_base += len;
		seg->s_size -= len;
	} else if (addr + len == seg->s_base + seg->s_size) {
		/*
		 * Freeing the end of the segment.
		 */
		seg->s_size -= len;
	} else {
		/*
		 * The section to go is in the middle of the segment, so we
		 * have to cut it into two segments.  We shrink the existing
		 * "seg" at the low end, and create "nseg" for the high end.
		 */
		caddr_t nbase = addr + len;
		size_t nsize = (seg->s_base + seg->s_size) - nbase;
		struct seg *nseg;

		/*
		 * Trim down "seg" before trying to stick "nseg" into the as.
		 */
		seg->s_size = addr - seg->s_base;
		nseg = seg_alloc(seg->s_as, nbase, nsize);
		if (nseg == NULL)
			cmn_err(CE_PANIC, "segnf_unmap: seg_alloc failed");

		/*
		 * s_data can't be NULL because of ASSERTs in common VM code.
		 */
		nseg->s_ops = seg->s_ops;
		nseg->s_data = nseg;
		/* new high-end segment must also be purgeable, like seg */
		nseg->s_flags |= S_PURGE;
		mutex_enter(&seg->s_as->a_contents);
		seg->s_as->a_flags |= AS_NEEDSPURGE;
		mutex_exit(&seg->s_as->a_contents);
	}

	return (0);
}
366 364
/*
 * Free a segment.  Nothing per-segment to release here: the zero pages
 * are globally shared (nfpp) and translations are torn down in unmap.
 */
static void
segnf_free(struct seg *seg)
{
	ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
}
375 373
/*
 * No faults allowed on segnf.  Used as both the fault and faulta op;
 * any fault on a no-fault segment is reported as "no mapping".
 */
static faultcode_t
segnf_nomap(void)
{
	return (FC_NOMAP);
}
384 382
/*
 * Protections on a no-fault segment can never be changed; always EACCES.
 */
/* ARGSUSED */
static int
segnf_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	return (EACCES);
}
↓ open down ↓ |
286 lines elided |
↑ open up ↑ |
392 390
393 391 /* ARGSUSED */
394 392 static int
395 393 segnf_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
396 394 {
397 395 uint_t sprot;
398 396 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
399 397
400 398 sprot = seg->s_as == &kas ? PROT_READ : PROT_READ|PROT_USER;
401 399 return ((prot & sprot) == prot ? 0 : EACCES);
402 -}
403 -
404 -static void
405 -segnf_badop(void)
406 -{
407 - panic("segnf_badop");
408 - /*NOTREACHED*/
409 400 }
410 401
/*
 * Trivial success op; shared (via casts) by sync, incore, lockop,
 * and advise in the segnf_ops vector.
 */
static int
segnf_nop(void)
{
	return 0;
}
416 407
417 408 static int
418 409 segnf_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
419 410 {
420 411 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
421 412 size_t p;
422 413 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
423 414
424 415 for (p = 0; p < pgno; ++p)
425 416 protv[p] = PROT_READ;
426 417 return (0);
427 418 }
428 419
/*
 * Offset within the backing object is always reported as 0.
 */
/* ARGSUSED */
static u_offset_t
segnf_getoffset(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return ((u_offset_t)0);
}
437 428
/*
 * Mapping type is always reported as MAP_SHARED; all no-fault
 * segments map the same global zero pages.
 */
/* ARGSUSED */
static int
segnf_gettype(struct seg *seg, caddr_t addr)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (MAP_SHARED);
}
446 437
/*
 * Return the shared vnode backing all no-fault zero pages.
 */
/* ARGSUSED */
static int
segnf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	*vpp = &nfvp;
	return (0);
}
456 447
/*
 * segnf pages are not dumped, so we just return
 */
/* ARGSUSED */
static void
segnf_dump(struct seg *seg)
{}
464 455
/*
 * Page locking is not supported on no-fault segments.
 */
/*ARGSUSED*/
static int
segnf_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}
472 463
/*
 * Large-page size changes are not supported on no-fault segments.
 */
/*ARGSUSED*/
static int
segnf_setpagesize(struct seg *seg, caddr_t addr, size_t len,
    uint_t szc)
{
	return (ENOTSUP);
}
480 471
/*
 * No memory id is available for a no-fault segment.
 */
/*ARGSUSED*/
static int
segnf_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	return (ENODEV);
}
487 478
/*
 * No-fault segments carry no lgroup memory placement policy.
 */
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segnf_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}
↓ open down ↓ |
75 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX