Print this page
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory. The code is there and in theory it runs when we get *extremely* low
on memory. In practice, it never runs since the definition of low-on-memory
is antiquated: the thresholds that would trigger swapping were tuned for
machines with a tiny fraction of the memory found on modern systems, so
they are effectively never reached.
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4/os/trap.c
+++ new/usr/src/uts/sun4/os/trap.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright (c) 2012 Joyent, Inc. All rights reserved.
29 29 */
30 30
31 31 #include <sys/mmu.h>
32 32 #include <sys/systm.h>
33 33 #include <sys/trap.h>
34 34 #include <sys/machtrap.h>
35 35 #include <sys/vtrace.h>
36 36 #include <sys/prsystm.h>
37 37 #include <sys/archsystm.h>
38 38 #include <sys/machsystm.h>
39 39 #include <sys/fpu/fpusystm.h>
40 40 #include <sys/tnf.h>
41 41 #include <sys/tnf_probe.h>
42 42 #include <sys/simulate.h>
43 43 #include <sys/ftrace.h>
44 44 #include <sys/ontrap.h>
45 45 #include <sys/kcpc.h>
46 46 #include <sys/kobj.h>
47 47 #include <sys/procfs.h>
48 48 #include <sys/sun4asi.h>
49 49 #include <sys/sdt.h>
50 50 #include <sys/fpras.h>
51 51 #include <sys/contract/process_impl.h>
52 52
53 53 #ifdef TRAPTRACE
54 54 #include <sys/traptrace.h>
55 55 #endif
56 56
/*
 * Debug knobs and counters, patchable with a kernel debugger.
 */
int tudebug = 0;		/* nonzero: showregs() on user traps (see trap()) */
static int tudebugbpt = 0;	/* presumably narrows tudebug to breakpoint traps — not referenced in this chunk, confirm elsewhere */
static int tudebugfpe = 0;	/* presumably narrows tudebug to FP traps — not referenced in this chunk, confirm elsewhere */

static int alignfaults = 0;	/* count of user alignment faults handled below */

#if defined(TRAPDEBUG) || defined(lint)
static int lodebug = 0;		/* nonzero: showregs()/traceback() before honoring t_lofault */
#else
#define	lodebug	0		/* compiled out on non-debug builds so the checks fold away */
#endif /* defined(TRAPDEBUG) || defined(lint) */
68 68
69 69
70 70 int vis1_partial_support(struct regs *rp, k_siginfo_t *siginfo, uint_t *fault);
71 71 #pragma weak vis1_partial_support
72 72
73 73 void showregs(unsigned, struct regs *, caddr_t, uint_t);
74 74 #pragma weak showregs
75 75
76 76 void trap_async_hwerr(void);
77 77 #pragma weak trap_async_hwerr
78 78
79 79 void trap_async_berr_bto(int, struct regs *);
80 80 #pragma weak trap_async_berr_bto
81 81
82 82 static enum seg_rw get_accesstype(struct regs *);
83 83 static int nfload(struct regs *, int *);
84 84 static int swap_nc(struct regs *, int);
85 85 static int ldstub_nc(struct regs *, int);
86 86 void trap_cleanup(struct regs *, uint_t, k_siginfo_t *, int);
87 87 void trap_rtt(void);
88 88
89 89 static int
90 90 die(unsigned type, struct regs *rp, caddr_t addr, uint_t mmu_fsr)
91 91 {
92 92 struct panic_trap_info ti;
93 93
94 94 #ifdef TRAPTRACE
95 95 TRAPTRACE_FREEZE;
96 96 #endif
97 97
98 98 ti.trap_regs = rp;
99 99 ti.trap_type = type;
100 100 ti.trap_addr = addr;
101 101 ti.trap_mmu_fsr = mmu_fsr;
102 102
103 103 curthread->t_panic_trap = &ti;
104 104
105 105 if (type == T_DATA_MMU_MISS && addr < (caddr_t)KERNELBASE) {
106 106 panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x "
107 107 "occurred in module \"%s\" due to %s",
108 108 type, (void *)rp, (void *)addr, mmu_fsr,
109 109 mod_containing_pc((caddr_t)rp->r_pc),
110 110 addr < (caddr_t)PAGESIZE ?
111 111 "a NULL pointer dereference" :
112 112 "an illegal access to a user address");
113 113 } else {
114 114 panic("BAD TRAP: type=%x rp=%p addr=%p mmu_fsr=%x",
115 115 type, (void *)rp, (void *)addr, mmu_fsr);
116 116 }
117 117
118 118 return (0); /* avoid optimization of restore in call's delay slot */
119 119 }
120 120
121 121 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
122 122 int ill_calls;
123 123 #endif
124 124
/*
 * Currently, the only PREFETCH/PREFETCHA instructions which cause traps
 * are the "strong" prefetches (fcn=20-23). But we check for all flavors of
 * PREFETCH, in case some future variant also causes a DATA_MMU_MISS.
 *
 * Each macro below masks opcode bits of a raw 32-bit instruction word and
 * compares against the encoding of the instruction of interest.  The mask
 * values presumably follow the SPARC V9 op/op3 field layout — confirm
 * against the architecture manual before changing them.
 */
#define	IS_PREFETCH(i)	(((i) & 0xc1780000) == 0xc1680000)

#define	IS_FLUSH(i)	(((i) & 0xc1f80000) == 0x81d80000)	/* FLUSH: users may pass a bogus address; skipped below */
#define	IS_SWAP(i)	(((i) & 0xc1f80000) == 0xc0780000)	/* SWAP: emulated via swap_nc() on noncacheable faults */
#define	IS_LDSTUB(i)	(((i) & 0xc1f80000) == 0xc0680000)	/* LDSTUB: emulated via ldstub_nc() on noncacheable faults */
#define	IS_FLOAT(i)	(((i) & 0x1000000) != 0)	/* bit 24 set: FP load/store group */
#define	IS_STORE(i)	(((i) >> 21) & 1)	/* bit 21: store (vs. load) in the ld/st group */
137 137
138 138 /*
139 139 * Called from the trap handler when a processor trap occurs.
140 140 */
141 141 /*VARARGS2*/
142 142 void
143 143 trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t mmu_fsr)
144 144 {
145 145 proc_t *p = ttoproc(curthread);
146 146 klwp_id_t lwp = ttolwp(curthread);
147 147 struct machpcb *mpcb = NULL;
148 148 k_siginfo_t siginfo;
149 149 uint_t op3, fault = 0;
150 150 int stepped = 0;
151 151 greg_t oldpc;
152 152 int mstate;
153 153 char *badaddr;
154 154 faultcode_t res;
155 155 enum fault_type fault_type;
156 156 enum seg_rw rw;
157 157 uintptr_t lofault;
158 158 label_t *onfault;
159 159 int instr;
160 160 int iskernel;
↓ open down ↓ |
160 lines elided |
↑ open up ↑ |
161 161 int watchcode;
162 162 int watchpage;
163 163 extern faultcode_t pagefault(caddr_t, enum fault_type,
164 164 enum seg_rw, int);
165 165 #ifdef sun4v
166 166 extern boolean_t tick_stick_emulation_active;
167 167 #endif /* sun4v */
168 168
169 169 CPU_STATS_ADDQ(CPU, sys, trap, 1);
170 170
171 -#ifdef SF_ERRATA_23 /* call causes illegal-insn */
172 - ASSERT((curthread->t_schedflag & TS_DONT_SWAP) ||
173 - (type == T_UNIMP_INSTR));
174 -#else
175 - ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
176 -#endif /* SF_ERRATA_23 */
177 -
178 171 if (USERMODE(rp->r_tstate) || (type & T_USER)) {
179 172 /*
180 173 * Set lwp_state before trying to acquire any
181 174 * adaptive lock
182 175 */
183 176 ASSERT(lwp != NULL);
184 177 lwp->lwp_state = LWP_SYS;
185 178 /*
186 179 * Set up the current cred to use during this trap. u_cred
187 180 * no longer exists. t_cred is used instead.
188 181 * The current process credential applies to the thread for
189 182 * the entire trap. If trapping from the kernel, this
190 183 * should already be set up.
191 184 */
192 185 if (curthread->t_cred != p->p_cred) {
193 186 cred_t *oldcred = curthread->t_cred;
194 187 /*
195 188 * DTrace accesses t_cred in probe context. t_cred
196 189 * must always be either NULL, or point to a valid,
197 190 * allocated cred structure.
198 191 */
199 192 curthread->t_cred = crgetcred();
200 193 crfree(oldcred);
201 194 }
202 195 type |= T_USER;
203 196 ASSERT((type == (T_SYS_RTT_PAGE | T_USER)) ||
204 197 (type == (T_SYS_RTT_ALIGN | T_USER)) ||
205 198 lwp->lwp_regs == rp);
206 199 mpcb = lwptompcb(lwp);
207 200 switch (type) {
208 201 case T_WIN_OVERFLOW + T_USER:
209 202 case T_WIN_UNDERFLOW + T_USER:
210 203 case T_SYS_RTT_PAGE + T_USER:
211 204 case T_DATA_MMU_MISS + T_USER:
212 205 mstate = LMS_DFAULT;
213 206 break;
214 207 case T_INSTR_MMU_MISS + T_USER:
215 208 mstate = LMS_TFAULT;
216 209 break;
217 210 default:
218 211 mstate = LMS_TRAP;
219 212 break;
220 213 }
221 214 /* Kernel probe */
222 215 TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
223 216 tnf_microstate, state, (char)mstate);
224 217 mstate = new_mstate(curthread, mstate);
225 218 siginfo.si_signo = 0;
226 219 stepped =
227 220 lwp->lwp_pcb.pcb_step != STEP_NONE &&
228 221 ((oldpc = rp->r_pc), prundostep()) &&
229 222 mmu_btop((uintptr_t)addr) == mmu_btop((uintptr_t)oldpc);
230 223 /* this assignment must not precede call to prundostep() */
231 224 oldpc = rp->r_pc;
232 225 }
233 226
234 227 TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
235 228 "C_trap_handler_enter:type %x", type);
236 229
237 230 #ifdef F_DEFERRED
238 231 /*
239 232 * Take any pending floating point exceptions now.
240 233 * If the floating point unit has an exception to handle,
241 234 * just return to user-level to let the signal handler run.
242 235 * The instruction that got us to trap() will be reexecuted on
243 236 * return from the signal handler and we will trap to here again.
244 237 * This is necessary to disambiguate simultaneous traps which
245 238 * happen when a floating-point exception is pending and a
246 239 * machine fault is incurred.
247 240 */
248 241 if (type & USER) {
249 242 /*
250 243 * FP_TRAPPED is set only by sendsig() when it copies
251 244 * out the floating-point queue for the signal handler.
252 245 * It is set there so we can test it here and in syscall().
253 246 */
254 247 mpcb->mpcb_flags &= ~FP_TRAPPED;
255 248 syncfpu();
256 249 if (mpcb->mpcb_flags & FP_TRAPPED) {
257 250 /*
258 251 * trap() has have been called recursively and may
259 252 * have stopped the process, so do single step
260 253 * support for /proc.
261 254 */
262 255 mpcb->mpcb_flags &= ~FP_TRAPPED;
263 256 goto out;
264 257 }
265 258 }
266 259 #endif
267 260 switch (type) {
268 261 case T_DATA_MMU_MISS:
269 262 case T_INSTR_MMU_MISS + T_USER:
270 263 case T_DATA_MMU_MISS + T_USER:
271 264 case T_DATA_PROT + T_USER:
272 265 case T_AST + T_USER:
273 266 case T_SYS_RTT_PAGE + T_USER:
274 267 case T_FLUSH_PCB + T_USER:
275 268 case T_FLUSHW + T_USER:
276 269 break;
277 270
278 271 default:
279 272 FTRACE_3("trap(): type=0x%lx, regs=0x%lx, addr=0x%lx",
280 273 (ulong_t)type, (ulong_t)rp, (ulong_t)addr);
281 274 break;
282 275 }
283 276
284 277 switch (type) {
285 278
286 279 default:
287 280 /*
288 281 * Check for user software trap.
289 282 */
290 283 if (type & T_USER) {
291 284 if (tudebug)
292 285 showregs(type, rp, (caddr_t)0, 0);
293 286 if ((type & ~T_USER) >= T_SOFTWARE_TRAP) {
294 287 bzero(&siginfo, sizeof (siginfo));
295 288 siginfo.si_signo = SIGILL;
296 289 siginfo.si_code = ILL_ILLTRP;
297 290 siginfo.si_addr = (caddr_t)rp->r_pc;
298 291 siginfo.si_trapno = type &~ T_USER;
299 292 fault = FLTILL;
300 293 break;
301 294 }
302 295 }
303 296 addr = (caddr_t)rp->r_pc;
304 297 (void) die(type, rp, addr, 0);
305 298 /*NOTREACHED*/
306 299
307 300 case T_ALIGNMENT: /* supv alignment error */
308 301 if (nfload(rp, NULL))
309 302 goto cleanup;
310 303
311 304 if (curthread->t_lofault) {
312 305 if (lodebug) {
313 306 showregs(type, rp, addr, 0);
314 307 traceback((caddr_t)rp->r_sp);
315 308 }
316 309 rp->r_g1 = EFAULT;
317 310 rp->r_pc = curthread->t_lofault;
318 311 rp->r_npc = rp->r_pc + 4;
319 312 goto cleanup;
320 313 }
321 314 (void) die(type, rp, addr, 0);
322 315 /*NOTREACHED*/
323 316
324 317 case T_INSTR_EXCEPTION: /* sys instruction access exception */
325 318 addr = (caddr_t)rp->r_pc;
326 319 (void) die(type, rp, addr, mmu_fsr);
327 320 /*NOTREACHED*/
328 321
329 322 case T_INSTR_MMU_MISS: /* sys instruction mmu miss */
330 323 addr = (caddr_t)rp->r_pc;
331 324 (void) die(type, rp, addr, 0);
332 325 /*NOTREACHED*/
333 326
334 327 case T_DATA_EXCEPTION: /* system data access exception */
335 328 switch (X_FAULT_TYPE(mmu_fsr)) {
336 329 case FT_RANGE:
337 330 /*
338 331 * This happens when we attempt to dereference an
339 332 * address in the address hole. If t_ontrap is set,
340 333 * then break and fall through to T_DATA_MMU_MISS /
341 334 * T_DATA_PROT case below. If lofault is set, then
342 335 * honour it (perhaps the user gave us a bogus
343 336 * address in the hole to copyin from or copyout to?)
344 337 */
345 338
346 339 if (curthread->t_ontrap != NULL)
347 340 break;
348 341
349 342 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
350 343 if (curthread->t_lofault) {
351 344 if (lodebug) {
352 345 showregs(type, rp, addr, 0);
353 346 traceback((caddr_t)rp->r_sp);
354 347 }
355 348 rp->r_g1 = EFAULT;
356 349 rp->r_pc = curthread->t_lofault;
357 350 rp->r_npc = rp->r_pc + 4;
358 351 goto cleanup;
359 352 }
360 353 (void) die(type, rp, addr, mmu_fsr);
361 354 /*NOTREACHED*/
362 355
363 356 case FT_PRIV:
364 357 /*
365 358 * This can happen if we access ASI_USER from a kernel
366 359 * thread. To support pxfs, we need to honor lofault if
367 360 * we're doing a copyin/copyout from a kernel thread.
368 361 */
369 362
370 363 if (nfload(rp, NULL))
371 364 goto cleanup;
372 365 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
373 366 if (curthread->t_lofault) {
374 367 if (lodebug) {
375 368 showregs(type, rp, addr, 0);
376 369 traceback((caddr_t)rp->r_sp);
377 370 }
378 371 rp->r_g1 = EFAULT;
379 372 rp->r_pc = curthread->t_lofault;
380 373 rp->r_npc = rp->r_pc + 4;
381 374 goto cleanup;
382 375 }
383 376 (void) die(type, rp, addr, mmu_fsr);
384 377 /*NOTREACHED*/
385 378
386 379 default:
387 380 if (nfload(rp, NULL))
388 381 goto cleanup;
389 382 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
390 383 (void) die(type, rp, addr, mmu_fsr);
391 384 /*NOTREACHED*/
392 385
393 386 case FT_NFO:
394 387 break;
395 388 }
396 389 /* fall into ... */
397 390
398 391 case T_DATA_MMU_MISS: /* system data mmu miss */
399 392 case T_DATA_PROT: /* system data protection fault */
400 393 if (nfload(rp, &instr))
401 394 goto cleanup;
402 395
403 396 /*
404 397 * If we're under on_trap() protection (see <sys/ontrap.h>),
405 398 * set ot_trap and return from the trap to the trampoline.
406 399 */
407 400 if (curthread->t_ontrap != NULL) {
408 401 on_trap_data_t *otp = curthread->t_ontrap;
409 402
410 403 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT,
411 404 "C_trap_handler_exit");
412 405 TRACE_0(TR_FAC_TRAP, TR_TRAP_END, "trap_end");
413 406
414 407 if (otp->ot_prot & OT_DATA_ACCESS) {
415 408 otp->ot_trap |= OT_DATA_ACCESS;
416 409 rp->r_pc = otp->ot_trampoline;
417 410 rp->r_npc = rp->r_pc + 4;
418 411 goto cleanup;
419 412 }
420 413 }
421 414 lofault = curthread->t_lofault;
422 415 onfault = curthread->t_onfault;
423 416 curthread->t_lofault = 0;
424 417
425 418 mstate = new_mstate(curthread, LMS_KFAULT);
426 419
427 420 switch (type) {
428 421 case T_DATA_PROT:
429 422 fault_type = F_PROT;
430 423 rw = S_WRITE;
431 424 break;
432 425 case T_INSTR_MMU_MISS:
433 426 fault_type = F_INVAL;
434 427 rw = S_EXEC;
435 428 break;
436 429 case T_DATA_MMU_MISS:
437 430 case T_DATA_EXCEPTION:
438 431 /*
439 432 * The hardware doesn't update the sfsr on mmu
440 433 * misses so it is not easy to find out whether
441 434 * the access was a read or a write so we need
442 435 * to decode the actual instruction.
443 436 */
444 437 fault_type = F_INVAL;
445 438 rw = get_accesstype(rp);
446 439 break;
447 440 default:
448 441 cmn_err(CE_PANIC, "trap: unknown type %x", type);
449 442 break;
450 443 }
451 444 /*
452 445 * We determine if access was done to kernel or user
453 446 * address space. The addr passed into trap is really the
454 447 * tag access register.
455 448 */
456 449 iskernel = (((uintptr_t)addr & TAGACC_CTX_MASK) == KCONTEXT);
457 450 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
458 451
459 452 res = pagefault(addr, fault_type, rw, iskernel);
460 453 if (!iskernel && res == FC_NOMAP &&
461 454 addr < p->p_usrstack && grow(addr))
462 455 res = 0;
463 456
464 457 (void) new_mstate(curthread, mstate);
465 458
466 459 /*
467 460 * Restore lofault and onfault. If we resolved the fault, exit.
468 461 * If we didn't and lofault wasn't set, die.
469 462 */
470 463 curthread->t_lofault = lofault;
471 464 curthread->t_onfault = onfault;
472 465
473 466 if (res == 0)
474 467 goto cleanup;
475 468
476 469 if (IS_PREFETCH(instr)) {
477 470 /* skip prefetch instructions in kernel-land */
478 471 rp->r_pc = rp->r_npc;
479 472 rp->r_npc += 4;
480 473 goto cleanup;
481 474 }
482 475
483 476 if ((lofault == 0 || lodebug) &&
484 477 (calc_memaddr(rp, &badaddr) == SIMU_SUCCESS))
485 478 addr = badaddr;
486 479 if (lofault == 0)
487 480 (void) die(type, rp, addr, 0);
488 481 /*
489 482 * Cannot resolve fault. Return to lofault.
490 483 */
491 484 if (lodebug) {
492 485 showregs(type, rp, addr, 0);
493 486 traceback((caddr_t)rp->r_sp);
494 487 }
495 488 if (FC_CODE(res) == FC_OBJERR)
496 489 res = FC_ERRNO(res);
497 490 else
498 491 res = EFAULT;
499 492 rp->r_g1 = res;
500 493 rp->r_pc = curthread->t_lofault;
501 494 rp->r_npc = curthread->t_lofault + 4;
502 495 goto cleanup;
503 496
504 497 case T_INSTR_EXCEPTION + T_USER: /* user insn access exception */
505 498 bzero(&siginfo, sizeof (siginfo));
506 499 siginfo.si_addr = (caddr_t)rp->r_pc;
507 500 siginfo.si_signo = SIGSEGV;
508 501 siginfo.si_code = X_FAULT_TYPE(mmu_fsr) == FT_PRIV ?
509 502 SEGV_ACCERR : SEGV_MAPERR;
510 503 fault = FLTBOUNDS;
511 504 break;
512 505
513 506 case T_WIN_OVERFLOW + T_USER: /* window overflow in ??? */
514 507 case T_WIN_UNDERFLOW + T_USER: /* window underflow in ??? */
515 508 case T_SYS_RTT_PAGE + T_USER: /* window underflow in user_rtt */
516 509 case T_INSTR_MMU_MISS + T_USER: /* user instruction mmu miss */
517 510 case T_DATA_MMU_MISS + T_USER: /* user data mmu miss */
518 511 case T_DATA_PROT + T_USER: /* user data protection fault */
519 512 switch (type) {
520 513 case T_INSTR_MMU_MISS + T_USER:
521 514 addr = (caddr_t)rp->r_pc;
522 515 fault_type = F_INVAL;
523 516 rw = S_EXEC;
524 517 break;
525 518
526 519 case T_DATA_MMU_MISS + T_USER:
527 520 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
528 521 fault_type = F_INVAL;
529 522 /*
530 523 * The hardware doesn't update the sfsr on mmu misses
531 524 * so it is not easy to find out whether the access
532 525 * was a read or a write so we need to decode the
533 526 * actual instruction. XXX BUGLY HW
534 527 */
535 528 rw = get_accesstype(rp);
536 529 break;
537 530
538 531 case T_DATA_PROT + T_USER:
539 532 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
540 533 fault_type = F_PROT;
541 534 rw = S_WRITE;
542 535 break;
543 536
544 537 case T_WIN_OVERFLOW + T_USER:
545 538 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
546 539 fault_type = F_INVAL;
547 540 rw = S_WRITE;
548 541 break;
549 542
550 543 case T_WIN_UNDERFLOW + T_USER:
551 544 case T_SYS_RTT_PAGE + T_USER:
552 545 addr = (caddr_t)((uintptr_t)addr & TAGACC_VADDR_MASK);
553 546 fault_type = F_INVAL;
554 547 rw = S_READ;
555 548 break;
556 549
557 550 default:
558 551 cmn_err(CE_PANIC, "trap: unknown type %x", type);
559 552 break;
560 553 }
561 554
562 555 /*
563 556 * If we are single stepping do not call pagefault
564 557 */
565 558 if (stepped) {
566 559 res = FC_NOMAP;
567 560 } else {
568 561 caddr_t vaddr = addr;
569 562 size_t sz;
570 563 int ta;
571 564
572 565 ASSERT(!(curthread->t_flag & T_WATCHPT));
573 566 watchpage = (pr_watch_active(p) &&
574 567 type != T_WIN_OVERFLOW + T_USER &&
575 568 type != T_WIN_UNDERFLOW + T_USER &&
576 569 type != T_SYS_RTT_PAGE + T_USER &&
577 570 pr_is_watchpage(addr, rw));
578 571
579 572 if (!watchpage ||
580 573 (sz = instr_size(rp, &vaddr, rw)) <= 0)
581 574 /* EMPTY */;
582 575 else if ((watchcode = pr_is_watchpoint(&vaddr, &ta,
583 576 sz, NULL, rw)) != 0) {
584 577 if (ta) {
585 578 do_watch_step(vaddr, sz, rw,
586 579 watchcode, rp->r_pc);
587 580 fault_type = F_INVAL;
588 581 } else {
589 582 bzero(&siginfo, sizeof (siginfo));
590 583 siginfo.si_signo = SIGTRAP;
591 584 siginfo.si_code = watchcode;
592 585 siginfo.si_addr = vaddr;
593 586 siginfo.si_trapafter = 0;
594 587 siginfo.si_pc = (caddr_t)rp->r_pc;
595 588 fault = FLTWATCH;
596 589 break;
597 590 }
598 591 } else {
599 592 if (rw != S_EXEC &&
600 593 pr_watch_emul(rp, vaddr, rw))
601 594 goto out;
602 595 do_watch_step(vaddr, sz, rw, 0, 0);
603 596 fault_type = F_INVAL;
604 597 }
605 598
606 599 if (pr_watch_active(p) &&
607 600 (type == T_WIN_OVERFLOW + T_USER ||
608 601 type == T_WIN_UNDERFLOW + T_USER ||
609 602 type == T_SYS_RTT_PAGE + T_USER)) {
610 603 int dotwo = (type == T_WIN_UNDERFLOW + T_USER);
611 604 if (copy_return_window(dotwo))
612 605 goto out;
613 606 fault_type = F_INVAL;
614 607 }
615 608
616 609 res = pagefault(addr, fault_type, rw, 0);
617 610
618 611 /*
619 612 * If pagefault succeed, ok.
620 613 * Otherwise grow the stack automatically.
621 614 */
622 615 if (res == 0 ||
623 616 (res == FC_NOMAP &&
624 617 type != T_INSTR_MMU_MISS + T_USER &&
625 618 addr < p->p_usrstack &&
626 619 grow(addr))) {
627 620 int ismem = prismember(&p->p_fltmask, FLTPAGE);
628 621
629 622 /*
630 623 * instr_size() is used to get the exact
631 624 * address of the fault, instead of the
632 625 * page of the fault. Unfortunately it is
633 626 * very slow, and this is an important
634 627 * code path. Don't call it unless
635 628 * correctness is needed. ie. if FLTPAGE
636 629 * is set, or we're profiling.
637 630 */
638 631
639 632 if (curthread->t_rprof != NULL || ismem)
640 633 (void) instr_size(rp, &addr, rw);
641 634
642 635 lwp->lwp_lastfault = FLTPAGE;
643 636 lwp->lwp_lastfaddr = addr;
644 637
645 638 if (ismem) {
646 639 bzero(&siginfo, sizeof (siginfo));
647 640 siginfo.si_addr = addr;
648 641 (void) stop_on_fault(FLTPAGE, &siginfo);
649 642 }
650 643 goto out;
651 644 }
652 645
653 646 if (type != (T_INSTR_MMU_MISS + T_USER)) {
654 647 /*
655 648 * check for non-faulting loads, also
656 649 * fetch the instruction to check for
657 650 * flush
658 651 */
659 652 if (nfload(rp, &instr))
660 653 goto out;
661 654
662 655 /* skip userland prefetch instructions */
663 656 if (IS_PREFETCH(instr)) {
664 657 rp->r_pc = rp->r_npc;
665 658 rp->r_npc += 4;
666 659 goto out;
667 660 /*NOTREACHED*/
668 661 }
669 662
670 663 /*
671 664 * check if the instruction was a
672 665 * flush. ABI allows users to specify
673 666 * an illegal address on the flush
674 667 * instruction so we simply return in
675 668 * this case.
676 669 *
677 670 * NB: the hardware should set a bit
678 671 * indicating this trap was caused by
679 672 * a flush instruction. Instruction
680 673 * decoding is bugly!
681 674 */
682 675 if (IS_FLUSH(instr)) {
683 676 /* skip the flush instruction */
684 677 rp->r_pc = rp->r_npc;
685 678 rp->r_npc += 4;
686 679 goto out;
687 680 /*NOTREACHED*/
688 681 }
689 682 } else if (res == FC_PROT) {
690 683 report_stack_exec(p, addr);
691 684 }
692 685
693 686 if (tudebug)
694 687 showregs(type, rp, addr, 0);
695 688 }
696 689
697 690 /*
698 691 * In the case where both pagefault and grow fail,
699 692 * set the code to the value provided by pagefault.
700 693 */
701 694 (void) instr_size(rp, &addr, rw);
702 695 bzero(&siginfo, sizeof (siginfo));
703 696 siginfo.si_addr = addr;
704 697 if (FC_CODE(res) == FC_OBJERR) {
705 698 siginfo.si_errno = FC_ERRNO(res);
706 699 if (siginfo.si_errno != EINTR) {
707 700 siginfo.si_signo = SIGBUS;
708 701 siginfo.si_code = BUS_OBJERR;
709 702 fault = FLTACCESS;
710 703 }
711 704 } else { /* FC_NOMAP || FC_PROT */
712 705 siginfo.si_signo = SIGSEGV;
713 706 siginfo.si_code = (res == FC_NOMAP) ?
714 707 SEGV_MAPERR : SEGV_ACCERR;
715 708 fault = FLTBOUNDS;
716 709 }
717 710 /*
718 711 * If this is the culmination of a single-step,
719 712 * reset the addr, code, signal and fault to
720 713 * indicate a hardware trace trap.
721 714 */
722 715 if (stepped) {
723 716 pcb_t *pcb = &lwp->lwp_pcb;
724 717
725 718 siginfo.si_signo = 0;
726 719 fault = 0;
727 720 if (pcb->pcb_step == STEP_WASACTIVE) {
728 721 pcb->pcb_step = STEP_NONE;
729 722 pcb->pcb_tracepc = NULL;
730 723 oldpc = rp->r_pc - 4;
731 724 }
732 725 /*
733 726 * If both NORMAL_STEP and WATCH_STEP are in
734 727 * effect, give precedence to WATCH_STEP.
735 728 * One or the other must be set at this point.
736 729 */
737 730 ASSERT(pcb->pcb_flags & (NORMAL_STEP|WATCH_STEP));
738 731 if ((fault = undo_watch_step(&siginfo)) == 0 &&
739 732 (pcb->pcb_flags & NORMAL_STEP)) {
740 733 siginfo.si_signo = SIGTRAP;
741 734 siginfo.si_code = TRAP_TRACE;
742 735 siginfo.si_addr = (caddr_t)rp->r_pc;
743 736 fault = FLTTRACE;
744 737 }
745 738 pcb->pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
746 739 }
747 740 break;
748 741
749 742 case T_DATA_EXCEPTION + T_USER: /* user data access exception */
750 743
751 744 if (&vis1_partial_support != NULL) {
752 745 bzero(&siginfo, sizeof (siginfo));
753 746 if (vis1_partial_support(rp,
754 747 &siginfo, &fault) == 0)
755 748 goto out;
756 749 }
757 750
758 751 if (nfload(rp, &instr))
759 752 goto out;
760 753 if (IS_FLUSH(instr)) {
761 754 /* skip the flush instruction */
762 755 rp->r_pc = rp->r_npc;
763 756 rp->r_npc += 4;
764 757 goto out;
765 758 /*NOTREACHED*/
766 759 }
767 760 bzero(&siginfo, sizeof (siginfo));
768 761 siginfo.si_addr = addr;
769 762 switch (X_FAULT_TYPE(mmu_fsr)) {
770 763 case FT_ATOMIC_NC:
771 764 if ((IS_SWAP(instr) && swap_nc(rp, instr)) ||
772 765 (IS_LDSTUB(instr) && ldstub_nc(rp, instr))) {
773 766 /* skip the atomic */
774 767 rp->r_pc = rp->r_npc;
775 768 rp->r_npc += 4;
776 769 goto out;
777 770 }
778 771 /* fall into ... */
779 772 case FT_PRIV:
780 773 siginfo.si_signo = SIGSEGV;
781 774 siginfo.si_code = SEGV_ACCERR;
782 775 fault = FLTBOUNDS;
783 776 break;
784 777 case FT_SPEC_LD:
785 778 case FT_ILL_ALT:
786 779 siginfo.si_signo = SIGILL;
787 780 siginfo.si_code = ILL_ILLADR;
788 781 fault = FLTILL;
789 782 break;
790 783 default:
791 784 siginfo.si_signo = SIGSEGV;
792 785 siginfo.si_code = SEGV_MAPERR;
793 786 fault = FLTBOUNDS;
794 787 break;
795 788 }
796 789 break;
797 790
798 791 case T_SYS_RTT_ALIGN + T_USER: /* user alignment error */
799 792 case T_ALIGNMENT + T_USER: /* user alignment error */
800 793 if (tudebug)
801 794 showregs(type, rp, addr, 0);
802 795 /*
803 796 * If the user has to do unaligned references
804 797 * the ugly stuff gets done here.
805 798 */
806 799 alignfaults++;
807 800 if (&vis1_partial_support != NULL) {
808 801 bzero(&siginfo, sizeof (siginfo));
809 802 if (vis1_partial_support(rp,
810 803 &siginfo, &fault) == 0)
811 804 goto out;
812 805 }
813 806
814 807 bzero(&siginfo, sizeof (siginfo));
815 808 if (type == T_SYS_RTT_ALIGN + T_USER) {
816 809 if (nfload(rp, NULL))
817 810 goto out;
818 811 /*
819 812 * Can't do unaligned stack access
820 813 */
821 814 siginfo.si_signo = SIGBUS;
822 815 siginfo.si_code = BUS_ADRALN;
823 816 siginfo.si_addr = addr;
824 817 fault = FLTACCESS;
825 818 break;
826 819 }
827 820
828 821 /*
829 822 * Try to fix alignment before non-faulting load test.
830 823 */
831 824 if (p->p_fixalignment) {
832 825 if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
833 826 rp->r_pc = rp->r_npc;
834 827 rp->r_npc += 4;
835 828 goto out;
836 829 }
837 830 if (nfload(rp, NULL))
838 831 goto out;
839 832 siginfo.si_signo = SIGSEGV;
840 833 siginfo.si_code = SEGV_MAPERR;
841 834 siginfo.si_addr = badaddr;
842 835 fault = FLTBOUNDS;
843 836 } else {
844 837 if (nfload(rp, NULL))
845 838 goto out;
846 839 siginfo.si_signo = SIGBUS;
847 840 siginfo.si_code = BUS_ADRALN;
848 841 if (rp->r_pc & 3) { /* offending address, if pc */
849 842 siginfo.si_addr = (caddr_t)rp->r_pc;
850 843 } else {
851 844 if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
852 845 siginfo.si_addr = badaddr;
853 846 else
854 847 siginfo.si_addr = (caddr_t)rp->r_pc;
855 848 }
856 849 fault = FLTACCESS;
857 850 }
858 851 break;
859 852
860 853 case T_PRIV_INSTR + T_USER: /* privileged instruction fault */
861 854 if (tudebug)
862 855 showregs(type, rp, (caddr_t)0, 0);
863 856
864 857 bzero(&siginfo, sizeof (siginfo));
865 858 #ifdef sun4v
866 859 /*
867 860 * If this instruction fault is a non-privileged %tick
868 861 * or %stick trap, and %tick/%stick user emulation is
869 862 * enabled as a result of an OS suspend, then simulate
870 863 * the register read. We rely on simulate_rdtick to fail
871 864 * if the instruction is not a %tick or %stick read,
872 865 * causing us to fall through to the normal privileged
873 866 * instruction handling.
874 867 */
875 868 if (tick_stick_emulation_active &&
876 869 (X_FAULT_TYPE(mmu_fsr) == FT_NEW_PRVACT) &&
877 870 simulate_rdtick(rp) == SIMU_SUCCESS) {
878 871 /* skip the successfully simulated instruction */
879 872 rp->r_pc = rp->r_npc;
880 873 rp->r_npc += 4;
881 874 goto out;
882 875 }
883 876 #endif
884 877 siginfo.si_signo = SIGILL;
885 878 siginfo.si_code = ILL_PRVOPC;
886 879 siginfo.si_addr = (caddr_t)rp->r_pc;
887 880 fault = FLTILL;
888 881 break;
889 882
890 883 case T_UNIMP_INSTR: /* priv illegal instruction fault */
891 884 if (fpras_implemented) {
892 885 /*
893 886 * Call fpras_chktrap indicating that
894 887 * we've come from a trap handler and pass
895 888 * the regs. That function may choose to panic
896 889 * (in which case it won't return) or it may
897 890 * determine that a reboot is desired. In the
898 891 * latter case it must alter pc/npc to skip
899 892 * the illegal instruction and continue at
900 893 * a controlled address.
901 894 */
902 895 if (&fpras_chktrap) {
903 896 if (fpras_chktrap(rp))
904 897 goto cleanup;
905 898 }
906 899 }
907 900 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
908 901 instr = *(int *)rp->r_pc;
909 902 if ((instr & 0xc0000000) == 0x40000000) {
910 903 long pc;
911 904
912 905 rp->r_o7 = (long long)rp->r_pc;
913 906 pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
914 907 rp->r_pc = rp->r_npc;
915 908 rp->r_npc = pc;
916 909 ill_calls++;
917 910 goto cleanup;
918 911 }
919 912 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */
920 913 /*
921 914 * It's not an fpras failure and it's not SF_ERRATA_23 - die
922 915 */
923 916 addr = (caddr_t)rp->r_pc;
924 917 (void) die(type, rp, addr, 0);
925 918 /*NOTREACHED*/
926 919
927 920 case T_UNIMP_INSTR + T_USER: /* illegal instruction fault */
928 921 #if defined(SF_ERRATA_23) || defined(SF_ERRATA_30) /* call ... illegal-insn */
929 922 instr = fetch_user_instr((caddr_t)rp->r_pc);
930 923 if ((instr & 0xc0000000) == 0x40000000) {
931 924 long pc;
932 925
933 926 rp->r_o7 = (long long)rp->r_pc;
934 927 pc = rp->r_pc + ((instr & 0x3fffffff) << 2);
935 928 rp->r_pc = rp->r_npc;
936 929 rp->r_npc = pc;
937 930 ill_calls++;
938 931 goto out;
939 932 }
940 933 #endif /* SF_ERRATA_23 || SF_ERRATA_30 */
941 934 if (tudebug)
942 935 showregs(type, rp, (caddr_t)0, 0);
943 936 bzero(&siginfo, sizeof (siginfo));
944 937 /*
945 938 * Try to simulate the instruction.
946 939 */
947 940 switch (simulate_unimp(rp, &badaddr)) {
948 941 case SIMU_RETRY:
949 942 goto out; /* regs are already set up */
950 943 /*NOTREACHED*/
951 944
952 945 case SIMU_SUCCESS:
953 946 /* skip the successfully simulated instruction */
954 947 rp->r_pc = rp->r_npc;
955 948 rp->r_npc += 4;
956 949 goto out;
957 950 /*NOTREACHED*/
958 951
959 952 case SIMU_FAULT:
960 953 siginfo.si_signo = SIGSEGV;
961 954 siginfo.si_code = SEGV_MAPERR;
962 955 siginfo.si_addr = badaddr;
963 956 fault = FLTBOUNDS;
964 957 break;
965 958
966 959 case SIMU_DZERO:
967 960 siginfo.si_signo = SIGFPE;
968 961 siginfo.si_code = FPE_INTDIV;
969 962 siginfo.si_addr = (caddr_t)rp->r_pc;
970 963 fault = FLTIZDIV;
971 964 break;
972 965
973 966 case SIMU_UNALIGN:
974 967 siginfo.si_signo = SIGBUS;
975 968 siginfo.si_code = BUS_ADRALN;
976 969 siginfo.si_addr = badaddr;
977 970 fault = FLTACCESS;
978 971 break;
979 972
980 973 case SIMU_ILLEGAL:
981 974 default:
982 975 siginfo.si_signo = SIGILL;
983 976 op3 = (instr >> 19) & 0x3F;
984 977 if ((IS_FLOAT(instr) && (op3 == IOP_V8_STQFA) ||
985 978 (op3 == IOP_V8_STDFA)))
986 979 siginfo.si_code = ILL_ILLADR;
987 980 else
988 981 siginfo.si_code = ILL_ILLOPC;
989 982 siginfo.si_addr = (caddr_t)rp->r_pc;
990 983 fault = FLTILL;
991 984 break;
992 985 }
993 986 break;
994 987
995 988 case T_UNIMP_LDD + T_USER:
996 989 case T_UNIMP_STD + T_USER:
997 990 if (tudebug)
998 991 showregs(type, rp, (caddr_t)0, 0);
999 992 switch (simulate_lddstd(rp, &badaddr)) {
1000 993 case SIMU_SUCCESS:
1001 994 /* skip the successfully simulated instruction */
1002 995 rp->r_pc = rp->r_npc;
1003 996 rp->r_npc += 4;
1004 997 goto out;
1005 998 /*NOTREACHED*/
1006 999
1007 1000 case SIMU_FAULT:
1008 1001 if (nfload(rp, NULL))
1009 1002 goto out;
1010 1003 siginfo.si_signo = SIGSEGV;
1011 1004 siginfo.si_code = SEGV_MAPERR;
1012 1005 siginfo.si_addr = badaddr;
1013 1006 fault = FLTBOUNDS;
1014 1007 break;
1015 1008
1016 1009 case SIMU_UNALIGN:
1017 1010 if (nfload(rp, NULL))
1018 1011 goto out;
1019 1012 siginfo.si_signo = SIGBUS;
1020 1013 siginfo.si_code = BUS_ADRALN;
1021 1014 siginfo.si_addr = badaddr;
1022 1015 fault = FLTACCESS;
1023 1016 break;
1024 1017
1025 1018 case SIMU_ILLEGAL:
1026 1019 default:
1027 1020 siginfo.si_signo = SIGILL;
1028 1021 siginfo.si_code = ILL_ILLOPC;
1029 1022 siginfo.si_addr = (caddr_t)rp->r_pc;
1030 1023 fault = FLTILL;
1031 1024 break;
1032 1025 }
1033 1026 break;
1034 1027
1035 1028 case T_UNIMP_LDD:
1036 1029 case T_UNIMP_STD:
1037 1030 if (simulate_lddstd(rp, &badaddr) == SIMU_SUCCESS) {
1038 1031 /* skip the successfully simulated instruction */
1039 1032 rp->r_pc = rp->r_npc;
1040 1033 rp->r_npc += 4;
1041 1034 goto cleanup;
1042 1035 /*NOTREACHED*/
1043 1036 }
1044 1037 /*
1045 1038 * A third party driver executed an {LDD,STD,LDDA,STDA}
1046 1039 * that we couldn't simulate.
1047 1040 */
1048 1041 if (nfload(rp, NULL))
1049 1042 goto cleanup;
1050 1043
1051 1044 if (curthread->t_lofault) {
1052 1045 if (lodebug) {
1053 1046 showregs(type, rp, addr, 0);
1054 1047 traceback((caddr_t)rp->r_sp);
1055 1048 }
1056 1049 rp->r_g1 = EFAULT;
1057 1050 rp->r_pc = curthread->t_lofault;
1058 1051 rp->r_npc = rp->r_pc + 4;
1059 1052 goto cleanup;
1060 1053 }
1061 1054 (void) die(type, rp, addr, 0);
1062 1055 /*NOTREACHED*/
1063 1056
1064 1057 case T_IDIV0 + T_USER: /* integer divide by zero */
1065 1058 case T_DIV0 + T_USER: /* integer divide by zero */
1066 1059 if (tudebug && tudebugfpe)
1067 1060 showregs(type, rp, (caddr_t)0, 0);
1068 1061 bzero(&siginfo, sizeof (siginfo));
1069 1062 siginfo.si_signo = SIGFPE;
1070 1063 siginfo.si_code = FPE_INTDIV;
1071 1064 siginfo.si_addr = (caddr_t)rp->r_pc;
1072 1065 fault = FLTIZDIV;
1073 1066 break;
1074 1067
1075 1068 case T_INT_OVERFLOW + T_USER: /* integer overflow */
1076 1069 if (tudebug && tudebugfpe)
1077 1070 showregs(type, rp, (caddr_t)0, 0);
1078 1071 bzero(&siginfo, sizeof (siginfo));
1079 1072 siginfo.si_signo = SIGFPE;
1080 1073 siginfo.si_code = FPE_INTOVF;
1081 1074 siginfo.si_addr = (caddr_t)rp->r_pc;
1082 1075 fault = FLTIOVF;
1083 1076 break;
1084 1077
1085 1078 case T_BREAKPOINT + T_USER: /* breakpoint trap (t 1) */
1086 1079 if (tudebug && tudebugbpt)
1087 1080 showregs(type, rp, (caddr_t)0, 0);
1088 1081 bzero(&siginfo, sizeof (siginfo));
1089 1082 siginfo.si_signo = SIGTRAP;
1090 1083 siginfo.si_code = TRAP_BRKPT;
1091 1084 siginfo.si_addr = (caddr_t)rp->r_pc;
1092 1085 fault = FLTBPT;
1093 1086 break;
1094 1087
1095 1088 case T_TAG_OVERFLOW + T_USER: /* tag overflow (taddcctv, tsubcctv) */
1096 1089 if (tudebug)
1097 1090 showregs(type, rp, (caddr_t)0, 0);
1098 1091 bzero(&siginfo, sizeof (siginfo));
1099 1092 siginfo.si_signo = SIGEMT;
1100 1093 siginfo.si_code = EMT_TAGOVF;
1101 1094 siginfo.si_addr = (caddr_t)rp->r_pc;
1102 1095 fault = FLTACCESS;
1103 1096 break;
1104 1097
1105 1098 case T_FLUSH_PCB + T_USER: /* finish user window overflow */
1106 1099 case T_FLUSHW + T_USER: /* finish user window flush */
1107 1100 /*
1108 1101 * This trap is entered from sys_rtt in locore.s when,
	 * upon return to user it is found that there are user
1110 1103 * windows in pcb_wbuf. This happens because they could
1111 1104 * not be saved on the user stack, either because it
1112 1105 * wasn't resident or because it was misaligned.
1113 1106 */
1114 1107 {
1115 1108 int error;
1116 1109 caddr_t sp;
1117 1110
1118 1111 error = flush_user_windows_to_stack(&sp);
1119 1112 /*
1120 1113 * Possible errors:
1121 1114 * error copying out
1122 1115 * unaligned stack pointer
1123 1116 * The first is given to us as the return value
1124 1117 * from flush_user_windows_to_stack(). The second
1125 1118 * results in residual windows in the pcb.
1126 1119 */
1127 1120 if (error != 0) {
1128 1121 /*
1129 1122 * EINTR comes from a signal during copyout;
1130 1123 * we should not post another signal.
1131 1124 */
1132 1125 if (error != EINTR) {
1133 1126 /*
1134 1127 * Zap the process with a SIGSEGV - process
1135 1128 * may be managing its own stack growth by
1136 1129 * taking SIGSEGVs on a different signal stack.
1137 1130 */
1138 1131 bzero(&siginfo, sizeof (siginfo));
1139 1132 siginfo.si_signo = SIGSEGV;
1140 1133 siginfo.si_code = SEGV_MAPERR;
1141 1134 siginfo.si_addr = sp;
1142 1135 fault = FLTBOUNDS;
1143 1136 }
1144 1137 break;
1145 1138 } else if (mpcb->mpcb_wbcnt) {
1146 1139 bzero(&siginfo, sizeof (siginfo));
1147 1140 siginfo.si_signo = SIGILL;
1148 1141 siginfo.si_code = ILL_BADSTK;
1149 1142 siginfo.si_addr = (caddr_t)rp->r_pc;
1150 1143 fault = FLTILL;
1151 1144 break;
1152 1145 }
1153 1146 }
1154 1147
1155 1148 /*
1156 1149 * T_FLUSHW is used when handling a ta 0x3 -- the old flush
1157 1150 * window trap -- which is implemented by executing the
1158 1151 * flushw instruction. The flushw can trap if any of the
1159 1152 * stack pages are not writable for whatever reason. In this
1160 1153 * case only, we advance the pc to the next instruction so
1161 1154 * that the user thread doesn't needlessly execute the trap
1162 1155 * again. Normally this wouldn't be a problem -- we'll
1163 1156 * usually only end up here if this is the first touch to a
1164 1157 * stack page -- since the second execution won't trap, but
1165 1158 * if there's a watchpoint on the stack page the user thread
1166 1159 * would spin, continuously executing the trap instruction.
1167 1160 */
1168 1161 if (type == T_FLUSHW + T_USER) {
1169 1162 rp->r_pc = rp->r_npc;
1170 1163 rp->r_npc += 4;
1171 1164 }
1172 1165 goto out;
1173 1166
1174 1167 case T_AST + T_USER: /* profiling or resched pseudo trap */
1175 1168 if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW) {
1176 1169 lwp->lwp_pcb.pcb_flags &= ~CPC_OVERFLOW;
1177 1170 if (kcpc_overflow_ast()) {
1178 1171 /*
1179 1172 * Signal performance counter overflow
1180 1173 */
1181 1174 if (tudebug)
1182 1175 showregs(type, rp, (caddr_t)0, 0);
1183 1176 bzero(&siginfo, sizeof (siginfo));
1184 1177 siginfo.si_signo = SIGEMT;
1185 1178 siginfo.si_code = EMT_CPCOVF;
1186 1179 siginfo.si_addr = (caddr_t)rp->r_pc;
1187 1180 /* for trap_cleanup(), below */
1188 1181 oldpc = rp->r_pc - 4;
1189 1182 fault = FLTCPCOVF;
1190 1183 }
1191 1184 }
1192 1185
1193 1186 /*
1194 1187 * The CPC_OVERFLOW check above may already have populated
1195 1188 * siginfo and set fault, so the checks below must not
1196 1189 * touch these and the functions they call must use
1197 1190 * trapsig() directly.
1198 1191 */
1199 1192
1200 1193 if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1201 1194 lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
1202 1195 trap_async_hwerr();
1203 1196 }
1204 1197
1205 1198 if (lwp->lwp_pcb.pcb_flags & ASYNC_BERR) {
1206 1199 lwp->lwp_pcb.pcb_flags &= ~ASYNC_BERR;
1207 1200 trap_async_berr_bto(ASYNC_BERR, rp);
1208 1201 }
1209 1202
1210 1203 if (lwp->lwp_pcb.pcb_flags & ASYNC_BTO) {
1211 1204 lwp->lwp_pcb.pcb_flags &= ~ASYNC_BTO;
1212 1205 trap_async_berr_bto(ASYNC_BTO, rp);
1213 1206 }
1214 1207
1215 1208 break;
1216 1209 }
1217 1210
1218 1211 if (fault) {
1219 1212 /* We took a fault so abort single step. */
1220 1213 lwp->lwp_pcb.pcb_flags &= ~(NORMAL_STEP|WATCH_STEP);
1221 1214 }
1222 1215 trap_cleanup(rp, fault, &siginfo, oldpc == rp->r_pc);
1223 1216
1224 1217 out: /* We can't get here from a system trap */
1225 1218 ASSERT(type & T_USER);
1226 1219 trap_rtt();
1227 1220 (void) new_mstate(curthread, mstate);
1228 1221 /* Kernel probe */
1229 1222 TNF_PROBE_1(thread_state, "thread", /* CSTYLED */,
1230 1223 tnf_microstate, state, LMS_USER);
1231 1224
1232 1225 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1233 1226 return;
1234 1227
1235 1228 cleanup: /* system traps end up here */
1236 1229 ASSERT(!(type & T_USER));
1237 1230
1238 1231 TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
1239 1232 }
1240 1233
/*
 * Common post-trap processing for user-level traps: record the fault for
 * real-time (SIGPROF) profiling, give /proc debuggers a chance to intercept
 * the fault, post the pending signal (if any), and service AST conditions
 * (CPU binding changes, async I/O cleanup, lwp hold requests, pending
 * signals, resource-usage profiling).
 *
 *	rp		- register state at trap time
 *	fault		- FLT* code of the fault taken, or 0 if none
 *	sip		- siginfo describing the signal to post;
 *			  si_signo == 0 means no signal is posted
 *	restartable	- nonzero if the faulting instruction may be
 *			  restarted (passed through to trapsig())
 */
void
trap_cleanup(
	struct regs *rp,
	uint_t fault,
	k_siginfo_t *sip,
	int restartable)
{
	extern void aio_cleanup();
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = sip->si_addr;

		DTRACE_PROC2(fault, int, fault, ksiginfo_t *, sip);

		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp. Otherwise just
		 * deliver the associated signal.
		 */
		if (sip->si_signo != SIGKILL &&
		    prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, sip) == 0)
			sip->si_signo = 0;
	}

	if (sip->si_signo)
		trapsig(sip, restartable);

	if (lwp->lwp_oweupc)
		profil_tick(rp->r_pc);

	/* NOTE(review): bitwise '|' evaluates both flags; looks deliberate. */
	if (curthread->t_astflag | curthread->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST. This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(curthread);
		curthread->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking, is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 *	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			/* re-check under p_lock: flag may have been cleared */
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * for kaio requests that are on the per-process poll queue,
		 * aiop->aio_pollq, their AIO_POLL bit is set, the kernel
		 * should copyout their result_t to user memory. by copying
		 * out the result_t, the user can poll on memory waiting
		 * for the kaio request to complete.
		 */
		if (p->p_aio)
			aio_cleanup(0);

		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG evaluate true must
		 * set t_astflag afterwards.
		 */
		if (ISSIG_PENDING(curthread, lwp, p)) {
			if (issig(FORREAL))
				psig();
			curthread->t_sig_check = 1;
		}

		if (curthread->t_rprof != NULL) {
			realsigprof(0, 0, 0);
			curthread->t_sig_check = 1;
		}
	}
}
1349 1342
1350 1343 /*
1351 1344 * Called from fp_traps when a floating point trap occurs.
1352 1345 * Note that the T_DATA_EXCEPTION case does not use X_FAULT_TYPE(mmu_fsr),
1353 1346 * because mmu_fsr (now changed to code) is always 0.
1354 1347 * Note that the T_UNIMP_INSTR case does not call simulate_unimp(),
1355 1348 * because the simulator only simulates multiply and divide instructions,
1356 1349 * which would not cause floating point traps in the first place.
1357 1350 * XXX - Supervisor mode floating point traps?
1358 1351 */
void
fpu_trap(struct regs *rp, caddr_t addr, uint32_t type, uint32_t code)
{
	proc_t *p = ttoproc(curthread);
	klwp_id_t lwp = ttolwp(curthread);
	k_siginfo_t siginfo;
	uint_t op3, fault = 0;
	int mstate;	/* valid only on the user path; see ASSERT below */
	char *badaddr;
	kfpu_t *fp;
	struct fpq *pfpq;
	uint32_t inst;
	utrap_handler_t *utrapp;

	CPU_STATS_ADDQ(CPU, sys, trap, 1);

	if (USERMODE(rp->r_tstate)) {
		/*
		 * Set lwp_state before trying to acquire any
		 * adaptive lock
		 */
		ASSERT(lwp != NULL);
		lwp->lwp_state = LWP_SYS;
		/*
		 * Set up the current cred to use during this trap. u_cred
		 * no longer exists.  t_cred is used instead.
		 * The current process credential applies to the thread for
		 * the entire trap.  If trapping from the kernel, this
		 * should already be set up.
		 */
		if (curthread->t_cred != p->p_cred) {
			cred_t *oldcred = curthread->t_cred;
			/*
			 * DTrace accesses t_cred in probe context.  t_cred
			 * must always be either NULL, or point to a valid,
			 * allocated cred structure.
			 */
			curthread->t_cred = crgetcred();
			crfree(oldcred);
		}
		ASSERT(lwp->lwp_regs == rp);
		mstate = new_mstate(curthread, LMS_TRAP);
		siginfo.si_signo = 0;
		type |= T_USER;
	}

	TRACE_1(TR_FAC_TRAP, TR_C_TRAP_HANDLER_ENTER,
	    "C_fpu_trap_handler_enter:type %x", type);

	if (tudebug && tudebugfpe)
		showregs(type, rp, addr, 0);

	/* siginfo defaults: code/addr from the trap, signo set per case */
	bzero(&siginfo, sizeof (siginfo));
	siginfo.si_code = code;
	siginfo.si_addr = addr;

	switch (type) {

	case T_FP_EXCEPTION_IEEE + T_USER:	/* FPU arithmetic exception */
		/*
		 * FPU arithmetic exception - fake up a fpq if we
		 *	came here directly from _fp_ieee_exception,
		 *	which is indicated by a zero fpu_qcnt.
		 */
		fp = lwptofpu(curthread->t_lwp);
		utrapp = curthread->t_procp->p_utraps;
		if (fp->fpu_qcnt == 0) {
			inst = fetch_user_instr((caddr_t)rp->r_pc);
			lwp->lwp_state = LWP_SYS;
			pfpq = &fp->fpu_q->FQu.fpq;
			pfpq->fpq_addr = (uint32_t *)rp->r_pc;
			pfpq->fpq_instr = inst;
			fp->fpu_qcnt = 1;
			fp->fpu_q_entrysize = sizeof (struct fpq);
#ifdef SF_V9_TABLE_28
			/*
			 * Spitfire and blackbird followed the SPARC V9 manual
			 * paragraph 3 of section 5.1.7.9 FSR_current_exception
			 * (cexc) for setting fsr.cexc bits on underflow and
			 * overflow traps when the fsr.tem.inexact bit is set,
			 * instead of following Table 28. Bugid 1263234.
			 */
			{
				extern int spitfire_bb_fsr_bug;

				if (spitfire_bb_fsr_bug &&
				    (fp->fpu_fsr & FSR_TEM_NX)) {
					if (((fp->fpu_fsr & FSR_TEM_OF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_OF)) {
						fp->fpu_fsr &= ~FSR_CEXC_OF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
					if (((fp->fpu_fsr & FSR_TEM_UF) == 0) &&
					    (fp->fpu_fsr & FSR_CEXC_UF)) {
						fp->fpu_fsr &= ~FSR_CEXC_UF;
						fp->fpu_fsr |= FSR_CEXC_NX;
						_fp_write_pfsr(&fp->fpu_fsr);
						siginfo.si_code = FPE_FLTRES;
					}
				}
			}
#endif /* SF_V9_TABLE_28 */
			/* step past the faulting instruction */
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
		} else if (utrapp && utrapp[UT_FP_EXCEPTION_IEEE_754]) {
			/*
			 * The user had a trap handler installed.  Jump to
			 * the trap handler instead of signalling the process.
			 */
			rp->r_pc = (long)utrapp[UT_FP_EXCEPTION_IEEE_754];
			rp->r_npc = rp->r_pc + 4;
			break;
		}
		siginfo.si_signo = SIGFPE;
		fault = FLTFPE;
		break;

	case T_DATA_EXCEPTION + T_USER:		/* user data access exception */
		siginfo.si_signo = SIGSEGV;
		fault = FLTBOUNDS;
		break;

	case T_LDDF_ALIGN + T_USER:	/* 64 bit user lddfa alignment error */
	case T_STDF_ALIGN + T_USER:	/* 64 bit user stdfa alignment error */
		alignfaults++;
		lwp->lwp_state = LWP_SYS;
		/* weak symbol: present only on platforms with VIS support */
		if (&vis1_partial_support != NULL) {
			bzero(&siginfo, sizeof (siginfo));
			if (vis1_partial_support(rp,
			    &siginfo, &fault) == 0)
				goto out;
		}
		if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
			rp->r_pc = rp->r_npc;
			rp->r_npc += 4;
			goto out;
		}
		fp = lwptofpu(curthread->t_lwp);
		fp->fpu_qcnt = 0;
		siginfo.si_signo = SIGSEGV;
		siginfo.si_code = SEGV_MAPERR;
		siginfo.si_addr = badaddr;
		fault = FLTBOUNDS;
		break;

	case T_ALIGNMENT + T_USER:		/* user alignment error */
		/*
		 * If the user has to do unaligned references
		 * the ugly stuff gets done here.
		 * Only handles vanilla loads and stores.
		 */
		alignfaults++;
		if (p->p_fixalignment) {
			if (do_unaligned(rp, &badaddr) == SIMU_SUCCESS) {
				rp->r_pc = rp->r_npc;
				rp->r_npc += 4;
				goto out;
			}
			siginfo.si_signo = SIGSEGV;
			siginfo.si_code = SEGV_MAPERR;
			siginfo.si_addr = badaddr;
			fault = FLTBOUNDS;
		} else {
			siginfo.si_signo = SIGBUS;
			siginfo.si_code = BUS_ADRALN;
			if (rp->r_pc & 3) {	/* offending address, if pc */
				siginfo.si_addr = (caddr_t)rp->r_pc;
			} else {
				if (calc_memaddr(rp, &badaddr) == SIMU_UNALIGN)
					siginfo.si_addr = badaddr;
				else
					siginfo.si_addr = (caddr_t)rp->r_pc;
			}
			fault = FLTACCESS;
		}
		break;

	case T_UNIMP_INSTR + T_USER:		/* illegal instruction fault */
		siginfo.si_signo = SIGILL;
		inst = fetch_user_instr((caddr_t)rp->r_pc);
		op3 = (inst >> 19) & 0x3F;
		if ((op3 == IOP_V8_STQFA) || (op3 == IOP_V8_STDFA))
			siginfo.si_code = ILL_ILLADR;
		else
			siginfo.si_code = ILL_ILLTRP;
		fault = FLTILL;
		break;

	default:
		(void) die(type, rp, addr, 0);
		/*NOTREACHED*/
	}

	/*
	 * We can't get here from a system trap
	 * Never restart any instruction which got here from an fp trap.
	 */
	ASSERT(type & T_USER);

	trap_cleanup(rp, fault, &siginfo, 0);
out:
	trap_rtt();
	(void) new_mstate(curthread, mstate);
}
1567 1558
/*
 * Common return-to-user processing after a trap: restore any
 * debugger-modified register windows, drop to user lwp state, honor a
 * pending class trap-return, preempt if rescheduling was requested,
 * and arrange /proc single-stepping if one was set up.
 */
void
trap_rtt(void)
{
	klwp_id_t lwp = ttolwp(curthread);

	/*
	 * Restore register window if a debugger modified it.
	 * Set up to perform a single-step if a debugger requested it.
	 */
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 */
	lwp->lwp_state = LWP_USER;
	if (curthread->t_trapret) {
		curthread->t_trapret = 0;
		thread_lock(curthread);
		CL_TRAPRET(curthread);
		thread_unlock(curthread);
	}
	if (CPU->cpu_runrun || curthread->t_schedflag & TS_ANYWAITQ)
		preempt();
	prunstop();
	if (lwp->lwp_pcb.pcb_step != STEP_NONE)
		prdostep();

	TRACE_0(TR_FAC_TRAP, TR_C_TRAP_HANDLER_EXIT, "C_trap_handler_exit");
}
1604 1595
/*
 * Instruction-decode helpers for nfload():
 * IS_LDASI	- opcode masks matching the alternate-space load forms
 * IS_IMM_ASI	- ASI is encoded in the instruction's immediate field
 *		  (i-bit clear), rather than taken from %asi
 * IS_ASINF	- the ASI is one of the no-fault ASIs
 * IS_LDDA	- the instruction is an ldda (loads a register pair)
 */
#define IS_LDASI(o) \
	((o) == (uint32_t)0xC0C00000 || (o) == (uint32_t)0xC0800000 || \
	(o) == (uint32_t)0xC1800000)
#define IS_IMM_ASI(i) (((i) & 0x2000) == 0)
#define IS_ASINF(a) (((a) & 0xF6) == 0x82)
#define IS_LDDA(i) (((i) & 0xC1F80000) == 0xC0980000)
1611 1602
/*
 * Handle a fault taken by a load through a no-fault ASI: map a dummy
 * no-fault segment at the target address if there is a gap there, zero
 * the destination register(s), and skip the instruction.  Returns 1 if
 * the instruction was handled (caller should resume), 0 if it was not a
 * no-fault ASI load.  If instrp is non-NULL the fetched instruction is
 * returned through it either way.
 */
static int
nfload(struct regs *rp, int *instrp)
{
	uint_t instr, asi, op3, rd;
	size_t len;
	struct as *as;
	caddr_t addr;
	FPU_DREGS_TYPE zero;
	extern int segnf_create();

	if (USERMODE(rp->r_tstate))
		instr = fetch_user_instr((caddr_t)rp->r_pc);
	else
		instr = *(int *)rp->r_pc;

	if (instrp)
		*instrp = instr;

	op3 = (uint_t)(instr & 0xC1E00000);
	if (!IS_LDASI(op3))
		return (0);
	/* ASI comes from the immediate field or from %asi in TSTATE */
	if (IS_IMM_ASI(instr))
		asi = (instr & 0x1FE0) >> 5;
	else
		asi = (uint_t)((rp->r_tstate >> TSTATE_ASI_SHIFT) &
		    TSTATE_ASI_MASK);
	if (!IS_ASINF(asi))
		return (0);
	/* back the faulting address with a no-fault segment if unmapped */
	if (calc_memaddr(rp, &addr) == SIMU_SUCCESS) {
		len = 1;
		as = USERMODE(rp->r_tstate) ? ttoproc(curthread)->p_as : &kas;
		as_rangelock(as);
		if (as_gap(as, len, &addr, &len, 0, addr) == 0)
			(void) as_map(as, addr, len, segnf_create, NULL);
		as_rangeunlock(as);
	}
	/* no-fault loads of an unmapped area read as zero */
	zero = 0;
	rd = (instr >> 25) & 0x1f;
	if (IS_FLOAT(instr)) {
		uint_t dbflg = ((instr >> 19) & 3) == 3;

		if (dbflg) {		/* clever v9 reg encoding */
			if (rd & 1)
				rd = (rd & 0x1e) | 0x20;
			rd >>= 1;
		}
		if (fpu_exists) {
			if (!(_fp_read_fprs() & FPRS_FEF))
				fp_enable();

			if (dbflg)
				_fp_write_pdreg(&zero, rd);
			else
				_fp_write_pfreg((uint_t *)&zero, rd);
		} else {
			kfpu_t *fp = lwptofpu(curthread->t_lwp);

			if (!fp->fpu_en)
				fp_enable();

			if (dbflg)
				fp->fpu_fr.fpu_dregs[rd] = zero;
			else
				fp->fpu_fr.fpu_regs[rd] = 0;
		}
	} else {
		(void) putreg(&zero, rp, rd, &addr);
		if (IS_LDDA(instr))
			(void) putreg(&zero, rp, rd + 1, &addr);
	}
	/* skip the emulated instruction */
	rp->r_pc = rp->r_npc;
	rp->r_npc += 4;
	return (1);
}
1686 1677
/* Serializes the emulated noncached "atomics" in swap_nc()/ldstub_nc(). */
kmutex_t	atomic_nc_mutex;

/*
 * The following couple of routines are for userland drivers which
 * do atomics to noncached addresses.  This sort of worked on previous
 * platforms -- the operation really wasn't atomic, but it didn't generate
 * a trap as sun4u systems do.
 */
1695 1686 static int
1696 1687 swap_nc(struct regs *rp, int instr)
1697 1688 {
1698 1689 uint64_t rdata, mdata;
1699 1690 caddr_t addr, badaddr;
1700 1691 uint_t tmp, rd;
1701 1692
1702 1693 (void) flush_user_windows_to_stack(NULL);
1703 1694 rd = (instr >> 25) & 0x1f;
1704 1695 if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
1705 1696 return (0);
1706 1697 if (getreg(rp, rd, &rdata, &badaddr))
1707 1698 return (0);
1708 1699 mutex_enter(&atomic_nc_mutex);
1709 1700 if (fuword32(addr, &tmp) == -1) {
1710 1701 mutex_exit(&atomic_nc_mutex);
1711 1702 return (0);
1712 1703 }
1713 1704 mdata = (u_longlong_t)tmp;
1714 1705 if (suword32(addr, (uint32_t)rdata) == -1) {
1715 1706 mutex_exit(&atomic_nc_mutex);
1716 1707 return (0);
1717 1708 }
1718 1709 (void) putreg(&mdata, rp, rd, &badaddr);
1719 1710 mutex_exit(&atomic_nc_mutex);
1720 1711 return (1);
1721 1712 }
1722 1713
1723 1714 static int
1724 1715 ldstub_nc(struct regs *rp, int instr)
1725 1716 {
1726 1717 uint64_t mdata;
1727 1718 caddr_t addr, badaddr;
1728 1719 uint_t rd;
1729 1720 uint8_t tmp;
1730 1721
1731 1722 (void) flush_user_windows_to_stack(NULL);
1732 1723 rd = (instr >> 25) & 0x1f;
1733 1724 if (calc_memaddr(rp, &addr) != SIMU_SUCCESS)
1734 1725 return (0);
1735 1726 mutex_enter(&atomic_nc_mutex);
1736 1727 if (fuword8(addr, &tmp) == -1) {
1737 1728 mutex_exit(&atomic_nc_mutex);
1738 1729 return (0);
1739 1730 }
1740 1731 mdata = (u_longlong_t)tmp;
1741 1732 if (suword8(addr, (uint8_t)0xff) == -1) {
1742 1733 mutex_exit(&atomic_nc_mutex);
1743 1734 return (0);
1744 1735 }
1745 1736 (void) putreg(&mdata, rp, rd, &badaddr);
1746 1737 mutex_exit(&atomic_nc_mutex);
1747 1738 return (1);
1748 1739 }
1749 1740
1750 1741 /*
1751 1742 * This function helps instr_size() determine the operand size.
1752 1743 * It is called for the extended ldda/stda asi's.
1753 1744 */
1754 1745 int
1755 1746 extended_asi_size(int asi)
1756 1747 {
1757 1748 switch (asi) {
1758 1749 case ASI_PST8_P:
1759 1750 case ASI_PST8_S:
1760 1751 case ASI_PST16_P:
1761 1752 case ASI_PST16_S:
1762 1753 case ASI_PST32_P:
1763 1754 case ASI_PST32_S:
1764 1755 case ASI_PST8_PL:
1765 1756 case ASI_PST8_SL:
1766 1757 case ASI_PST16_PL:
1767 1758 case ASI_PST16_SL:
1768 1759 case ASI_PST32_PL:
1769 1760 case ASI_PST32_SL:
1770 1761 return (8);
1771 1762 case ASI_FL8_P:
1772 1763 case ASI_FL8_S:
1773 1764 case ASI_FL8_PL:
1774 1765 case ASI_FL8_SL:
1775 1766 return (1);
1776 1767 case ASI_FL16_P:
1777 1768 case ASI_FL16_S:
1778 1769 case ASI_FL16_PL:
1779 1770 case ASI_FL16_SL:
1780 1771 return (2);
1781 1772 case ASI_BLK_P:
1782 1773 case ASI_BLK_S:
1783 1774 case ASI_BLK_PL:
1784 1775 case ASI_BLK_SL:
1785 1776 case ASI_BLK_COMMIT_P:
1786 1777 case ASI_BLK_COMMIT_S:
1787 1778 return (64);
1788 1779 }
1789 1780
1790 1781 return (0);
1791 1782 }
1792 1783
/*
 * Patch non-zero to disable preemption of threads in the kernel.
 */
int IGNORE_KERNEL_PREEMPTION = 0;	/* XXX - delete this someday */

/*
 * NOTE: these counters are incremented in kpreempt() without
 * synchronization; they are advisory statistics only.
 */
struct kpreempt_cnts {		/* kernel preemption statistics */
	int	kpc_idle;	/* executing idle thread */
	int	kpc_intr;	/* executing interrupt thread */
	int	kpc_clock;	/* executing clock thread */
	int	kpc_blocked;	/* thread has blocked preemption (t_preempt) */
	int	kpc_notonproc;	/* thread is surrendering processor */
	int	kpc_inswtch;	/* thread has ratified scheduling decision */
	int	kpc_prilevel;	/* processor interrupt level is too high */
	int	kpc_apreempt;	/* asynchronous preemption */
	int	kpc_spreempt;	/* synchronous preemption */
} kpreempt_cnts;
1809 1800
1810 1801 /*
1811 1802 * kernel preemption: forced rescheduling
1812 1803 * preempt the running kernel thread.
1813 1804 */
void
kpreempt(int asyncspl)
{
	/*
	 * asyncspl is the interrupt level to restore for an asynchronous
	 * preemption, or KPREEMPT_SYNC for a synchronous one.
	 */
	if (IGNORE_KERNEL_PREEMPTION) {
		aston(CPU->cpu_dispthread);
		return;
	}
	/*
	 * Check that conditions are right for kernel preemption
	 */
	do {
		if (curthread->t_preempt) {
			/*
			 * either a privileged thread (idle, panic, interrupt)
			 * or will check when t_preempt is lowered
			 * We need to specifically handle the case where
			 * the thread is in the middle of swtch (resume has
			 * been called) and has its t_preempt set
			 * [idle thread and a thread which is in kpreempt
			 * already] and then a high priority thread is
			 * available in the local dispatch queue.
			 * In this case the resumed thread needs to take a
			 * trap so that it can call kpreempt. We achieve
			 * this by using siron().
			 * How do we detect this condition:
			 * idle thread is running and is in the midst of
			 * resume: curthread->t_pri == -1 && CPU->dispthread
			 * != CPU->thread
			 * Need to ensure that this happens only at high pil
			 * resume is called at high pil
			 * Only in resume_from_idle is the pil changed.
			 */
			if (curthread->t_pri < 0) {
				kpreempt_cnts.kpc_idle++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			} else if (curthread->t_flag & T_INTR_THREAD) {
				kpreempt_cnts.kpc_intr++;
				if (curthread->t_pil == CLOCK_LEVEL)
					kpreempt_cnts.kpc_clock++;
			} else {
				kpreempt_cnts.kpc_blocked++;
				if (CPU->cpu_dispthread != CPU->cpu_thread)
					siron();
			}
			aston(CPU->cpu_dispthread);
			return;
		}
		if (curthread->t_state != TS_ONPROC ||
		    curthread->t_disp_queue != CPU->cpu_disp) {
			/* this thread will be calling swtch() shortly */
			kpreempt_cnts.kpc_notonproc++;
			if (CPU->cpu_thread != CPU->cpu_dispthread) {
				/* already in swtch(), force another */
				kpreempt_cnts.kpc_inswtch++;
				siron();
			}
			return;
		}

		if (((asyncspl != KPREEMPT_SYNC) ? spltoipl(asyncspl) :
		    getpil()) >= DISP_LEVEL) {
			/*
			 * We can't preempt this thread if it is at
			 * a PIL >= DISP_LEVEL since it may be holding
			 * a spin lock (like sched_lock).
			 */
			siron();	/* check back later */
			kpreempt_cnts.kpc_prilevel++;
			return;
		}

		/*
		 * block preemption so we don't have multiple preemptions
		 * pending on the interrupt stack
		 */
		curthread->t_preempt++;
		if (asyncspl != KPREEMPT_SYNC) {
			splx(asyncspl);
			kpreempt_cnts.kpc_apreempt++;
		} else
			kpreempt_cnts.kpc_spreempt++;

		preempt();
		curthread->t_preempt--;
	} while (CPU->cpu_kprunrun);
}
1901 1892
1902 1893 static enum seg_rw
1903 1894 get_accesstype(struct regs *rp)
1904 1895 {
1905 1896 uint32_t instr;
1906 1897
1907 1898 if (USERMODE(rp->r_tstate))
1908 1899 instr = fetch_user_instr((caddr_t)rp->r_pc);
1909 1900 else
1910 1901 instr = *(uint32_t *)rp->r_pc;
1911 1902
1912 1903 if (IS_FLUSH(instr))
1913 1904 return (S_OTHER);
1914 1905
1915 1906 if (IS_STORE(instr))
1916 1907 return (S_WRITE);
1917 1908 else
1918 1909 return (S_READ);
1919 1910 }
1920 1911
1921 1912 /*
1922 1913 * Handle an asynchronous hardware error.
1923 1914 * The policy is currently to send a hardware error contract event to
1924 1915 * the process's process contract and to kill the process. Eventually
1925 1916 * we may want to instead send a special signal whose default
1926 1917 * disposition is to generate the contract event.
1927 1918 */
1928 1919 void
1929 1920 trap_async_hwerr(void)
1930 1921 {
1931 1922 k_siginfo_t si;
1932 1923 proc_t *p = ttoproc(curthread);
1933 1924 extern void print_msg_hwerr(ctid_t ct_id, proc_t *p);
1934 1925
1935 1926 errorq_drain(ue_queue); /* flush pending async error messages */
1936 1927
1937 1928 print_msg_hwerr(p->p_ct_process->conp_contract.ct_id, p);
1938 1929
1939 1930 contract_process_hwerr(p->p_ct_process, p);
1940 1931
1941 1932 bzero(&si, sizeof (k_siginfo_t));
1942 1933 si.si_signo = SIGKILL;
1943 1934 si.si_code = SI_NOINFO;
1944 1935 trapsig(&si, 1);
1945 1936 }
1946 1937
1947 1938 /*
1948 1939 * Handle bus error and bus timeout for a user process by sending SIGBUS
1949 1940 * The type is either ASYNC_BERR or ASYNC_BTO.
1950 1941 */
1951 1942 void
1952 1943 trap_async_berr_bto(int type, struct regs *rp)
1953 1944 {
1954 1945 k_siginfo_t si;
1955 1946
1956 1947 errorq_drain(ue_queue); /* flush pending async error messages */
1957 1948 bzero(&si, sizeof (k_siginfo_t));
1958 1949
1959 1950 si.si_signo = SIGBUS;
1960 1951 si.si_code = (type == ASYNC_BERR ? BUS_OBJERR : BUS_ADRERR);
1961 1952 si.si_addr = (caddr_t)rp->r_pc; /* AFAR unavailable - future RFE */
1962 1953 si.si_errno = ENXIO;
1963 1954
1964 1955 trapsig(&si, 1);
1965 1956 }
↓ open down ↓ |
580 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX