Print this page
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory. The code is there and in theory it runs when we get *extremely* low
on memory. In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: document precisely what makes the current
low-on-memory definition antiquated, i.e. why its threshold is
effectively never reached on modern systems.)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sparc/v9/os/v9dep.c
+++ new/usr/src/uts/sparc/v9/os/v9dep.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
23 23 /* All Rights Reserved */
24 24
25 25 /*
26 26 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
27 27 * Use is subject to license terms.
28 28 */
29 29
30 30 #include <sys/param.h>
31 31 #include <sys/types.h>
32 32 #include <sys/vmparam.h>
33 33 #include <sys/systm.h>
34 34 #include <sys/stack.h>
35 35 #include <sys/frame.h>
36 36 #include <sys/proc.h>
37 37 #include <sys/ucontext.h>
38 38 #include <sys/cpuvar.h>
39 39 #include <sys/asm_linkage.h>
40 40 #include <sys/kmem.h>
41 41 #include <sys/errno.h>
42 42 #include <sys/bootconf.h>
43 43 #include <sys/archsystm.h>
44 44 #include <sys/fpu/fpusystm.h>
45 45 #include <sys/debug.h>
46 46 #include <sys/privregs.h>
47 47 #include <sys/machpcb.h>
48 48 #include <sys/psr_compat.h>
49 49 #include <sys/cmn_err.h>
50 50 #include <sys/asi.h>
51 51 #include <sys/copyops.h>
52 52 #include <sys/model.h>
53 53 #include <sys/panic.h>
54 54 #include <sys/exec.h>
55 55
56 56 /*
57 57 * By default, set the weakest model to TSO (Total Store Order)
58 58 * which is the default memory model on SPARC.
59 59 * If a platform does support a weaker model than TSO, this will be
60 60 * updated at runtime to reflect that.
61 61 */
62 62 uint_t weakest_mem_model = TSTATE_MM_TSO;
63 63
64 64 /*
65 65 * modify the lower 32bits of a uint64_t
66 66 */
67 67 #define SET_LOWER_32(all, lower) \
68 68 (((uint64_t)(all) & 0xffffffff00000000) | (uint32_t)(lower))
69 69
70 70 #define MEMCPY_FPU_EN 2 /* fprs on and fpu_en == 0 */
71 71
72 72 static uint_t mkpsr(uint64_t tstate, uint32_t fprs);
73 73
74 74 #ifdef _SYSCALL32_IMPL
75 75 static void fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
76 76 const struct fq32 *sfq, struct fq *dfq);
77 77 #endif /* _SYSCALL32_IMPL */
78 78
/*
 * Set floating-point registers.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
setfpregs(klwp_t *lwp, fpregset_t *fp)
{
	struct machpcb *mpcb;
	kfpu_t *pfp;		/* the lwp's saved (pcb) fpu state */
	uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);
	model_t model = lwp_getdatamodel(lwp);

	mpcb = lwptompcb(lwp);
	pfp = lwptofpu(lwp);

	/*
	 * This is always true for both "real" fp programs and memcpy fp
	 * programs, because we force fpu_en to MEMCPY_FPU_EN in getfpregs,
	 * for the memcpy and threads cases where (fpu_en == 0) &&
	 * (fpu_fprs & FPRS_FEF), if setfpregs is called after getfpregs.
	 */
	if (fp->fpu_en) {
		kpreempt_disable();

		if (!(pfp->fpu_en) && (!(pfp->fpu_fprs & FPRS_FEF)) &&
		    fpu_exists) {
			/*
			 * He's not currently using the FPU but wants to in his
			 * new context - arrange for this on return to userland.
			 */
			pfp->fpu_fprs = (uint32_t)fprs;
		}
		/*
		 * Get setfpregs to restore fpu_en to zero
		 * for the memcpy/threads case (where pfp->fpu_en == 0 &&
		 * (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 */
		if (fp->fpu_en == MEMCPY_FPU_EN)
			fp->fpu_en = 0;

		/*
		 * Load up a user's floating point context.
		 */
		if (fp->fpu_qcnt > MAXFPQ)	/* plug security holes */
			fp->fpu_qcnt = MAXFPQ;
		fp->fpu_q_entrysize = sizeof (struct fq);

		/*
		 * For v9 kernel, copy all of the fp regs.
		 * For v8 kernel, copy v8 fp regs (lower half of v9 fp regs).
		 * Restore entire fsr for v9, only lower half for v8.
		 */
		(void) kcopy(fp, pfp, sizeof (fp->fpu_fr));
		if (model == DATAMODEL_LP64)
			pfp->fpu_fsr = fp->fpu_fsr;
		else
			pfp->fpu_fsr = SET_LOWER_32(pfp->fpu_fsr, fp->fpu_fsr);
		pfp->fpu_qcnt = fp->fpu_qcnt;
		pfp->fpu_q_entrysize = fp->fpu_q_entrysize;
		pfp->fpu_en = fp->fpu_en;
		/* queue entries live in the pcb, not the caller's buffer */
		pfp->fpu_q = mpcb->mpcb_fpu_q;
		if (fp->fpu_qcnt)
			(void) kcopy(fp->fpu_q, pfp->fpu_q,
			    fp->fpu_qcnt * fp->fpu_q_entrysize);
		/* FSR ignores these bits on load, so they can not be set */
		pfp->fpu_fsr &= ~(FSR_QNE|FSR_FTT);

		/*
		 * If not the current process then resume() will handle it.
		 */
		if (lwp != ttolwp(curthread)) {
			/* force resume to reload fp regs */
			pfp->fpu_fprs |= FPRS_FEF;
			kpreempt_enable();
			return;
		}

		/*
		 * Load up FPU with new floating point context.
		 */
		if (fpu_exists) {
			pfp->fpu_fprs = _fp_read_fprs();
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* FPU was disabled; enable before loading */
				_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "setfpregs with fp disabled!\n");
#endif
			}
			/*
			 * Load all fp regs for v9 user programs, but only
			 * load the lower half for v8[plus] programs.
			 */
			if (model == DATAMODEL_LP64)
				fp_restore(pfp);
			else
				fp_v8_load(pfp);
		}

		kpreempt_enable();
	} else {
		if ((pfp->fpu_en) ||	/* normal fp case */
		    (pfp->fpu_fprs & FPRS_FEF)) { /* memcpy/threads case */
			/*
			 * Currently the lwp has floating point enabled.
			 * Turn off FPRS_FEF in user's fprs, saved and
			 * real copies thereof.
			 */
			pfp->fpu_en = 0;
			if (fpu_exists) {
				fprs = 0;
				/* only touch hardware if it is our context */
				if (lwp == ttolwp(curthread))
					_fp_write_fprs(fprs);
				pfp->fpu_fprs = (uint32_t)fprs;
			}
		}
	}
}
200 200
#ifdef _SYSCALL32_IMPL
/*
 * ILP32 entry point: widen the caller's 32-bit fp register set into
 * the native 64-bit form, then hand it to the common setfpregs() path.
 */
void
setfpregs32(klwp_t *lwp, fpregset32_t *fp)
{
	fpregset_t wide;

	fpuregset_32ton(fp, &wide, NULL, NULL);
	setfpregs(lwp, &wide);
}
#endif	/* _SYSCALL32_IMPL */
211 211
212 212 /*
213 213 * NOTE: 'lwp' might not correspond to 'curthread' since this is
214 214 * called from code in /proc to set the registers of another lwp.
215 215 */
216 216 void
217 217 run_fpq(klwp_t *lwp, fpregset_t *fp)
218 218 {
219 219 /*
220 220 * If the context being loaded up includes a floating queue,
221 221 * we need to simulate those instructions (since we can't reload
222 222 * the fpu) and pass the process any appropriate signals
223 223 */
224 224
225 225 if (lwp == ttolwp(curthread)) {
226 226 if (fpu_exists) {
227 227 if (fp->fpu_qcnt)
228 228 fp_runq(lwp->lwp_regs);
229 229 }
230 230 }
231 231 }
232 232
/*
 * Get floating-point registers.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
getfpregs(klwp_t *lwp, fpregset_t *fp)
{
	kfpu_t *pfp;		/* the lwp's saved (pcb) fpu state */
	model_t model = lwp_getdatamodel(lwp);

	pfp = lwptofpu(lwp);
	kpreempt_disable();
	/* refresh saved fprs from hardware if this is our own context */
	if (fpu_exists && ttolwp(curthread) == lwp)
		pfp->fpu_fprs = _fp_read_fprs();

	/*
	 * First check the fpu_en case, for normal fp programs.
	 * Next check the fprs case, for fp use by memcpy/threads.
	 */
	if (((fp->fpu_en = pfp->fpu_en) != 0) ||
	    (pfp->fpu_fprs & FPRS_FEF)) {
		/*
		 * Force setfpregs to restore the fp context in
		 * setfpregs for the memcpy and threads cases (where
		 * pfp->fpu_en == 0 && (pfp->fp_fprs & FPRS_FEF) == FPRS_FEF).
		 */
		if (pfp->fpu_en == 0)
			fp->fpu_en = MEMCPY_FPU_EN;
		/*
		 * If we have an fpu and the current thread owns the fp
		 * context, flush fp * registers into the pcb. Save all
		 * the fp regs for v9, xregs_getfpregs saves the upper half
		 * for v8plus. Save entire fsr for v9, only lower half for v8.
		 */
		if (fpu_exists && ttolwp(curthread) == lwp) {
			if ((pfp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				/* FPU disabled; enable so we can flush it */
				uint32_t fprs = (FPRS_FEF|FPRS_DU|FPRS_DL);

				_fp_write_fprs(fprs);
				pfp->fpu_fprs = fprs;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					    "getfpregs with fp disabled!\n");
#endif
			}
			if (model == DATAMODEL_LP64)
				fp_fksave(pfp);
			else
				fp_v8_fksave(pfp);
		}
		(void) kcopy(pfp, fp, sizeof (fp->fpu_fr));
		fp->fpu_q = pfp->fpu_q;
		if (model == DATAMODEL_LP64)
			fp->fpu_fsr = pfp->fpu_fsr;
		else
			fp->fpu_fsr = (uint32_t)pfp->fpu_fsr;
		fp->fpu_qcnt = pfp->fpu_qcnt;
		fp->fpu_q_entrysize = pfp->fpu_q_entrysize;
	} else {
		/* lwp has never used fp: report all-ones (NaN) registers */
		int i;
		for (i = 0; i < 32; i++)	/* NaN */
			((uint32_t *)fp->fpu_fr.fpu_regs)[i] = (uint32_t)-1;
		if (model == DATAMODEL_LP64) {
			for (i = 16; i < 32; i++)	/* NaN */
				((uint64_t *)fp->fpu_fr.fpu_dregs)[i] =
				    (uint64_t)-1;
		}
		fp->fpu_fsr = 0;
		fp->fpu_qcnt = 0;
	}
	kpreempt_enable();
}
307 307
#ifdef _SYSCALL32_IMPL
/*
 * ILP32 entry point: fetch the fp context in the native 64-bit form,
 * then narrow it into the caller's 32-bit fp register set.
 */
void
getfpregs32(klwp_t *lwp, fpregset32_t *fp)
{
	fpregset_t wide;

	getfpregs(lwp, &wide);
	fpuregset_nto32(&wide, fp, NULL);
}
#endif	/* _SYSCALL32_IMPL */
318 318
/*
 * Set general registers.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */

/* 64-bit gregset_t */
void
setgregs(klwp_t *lwp, gregset_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	kfpu_t *fp = lwptofpu(lwp);
	uint64_t tbits;

	int current = (lwp == curthread->t_lwp);

	if (current)
		(void) save_syscall_args();	/* copy the args first */

	/*
	 * Fold the caller-supplied CCR and ASI fields into r_tstate,
	 * leaving all other tstate bits untouched.
	 */
	tbits = (((grp[REG_CCR] & TSTATE_CCR_MASK) << TSTATE_CCR_SHIFT) |
	    ((grp[REG_ASI] & TSTATE_ASI_MASK) << TSTATE_ASI_SHIFT));
	rp->r_tstate &= ~(((uint64_t)TSTATE_CCR_MASK << TSTATE_CCR_SHIFT) |
	    ((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT));
	rp->r_tstate |= tbits;
	kpreempt_disable();
	fp->fpu_fprs = (uint32_t)grp[REG_FPRS];
	/* only touch the hardware fprs for our own, fp-enabled context */
	if (fpu_exists && (current) && (fp->fpu_fprs & FPRS_FEF))
		_fp_write_fprs(fp->fpu_fprs);
	kpreempt_enable();

	/*
	 * pc and npc must be 4-byte aligned on sparc.
	 * We silently make it so to avoid a watchdog reset.
	 */
	rp->r_pc = grp[REG_PC] & ~03L;
	rp->r_npc = grp[REG_nPC] & ~03L;
	rp->r_y = grp[REG_Y];

	rp->r_g1 = grp[REG_G1];
	rp->r_g2 = grp[REG_G2];
	rp->r_g3 = grp[REG_G3];
	rp->r_g4 = grp[REG_G4];
	rp->r_g5 = grp[REG_G5];
	rp->r_g6 = grp[REG_G6];
	rp->r_g7 = grp[REG_G7];

	rp->r_o0 = grp[REG_O0];
	rp->r_o1 = grp[REG_O1];
	rp->r_o2 = grp[REG_O2];
	rp->r_o3 = grp[REG_O3];
	rp->r_o4 = grp[REG_O4];
	rp->r_o5 = grp[REG_O5];
	rp->r_o6 = grp[REG_O6];
	rp->r_o7 = grp[REG_O7];

	if (current) {
		/*
		 * This was called from a system call, but we
		 * do not want to return via the shared window;
		 * restoring the CPU context changes everything.
		 */
		lwp->lwp_eosys = JUSTRETURN;
		curthread->t_post_sys = 1;
	}
}
384 384
/*
 * Return the general registers.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to get the registers of another lwp.
 */
void
getgregs(klwp_t *lwp, gregset_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	uint32_t fprs;

	kpreempt_disable();
	/* read live fprs for our own context, else use the saved copy */
	if (fpu_exists && ttolwp(curthread) == lwp) {
		fprs = _fp_read_fprs();
	} else {
		kfpu_t *fp = lwptofpu(lwp);
		fprs = fp->fpu_fprs;
	}
	kpreempt_enable();
	/* CCR and ASI are extracted from their fields in tstate */
	grp[REG_CCR] = (rp->r_tstate >> TSTATE_CCR_SHIFT) & TSTATE_CCR_MASK;
	grp[REG_PC] = rp->r_pc;
	grp[REG_nPC] = rp->r_npc;
	grp[REG_Y] = (uint32_t)rp->r_y;
	grp[REG_G1] = rp->r_g1;
	grp[REG_G2] = rp->r_g2;
	grp[REG_G3] = rp->r_g3;
	grp[REG_G4] = rp->r_g4;
	grp[REG_G5] = rp->r_g5;
	grp[REG_G6] = rp->r_g6;
	grp[REG_G7] = rp->r_g7;
	grp[REG_O0] = rp->r_o0;
	grp[REG_O1] = rp->r_o1;
	grp[REG_O2] = rp->r_o2;
	grp[REG_O3] = rp->r_o3;
	grp[REG_O4] = rp->r_o4;
	grp[REG_O5] = rp->r_o5;
	grp[REG_O6] = rp->r_o6;
	grp[REG_O7] = rp->r_o7;
	grp[REG_ASI] = (rp->r_tstate >> TSTATE_ASI_SHIFT) & TSTATE_ASI_MASK;
	grp[REG_FPRS] = fprs;
}
426 426
/*
 * Return the general registers in 32-bit (ILP32) form.
 * The v8 PSR is synthesized from tstate and fprs via mkpsr().
 */
void
getgregs32(klwp_t *lwp, gregset32_t grp)
{
	struct regs *rp = lwptoregs(lwp);
	uint32_t fprs;

	kpreempt_disable();
	/* read live fprs for our own context, else use the saved copy */
	if (fpu_exists && ttolwp(curthread) == lwp) {
		fprs = _fp_read_fprs();
	} else {
		kfpu_t *fp = lwptofpu(lwp);
		fprs = fp->fpu_fprs;
	}
	kpreempt_enable();
	grp[REG_PSR] = mkpsr(rp->r_tstate, fprs);
	grp[REG_PC] = rp->r_pc;
	grp[REG_nPC] = rp->r_npc;
	grp[REG_Y] = rp->r_y;
	grp[REG_G1] = rp->r_g1;
	grp[REG_G2] = rp->r_g2;
	grp[REG_G3] = rp->r_g3;
	grp[REG_G4] = rp->r_g4;
	grp[REG_G5] = rp->r_g5;
	grp[REG_G6] = rp->r_g6;
	grp[REG_G7] = rp->r_g7;
	grp[REG_O0] = rp->r_o0;
	grp[REG_O1] = rp->r_o1;
	grp[REG_O2] = rp->r_o2;
	grp[REG_O3] = rp->r_o3;
	grp[REG_O4] = rp->r_o4;
	grp[REG_O5] = rp->r_o5;
	grp[REG_O6] = rp->r_o6;
	grp[REG_O7] = rp->r_o7;
}
461 461
462 462 /*
463 463 * Return the user-level PC.
464 464 * If in a system call, return the address of the syscall trap.
465 465 */
466 466 greg_t
467 467 getuserpc()
468 468 {
469 469 return (lwptoregs(ttolwp(curthread))->r_pc);
470 470 }
471 471
/*
 * Set register windows.
 */
void
setgwins(klwp_t *lwp, gwindows_t *gwins)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = gwins->wbcnt;	/* number of windows supplied */
	caddr_t sp;
	int i;
	struct rwindow32 *rwp;
	int wbuf_rwindow_size;		/* slot size in the pcb wbuf */
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}
	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	mpcb->mpcb_wbcnt = 0;
	for (i = 0; i < wbcnt; i++) {
		sp = (caddr_t)gwins->spbuf[i];
		mpcb->mpcb_spbuf[i] = sp;
		rwp = (struct rwindow32 *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		/*
		 * A 64-bit window destined for a v9 stack is stored
		 * natively; anything else is narrowed to 32-bit form.
		 */
		if (is64 && IS_V9STACK(sp))
			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow));
		else
			rwindow_nto32(&gwins->wbuf[i], rwp);
		mpcb->mpcb_wbcnt++;
	}
}
507 507
/*
 * Set register windows from a 32-bit (ILP32) gwindows structure.
 */
void
setgwins32(klwp_t *lwp, gwindows32_t *gwins)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = gwins->wbcnt;	/* number of windows supplied */
	caddr_t sp;
	int i;

	struct rwindow *rwp;
	int wbuf_rwindow_size;		/* slot size in the pcb wbuf */
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}

	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	mpcb->mpcb_wbcnt = 0;
	for (i = 0; i < wbcnt; i++) {
		sp = (caddr_t)(uintptr_t)gwins->spbuf[i];
		mpcb->mpcb_spbuf[i] = sp;
		rwp = (struct rwindow *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		/*
		 * Widen to native form only for a v9 stack in a 64-bit
		 * wstate; otherwise keep the 32-bit layout as-is.
		 */
		if (is64 && IS_V9STACK(sp))
			rwindow_32ton(&gwins->wbuf[i], rwp);
		else
			bcopy(&gwins->wbuf[i], rwp, sizeof (struct rwindow32));
		mpcb->mpcb_wbcnt++;
	}
}
542 542
/*
 * Get register windows.
 * NOTE: 'lwp' might not correspond to 'curthread' since this is
 * called from code in /proc to set the registers of another lwp.
 */
void
getgwins(klwp_t *lwp, gwindows_t *gwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = mpcb->mpcb_wbcnt;	/* windows currently buffered */
	caddr_t sp;
	int i;
	struct rwindow32 *rwp;
	int wbuf_rwindow_size;		/* slot size in the pcb wbuf */
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}
	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	gwp->wbcnt = wbcnt;
	for (i = 0; i < wbcnt; i++) {
		sp = mpcb->mpcb_spbuf[i];
		gwp->spbuf[i] = (greg_t *)sp;
		rwp = (struct rwindow32 *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		/*
		 * Windows stored natively (64-bit, v9 stack) are copied
		 * straight out; 32-bit-form windows are widened.
		 */
		if (is64 && IS_V9STACK(sp))
			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow));
		else
			rwindow_32ton(rwp, &gwp->wbuf[i]);
	}
}
579 579
/*
 * Get register windows into a 32-bit (ILP32) gwindows structure.
 */
void
getgwins32(klwp_t *lwp, gwindows32_t *gwp)
{
	struct machpcb *mpcb = lwptompcb(lwp);
	int wbcnt = mpcb->mpcb_wbcnt;	/* windows currently buffered */
	int i;
	struct rwindow *rwp;
	int wbuf_rwindow_size;		/* slot size in the pcb wbuf */
	caddr_t sp;
	int is64;

	if (mpcb->mpcb_wstate == WSTATE_USER32) {
		wbuf_rwindow_size = WINDOWSIZE32;
		is64 = 0;
	} else {
		wbuf_rwindow_size = WINDOWSIZE64;
		is64 = 1;
	}

	ASSERT(wbcnt >= 0 && wbcnt <= SPARC_MAXREGWINDOW);
	gwp->wbcnt = wbcnt;
	for (i = 0; i < wbcnt; i++) {
		sp = mpcb->mpcb_spbuf[i];
		rwp = (struct rwindow *)
		    (mpcb->mpcb_wbuf + (i * wbuf_rwindow_size));
		gwp->spbuf[i] = (caddr32_t)(uintptr_t)sp;
		/*
		 * Natively-stored 64-bit windows are narrowed; windows
		 * already in 32-bit form are copied straight out.
		 */
		if (is64 && IS_V9STACK(sp))
			rwindow_nto32(rwp, &gwp->wbuf[i]);
		else
			bcopy(rwp, &gwp->wbuf[i], sizeof (struct rwindow32));
	}
}
612 612
/*
 * For things that depend on register state being on the stack,
 * copy any register windows that get saved into the window buffer
 * (in the pcb) onto the stack. This normally gets fixed up
 * before returning to a user program. Callers of this routine
 * require this to happen immediately because a later kernel
 * operation depends on window state (like instruction simulation).
 *
 * Returns 0 on success, or the xcopyout() error if any window could
 * not be written to the user stack; on error, *psp (if non-NULL) is
 * set to the faulting user address.
 */
int
flush_user_windows_to_stack(caddr_t *psp)
{
	int j, k;
	caddr_t sp;
	struct machpcb *mpcb = lwptompcb(ttolwp(curthread));
	int err;
	int error = 0;
	int wbuf_rwindow_size;	/* slot size in the pcb wbuf */
	int rwindow_size;	/* size actually copied to the stack */
	int stack_align;
	int watched;

	flush_user_windows();

	if (mpcb->mpcb_wstate != WSTATE_USER32)
		wbuf_rwindow_size = WINDOWSIZE64;
	else
		wbuf_rwindow_size = WINDOWSIZE32;

	j = mpcb->mpcb_wbcnt;
	while (j > 0) {
		sp = mpcb->mpcb_spbuf[--j];

		if ((mpcb->mpcb_wstate != WSTATE_USER32) &&
		    IS_V9STACK(sp)) {
			/* v9 stack pointers carry the 2047-byte bias */
			sp += V9BIAS64;
			stack_align = STACK_ALIGN64;
			rwindow_size = WINDOWSIZE64;
		} else {
			/*
			 * Reduce sp to a 32 bit value. This was originally
			 * done by casting down to uint32_t and back up to
			 * caddr_t, but one compiler didn't like that, so the
			 * uintptr_t casts were added. The temporary 32 bit
			 * variable was introduced to avoid depending on all
			 * compilers to generate the desired assembly code for a
			 * quadruple cast in a single expression.
			 */
			caddr32_t sp32 = (uint32_t)(uintptr_t)sp;
			sp = (caddr_t)(uintptr_t)sp32;

			stack_align = STACK_ALIGN32;
			rwindow_size = WINDOWSIZE32;
		}
		/* skip (but keep buffered) windows with a misaligned sp */
		if (((uintptr_t)sp & (stack_align - 1)) != 0)
			continue;

		watched = watch_disable_addr(sp, rwindow_size, S_WRITE);
		err = xcopyout(mpcb->mpcb_wbuf +
		    (j * wbuf_rwindow_size), sp, rwindow_size);
		if (err != 0) {
			if (psp != NULL) {
				/*
				 * Determine the offending address.
				 * It may not be the stack pointer itself.
				 */
				uint_t *kaddr = (uint_t *)(mpcb->mpcb_wbuf +
				    (j * wbuf_rwindow_size));
				uint_t *uaddr = (uint_t *)sp;

				/* retry word-by-word to find the fault */
				for (k = 0;
				    k < rwindow_size / sizeof (int);
				    k++, kaddr++, uaddr++) {
					if (suword32(uaddr, *kaddr))
						break;
				}

				/* can't happen? */
				if (k == rwindow_size / sizeof (int))
					uaddr = (uint_t *)sp;

				*psp = (caddr_t)uaddr;
			}
			error = err;
		} else {
			/*
			 * stack was aligned and copyout succeeded;
			 * move other windows down.
			 */
			mpcb->mpcb_wbcnt--;
			for (k = j; k < mpcb->mpcb_wbcnt; k++) {
				mpcb->mpcb_spbuf[k] = mpcb->mpcb_spbuf[k+1];
				bcopy(
				    mpcb->mpcb_wbuf +
				    ((k+1) * wbuf_rwindow_size),
				    mpcb->mpcb_wbuf +
				    (k * wbuf_rwindow_size),
				    wbuf_rwindow_size);
			}
		}
		if (watched)
			watch_enable_addr(sp, rwindow_size, S_WRITE);
	} /* while there are windows in the wbuf */
	return (error);
}
717 717
/*
 * ILP32 flavor of copy_return_window(): snapshot the caller's return
 * window(s) from the 32-bit user stack into mpcb_rwin[0]/[1], recording
 * the corresponding stack pointers in mpcb_rsp[]. Returns nonzero if
 * the first window was successfully captured.
 */
static int
copy_return_window32(int dotwo)
{
	klwp_t *lwp = ttolwp(curthread);
	struct machpcb *mpcb = lwptompcb(lwp);
	struct rwindow32 rwindow32;
	caddr_t sp1;
	caddr_t sp2;

	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		/*
		 * Reduce r_sp to a 32 bit value before storing it in sp1. This
		 * was originally done by casting down to uint32_t and back up
		 * to caddr_t, but that generated complaints under one compiler.
		 * The uintptr_t cast was added to address that, and the
		 * temporary 32 bit variable was introduced to avoid depending
		 * on all compilers to generate the desired assembly code for a
		 * triple cast in a single expression.
		 */
		caddr32_t sp1_32 = (uint32_t)lwptoregs(lwp)->r_sp;
		sp1 = (caddr_t)(uintptr_t)sp1_32;

		/*
		 * NOTE(review): rwindow_32ton is run even if the copyin
		 * failed (mpcb_rsp[0] stays NULL in that case, so the
		 * widened garbage is never trusted) — confirm intentional.
		 */
		if ((copyin_nowatch(sp1, &rwindow32,
		    sizeof (struct rwindow32))) == 0)
			mpcb->mpcb_rsp[0] = sp1;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[0]);
	}
	mpcb->mpcb_rsp[1] = NULL;
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		if ((copyin_nowatch(sp2, &rwindow32,
		    sizeof (struct rwindow32)) == 0))
			mpcb->mpcb_rsp[1] = sp2;
		rwindow_32ton(&rwindow32, &mpcb->mpcb_rwin[1]);
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}
756 756
/*
 * Snapshot the caller's return window(s) from the user stack into
 * mpcb_rwin[0]/[1], recording the (unbiased) stack pointers in
 * mpcb_rsp[]. If 'dotwo' is set, also capture the second window
 * reached through the first window's frame pointer. Delegates to
 * copy_return_window32() for ILP32 processes. Returns nonzero if
 * the first window was successfully captured.
 */
int
copy_return_window(int dotwo)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp;
	struct machpcb *mpcb;
	caddr_t sp1;
	caddr_t sp2;

	if (p->p_model == DATAMODEL_ILP32)
		return (copy_return_window32(dotwo));

	lwp = ttolwp(curthread);
	mpcb = lwptompcb(lwp);
	(void) flush_user_windows_to_stack(NULL);
	if (mpcb->mpcb_rsp[0] == NULL) {
		/* v9 stack pointers carry the stack bias */
		sp1 = (caddr_t)lwptoregs(lwp)->r_sp + STACK_BIAS;
		if ((copyin_nowatch(sp1, &mpcb->mpcb_rwin[0],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[0] = sp1 - STACK_BIAS;
	}
	mpcb->mpcb_rsp[1] = NULL;
	if (dotwo && mpcb->mpcb_rsp[0] != NULL &&
	    (sp2 = (caddr_t)mpcb->mpcb_rwin[0].rw_fp) != NULL) {
		sp2 += STACK_BIAS;
		if ((copyin_nowatch(sp2, &mpcb->mpcb_rwin[1],
		    sizeof (struct rwindow)) == 0))
			mpcb->mpcb_rsp[1] = sp2 - STACK_BIAS;
	}
	return (mpcb->mpcb_rsp[0] != NULL);
}
788 788
/*
 * Clear registers on exec(2).
 */
void
setregs(uarg_t *args)
{
	struct regs *rp;
	klwp_t *lwp = ttolwp(curthread);
	kfpu_t *fpp = lwptofpu(lwp);
	struct machpcb *mpcb = lwptompcb(lwp);
	proc_t *p = ttoproc(curthread);

	/*
	 * Initialize user registers.
	 */
	(void) save_syscall_args(); /* copy args from registers first */
	rp = lwptoregs(lwp);
	rp->r_g1 = rp->r_g2 = rp->r_g3 = rp->r_g4 = rp->r_g5 =
	    rp->r_g6 = rp->r_o0 = rp->r_o1 = rp->r_o2 =
	    rp->r_o3 = rp->r_o4 = rp->r_o5 = rp->r_o7 = 0;
	if (p->p_model == DATAMODEL_ILP32)
		rp->r_tstate = TSTATE_USER32 | weakest_mem_model;
	else
		rp->r_tstate = TSTATE_USER64 | weakest_mem_model;
	/* no FPU: make sure the FP-enabled bit is clear in tstate */
	if (!fpu_exists)
		rp->r_tstate &= ~TSTATE_PEF;
	rp->r_g7 = args->thrptr;	/* thread pointer lives in %g7 */
	rp->r_pc = args->entry;
	rp->r_npc = args->entry + 4;
	rp->r_y = 0;
	curthread->t_post_sys = 1;
	lwp->lwp_eosys = JUSTRETURN;
	lwp->lwp_pcb.pcb_trap0addr = NULL;	/* no trap 0 handler */
	/*
	 * Clear the fixalignment flag
	 */
	p->p_fixalignment = 0;

	/*
	 * Throw out old user windows, init window buf.
	 */
	trash_user_windows();

	/*
	 * Swap the window-save buffer to the size matching the new
	 * image's data model, if the wstate changed across this exec.
	 */
	if (p->p_model == DATAMODEL_LP64 &&
	    mpcb->mpcb_wstate != WSTATE_USER64) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_cache_free(wbuf32_cache, mpcb->mpcb_wbuf);
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf64_cache, KM_SLEEP);
		ASSERT(((uintptr_t)mpcb->mpcb_wbuf & 7) == 0);
		mpcb->mpcb_wstate = WSTATE_USER64;
	} else if (p->p_model == DATAMODEL_ILP32 &&
	    mpcb->mpcb_wstate != WSTATE_USER32) {
		ASSERT(mpcb->mpcb_wbcnt == 0);
		kmem_cache_free(wbuf64_cache, mpcb->mpcb_wbuf);
		mpcb->mpcb_wbuf = kmem_cache_alloc(wbuf32_cache, KM_SLEEP);
		mpcb->mpcb_wstate = WSTATE_USER32;
	}
	/* refresh cached physical addresses after any reallocation */
	mpcb->mpcb_pa = va_to_pa(mpcb);
	mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);

	/*
	 * Here we initialize minimal fpu state.
	 * The rest is done at the first floating
	 * point instruction that a process executes
	 * or by the lib_psr memcpy routines.
	 */
	if (fpu_exists) {
		extern void _fp_write_fprs(unsigned);
		_fp_write_fprs(0);
	}
	fpp->fpu_en = 0;
	fpp->fpu_fprs = 0;
}
862 862
863 -void
864 -lwp_swapin(kthread_t *tp)
865 -{
866 - struct machpcb *mpcb = lwptompcb(ttolwp(tp));
867 -
868 - mpcb->mpcb_pa = va_to_pa(mpcb);
869 - mpcb->mpcb_wbuf_pa = va_to_pa(mpcb->mpcb_wbuf);
870 -}
871 -
872 863 /*
873 864 * Construct the execution environment for the user's signal
874 865 * handler and arrange for control to be given to it on return
875 866 * to userland. The library code now calls setcontext() to
876 867 * clean up after the signal handler, so sigret() is no longer
877 868 * needed.
878 869 */
879 870 int
880 871 sendsig(int sig, k_siginfo_t *sip, void (*hdlr)())
881 872 {
882 873 /*
883 874 * 'volatile' is needed to ensure that values are
884 875 * correct on the error return from on_fault().
885 876 */
886 877 volatile int minstacksz; /* min stack required to catch signal */
887 878 int newstack = 0; /* if true, switching to altstack */
888 879 label_t ljb;
889 880 caddr_t sp;
890 881 struct regs *volatile rp;
891 882 klwp_t *lwp = ttolwp(curthread);
892 883 proc_t *volatile p = ttoproc(curthread);
893 884 int fpq_size = 0;
894 885 struct sigframe {
895 886 struct frame frwin;
896 887 ucontext_t uc;
897 888 };
898 889 siginfo_t *sip_addr;
899 890 struct sigframe *volatile fp;
900 891 ucontext_t *volatile tuc = NULL;
901 892 char *volatile xregs = NULL;
902 893 volatile size_t xregs_size = 0;
903 894 gwindows_t *volatile gwp = NULL;
904 895 volatile int gwin_size = 0;
905 896 kfpu_t *fpp;
906 897 struct machpcb *mpcb;
907 898 volatile int watched = 0;
908 899 volatile int watched2 = 0;
909 900 caddr_t tos;
910 901
911 902 /*
912 903 * Make sure the current last user window has been flushed to
913 904 * the stack save area before we change the sp.
914 905 * Restore register window if a debugger modified it.
915 906 */
916 907 (void) flush_user_windows_to_stack(NULL);
917 908 if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
918 909 xregrestore(lwp, 0);
919 910
920 911 mpcb = lwptompcb(lwp);
921 912 rp = lwptoregs(lwp);
922 913
923 914 /*
924 915 * Clear the watchpoint return stack pointers.
925 916 */
926 917 mpcb->mpcb_rsp[0] = NULL;
927 918 mpcb->mpcb_rsp[1] = NULL;
928 919
929 920 minstacksz = sizeof (struct sigframe);
930 921
931 922 /*
932 923 * We know that sizeof (siginfo_t) is stack-aligned:
933 924 * 128 bytes for ILP32, 256 bytes for LP64.
934 925 */
935 926 if (sip != NULL)
936 927 minstacksz += sizeof (siginfo_t);
937 928
938 929 /*
939 930 * These two fields are pointed to by ABI structures and may
940 931 * be of arbitrary length. Size them now so we know how big
941 932 * the signal frame has to be.
942 933 */
943 934 fpp = lwptofpu(lwp);
944 935 fpp->fpu_fprs = _fp_read_fprs();
945 936 if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
946 937 fpq_size = fpp->fpu_q_entrysize * fpp->fpu_qcnt;
947 938 minstacksz += SA(fpq_size);
948 939 }
949 940
950 941 mpcb = lwptompcb(lwp);
951 942 if (mpcb->mpcb_wbcnt != 0) {
952 943 gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow)) +
953 944 (SPARC_MAXREGWINDOW * sizeof (caddr_t)) + sizeof (long);
954 945 minstacksz += SA(gwin_size);
955 946 }
956 947
957 948 /*
958 949 * Extra registers, if support by this platform, may be of arbitrary
959 950 * length. Size them now so we know how big the signal frame has to be.
960 951 * For sparcv9 _LP64 user programs, use asrs instead of the xregs.
961 952 */
962 953 minstacksz += SA(xregs_size);
963 954
964 955 /*
965 956 * Figure out whether we will be handling this signal on
966 957 * an alternate stack specified by the user. Then allocate
967 958 * and validate the stack requirements for the signal handler
968 959 * context. on_fault will catch any faults.
969 960 */
970 961 newstack = (sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
971 962 !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));
972 963
973 964 tos = (caddr_t)rp->r_sp + STACK_BIAS;
974 965 /*
975 966 * Force proper stack pointer alignment, even in the face of a
976 967 * misaligned stack pointer from user-level before the signal.
977 968 * Don't use the SA() macro because that rounds up, not down.
978 969 */
979 970 tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN - 1ul));
980 971
981 972 if (newstack != 0) {
982 973 fp = (struct sigframe *)
983 974 (SA((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
984 975 SA((int)lwp->lwp_sigaltstack.ss_size) - STACK_ALIGN -
985 976 SA(minstacksz));
986 977 } else {
987 978 /*
988 979 * If we were unable to flush all register windows to
989 980 * the stack and we are not now on an alternate stack,
990 981 * just dump core with a SIGSEGV back in psig().
991 982 */
992 983 if (sig == SIGSEGV &&
993 984 mpcb->mpcb_wbcnt != 0 &&
994 985 !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
995 986 return (0);
996 987 fp = (struct sigframe *)(tos - SA(minstacksz));
997 988 /*
998 989 * Could call grow here, but stack growth now handled below
999 990 * in code protected by on_fault().
1000 991 */
1001 992 }
1002 993 sp = (caddr_t)fp + sizeof (struct sigframe);
1003 994
1004 995 /*
1005 996 * Make sure process hasn't trashed its stack.
1006 997 */
1007 998 if ((caddr_t)fp >= p->p_usrstack ||
1008 999 (caddr_t)fp + SA(minstacksz) >= p->p_usrstack) {
1009 1000 #ifdef DEBUG
1010 1001 printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1011 1002 PTOU(p)->u_comm, p->p_pid, sig);
1012 1003 printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
1013 1004 (void *)fp, (void *)hdlr, rp->r_pc);
1014 1005 printf("fp above USRSTACK\n");
1015 1006 #endif
1016 1007 return (0);
1017 1008 }
1018 1009
1019 1010 watched = watch_disable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1020 1011 if (on_fault(&ljb))
1021 1012 goto badstack;
1022 1013
1023 1014 tuc = kmem_alloc(sizeof (ucontext_t), KM_SLEEP);
1024 1015 savecontext(tuc, &lwp->lwp_sigoldmask);
1025 1016
1026 1017 /*
1027 1018 * save extra register state if it exists
1028 1019 */
1029 1020 if (xregs_size != 0) {
1030 1021 xregs_setptr(lwp, tuc, sp);
1031 1022 xregs = kmem_alloc(xregs_size, KM_SLEEP);
1032 1023 xregs_get(lwp, xregs);
1033 1024 copyout_noerr(xregs, sp, xregs_size);
1034 1025 kmem_free(xregs, xregs_size);
1035 1026 xregs = NULL;
1036 1027 sp += SA(xregs_size);
1037 1028 }
1038 1029
1039 1030 copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
1040 1031 kmem_free(tuc, sizeof (*tuc));
1041 1032 tuc = NULL;
1042 1033
1043 1034 if (sip != NULL) {
1044 1035 zoneid_t zoneid;
1045 1036
1046 1037 uzero(sp, sizeof (siginfo_t));
1047 1038 if (SI_FROMUSER(sip) &&
1048 1039 (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
1049 1040 zoneid != sip->si_zoneid) {
1050 1041 k_siginfo_t sani_sip = *sip;
1051 1042 sani_sip.si_pid = p->p_zone->zone_zsched->p_pid;
1052 1043 sani_sip.si_uid = 0;
1053 1044 sani_sip.si_ctid = -1;
1054 1045 sani_sip.si_zoneid = zoneid;
1055 1046 copyout_noerr(&sani_sip, sp, sizeof (sani_sip));
1056 1047 } else {
1057 1048 copyout_noerr(sip, sp, sizeof (*sip));
1058 1049 }
1059 1050 sip_addr = (siginfo_t *)sp;
1060 1051 sp += sizeof (siginfo_t);
1061 1052
1062 1053 if (sig == SIGPROF &&
1063 1054 curthread->t_rprof != NULL &&
1064 1055 curthread->t_rprof->rp_anystate) {
1065 1056 /*
1066 1057 * We stand on our head to deal with
1067 1058 * the real time profiling signal.
1068 1059 * Fill in the stuff that doesn't fit
1069 1060 * in a normal k_siginfo structure.
1070 1061 */
1071 1062 int i = sip->si_nsysarg;
1072 1063 while (--i >= 0) {
1073 1064 sulword_noerr(
1074 1065 (ulong_t *)&sip_addr->si_sysarg[i],
1075 1066 (ulong_t)lwp->lwp_arg[i]);
1076 1067 }
1077 1068 copyout_noerr(curthread->t_rprof->rp_state,
1078 1069 sip_addr->si_mstate,
1079 1070 sizeof (curthread->t_rprof->rp_state));
1080 1071 }
1081 1072 } else {
1082 1073 sip_addr = (siginfo_t *)NULL;
1083 1074 }
1084 1075
1085 1076 /*
1086 1077 * When flush_user_windows_to_stack() can't save all the
1087 1078 * windows to the stack, it puts them in the lwp's pcb.
1088 1079 */
1089 1080 if (gwin_size != 0) {
1090 1081 gwp = kmem_alloc(gwin_size, KM_SLEEP);
1091 1082 getgwins(lwp, gwp);
1092 1083 sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)sp);
1093 1084 copyout_noerr(gwp, sp, gwin_size);
1094 1085 kmem_free(gwp, gwin_size);
1095 1086 gwp = NULL;
1096 1087 sp += SA(gwin_size);
1097 1088 } else
1098 1089 sulword_noerr(&fp->uc.uc_mcontext.gwins, (ulong_t)NULL);
1099 1090
1100 1091 if (fpq_size != 0) {
1101 1092 struct fq *fqp = (struct fq *)sp;
1102 1093 sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)fqp);
1103 1094 copyout_noerr(mpcb->mpcb_fpu_q, fqp, fpq_size);
1104 1095
1105 1096 /*
1106 1097 * forget the fp queue so that the signal handler can run
1107 1098 * without being harrassed--it will do a setcontext that will
1108 1099 * re-establish the queue if there still is one
1109 1100 *
1110 1101 * NOTE: fp_runq() relies on the qcnt field being zeroed here
1111 1102 * to terminate its processing of the queue after signal
1112 1103 * delivery.
1113 1104 */
1114 1105 mpcb->mpcb_fpu->fpu_qcnt = 0;
1115 1106 sp += SA(fpq_size);
1116 1107
1117 1108 /* Also, syscall needs to know about this */
1118 1109 mpcb->mpcb_flags |= FP_TRAPPED;
1119 1110
1120 1111 } else {
1121 1112 sulword_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q, (ulong_t)NULL);
1122 1113 suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
1123 1114 }
1124 1115
1125 1116
1126 1117 /*
1127 1118 * Since we flushed the user's windows and we are changing his
1128 1119 * stack pointer, the window that the user will return to will
1129 1120 * be restored from the save area in the frame we are setting up.
1130 1121 * We copy in save area for old stack pointer so that debuggers
1131 1122 * can do a proper stack backtrace from the signal handler.
1132 1123 */
1133 1124 if (mpcb->mpcb_wbcnt == 0) {
1134 1125 watched2 = watch_disable_addr(tos, sizeof (struct rwindow),
1135 1126 S_READ);
1136 1127 ucopy(tos, &fp->frwin, sizeof (struct rwindow));
1137 1128 }
1138 1129
1139 1130 lwp->lwp_oldcontext = (uintptr_t)&fp->uc;
1140 1131
1141 1132 if (newstack != 0) {
1142 1133 lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
1143 1134
1144 1135 if (lwp->lwp_ustack) {
1145 1136 copyout_noerr(&lwp->lwp_sigaltstack,
1146 1137 (stack_t *)lwp->lwp_ustack, sizeof (stack_t));
1147 1138 }
1148 1139 }
1149 1140
1150 1141 no_fault();
1151 1142 mpcb->mpcb_wbcnt = 0; /* let user go on */
1152 1143
1153 1144 if (watched2)
1154 1145 watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1155 1146 if (watched)
1156 1147 watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1157 1148
1158 1149 /*
1159 1150 * Set up user registers for execution of signal handler.
1160 1151 */
1161 1152 rp->r_sp = (uintptr_t)fp - STACK_BIAS;
1162 1153 rp->r_pc = (uintptr_t)hdlr;
1163 1154 rp->r_npc = (uintptr_t)hdlr + 4;
1164 1155 /* make sure %asi is ASI_PNF */
1165 1156 rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
1166 1157 rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
1167 1158 rp->r_o0 = sig;
1168 1159 rp->r_o1 = (uintptr_t)sip_addr;
1169 1160 rp->r_o2 = (uintptr_t)&fp->uc;
1170 1161 /*
1171 1162 * Don't set lwp_eosys here. sendsig() is called via psig() after
1172 1163 * lwp_eosys is handled, so setting it here would affect the next
1173 1164 * system call.
1174 1165 */
1175 1166 return (1);
1176 1167
1177 1168 badstack:
1178 1169 no_fault();
1179 1170 if (watched2)
1180 1171 watch_enable_addr(tos, sizeof (struct rwindow), S_READ);
1181 1172 if (watched)
1182 1173 watch_enable_addr((caddr_t)fp, SA(minstacksz), S_WRITE);
1183 1174 if (tuc)
1184 1175 kmem_free(tuc, sizeof (ucontext_t));
1185 1176 if (xregs)
1186 1177 kmem_free(xregs, xregs_size);
1187 1178 if (gwp)
1188 1179 kmem_free(gwp, gwin_size);
1189 1180 #ifdef DEBUG
1190 1181 printf("sendsig: bad signal stack cmd=%s, pid=%d, sig=%d\n",
1191 1182 PTOU(p)->u_comm, p->p_pid, sig);
1192 1183 printf("on fault, sigsp = %p, action = %p, upc = 0x%lx\n",
1193 1184 (void *)fp, (void *)hdlr, rp->r_pc);
1194 1185 #endif
1195 1186 return (0);
1196 1187 }
1197 1188
1198 1189
1199 1190 #ifdef _SYSCALL32_IMPL
1200 1191
1201 1192 /*
1202 1193 * Construct the execution environment for the user's signal
1203 1194 * handler and arrange for control to be given to it on return
1204 1195 * to userland. The library code now calls setcontext() to
1205 1196 * clean up after the signal handler, so sigret() is no longer
1206 1197 * needed.
1207 1198 */
int
sendsig32(int sig, k_siginfo_t *sip, void (*hdlr)())
{
	/*
	 * 'volatile' is needed to ensure that values are
	 * correct on the error return from on_fault().
	 */
	volatile int minstacksz; /* min stack required to catch signal */
	int newstack = 0;	/* if true, switching to altstack */
	label_t ljb;
	caddr_t sp;
	struct regs *volatile rp;
	klwp_t *lwp = ttolwp(curthread);
	proc_t *volatile p = ttoproc(curthread);
	struct fq32 fpu_q[MAXFPQ]; /* to hold floating queue */
	struct fq32 *dfq = NULL;
	size_t fpq_size = 0;
	/* layout of the frame this routine builds on the user stack */
	struct sigframe32 {
		struct frame32 frwin;
		ucontext32_t uc;
	};
	struct sigframe32 *volatile fp;
	siginfo32_t *sip_addr;
	ucontext32_t *volatile tuc = NULL;
	char *volatile xregs = NULL;
	volatile int xregs_size = 0;
	gwindows32_t *volatile gwp = NULL;
	volatile size_t gwin_size = 0;
	kfpu_t *fpp;
	struct machpcb *mpcb;
	volatile int watched = 0;
	volatile int watched2 = 0;
	caddr_t tos;

	/*
	 * Make sure the current last user window has been flushed to
	 * the stack save area before we change the sp.
	 * Restore register window if a debugger modified it.
	 */
	(void) flush_user_windows_to_stack(NULL);
	if (lwp->lwp_pcb.pcb_xregstat != XREGNONE)
		xregrestore(lwp, 0);

	mpcb = lwptompcb(lwp);
	rp = lwptoregs(lwp);

	/*
	 * Clear the watchpoint return stack pointers.
	 */
	mpcb->mpcb_rsp[0] = NULL;
	mpcb->mpcb_rsp[1] = NULL;

	/* Fixed-size part of the frame ... */
	minstacksz = sizeof (struct sigframe32);

	/* ... plus a siginfo, if one is being delivered. */
	if (sip != NULL)
		minstacksz += sizeof (siginfo32_t);

	/*
	 * These two fields are pointed to by ABI structures and may
	 * be of arbitrary length. Size them now so we know how big
	 * the signal frame has to be.
	 */
	fpp = lwptofpu(lwp);
	fpp->fpu_fprs = _fp_read_fprs();
	if ((fpp->fpu_en) || (fpp->fpu_fprs & FPRS_FEF)) {
		fpq_size = sizeof (struct fpq32) * fpp->fpu_qcnt;
		minstacksz += fpq_size;
		dfq = fpu_q;	/* buffer handed to savecontext32() below */
	}

	mpcb = lwptompcb(lwp);
	if (mpcb->mpcb_wbcnt != 0) {
		/* wbcnt saved windows + per-window sp's + the count itself */
		gwin_size = (mpcb->mpcb_wbcnt * sizeof (struct rwindow32)) +
		    (SPARC_MAXREGWINDOW * sizeof (caddr32_t)) +
		    sizeof (int32_t);
		minstacksz += gwin_size;
	}

	/*
	 * Extra registers, if supported by this platform, may be of arbitrary
	 * length. Size them now so we know how big the signal frame has to be.
	 */
	xregs_size = xregs_getsize(p);
	minstacksz += SA32(xregs_size);

	/*
	 * Figure out whether we will be handling this signal on
	 * an alternate stack specified by the user. Then allocate
	 * and validate the stack requirements for the signal handler
	 * context. on_fault will catch any faults.
	 */
	newstack = (sigismember(&PTOU(curproc)->u_sigonstack, sig) &&
	    !(lwp->lwp_sigaltstack.ss_flags & (SS_ONSTACK|SS_DISABLE)));

	tos = (void *)(uintptr_t)(uint32_t)rp->r_sp;
	/*
	 * Force proper stack pointer alignment, even in the face of a
	 * misaligned stack pointer from user-level before the signal.
	 * Don't use the SA32() macro because that rounds up, not down.
	 */
	tos = (caddr_t)((uintptr_t)tos & ~(STACK_ALIGN32 - 1ul));

	if (newstack != 0) {
		/* Place the frame at the top of the alternate stack. */
		fp = (struct sigframe32 *)
		    (SA32((uintptr_t)lwp->lwp_sigaltstack.ss_sp) +
		    SA32((int)lwp->lwp_sigaltstack.ss_size) -
		    STACK_ALIGN32 -
		    SA32(minstacksz));
	} else {
		/*
		 * If we were unable to flush all register windows to
		 * the stack and we are not now on an alternate stack,
		 * just dump core with a SIGSEGV back in psig().
		 */
		if (sig == SIGSEGV &&
		    mpcb->mpcb_wbcnt != 0 &&
		    !(lwp->lwp_sigaltstack.ss_flags & SS_ONSTACK))
			return (0);
		fp = (struct sigframe32 *)(tos - SA32(minstacksz));
		/*
		 * Could call grow here, but stack growth now handled below
		 * in code protected by on_fault().
		 */
	}
	/* variable-length items are copied out just past the frame */
	sp = (caddr_t)fp + sizeof (struct sigframe32);

	/*
	 * Make sure process hasn't trashed its stack.
	 */
	if ((caddr_t)fp >= p->p_usrstack ||
	    (caddr_t)fp + SA32(minstacksz) >= p->p_usrstack) {
#ifdef DEBUG
		printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
		    PTOU(p)->u_comm, p->p_pid, sig);
		printf("sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
		    (void *)fp, (void *)hdlr, rp->r_pc);
		printf("fp above USRSTACK32\n");
#endif
		return (0);
	}

	/* Suspend watchpoints over the frame while we write it. */
	watched = watch_disable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (on_fault(&ljb))
		goto badstack;

	tuc = kmem_alloc(sizeof (ucontext32_t), KM_SLEEP);
	savecontext32(tuc, &lwp->lwp_sigoldmask, dfq);

	/*
	 * save extra register state if it exists
	 */
	if (xregs_size != 0) {
		xregs_setptr32(lwp, tuc, (caddr32_t)(uintptr_t)sp);
		xregs = kmem_alloc(xregs_size, KM_SLEEP);
		xregs_get(lwp, xregs);
		copyout_noerr(xregs, sp, xregs_size);
		kmem_free(xregs, xregs_size);
		xregs = NULL;
		sp += SA32(xregs_size);
	}

	/* The saved context lands inside the sigframe proper. */
	copyout_noerr(tuc, &fp->uc, sizeof (*tuc));
	kmem_free(tuc, sizeof (*tuc));
	tuc = NULL;

	if (sip != NULL) {
		siginfo32_t si32;
		zoneid_t zoneid;

		siginfo_kto32(sip, &si32);
		/*
		 * Don't leak sender identity from another zone into a
		 * non-global zone: substitute zsched's pid, uid 0, etc.
		 */
		if (SI_FROMUSER(sip) &&
		    (zoneid = p->p_zone->zone_id) != GLOBAL_ZONEID &&
		    zoneid != sip->si_zoneid) {
			si32.si_pid = p->p_zone->zone_zsched->p_pid;
			si32.si_uid = 0;
			si32.si_ctid = -1;
			si32.si_zoneid = zoneid;
		}
		uzero(sp, sizeof (siginfo32_t));
		copyout_noerr(&si32, sp, sizeof (siginfo32_t));
		sip_addr = (siginfo32_t *)sp;
		sp += sizeof (siginfo32_t);

		if (sig == SIGPROF &&
		    curthread->t_rprof != NULL &&
		    curthread->t_rprof->rp_anystate) {
			/*
			 * We stand on our head to deal with
			 * the real time profiling signal.
			 * Fill in the stuff that doesn't fit
			 * in a normal k_siginfo structure.
			 */
			int i = sip->si_nsysarg;
			while (--i >= 0) {
				suword32_noerr(&sip_addr->si_sysarg[i],
				    (uint32_t)lwp->lwp_arg[i]);
			}
			copyout_noerr(curthread->t_rprof->rp_state,
			    sip_addr->si_mstate,
			    sizeof (curthread->t_rprof->rp_state));
		}
	} else {
		sip_addr = NULL;
	}

	/*
	 * When flush_user_windows_to_stack() can't save all the
	 * windows to the stack, it puts them in the lwp's pcb.
	 */
	if (gwin_size != 0) {
		gwp = kmem_alloc(gwin_size, KM_SLEEP);
		getgwins32(lwp, gwp);
		suword32_noerr(&fp->uc.uc_mcontext.gwins,
		    (uint32_t)(uintptr_t)sp);
		copyout_noerr(gwp, sp, gwin_size);
		kmem_free(gwp, gwin_size);
		gwp = NULL;
		sp += gwin_size;
	} else {
		suword32_noerr(&fp->uc.uc_mcontext.gwins, (uint32_t)NULL);
	}

	if (fpq_size != 0) {
		/*
		 * Update the (already copied out) fpu32.fpu_q pointer
		 * from NULL to the 32-bit address on the user's stack
		 * where we then copyout the fq32 to.
		 */
		struct fq32 *fqp = (struct fq32 *)sp;
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
		    (uint32_t)(uintptr_t)fqp);
		copyout_noerr(dfq, fqp, fpq_size);

		/*
		 * forget the fp queue so that the signal handler can run
		 * without being harrassed--it will do a setcontext that will
		 * re-establish the queue if there still is one
		 *
		 * NOTE: fp_runq() relies on the qcnt field being zeroed here
		 * to terminate its processing of the queue after signal
		 * delivery.
		 */
		mpcb->mpcb_fpu->fpu_qcnt = 0;
		sp += fpq_size;

		/* Also, syscall needs to know about this */
		mpcb->mpcb_flags |= FP_TRAPPED;

	} else {
		suword32_noerr(&fp->uc.uc_mcontext.fpregs.fpu_q,
		    (uint32_t)NULL);
		suword8_noerr(&fp->uc.uc_mcontext.fpregs.fpu_qcnt, 0);
	}


	/*
	 * Since we flushed the user's windows and we are changing his
	 * stack pointer, the window that the user will return to will
	 * be restored from the save area in the frame we are setting up.
	 * We copy in save area for old stack pointer so that debuggers
	 * can do a proper stack backtrace from the signal handler.
	 */
	if (mpcb->mpcb_wbcnt == 0) {
		watched2 = watch_disable_addr(tos, sizeof (struct rwindow32),
		    S_READ);
		ucopy(tos, &fp->frwin, sizeof (struct rwindow32));
	}

	lwp->lwp_oldcontext = (uintptr_t)&fp->uc;

	if (newstack != 0) {
		lwp->lwp_sigaltstack.ss_flags |= SS_ONSTACK;
		if (lwp->lwp_ustack) {
			stack32_t stk32;

			stk32.ss_sp =
			    (caddr32_t)(uintptr_t)lwp->lwp_sigaltstack.ss_sp;
			stk32.ss_size = (size32_t)lwp->lwp_sigaltstack.ss_size;
			stk32.ss_flags = (int32_t)lwp->lwp_sigaltstack.ss_flags;

			copyout_noerr(&stk32, (stack32_t *)lwp->lwp_ustack,
			    sizeof (stack32_t));
		}
	}

	no_fault();
	mpcb->mpcb_wbcnt = 0;		/* let user go on */

	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);

	/*
	 * Set up user registers for execution of signal handler.
	 */
	rp->r_sp = (uintptr_t)fp;
	rp->r_pc = (uintptr_t)hdlr;
	rp->r_npc = (uintptr_t)hdlr + 4;
	/* make sure %asi is ASI_PNF */
	rp->r_tstate &= ~((uint64_t)TSTATE_ASI_MASK << TSTATE_ASI_SHIFT);
	rp->r_tstate |= ((uint64_t)ASI_PNF << TSTATE_ASI_SHIFT);
	rp->r_o0 = sig;
	rp->r_o1 = (uintptr_t)sip_addr;
	rp->r_o2 = (uintptr_t)&fp->uc;
	/*
	 * Don't set lwp_eosys here. sendsig() is called via psig() after
	 * lwp_eosys is handled, so setting it here would affect the next
	 * system call.
	 */
	return (1);

badstack:
	/* Fault while building the frame: release everything and fail. */
	no_fault();
	if (watched2)
		watch_enable_addr(tos, sizeof (struct rwindow32), S_READ);
	if (watched)
		watch_enable_addr((caddr_t)fp, SA32(minstacksz), S_WRITE);
	if (tuc)
		kmem_free(tuc, sizeof (*tuc));
	if (xregs)
		kmem_free(xregs, xregs_size);
	if (gwp)
		kmem_free(gwp, gwin_size);
#ifdef DEBUG
	printf("sendsig32: bad signal stack cmd=%s, pid=%d, sig=%d\n",
	    PTOU(p)->u_comm, p->p_pid, sig);
	printf("on fault, sigsp = 0x%p, action = 0x%p, upc = 0x%lx\n",
	    (void *)fp, (void *)hdlr, rp->r_pc);
#endif
	return (0);
}
1540 1531
1541 1532 #endif /* _SYSCALL32_IMPL */
1542 1533
1543 1534
1544 1535 /*
1545 1536 * Load user registers into lwp. Called only from syslwp_create().
1546 1537 * thrptr ignored for sparc.
1547 1538 */
1548 1539 /* ARGSUSED2 */
1549 1540 void
1550 1541 lwp_load(klwp_t *lwp, gregset_t grp, uintptr_t thrptr)
1551 1542 {
1552 1543 setgregs(lwp, grp);
1553 1544 if (lwptoproc(lwp)->p_model == DATAMODEL_ILP32)
1554 1545 lwptoregs(lwp)->r_tstate = TSTATE_USER32 | TSTATE_MM_TSO;
1555 1546 else
1556 1547 lwptoregs(lwp)->r_tstate = TSTATE_USER64 | TSTATE_MM_TSO;
1557 1548
1558 1549 if (!fpu_exists)
1559 1550 lwptoregs(lwp)->r_tstate &= ~TSTATE_PEF;
1560 1551 lwp->lwp_eosys = JUSTRETURN;
1561 1552 lwptot(lwp)->t_post_sys = 1;
1562 1553 }
1563 1554
1564 1555 /*
1565 1556 * set syscall()'s return values for a lwp.
1566 1557 */
1567 1558 void
1568 1559 lwp_setrval(klwp_t *lwp, int v1, int v2)
1569 1560 {
1570 1561 struct regs *rp = lwptoregs(lwp);
1571 1562
1572 1563 rp->r_tstate &= ~TSTATE_IC;
1573 1564 rp->r_o0 = v1;
1574 1565 rp->r_o1 = v2;
1575 1566 }
1576 1567
1577 1568 /*
1578 1569 * set stack pointer for a lwp
1579 1570 */
1580 1571 void
1581 1572 lwp_setsp(klwp_t *lwp, caddr_t sp)
1582 1573 {
1583 1574 struct regs *rp = lwptoregs(lwp);
1584 1575 rp->r_sp = (uintptr_t)sp;
1585 1576 }
1586 1577
1587 1578 /*
1588 1579 * Take any PCB specific actions that are required or flagged in the PCB.
1589 1580 */
1590 1581 extern void trap_async_hwerr(void);
1591 1582 #pragma weak trap_async_hwerr
1592 1583
1593 1584 void
1594 1585 lwp_pcb_exit(void)
1595 1586 {
1596 1587 klwp_t *lwp = ttolwp(curthread);
1597 1588
1598 1589 if (lwp->lwp_pcb.pcb_flags & ASYNC_HWERR) {
1599 1590 lwp->lwp_pcb.pcb_flags &= ~ASYNC_HWERR;
1600 1591 trap_async_hwerr();
1601 1592 }
1602 1593 }
1603 1594
1604 1595 /*
1605 1596 * Invalidate the saved user register windows in the pcb struct
1606 1597 * for the current thread. They will no longer be preserved.
1607 1598 */
1608 1599 void
1609 1600 lwp_clear_uwin(void)
1610 1601 {
1611 1602 struct machpcb *m = lwptompcb(ttolwp(curthread));
1612 1603
1613 1604 /*
1614 1605 * This has the effect of invalidating all (any) of the
1615 1606 * user level windows that are currently sitting in the
1616 1607 * kernel buffer.
1617 1608 */
1618 1609 m->mpcb_wbcnt = 0;
1619 1610 }
1620 1611
1621 1612 /*
1622 1613 * Set memory model to Total Store Order (TSO).
1623 1614 */
1624 1615 static void
1625 1616 mmodel_set_tso(void)
1626 1617 {
1627 1618 struct regs *rp = lwptoregs(ttolwp(curthread));
1628 1619
1629 1620 /*
1630 1621 * The thread is doing something which requires TSO semantics
1631 1622 * (creating a 2nd thread, or mapping writable shared memory).
1632 1623 * It's no longer safe to run in WC mode.
1633 1624 */
1634 1625 rp->r_tstate &= ~TSTATE_MM;
1635 1626 /* LINTED E_EXPR_NULL_EFFECT */
1636 1627 rp->r_tstate |= TSTATE_MM_TSO;
1637 1628 }
1638 1629
1639 1630 /*
1640 1631 * When this routine is invoked, the process is just about to add a new lwp;
1641 1632 * making it multi threaded.
1642 1633 *
1643 1634 * If the program requires default stronger/legacy memory model semantics,
1644 1635 * this is an indication that the processor memory model
1645 1636 * should be altered to provide those semantics.
1646 1637 */
void
lwp_mmodel_newlwp(void)
{
	/*
	 * A second lwp has just been created, which makes WC mode
	 * unsafe for this process: fall back to TSO ordering.
	 */
	mmodel_set_tso();
}
1656 1647
1657 1648 /*
1658 1649 * This routine is invoked immediately after the lwp has added a mapping
1659 1650 * to shared memory to its address space. The mapping starts at address
1660 1651 * 'addr' and extends for 'size' bytes.
1661 1652 *
1662 1653 * Unless we can (somehow) guarantee that all the processes we're sharing
1663 1654 * the underlying mapped object with, are using the same memory model that
1664 1655 * this process is using, this call should change the memory model
1665 1656 * configuration of the processor to be the most pessimistic available.
1666 1657 */
1667 1658 /* ARGSUSED */
1668 1659 void
1669 1660 lwp_mmodel_shared_as(caddr_t addr, size_t sz)
1670 1661 {
1671 1662 /*
1672 1663 * lwp has mapped shared memory and is no longer safe
1673 1664 * to run in WC mode, so revert back to TSO.
1674 1665 * For now, any shared memory access is enough to get back to TSO
1675 1666 * and hence not checking on 'addr' & 'sz'.
1676 1667 */
1677 1668 mmodel_set_tso();
1678 1669 }
1679 1670
1680 1671 static uint_t
1681 1672 mkpsr(uint64_t tstate, uint_t fprs)
1682 1673 {
1683 1674 uint_t psr, icc;
1684 1675
1685 1676 psr = tstate & TSTATE_CWP_MASK;
1686 1677 if (tstate & TSTATE_PRIV)
1687 1678 psr |= PSR_PS;
1688 1679 if (fprs & FPRS_FEF)
1689 1680 psr |= PSR_EF;
1690 1681 icc = (uint_t)(tstate >> PSR_TSTATE_CC_SHIFT) & PSR_ICC;
1691 1682 psr |= icc;
1692 1683 psr |= V9_PSR_IMPLVER;
1693 1684 return (psr);
1694 1685 }
1695 1686
1696 1687 void
1697 1688 sync_icache(caddr_t va, uint_t len)
1698 1689 {
1699 1690 caddr_t end;
1700 1691
1701 1692 end = va + len;
1702 1693 va = (caddr_t)((uintptr_t)va & -8l); /* sparc needs 8-byte align */
1703 1694 while (va < end) {
1704 1695 doflush(va);
1705 1696 va += 8;
1706 1697 }
1707 1698 }
1708 1699
1709 1700 #ifdef _SYSCALL32_IMPL
1710 1701
1711 1702 /*
1712 1703 * Copy the floating point queue if and only if there is a queue and a place
1713 1704 * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1714 1705 * The issue is that while we are handling the fq32 in sendsig, we
1715 1706 * still need a 64-bit pointer to it, and the caddr32_t in fpregset32_t
1716 1707 * will not suffice, so we have the third parameter to this function.
1717 1708 */
1718 1709 void
1719 1710 fpuregset_nto32(const fpregset_t *src, fpregset32_t *dest, struct fq32 *dfq)
1720 1711 {
1721 1712 int i;
1722 1713
1723 1714 bzero(dest, sizeof (*dest));
1724 1715 for (i = 0; i < 32; i++)
1725 1716 dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1726 1717 dest->fpu_q = NULL;
1727 1718 dest->fpu_fsr = (uint32_t)src->fpu_fsr;
1728 1719 dest->fpu_qcnt = src->fpu_qcnt;
1729 1720 dest->fpu_q_entrysize = sizeof (struct fpq32);
1730 1721 dest->fpu_en = src->fpu_en;
1731 1722
1732 1723 if ((src->fpu_qcnt) && (dfq != NULL)) {
1733 1724 struct fq *sfq = src->fpu_q;
1734 1725 for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1735 1726 dfq->FQu.fpq.fpq_addr =
1736 1727 (caddr32_t)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1737 1728 dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1738 1729 }
1739 1730 }
1740 1731 }
1741 1732
1742 1733 /*
1743 1734 * Copy the floating point queue if and only if there is a queue and a place
1744 1735 * to copy it to. Let xregs take care of the other fp regs, for v8plus.
1745 1736 * The *dfq is required to escape the bzero in both this function and in
1746 1737 * ucontext_32ton. The *sfq is required because once the fq32 is copied
1747 1738 * into the kernel, in setcontext, then we need a 64-bit pointer to it.
1748 1739 */
1749 1740 static void
1750 1741 fpuregset_32ton(const fpregset32_t *src, fpregset_t *dest,
1751 1742 const struct fq32 *sfq, struct fq *dfq)
1752 1743 {
1753 1744 int i;
1754 1745
1755 1746 bzero(dest, sizeof (*dest));
1756 1747 for (i = 0; i < 32; i++)
1757 1748 dest->fpu_fr.fpu_regs[i] = src->fpu_fr.fpu_regs[i];
1758 1749 dest->fpu_q = dfq;
1759 1750 dest->fpu_fsr = (uint64_t)src->fpu_fsr;
1760 1751 if ((dest->fpu_qcnt = src->fpu_qcnt) > 0)
1761 1752 dest->fpu_q_entrysize = sizeof (struct fpq);
1762 1753 else
1763 1754 dest->fpu_q_entrysize = 0;
1764 1755 dest->fpu_en = src->fpu_en;
1765 1756
1766 1757 if ((src->fpu_qcnt) && (sfq) && (dfq)) {
1767 1758 for (i = 0; i < src->fpu_qcnt; i++, dfq++, sfq++) {
1768 1759 dfq->FQu.fpq.fpq_addr =
1769 1760 (unsigned int *)(uintptr_t)sfq->FQu.fpq.fpq_addr;
1770 1761 dfq->FQu.fpq.fpq_instr = sfq->FQu.fpq.fpq_instr;
1771 1762 }
1772 1763 }
1773 1764 }
1774 1765
1775 1766 void
1776 1767 ucontext_32ton(const ucontext32_t *src, ucontext_t *dest,
1777 1768 const struct fq32 *sfq, struct fq *dfq)
1778 1769 {
1779 1770 int i;
1780 1771
1781 1772 bzero(dest, sizeof (*dest));
1782 1773
1783 1774 dest->uc_flags = src->uc_flags;
1784 1775 dest->uc_link = (ucontext_t *)(uintptr_t)src->uc_link;
1785 1776
1786 1777 for (i = 0; i < 4; i++) {
1787 1778 dest->uc_sigmask.__sigbits[i] = src->uc_sigmask.__sigbits[i];
1788 1779 }
1789 1780
1790 1781 dest->uc_stack.ss_sp = (void *)(uintptr_t)src->uc_stack.ss_sp;
1791 1782 dest->uc_stack.ss_size = (size_t)src->uc_stack.ss_size;
1792 1783 dest->uc_stack.ss_flags = src->uc_stack.ss_flags;
1793 1784
1794 1785 /* REG_CCR is 0, skip over it and handle it after this loop */
1795 1786 for (i = 1; i < _NGREG32; i++)
1796 1787 dest->uc_mcontext.gregs[i] =
1797 1788 (greg_t)(uint32_t)src->uc_mcontext.gregs[i];
1798 1789 dest->uc_mcontext.gregs[REG_CCR] =
1799 1790 (src->uc_mcontext.gregs[REG_PSR] & PSR_ICC) >> PSR_ICC_SHIFT;
1800 1791 dest->uc_mcontext.gregs[REG_ASI] = ASI_PNF;
1801 1792 /*
1802 1793 * A valid fpregs is only copied in if (uc.uc_flags & UC_FPU),
1803 1794 * otherwise there is no guarantee that anything in fpregs is valid.
1804 1795 */
1805 1796 if (src->uc_flags & UC_FPU) {
1806 1797 dest->uc_mcontext.gregs[REG_FPRS] =
1807 1798 ((src->uc_mcontext.fpregs.fpu_en) ?
1808 1799 (FPRS_DU|FPRS_DL|FPRS_FEF) : 0);
1809 1800 } else {
1810 1801 dest->uc_mcontext.gregs[REG_FPRS] = 0;
1811 1802 }
1812 1803 dest->uc_mcontext.gwins =
1813 1804 (gwindows_t *)(uintptr_t)src->uc_mcontext.gwins;
1814 1805 if (src->uc_flags & UC_FPU) {
1815 1806 fpuregset_32ton(&src->uc_mcontext.fpregs,
1816 1807 &dest->uc_mcontext.fpregs, sfq, dfq);
1817 1808 }
1818 1809 }
1819 1810
1820 1811 void
1821 1812 rwindow_nto32(struct rwindow *src, struct rwindow32 *dest)
1822 1813 {
1823 1814 greg_t *s = (greg_t *)src;
1824 1815 greg32_t *d = (greg32_t *)dest;
1825 1816 int i;
1826 1817
1827 1818 for (i = 0; i < 16; i++)
1828 1819 *d++ = (greg32_t)*s++;
1829 1820 }
1830 1821
1831 1822 void
1832 1823 rwindow_32ton(struct rwindow32 *src, struct rwindow *dest)
1833 1824 {
1834 1825 greg32_t *s = (greg32_t *)src;
1835 1826 greg_t *d = (greg_t *)dest;
1836 1827 int i;
1837 1828
1838 1829 for (i = 0; i < 16; i++)
1839 1830 *d++ = (uint32_t)*s++;
1840 1831 }
1841 1832
1842 1833 #endif /* _SYSCALL32_IMPL */
1843 1834
1844 1835 /*
1845 1836 * The panic code invokes panic_saveregs() to record the contents of a
1846 1837 * regs structure into the specified panic_data structure for debuggers.
1847 1838 */
void
panic_saveregs(panic_data_t *pdp, struct regs *rp)
{
	panic_nv_t *pnv = PANICNVGET(pdp);

	/*
	 * Append each register as a name/value pair to the panic
	 * buffer, then store the updated cursor back into *pdp.
	 */
	PANICNVADD(pnv, "tstate", rp->r_tstate);
	PANICNVADD(pnv, "g1", rp->r_g1);
	PANICNVADD(pnv, "g2", rp->r_g2);
	PANICNVADD(pnv, "g3", rp->r_g3);
	PANICNVADD(pnv, "g4", rp->r_g4);
	PANICNVADD(pnv, "g5", rp->r_g5);
	PANICNVADD(pnv, "g6", rp->r_g6);
	PANICNVADD(pnv, "g7", rp->r_g7);
	PANICNVADD(pnv, "o0", rp->r_o0);
	PANICNVADD(pnv, "o1", rp->r_o1);
	PANICNVADD(pnv, "o2", rp->r_o2);
	PANICNVADD(pnv, "o3", rp->r_o3);
	PANICNVADD(pnv, "o4", rp->r_o4);
	PANICNVADD(pnv, "o5", rp->r_o5);
	PANICNVADD(pnv, "o6", rp->r_o6);
	PANICNVADD(pnv, "o7", rp->r_o7);
	PANICNVADD(pnv, "pc", (ulong_t)rp->r_pc);
	PANICNVADD(pnv, "npc", (ulong_t)rp->r_npc);
	PANICNVADD(pnv, "y", (uint32_t)rp->r_y);

	PANICNVSET(pdp, pnv);
}
↓ open down ↓ |
993 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX