Print this page
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/watchpoint.c
+++ new/usr/src/uts/common/os/watchpoint.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #pragma ident "%Z%%M% %I% %E% SMI"
28 28
29 29 #include <sys/types.h>
30 30 #include <sys/t_lock.h>
31 31 #include <sys/param.h>
32 32 #include <sys/cred.h>
33 33 #include <sys/debug.h>
34 34 #include <sys/inline.h>
35 35 #include <sys/kmem.h>
36 36 #include <sys/proc.h>
37 37 #include <sys/regset.h>
38 38 #include <sys/sysmacros.h>
39 39 #include <sys/systm.h>
40 40 #include <sys/prsystm.h>
41 41 #include <sys/buf.h>
42 42 #include <sys/signal.h>
43 43 #include <sys/user.h>
44 44 #include <sys/cpuvar.h>
45 45
46 46 #include <sys/fault.h>
47 47 #include <sys/syscall.h>
48 48 #include <sys/procfs.h>
49 49 #include <sys/cmn_err.h>
50 50 #include <sys/stack.h>
51 51 #include <sys/watchpoint.h>
52 52 #include <sys/copyops.h>
53 53 #include <sys/schedctl.h>
54 54
55 55 #include <sys/mman.h>
56 56 #include <vm/as.h>
57 57 #include <vm/seg.h>
58 58
/*
 * Copy ops vector for watchpoints.
 *
 * While a process has active watchpoints, these wrappers are installed in
 * place of the normal copyin()/copyout() family so that kernel accesses to
 * watched user pages trigger the watchpoint stop/signal logic instead of
 * silently faulting.  The slot order must match struct copyops.
 */
static int watch_copyin(const void *, void *, size_t);
static int watch_xcopyin(const void *, void *, size_t);
static int watch_copyout(const void *, void *, size_t);
static int watch_xcopyout(const void *, void *, size_t);
static int watch_copyinstr(const char *, char *, size_t, size_t *);
static int watch_copyoutstr(const char *, char *, size_t, size_t *);
static int watch_fuword8(const void *, uint8_t *);
static int watch_fuword16(const void *, uint16_t *);
static int watch_fuword32(const void *, uint32_t *);
static int watch_suword8(void *, uint8_t);
static int watch_suword16(void *, uint16_t);
static int watch_suword32(void *, uint32_t);
static int watch_physio(int (*)(struct buf *), struct buf *,
    dev_t, int, void (*)(struct buf *), struct uio *);
#ifdef _LP64
static int watch_fuword64(const void *, uint64_t *);
static int watch_suword64(void *, uint64_t);
#endif

struct copyops watch_copyops = {
	watch_copyin,
	watch_xcopyin,
	watch_copyout,
	watch_xcopyout,
	watch_copyinstr,
	watch_copyoutstr,
	watch_fuword8,
	watch_fuword16,
	watch_fuword32,
#ifdef _LP64
	watch_fuword64,
#else
	NULL,		/* no 64-bit fetch on 32-bit kernels */
#endif
	watch_suword8,
	watch_suword16,
	watch_suword32,
#ifdef _LP64
	watch_suword64,
#else
	NULL,		/* no 64-bit store on 32-bit kernels */
#endif
	watch_physio
};
106 106
107 107 /*
108 108 * Map the 'rw' argument to a protection flag.
109 109 */
110 110 static int
111 111 rw_to_prot(enum seg_rw rw)
112 112 {
113 113 switch (rw) {
114 114 case S_EXEC:
115 115 return (PROT_EXEC);
116 116 case S_READ:
117 117 return (PROT_READ);
118 118 case S_WRITE:
119 119 return (PROT_WRITE);
120 120 default:
121 121 return (PROT_NONE); /* can't happen */
122 122 }
123 123 }
124 124
125 125 /*
126 126 * Map the 'rw' argument to an index into an array of exec/write/read things.
127 127 * The index follows the precedence order: exec .. write .. read
128 128 */
129 129 static int
130 130 rw_to_index(enum seg_rw rw)
131 131 {
132 132 switch (rw) {
133 133 default: /* default case "can't happen" */
134 134 case S_EXEC:
135 135 return (0);
136 136 case S_WRITE:
137 137 return (1);
138 138 case S_READ:
139 139 return (2);
140 140 }
141 141 }
142 142
/*
 * Map an index back to a seg_rw.
 * Slot 3 is the extra S_READ slot used by do_watch_step() when two
 * read traps occur on the same instruction.
 */
static enum seg_rw S_rw[4] = {
	S_EXEC,
	S_WRITE,
	S_READ,
	S_READ,
};

/* Indices into the wp_kmap[]/wp_umap[] reference-count arrays. */
#define	X	0
#define	W	1
#define	R	2
/* total reference count held on a watched page for one of the maps */
#define	sum(a)	(a[X] + a[W] + a[R])
157 157
/*
 * Common code for pr_mappage() and pr_unmappage().
 *
 * Walks the watched pages (as->a_wpage) overlapping [addr, addr + size)
 * and, for each page whose current protections would block the requested
 * access 'rw':
 *   - mapin != 0: bumps the per-page reference count for this access type
 *     (wp_kmap[]/wp_umap[], indexed by rw_to_index(rw)) and restores enough
 *     of the original protections for the access to succeed, after holding
 *     all other lwps in the kernel via holdwatch();
 *   - mapin == 0: drops the reference count and, once all counts reach
 *     zero, re-establishes the watchpoint protections and lets the other
 *     lwps continue.
 * 'kernel' non-zero means the access is performed by the kernel on behalf
 * of the process (copyin/copyout), so PROT_USER is not required.
 *
 * Returns the number of watched pages whose protections were adjusted.
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here. Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.
	 *
	 * p_maplock prevents simultaneous execution of this function. Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed. But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once. See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	/* start at the watched page containing addr, or the next one after */
	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
	    pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		/*
		 * Recompute the protections this page should carry from the
		 * updated reference counts.
		 */
		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}

		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			/*
			 * Upgrade to the writer lock before changing segment
			 * protections.
			 */
			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			SEGOP_GETPROT(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}
351 347
352 348 /*
353 349 * Restore the original page protections on an address range.
354 350 * If 'kernel' is non-zero, just do it for the kernel.
355 351 * pr_mappage() returns non-zero if it actually changed anything.
356 352 *
357 353 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
358 354 * but pairs may be nested within other pairs. The reference counts
359 355 * sort it all out. See pr_do_mappage(), above.
360 356 */
361 357 static int
362 358 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
363 359 {
364 360 return (pr_do_mappage(addr, size, 1, rw, kernel));
365 361 }
366 362
367 363 /*
368 364 * Set the modified page protections on a watched page.
369 365 * Inverse of pr_mappage().
370 366 * Needs to be called only if pr_mappage() returned non-zero.
371 367 */
372 368 static void
373 369 pr_unmappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
374 370 {
375 371 (void) pr_do_mappage(addr, size, 0, rw, kernel);
376 372 }
377 373
/*
 * Function called by an lwp after it resumes from stop().
 *
 * Applies the deferred watched-page protection changes queued on
 * p->p_wprot (pages flagged WP_SETPROT), then frees any watched_page
 * structure that no longer covers a watched area.
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)		/* nothing deferred */
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		/*
		 * Skip pages with no segment at vaddr and pages currently
		 * mapped in for a pending access (WP_NOWATCH).
		 */
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}
436 432
437 433
438 434
439 435 /* Must be called with as lock held */
440 436 int
441 437 pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
442 438 {
443 439 register struct watched_page *pwp;
444 440 struct watched_page tpw;
445 441 uint_t prot;
446 442 int rv = 0;
447 443
448 444 switch (rw) {
449 445 case S_READ:
450 446 case S_WRITE:
451 447 case S_EXEC:
452 448 break;
453 449 default:
454 450 return (0);
455 451 }
456 452
457 453 /*
458 454 * as->a_wpage can only be modified while the process is totally
459 455 * stopped. We need, and should use, no locks here.
460 456 */
461 457 if (as != &kas && avl_numnodes(&as->a_wpage) != 0) {
462 458 tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
463 459 pwp = avl_find(&as->a_wpage, &tpw, NULL);
464 460 if (pwp != NULL) {
465 461 ASSERT(addr >= pwp->wp_vaddr &&
466 462 addr < pwp->wp_vaddr + PAGESIZE);
467 463 if (pwp->wp_oprot != 0) {
468 464 prot = pwp->wp_prot;
469 465 switch (rw) {
470 466 case S_READ:
471 467 rv = ((prot & (PROT_USER|PROT_READ))
472 468 != (PROT_USER|PROT_READ));
473 469 break;
474 470 case S_WRITE:
475 471 rv = ((prot & (PROT_USER|PROT_WRITE))
476 472 != (PROT_USER|PROT_WRITE));
477 473 break;
478 474 case S_EXEC:
479 475 rv = ((prot & (PROT_USER|PROT_EXEC))
480 476 != (PROT_USER|PROT_EXEC));
481 477 break;
482 478 default:
483 479 /* can't happen! */
484 480 break;
485 481 }
486 482 }
487 483 }
488 484 }
489 485
490 486 return (rv);
491 487 }
492 488
493 489
494 490 /*
495 491 * trap() calls here to determine if a fault is in a watched page.
496 492 * We return nonzero if this is true and the load/store would fail.
↓ open down ↓ |
297 lines elided |
↑ open up ↑ |
497 493 */
498 494 int
499 495 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
500 496 {
501 497 struct as *as = curproc->p_as;
502 498 int rv;
503 499
504 500 if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
505 501 return (0);
506 502
507 - /* Grab the lock because of XHAT (see comment in pr_mappage()) */
508 503 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
509 504 rv = pr_is_watchpage_as(addr, rw, as);
510 505 AS_LOCK_EXIT(as, &as->a_lock);
511 506
512 507 return (rv);
513 508 }
514 509
515 510
516 511
/*
 * trap() calls here to determine if a fault is a watchpoint.
 *
 * *paddr (in/out): faulting address; on return, advanced to the start of
 *	the matched watched area if that area begins above the original
 *	address.
 * *pta (out): set non-zero if the matched area has WA_TRAPAFTER semantics.
 * size: length of the access being checked.
 * *plen (out, optional): bytes remaining in the matched watched area.
 * rw: the access type (read/write/exec).
 *
 * Returns 0 if no watched area with matching flags overlaps the access,
 * otherwise TRAP_RWATCH, TRAP_WWATCH or TRAP_XWATCH.
 */
int
pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
	enum seg_rw rw)
{
	proc_t *p = curproc;
	caddr_t addr = *paddr;
	caddr_t eaddr = addr + size;
	register struct watched_area *pwa;
	struct watched_area twa;
	int rv = 0;
	int ta = 0;
	size_t len = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		/* not an access type we watch */
		*pta = 0;
		return (0);
	}

	/*
	 * p->p_warea is protected by p->p_lock.
	 */
	mutex_enter(&p->p_lock);

	/* BEGIN CSTYLED */
	/*
	 * This loop is somewhat complicated because the fault region can span
	 * multiple watched areas. For example:
	 *
	 *           addr              eaddr
	 *		+-----------------+
	 *		|  fault region   |
	 *	+-------+--------+   +----+------------+
	 *	| prot not right |   |  prot correct   |
	 *	+----------------+   +-----------------+
	 *	wa_vaddr	wa_eaddr
	 *			     wa_vaddr	       wa_eaddr
	 *
	 * We start at the area greater than or equal to the starting address.
	 * As long as some portion of the fault region overlaps the current
	 * area, we continue checking permissions until we find an appropriate
	 * match.
	 */
	/* END CSTYLED */
	twa.wa_vaddr = addr;
	twa.wa_eaddr = eaddr;

	for (pwa = pr_find_watched_area(p, &twa, NULL);
	    pwa != NULL && eaddr > pwa->wa_vaddr && addr < pwa->wa_eaddr;
	    pwa = AVL_NEXT(&p->p_warea, pwa)) {

		/* does this area watch the kind of access being made? */
		switch (rw) {
		case S_READ:
			if (pwa->wa_flags & WA_READ)
				rv = TRAP_RWATCH;
			break;
		case S_WRITE:
			if (pwa->wa_flags & WA_WRITE)
				rv = TRAP_WWATCH;
			break;
		case S_EXEC:
			if (pwa->wa_flags & WA_EXEC)
				rv = TRAP_XWATCH;
			break;
		default:
			/* can't happen */
			break;
		}

		/*
		 * A matching watched area was found: record where it starts,
		 * how much of it remains, and whether it is trapafter, then
		 * stop looking.  (If rv == 0, keep scanning the next area.)
		 */
		if (rv != 0) {
			if (addr < pwa->wa_vaddr)
				addr = pwa->wa_vaddr;
			len = pwa->wa_eaddr - addr;
			if (pwa->wa_flags & WA_TRAPAFTER)
				ta = 1;
			break;
		}
	}

	mutex_exit(&p->p_lock);

	*paddr = addr;
	*pta = ta;
	if (plen != NULL)
		*plen = len;
	return (rv);
}
615 610
/*
 * Set up to perform a single-step at user level for the
 * case of a trapafter watchpoint.  Called from trap().
 *
 * Records the watched access (address, size, watchcode, pc) in the lwp's
 * lwp_watch[] slot for this access type, maps the watched page in via
 * pr_mappage() so the instruction can complete, and requests a
 * single-step with prstep().
 */
void
do_watch_step(caddr_t vaddr, size_t sz, enum seg_rw rw,
	int watchcode, greg_t pc)
{
	register klwp_t *lwp = ttolwp(curthread);
	struct lwp_watch *pw = &lwp->lwp_watch[rw_to_index(rw)];

	/*
	 * Check to see if we are already performing this special
	 * watchpoint single-step. We must not do pr_mappage() twice.
	 */

	/* special check for two read traps on the same instruction */
	if (rw == S_READ && pw->wpaddr != NULL &&
	    !(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize)) {
		ASSERT(lwp->lwp_watchtrap != 0);
		pw++;	/* use the extra S_READ struct */
	}

	if (pw->wpaddr != NULL) {
		/* already set up for this access; just fill in code/pc */
		ASSERT(lwp->lwp_watchtrap != 0);
		ASSERT(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize);
		if (pw->wpcode == 0) {
			pw->wpcode = watchcode;
			pw->wppc = pc;
		}
	} else {
		int mapped = pr_mappage(vaddr, sz, rw, 0);
		prstep(lwp, 1);
		lwp->lwp_watchtrap = 1;
		pw->wpaddr = vaddr;
		pw->wpsize = sz;
		pw->wpcode = watchcode;
		pw->wpmapped = mapped;
		pw->wppc = pc;
	}
}
657 652
/*
 * Undo the effects of do_watch_step().
 * Called from trap() after the single-step is finished.
 * Also called from issig_forreal() and stop() with a NULL
 * argument to avoid having these things set more than once.
 *
 * Returns FLTWATCH if a watchpoint trap should be reported (filling in
 * *sip when sip != NULL), otherwise 0.
 */
int
undo_watch_step(k_siginfo_t *sip)
{
	register klwp_t *lwp = ttolwp(curthread);
	int fault = 0;

	if (lwp->lwp_watchtrap) {
		struct lwp_watch *pw = lwp->lwp_watch;
		int i;

		/*
		 * Four slots: exec, write, read, plus the extra S_READ slot
		 * used by do_watch_step(); S_rw[] maps the index back.
		 */
		for (i = 0; i < 4; i++, pw++) {
			if (pw->wpaddr == NULL)
				continue;
			if (pw->wpmapped)
				pr_unmappage(pw->wpaddr, pw->wpsize, S_rw[i],
				    0);
			if (pw->wpcode != 0) {
				if (sip != NULL) {
					sip->si_signo = SIGTRAP;
					sip->si_code = pw->wpcode;
					sip->si_addr = pw->wpaddr;
					sip->si_trapafter = 1;
					sip->si_pc = (caddr_t)pw->wppc;
				}
				fault = FLTWATCH;
				pw->wpcode = 0;
			}
			pw->wpaddr = NULL;
			pw->wpsize = 0;
			pw->wpmapped = 0;
		}
		lwp->lwp_watchtrap = 0;
	}

	return (fault);
}
700 695
/*
 * Handle a watchpoint that occurs while doing copyin()
 * or copyout() in a system call.
 * Return non-zero if the fault or signal is cleared
 * by a debugger while the lwp is stopped.
 */
static int
sys_watchpoint(caddr_t addr, int watchcode, int ta)
{
	extern greg_t getuserpc(void);	/* XXX header file */
	k_sigset_t smask;
	register proc_t *p = ttoproc(curthread);
	register klwp_t *lwp = ttolwp(curthread);
	register sigqueue_t *sqp;
	int rval;

	/* assert no locks are held */
	/* ASSERT(curthread->t_nlocks == 0); */

	/* build the SIGTRAP siginfo describing the watched access */
	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = watchcode;
	sqp->sq_info.si_addr = addr;
	sqp->sq_info.si_trapafter = ta;
	sqp->sq_info.si_pc = (caddr_t)getuserpc();

	mutex_enter(&p->p_lock);

	/* this will be tested and cleared by the caller */
	lwp->lwp_sysabort = 0;

	if (prismember(&p->p_fltmask, FLTWATCH)) {
		/* the process is being traced for FLTWATCH: stop here */
		lwp->lwp_curflt = (uchar_t)FLTWATCH;
		lwp->lwp_siginfo = sqp->sq_info;
		stop(PR_FAULTED, FLTWATCH);
		if (lwp->lwp_curflt == 0) {
			/* debugger cleared the fault while we were stopped */
			mutex_exit(&p->p_lock);
			kmem_free(sqp, sizeof (sigqueue_t));
			return (1);
		}
		lwp->lwp_curflt = 0;
	}

	/*
	 * post the SIGTRAP signal.
	 * Block all other signals so we only stop showing SIGTRAP.
	 */
	if (signal_is_blocked(curthread, SIGTRAP) ||
	    sigismember(&p->p_ignore, SIGTRAP)) {
		/* SIGTRAP is blocked or ignored, forget the rest. */
		mutex_exit(&p->p_lock);
		kmem_free(sqp, sizeof (sigqueue_t));
		return (0);
	}
	sigdelq(p, curthread, SIGTRAP);
	sigaddqa(p, curthread, sqp);
	schedctl_finish_sigblock(curthread);
	smask = curthread->t_hold;
	sigfillset(&curthread->t_hold);
	sigdiffset(&curthread->t_hold, &cantmask);
	sigdelset(&curthread->t_hold, SIGTRAP);
	mutex_exit(&p->p_lock);

	/* take the signal; non-zero rval means it was cleared by a debugger */
	rval = ((ISSIG_FAST(curthread, lwp, p, FORREAL))? 0 : 1);

	/* restore the original signal mask */
	mutex_enter(&p->p_lock);
	curthread->t_hold = smask;
	mutex_exit(&p->p_lock);

	return (rval);
}
773 768
774 769 /*
775 770 * Wrappers for the copyin()/copyout() functions to deal
776 771 * with watchpoints that fire while in system calls.
777 772 */
778 773
779 774 static int
780 775 watch_xcopyin(const void *uaddr, void *kaddr, size_t count)
781 776 {
782 777 klwp_t *lwp = ttolwp(curthread);
783 778 caddr_t watch_uaddr = (caddr_t)uaddr;
784 779 caddr_t watch_kaddr = (caddr_t)kaddr;
785 780 int error = 0;
786 781 label_t ljb;
787 782 size_t part;
788 783 int mapped;
789 784
790 785 while (count && error == 0) {
791 786 int watchcode;
792 787 caddr_t vaddr;
793 788 size_t len;
794 789 int ta;
795 790
796 791 if ((part = PAGESIZE -
797 792 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
798 793 part = count;
799 794
800 795 if (!pr_is_watchpage(watch_uaddr, S_READ))
801 796 watchcode = 0;
802 797 else {
803 798 vaddr = watch_uaddr;
804 799 watchcode = pr_is_watchpoint(&vaddr, &ta,
805 800 part, &len, S_READ);
806 801 if (watchcode && ta == 0)
807 802 part = vaddr - watch_uaddr;
808 803 }
809 804
810 805 /*
811 806 * Copy the initial part, up to a watched address, if any.
812 807 */
813 808 if (part != 0) {
814 809 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
815 810 if (on_fault(&ljb))
816 811 error = EFAULT;
817 812 else
818 813 copyin_noerr(watch_uaddr, watch_kaddr, part);
819 814 no_fault();
820 815 if (mapped)
821 816 pr_unmappage(watch_uaddr, part, S_READ, 1);
822 817 watch_uaddr += part;
823 818 watch_kaddr += part;
824 819 count -= part;
825 820 }
826 821 /*
827 822 * If trapafter was specified, then copy through the
828 823 * watched area before taking the watchpoint trap.
829 824 */
830 825 while (count && watchcode && ta && len > part && error == 0) {
831 826 len -= part;
832 827 if ((part = PAGESIZE) > count)
833 828 part = count;
834 829 if (part > len)
835 830 part = len;
836 831 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
837 832 if (on_fault(&ljb))
838 833 error = EFAULT;
839 834 else
840 835 copyin_noerr(watch_uaddr, watch_kaddr, part);
841 836 no_fault();
842 837 if (mapped)
843 838 pr_unmappage(watch_uaddr, part, S_READ, 1);
844 839 watch_uaddr += part;
845 840 watch_kaddr += part;
846 841 count -= part;
847 842 }
848 843
849 844 error:
850 845 /* if we hit a watched address, do the watchpoint logic */
851 846 if (watchcode &&
852 847 (!sys_watchpoint(vaddr, watchcode, ta) ||
853 848 lwp->lwp_sysabort)) {
854 849 lwp->lwp_sysabort = 0;
855 850 error = EFAULT;
856 851 break;
857 852 }
858 853 }
859 854
860 855 return (error);
861 856 }
862 857
863 858 static int
864 859 watch_copyin(const void *kaddr, void *uaddr, size_t count)
865 860 {
866 861 return (watch_xcopyin(kaddr, uaddr, count) ? -1 : 0);
867 862 }
868 863
869 864
/*
 * copyout() replacement used while the process has active watchpoints.
 *
 * Copies 'count' bytes from kernel address 'kaddr' to user address 'uaddr'
 * in page-bounded chunks, temporarily restoring normal page protections
 * around each chunk; sys_watchpoint() handles any watched address hit.
 * Returns 0 on success or EFAULT on fault/abort.
 */
static int
watch_xcopyout(const void *kaddr, void *uaddr, size_t count)
{
	klwp_t *lwp = ttolwp(curthread);
	caddr_t watch_uaddr = (caddr_t)uaddr;
	caddr_t watch_kaddr = (caddr_t)kaddr;
	int error = 0;
	label_t ljb;

	while (count && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		int ta;
		int mapped;

		/*
		 * Limit this pass to the remainder of the page.
		 * NOTE(review): the offset is taken from the original
		 * 'uaddr', not the advancing 'watch_uaddr' -- verify
		 * against the upstream source whether 'watch_uaddr' is
		 * intended here.
		 */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
			part = count;

		if (!pr_is_watchpage(watch_uaddr, S_WRITE))
			watchcode = 0;
		else {
			vaddr = watch_uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			if (watchcode) {
				if (ta == 0)
					/* stop short of the watched address */
					part = vaddr - watch_uaddr;
				else {
					/* copy through the watched area */
					len += vaddr - watch_uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				copyout_noerr(watch_kaddr, watch_uaddr, part);
			no_fault();
			if (mapped)
				pr_unmappage(watch_uaddr, part, S_WRITE, 1);
			watch_uaddr += part;
			watch_kaddr += part;
			count -= part;
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (count && watchcode && ta && len > part && error == 0) {
			len -= part;
			if ((part = PAGESIZE) > count)
				part = count;
			if (part > len)
				part = len;
			mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				copyout_noerr(watch_kaddr, watch_uaddr, part);
			no_fault();
			if (mapped)
				pr_unmappage(watch_uaddr, part, S_WRITE, 1);
			watch_uaddr += part;
			watch_kaddr += part;
			count -= part;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}
	}

	return (error);
}
960 955
961 956 static int
962 957 watch_copyout(const void *kaddr, void *uaddr, size_t count)
963 958 {
964 959 return (watch_xcopyout(kaddr, uaddr, count) ? -1 : 0);
965 960 }
966 961
/*
 * copyinstr() replacement used while the process has active watchpoints.
 *
 * Copies a NUL-terminated string of at most 'maxlength' bytes from user
 * address 'uaddr' to kernel address 'kaddr', page-bounded chunk by chunk,
 * with the same pr_mappage()/sys_watchpoint() handling as watch_xcopyin().
 * On success *lencopied (if non-NULL) is set to the bytes copied including
 * the terminating NUL.  Returns 0, ENAMETOOLONG, or EFAULT.
 */
static int
watch_copyinstr(
	const char *uaddr,
	char *kaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		/* limit this pass to the remainder of the current page */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage((caddr_t)uaddr, S_READ))
			watchcode = 0;
		else {
			vaddr = (caddr_t)uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_READ);
			if (watchcode) {
				if (ta == 0)
					/* stop short of the watched address */
					part = vaddr - uaddr;
				else {
					/* copy through the watched area */
					len += vaddr - uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* ENAMETOOLONG within a chunk just means "keep going" */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* a short copy or a copied NUL means the string is done */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1080 1075
/*
 * Copy a NUL-terminated string from kernel to user space while honoring
 * watched pages.  The copy proceeds in page-bounded chunks so each chunk
 * can be checked against the process's watched areas before it is written.
 * Returns 0, EFAULT (fault or watchpoint abort), or ENAMETOOLONG, like
 * copyoutstr(); *lencopied (if non-NULL) gets the number of bytes copied.
 */
static int
watch_copyoutstr(
	const char *kaddr,
	char *uaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		/* Limit this chunk to the remainder of the current page. */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage(uaddr, S_WRITE)) {
			watchcode = 0;
		} else {
			vaddr = uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			/* trap-before: stop the copy at the watched address */
			if (watchcode && ta == 0)
				part = vaddr - uaddr;
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/*
			 * ENAMETOOLONG from a single chunk only means the
			 * chunk filled up; keep going while resid remains.
			 */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* A short chunk or a copied NUL means the string is done. */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1187 1182
1188 1183 typedef int (*fuword_func)(const void *, void *);
1189 1184
1190 1185 /*
1191 1186 * Generic form of watch_fuword8(), watch_fuword16(), etc.
1192 1187 */
1193 1188 static int
1194 1189 watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
1195 1190 {
1196 1191 klwp_t *lwp = ttolwp(curthread);
1197 1192 int watchcode;
1198 1193 caddr_t vaddr;
1199 1194 int mapped;
1200 1195 int rv = 0;
1201 1196 int ta;
1202 1197 label_t ljb;
1203 1198
1204 1199 for (;;) {
1205 1200
1206 1201 vaddr = (caddr_t)addr;
1207 1202 watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
1208 1203 if (watchcode == 0 || ta != 0) {
1209 1204 mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
1210 1205 if (on_fault(&ljb))
1211 1206 rv = -1;
1212 1207 else
1213 1208 (*func)(addr, dst);
1214 1209 no_fault();
1215 1210 if (mapped)
1216 1211 pr_unmappage((caddr_t)addr, size, S_READ, 1);
1217 1212 }
1218 1213 if (watchcode &&
1219 1214 (!sys_watchpoint(vaddr, watchcode, ta) ||
1220 1215 lwp->lwp_sysabort)) {
1221 1216 lwp->lwp_sysabort = 0;
1222 1217 rv = -1;
1223 1218 break;
1224 1219 }
1225 1220 if (watchcode == 0 || ta != 0)
1226 1221 break;
1227 1222 }
1228 1223
1229 1224 return (rv);
1230 1225 }
1231 1226
1232 1227 static int
1233 1228 watch_fuword8(const void *addr, uint8_t *dst)
1234 1229 {
1235 1230 return (watch_fuword(addr, dst, (fuword_func)fuword8_noerr,
1236 1231 sizeof (*dst)));
1237 1232 }
1238 1233
1239 1234 static int
1240 1235 watch_fuword16(const void *addr, uint16_t *dst)
1241 1236 {
1242 1237 return (watch_fuword(addr, dst, (fuword_func)fuword16_noerr,
1243 1238 sizeof (*dst)));
1244 1239 }
1245 1240
1246 1241 static int
1247 1242 watch_fuword32(const void *addr, uint32_t *dst)
1248 1243 {
1249 1244 return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
1250 1245 sizeof (*dst)));
1251 1246 }
1252 1247
1253 1248 #ifdef _LP64
1254 1249 static int
1255 1250 watch_fuword64(const void *addr, uint64_t *dst)
1256 1251 {
1257 1252 return (watch_fuword(addr, dst, (fuword_func)fuword64_noerr,
1258 1253 sizeof (*dst)));
1259 1254 }
1260 1255 #endif
1261 1256
1262 1257
1263 1258 static int
1264 1259 watch_suword8(void *addr, uint8_t value)
1265 1260 {
1266 1261 klwp_t *lwp = ttolwp(curthread);
1267 1262 int watchcode;
1268 1263 caddr_t vaddr;
1269 1264 int mapped;
1270 1265 int rv = 0;
1271 1266 int ta;
1272 1267 label_t ljb;
1273 1268
1274 1269 for (;;) {
1275 1270
1276 1271 vaddr = (caddr_t)addr;
1277 1272 watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1278 1273 S_WRITE);
1279 1274 if (watchcode == 0 || ta != 0) {
1280 1275 mapped = pr_mappage((caddr_t)addr, sizeof (value),
1281 1276 S_WRITE, 1);
1282 1277 if (on_fault(&ljb))
1283 1278 rv = -1;
1284 1279 else
1285 1280 suword8_noerr(addr, value);
1286 1281 no_fault();
1287 1282 if (mapped)
1288 1283 pr_unmappage((caddr_t)addr, sizeof (value),
1289 1284 S_WRITE, 1);
1290 1285 }
1291 1286 if (watchcode &&
1292 1287 (!sys_watchpoint(vaddr, watchcode, ta) ||
1293 1288 lwp->lwp_sysabort)) {
1294 1289 lwp->lwp_sysabort = 0;
1295 1290 rv = -1;
1296 1291 break;
1297 1292 }
1298 1293 if (watchcode == 0 || ta != 0)
1299 1294 break;
1300 1295 }
1301 1296
1302 1297 return (rv);
1303 1298 }
1304 1299
1305 1300 static int
1306 1301 watch_suword16(void *addr, uint16_t value)
1307 1302 {
1308 1303 klwp_t *lwp = ttolwp(curthread);
1309 1304 int watchcode;
1310 1305 caddr_t vaddr;
1311 1306 int mapped;
1312 1307 int rv = 0;
1313 1308 int ta;
1314 1309 label_t ljb;
1315 1310
1316 1311 for (;;) {
1317 1312
1318 1313 vaddr = (caddr_t)addr;
1319 1314 watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1320 1315 S_WRITE);
1321 1316 if (watchcode == 0 || ta != 0) {
1322 1317 mapped = pr_mappage((caddr_t)addr, sizeof (value),
1323 1318 S_WRITE, 1);
1324 1319 if (on_fault(&ljb))
1325 1320 rv = -1;
1326 1321 else
1327 1322 suword16_noerr(addr, value);
1328 1323 no_fault();
1329 1324 if (mapped)
1330 1325 pr_unmappage((caddr_t)addr, sizeof (value),
1331 1326 S_WRITE, 1);
1332 1327 }
1333 1328 if (watchcode &&
1334 1329 (!sys_watchpoint(vaddr, watchcode, ta) ||
1335 1330 lwp->lwp_sysabort)) {
1336 1331 lwp->lwp_sysabort = 0;
1337 1332 rv = -1;
1338 1333 break;
1339 1334 }
1340 1335 if (watchcode == 0 || ta != 0)
1341 1336 break;
1342 1337 }
1343 1338
1344 1339 return (rv);
1345 1340 }
1346 1341
1347 1342 static int
1348 1343 watch_suword32(void *addr, uint32_t value)
1349 1344 {
1350 1345 klwp_t *lwp = ttolwp(curthread);
1351 1346 int watchcode;
1352 1347 caddr_t vaddr;
1353 1348 int mapped;
1354 1349 int rv = 0;
1355 1350 int ta;
1356 1351 label_t ljb;
1357 1352
1358 1353 for (;;) {
1359 1354
1360 1355 vaddr = (caddr_t)addr;
1361 1356 watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1362 1357 S_WRITE);
1363 1358 if (watchcode == 0 || ta != 0) {
1364 1359 mapped = pr_mappage((caddr_t)addr, sizeof (value),
1365 1360 S_WRITE, 1);
1366 1361 if (on_fault(&ljb))
1367 1362 rv = -1;
1368 1363 else
1369 1364 suword32_noerr(addr, value);
1370 1365 no_fault();
1371 1366 if (mapped)
1372 1367 pr_unmappage((caddr_t)addr, sizeof (value),
1373 1368 S_WRITE, 1);
1374 1369 }
1375 1370 if (watchcode &&
1376 1371 (!sys_watchpoint(vaddr, watchcode, ta) ||
1377 1372 lwp->lwp_sysabort)) {
1378 1373 lwp->lwp_sysabort = 0;
1379 1374 rv = -1;
1380 1375 break;
1381 1376 }
1382 1377 if (watchcode == 0 || ta != 0)
1383 1378 break;
1384 1379 }
1385 1380
1386 1381 return (rv);
1387 1382 }
1388 1383
1389 1384 #ifdef _LP64
1390 1385 static int
1391 1386 watch_suword64(void *addr, uint64_t value)
1392 1387 {
1393 1388 klwp_t *lwp = ttolwp(curthread);
1394 1389 int watchcode;
1395 1390 caddr_t vaddr;
1396 1391 int mapped;
1397 1392 int rv = 0;
1398 1393 int ta;
1399 1394 label_t ljb;
1400 1395
1401 1396 for (;;) {
1402 1397
1403 1398 vaddr = (caddr_t)addr;
1404 1399 watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
1405 1400 S_WRITE);
1406 1401 if (watchcode == 0 || ta != 0) {
1407 1402 mapped = pr_mappage((caddr_t)addr, sizeof (value),
1408 1403 S_WRITE, 1);
1409 1404 if (on_fault(&ljb))
1410 1405 rv = -1;
1411 1406 else
1412 1407 suword64_noerr(addr, value);
1413 1408 no_fault();
1414 1409 if (mapped)
1415 1410 pr_unmappage((caddr_t)addr, sizeof (value),
1416 1411 S_WRITE, 1);
1417 1412 }
1418 1413 if (watchcode &&
1419 1414 (!sys_watchpoint(vaddr, watchcode, ta) ||
1420 1415 lwp->lwp_sysabort)) {
1421 1416 lwp->lwp_sysabort = 0;
1422 1417 rv = -1;
1423 1418 break;
1424 1419 }
1425 1420 if (watchcode == 0 || ta != 0)
1426 1421 break;
1427 1422 }
1428 1423
1429 1424 return (rv);
1430 1425 }
1431 1426 #endif /* _LP64 */
1432 1427
1433 1428 /*
1434 1429 * Check for watched addresses in the given address space.
1435 1430 * Return 1 if this is true, otherwise 0.
1436 1431 */
1437 1432 static int
1438 1433 pr_is_watched(caddr_t base, size_t len, int rw)
1439 1434 {
1440 1435 caddr_t saddr = (caddr_t)((uintptr_t)base & (uintptr_t)PAGEMASK);
1441 1436 caddr_t eaddr = base + len;
1442 1437 caddr_t paddr;
1443 1438
1444 1439 for (paddr = saddr; paddr < eaddr; paddr += PAGESIZE) {
1445 1440 if (pr_is_watchpage(paddr, rw))
1446 1441 return (1);
1447 1442 }
1448 1443
1449 1444 return (0);
1450 1445 }
1451 1446
/*
 * Wrapper for the physio() function.
 * Splits one uio operation with multiple iovecs into uio operations with
 * only one iovecs to do the watchpoint handling separately for each iovecs.
 * Returns 0 or an errno; EFAULT when a watchpoint aborts the operation.
 */
static int
watch_physio(int (*strat)(struct buf *), struct buf *bp, dev_t dev,
    int rw, void (*mincnt)(struct buf *), struct uio *uio)
{
	struct uio auio;
	struct iovec *iov;
	caddr_t base;
	size_t len;
	int seg_rw;
	int error = 0;

	/* Kernel-space buffers cannot hit user watchpoints: fast path. */
	if (uio->uio_segflg == UIO_SYSSPACE)
		return (default_physio(strat, bp, dev, rw, mincnt, uio));

	/* A device read writes user memory, and vice versa. */
	seg_rw = (rw == B_READ) ? S_WRITE : S_READ;

	while (uio->uio_iovcnt > 0) {
		if (uio->uio_resid == 0) {
			/*
			 * Make sure to return the uio structure with the
			 * same values as default_physio() does.
			 */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}

		iov = uio->uio_iov;
		len = MIN(iov->iov_len, uio->uio_resid);

		/* Build a single-iovec sub-request for this segment. */
		auio.uio_iovcnt = 1;
		auio.uio_iov = iov;
		auio.uio_resid = len;
		auio.uio_loffset = uio->uio_loffset;
		auio.uio_llimit = uio->uio_llimit;
		auio.uio_fmode = uio->uio_fmode;
		auio.uio_extflg = uio->uio_extflg;
		auio.uio_segflg = uio->uio_segflg;

		base = iov->iov_base;

		if (!pr_is_watched(base, len, seg_rw)) {
			/*
			 * The given memory references don't cover a
			 * watched page.
			 */
			error = default_physio(strat, bp, dev, rw, mincnt,
			    &auio);

			/* Update uio with values from auio. */
			len -= auio.uio_resid;
			uio->uio_resid -= len;
			uio->uio_loffset += len;

			/*
			 * Return if an error occurred or not all data
			 * was copied.
			 */
			if (auio.uio_resid || error)
				break;
			uio->uio_iov++;
			uio->uio_iovcnt--;
		} else {
			int mapped, watchcode, ta;
			caddr_t vaddr = base;
			klwp_t *lwp = ttolwp(curthread);

			watchcode = pr_is_watchpoint(&vaddr, &ta, len,
			    NULL, seg_rw);

			if (watchcode == 0 || ta != 0) {
				/*
				 * Do the io if the given memory references
				 * don't cover a watched area (watchcode=0)
				 * or if WA_TRAPAFTER was specified.
				 */
				mapped = pr_mappage(base, len, seg_rw, 1);
				error = default_physio(strat, bp, dev, rw,
				    mincnt, &auio);
				if (mapped)
					pr_unmappage(base, len, seg_rw, 1);

				len -= auio.uio_resid;
				uio->uio_resid -= len;
				uio->uio_loffset += len;
			}

			/*
			 * If we hit a watched address, do the watchpoint logic.
			 */
			if (watchcode &&
			    (!sys_watchpoint(vaddr, watchcode, ta) ||
			    lwp->lwp_sysabort)) {
				lwp->lwp_sysabort = 0;
				return (EFAULT);
			}

			/*
			 * Check for errors from default_physio().
			 */
			if (watchcode == 0 || ta != 0) {
				if (auio.uio_resid || error)
					break;
				uio->uio_iov++;
				uio->uio_iovcnt--;
			}
		}
	}

	return (error);
}
1568 1563
1569 1564 int
1570 1565 wa_compare(const void *a, const void *b)
1571 1566 {
1572 1567 const watched_area_t *pa = a;
1573 1568 const watched_area_t *pb = b;
1574 1569
1575 1570 if (pa->wa_vaddr < pb->wa_vaddr)
1576 1571 return (-1);
1577 1572 else if (pa->wa_vaddr > pb->wa_vaddr)
1578 1573 return (1);
1579 1574 else
1580 1575 return (0);
1581 1576 }
1582 1577
1583 1578 int
1584 1579 wp_compare(const void *a, const void *b)
1585 1580 {
1586 1581 const watched_page_t *pa = a;
1587 1582 const watched_page_t *pb = b;
1588 1583
1589 1584 if (pa->wp_vaddr < pb->wp_vaddr)
1590 1585 return (-1);
1591 1586 else if (pa->wp_vaddr > pb->wp_vaddr)
1592 1587 return (1);
1593 1588 else
1594 1589 return (0);
1595 1590 }
1596 1591
1597 1592 /*
1598 1593 * Given an address range, finds the first watched area which overlaps some or
1599 1594 * all of the range.
1600 1595 */
1601 1596 watched_area_t *
1602 1597 pr_find_watched_area(proc_t *p, watched_area_t *pwa, avl_index_t *where)
1603 1598 {
1604 1599 caddr_t vaddr = pwa->wa_vaddr;
1605 1600 caddr_t eaddr = pwa->wa_eaddr;
1606 1601 watched_area_t *wap;
1607 1602 avl_index_t real_where;
1608 1603
1609 1604 /* First, check if there is an exact match. */
1610 1605 wap = avl_find(&p->p_warea, pwa, &real_where);
1611 1606
1612 1607
1613 1608 /* Check to see if we overlap with the previous area. */
1614 1609 if (wap == NULL) {
1615 1610 wap = avl_nearest(&p->p_warea, real_where, AVL_BEFORE);
1616 1611 if (wap != NULL &&
1617 1612 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1618 1613 wap = NULL;
1619 1614 }
1620 1615
1621 1616 /* Try the next area. */
1622 1617 if (wap == NULL) {
1623 1618 wap = avl_nearest(&p->p_warea, real_where, AVL_AFTER);
1624 1619 if (wap != NULL &&
1625 1620 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1626 1621 wap = NULL;
1627 1622 }
1628 1623
1629 1624 if (where)
1630 1625 *where = real_where;
1631 1626
1632 1627 return (wap);
1633 1628 }
1634 1629
/*
 * Turn on watchpoint handling for thread t: flag it with TP_WATCHPT and
 * install the watchpoint-aware copyops so its user-memory accesses go
 * through the watch_* routines above.
 */
void
watch_enable(kthread_id_t t)
{
	t->t_proc_flag |= TP_WATCHPT;
	install_copyops(t, &watch_copyops);
}
1641 1636
/*
 * Turn off watchpoint handling for thread t: clear TP_WATCHPT and
 * restore the default copyops.
 */
void
watch_disable(kthread_id_t t)
{
	t->t_proc_flag &= ~TP_WATCHPT;
	remove_copyops(t);
}
1648 1643
1649 1644 int
1650 1645 copyin_nowatch(const void *uaddr, void *kaddr, size_t len)
1651 1646 {
1652 1647 int watched, ret;
1653 1648
1654 1649 watched = watch_disable_addr(uaddr, len, S_READ);
1655 1650 ret = copyin(uaddr, kaddr, len);
1656 1651 if (watched)
1657 1652 watch_enable_addr(uaddr, len, S_READ);
1658 1653
1659 1654 return (ret);
1660 1655 }
1661 1656
1662 1657 int
1663 1658 copyout_nowatch(const void *kaddr, void *uaddr, size_t len)
1664 1659 {
1665 1660 int watched, ret;
1666 1661
1667 1662 watched = watch_disable_addr(uaddr, len, S_WRITE);
1668 1663 ret = copyout(kaddr, uaddr, len);
1669 1664 if (watched)
1670 1665 watch_enable_addr(uaddr, len, S_WRITE);
1671 1666
1672 1667 return (ret);
1673 1668 }
1674 1669
1675 1670 #ifdef _LP64
1676 1671 int
1677 1672 fuword64_nowatch(const void *addr, uint64_t *value)
1678 1673 {
1679 1674 int watched, ret;
1680 1675
1681 1676 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1682 1677 ret = fuword64(addr, value);
1683 1678 if (watched)
1684 1679 watch_enable_addr(addr, sizeof (*value), S_READ);
1685 1680
1686 1681 return (ret);
1687 1682 }
1688 1683 #endif
1689 1684
1690 1685 int
1691 1686 fuword32_nowatch(const void *addr, uint32_t *value)
1692 1687 {
1693 1688 int watched, ret;
1694 1689
1695 1690 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1696 1691 ret = fuword32(addr, value);
1697 1692 if (watched)
1698 1693 watch_enable_addr(addr, sizeof (*value), S_READ);
1699 1694
1700 1695 return (ret);
1701 1696 }
1702 1697
1703 1698 #ifdef _LP64
1704 1699 int
1705 1700 suword64_nowatch(void *addr, uint64_t value)
1706 1701 {
1707 1702 int watched, ret;
1708 1703
1709 1704 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1710 1705 ret = suword64(addr, value);
1711 1706 if (watched)
1712 1707 watch_enable_addr(addr, sizeof (value), S_WRITE);
1713 1708
1714 1709 return (ret);
1715 1710 }
1716 1711 #endif
1717 1712
1718 1713 int
1719 1714 suword32_nowatch(void *addr, uint32_t value)
1720 1715 {
1721 1716 int watched, ret;
1722 1717
1723 1718 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1724 1719 ret = suword32(addr, value);
1725 1720 if (watched)
1726 1721 watch_enable_addr(addr, sizeof (value), S_WRITE);
1727 1722
1728 1723 return (ret);
1729 1724 }
1730 1725
1731 1726 int
1732 1727 watch_disable_addr(const void *addr, size_t len, enum seg_rw rw)
1733 1728 {
1734 1729 if (pr_watch_active(curproc))
1735 1730 return (pr_mappage((caddr_t)addr, len, rw, 1));
1736 1731 return (0);
1737 1732 }
1738 1733
1739 1734 void
1740 1735 watch_enable_addr(const void *addr, size_t len, enum seg_rw rw)
1741 1736 {
1742 1737 if (pr_watch_active(curproc))
1743 1738 pr_unmappage((caddr_t)addr, len, rw, 1);
1744 1739 }
↓ open down ↓ |
1227 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX