Print this page
patch lower-case-segops
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/os/watchpoint.c
+++ new/usr/src/uts/common/os/watchpoint.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #pragma ident "%Z%%M% %I% %E% SMI"
28 28
29 29 #include <sys/types.h>
30 30 #include <sys/t_lock.h>
31 31 #include <sys/param.h>
32 32 #include <sys/cred.h>
33 33 #include <sys/debug.h>
34 34 #include <sys/inline.h>
35 35 #include <sys/kmem.h>
36 36 #include <sys/proc.h>
37 37 #include <sys/regset.h>
38 38 #include <sys/sysmacros.h>
39 39 #include <sys/systm.h>
40 40 #include <sys/prsystm.h>
41 41 #include <sys/buf.h>
42 42 #include <sys/signal.h>
43 43 #include <sys/user.h>
44 44 #include <sys/cpuvar.h>
45 45
46 46 #include <sys/fault.h>
47 47 #include <sys/syscall.h>
48 48 #include <sys/procfs.h>
49 49 #include <sys/cmn_err.h>
50 50 #include <sys/stack.h>
51 51 #include <sys/watchpoint.h>
52 52 #include <sys/copyops.h>
53 53 #include <sys/schedctl.h>
54 54
55 55 #include <sys/mman.h>
56 56 #include <vm/as.h>
57 57 #include <vm/seg.h>
58 58
59 59 /*
60 60 * Copy ops vector for watchpoints.
61 61 */
62 62 static int watch_copyin(const void *, void *, size_t);
63 63 static int watch_xcopyin(const void *, void *, size_t);
64 64 static int watch_copyout(const void *, void *, size_t);
65 65 static int watch_xcopyout(const void *, void *, size_t);
66 66 static int watch_copyinstr(const char *, char *, size_t, size_t *);
67 67 static int watch_copyoutstr(const char *, char *, size_t, size_t *);
68 68 static int watch_fuword8(const void *, uint8_t *);
69 69 static int watch_fuword16(const void *, uint16_t *);
70 70 static int watch_fuword32(const void *, uint32_t *);
71 71 static int watch_suword8(void *, uint8_t);
72 72 static int watch_suword16(void *, uint16_t);
73 73 static int watch_suword32(void *, uint32_t);
74 74 static int watch_physio(int (*)(struct buf *), struct buf *,
75 75 dev_t, int, void (*)(struct buf *), struct uio *);
76 76 #ifdef _LP64
77 77 static int watch_fuword64(const void *, uint64_t *);
78 78 static int watch_suword64(void *, uint64_t);
79 79 #endif
80 80
/*
 * NOTE(review): presumably installed as a thread's copy-ops vector while
 * the process has active watchpoints — confirm against the callers that
 * reference watch_copyops.
 */
struct copyops watch_copyops = {
	watch_copyin,
	watch_xcopyin,
	watch_copyout,
	watch_xcopyout,
	watch_copyinstr,
	watch_copyoutstr,
	watch_fuword8,
	watch_fuword16,
	watch_fuword32,
#ifdef _LP64
	watch_fuword64,
#else
	NULL,			/* no 64-bit fetch on 32-bit kernels */
#endif
	watch_suword8,
	watch_suword16,
	watch_suword32,
#ifdef _LP64
	watch_suword64,
#else
	NULL,			/* no 64-bit store on 32-bit kernels */
#endif
	watch_physio
};
106 106
107 107 /*
108 108 * Map the 'rw' argument to a protection flag.
109 109 */
110 110 static int
111 111 rw_to_prot(enum seg_rw rw)
112 112 {
113 113 switch (rw) {
114 114 case S_EXEC:
115 115 return (PROT_EXEC);
116 116 case S_READ:
117 117 return (PROT_READ);
118 118 case S_WRITE:
119 119 return (PROT_WRITE);
120 120 default:
121 121 return (PROT_NONE); /* can't happen */
122 122 }
123 123 }
124 124
125 125 /*
126 126 * Map the 'rw' argument to an index into an array of exec/write/read things.
127 127 * The index follows the precedence order: exec .. write .. read
128 128 */
129 129 static int
130 130 rw_to_index(enum seg_rw rw)
131 131 {
132 132 switch (rw) {
133 133 default: /* default case "can't happen" */
134 134 case S_EXEC:
135 135 return (0);
136 136 case S_WRITE:
137 137 return (1);
138 138 case S_READ:
139 139 return (2);
140 140 }
141 141 }
142 142
143 143 /*
144 144 * Map an index back to a seg_rw.
145 145 */
static enum seg_rw S_rw[4] = {
	S_EXEC,
	S_WRITE,
	S_READ,
	S_READ,		/* extra slot: second S_READ trap on one instruction */
};

/* Indices into the wp_kmap[]/wp_umap[] reference-count arrays. */
#define	X	0	/* exec */
#define	W	1	/* write */
#define	R	2	/* read */
/* Total outstanding references in one such array. */
#define	sum(a)	(a[X] + a[W] + a[R])
157 157
158 158 /*
159 159 * Common code for pr_mappage() and pr_unmappage().
160 160 */
/*
 * Map watched pages in (mapin != 0: restore original protections so an
 * access can proceed) or out (mapin == 0: restore the modified,
 * watchpoint-enforcing protections) over [addr, addr + size).
 * If 'kernel' is non-zero the change is accounted against the kernel
 * reference counts (wp_kmap), otherwise against user (wp_umap).
 * Returns the number of pages whose mapping state was changed.
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here. Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.
	 *
	 * p_maplock prevents simultaneous execution of this function. Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed. But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once. See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	/* find the first watched page at or after addr's page */
	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
	    pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;	/* we will change at least this page */

		prot = pwp->wp_prot;
		if (mapin) {
			/* take a reference for this access type */
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			/* drop the reference taken by the matching mapin */
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				/* other references remain: keep page mapped in */
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}


		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			/* upgrade to writer to change segment protections */
			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			segop_getprot(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = segop_setprot(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}
347 347
348 348 /*
349 349 * Restore the original page protections on an address range.
350 350 * If 'kernel' is non-zero, just do it for the kernel.
351 351 * pr_mappage() returns non-zero if it actually changed anything.
352 352 *
353 353 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
354 354 * but pairs may be nested within other pairs. The reference counts
355 355 * sort it all out. See pr_do_mappage(), above.
356 356 */
357 357 static int
358 358 pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
359 359 {
360 360 return (pr_do_mappage(addr, size, 1, rw, kernel));
361 361 }
362 362
363 363 /*
364 364 * Set the modified page protections on a watched page.
365 365 * Inverse of pr_mappage().
366 366 * Needs to be called only if pr_mappage() returned non-zero.
367 367 */
static void
pr_unmappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	/* mapin == 0: put the watchpoint-enforcing protections back */
	(void) pr_do_mappage(addr, size, 0, rw, kernel);
}
373 373
374 374 /*
375 375 * Function called by an lwp after it resumes from stop().
376 376 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	/* nothing queued on the deferred-protection list */
	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	/* walk the p_wprot list, applying each page's pending protections */
	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = segop_setprot(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				/* transient failure: retry exactly once */
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}
432 432
433 433
434 434
/*
 * Return non-zero if an access of type 'rw' to 'addr' would fault
 * because the page is watched: the page is on the watched-page list
 * and the required permission is absent from its modified protections.
 * Must be called with as lock held.
 */
int
pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
{
	register struct watched_page *pwp;
	struct watched_page tpw;
	uint_t prot;
	int rv = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		/* only read/write/exec accesses can be watched */
		return (0);
	}

	/*
	 * as->a_wpage can only be modified while the process is totally
	 * stopped. We need, and should use, no locks here.
	 */
	if (as != &kas && avl_numnodes(&as->a_wpage) != 0) {
		tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		pwp = avl_find(&as->a_wpage, &tpw, NULL);
		if (pwp != NULL) {
			ASSERT(addr >= pwp->wp_vaddr &&
			    addr < pwp->wp_vaddr + PAGESIZE);
			if (pwp->wp_oprot != 0) {	/* page exists */
				prot = pwp->wp_prot;
				switch (rw) {
				case S_READ:
					rv = ((prot & (PROT_USER|PROT_READ))
					    != (PROT_USER|PROT_READ));
					break;
				case S_WRITE:
					rv = ((prot & (PROT_USER|PROT_WRITE))
					    != (PROT_USER|PROT_WRITE));
					break;
				case S_EXEC:
					rv = ((prot & (PROT_USER|PROT_EXEC))
					    != (PROT_USER|PROT_EXEC));
					break;
				default:
					/* can't happen! */
					break;
				}
			}
		}
	}

	return (rv);
}
488 488
489 489
490 490 /*
491 491 * trap() calls here to determine if a fault is in a watched page.
492 492 * We return nonzero if this is true and the load/store would fail.
493 493 */
494 494 int
495 495 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
496 496 {
497 497 struct as *as = curproc->p_as;
498 498 int rv;
499 499
500 500 if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
501 501 return (0);
502 502
503 503 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
504 504 rv = pr_is_watchpage_as(addr, rw, as);
505 505 AS_LOCK_EXIT(as, &as->a_lock);
506 506
507 507 return (rv);
508 508 }
509 509
510 510
511 511
512 512 /*
513 513 * trap() calls here to determine if a fault is a watchpoint.
514 514 */
/*
 * Decide whether the fault region [*paddr, *paddr + size) intersects a
 * watched area with a matching access type.  On a hit, returns the
 * TRAP_[RWX]WATCH code, advances *paddr to the start of the watched
 * area, sets *pta if the area is trap-after, and stores the remaining
 * watched length through plen (if non-NULL).  Returns 0 on no match.
 */
int
pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
    enum seg_rw rw)
{
	proc_t *p = curproc;
	caddr_t addr = *paddr;
	caddr_t eaddr = addr + size;
	register struct watched_area *pwa;
	struct watched_area twa;
	int rv = 0;
	int ta = 0;
	size_t len = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		/* not a watchable access type */
		*pta = 0;
		return (0);
	}

	/*
	 * p->p_warea is protected by p->p_lock.
	 */
	mutex_enter(&p->p_lock);

	/* BEGIN CSTYLED */
	/*
	 * This loop is somewhat complicated because the fault region can span
	 * multiple watched areas. For example:
	 *
	 *            addr              eaddr
	 *              +-----------------+
	 *              | fault region    |
	 *      +-------+--------+----+---+------------+
	 *      | prot not right |    | prot correct   |
	 *      +----------------+    +----------------+
	 *    wa_vaddr        wa_eaddr
	 *                    wa_vaddr           wa_eaddr
	 *
	 * We start at the area greater than or equal to the starting address.
	 * As long as some portion of the fault region overlaps the current
	 * area, we continue checking permissions until we find an appropriate
	 * match.
	 */
	/* END CSTYLED */
	twa.wa_vaddr = addr;
	twa.wa_eaddr = eaddr;

	for (pwa = pr_find_watched_area(p, &twa, NULL);
	    pwa != NULL && eaddr > pwa->wa_vaddr && addr < pwa->wa_eaddr;
	    pwa = AVL_NEXT(&p->p_warea, pwa)) {

		switch (rw) {
		case S_READ:
			if (pwa->wa_flags & WA_READ)
				rv = TRAP_RWATCH;
			break;
		case S_WRITE:
			if (pwa->wa_flags & WA_WRITE)
				rv = TRAP_WWATCH;
			break;
		case S_EXEC:
			if (pwa->wa_flags & WA_EXEC)
				rv = TRAP_XWATCH;
			break;
		default:
			/* can't happen */
			break;
		}

		/*
		 * If this area matched the access type, clamp the reported
		 * address and length to it and stop searching; otherwise
		 * move on to the next overlapping watched area.
		 */
		if (rv != 0) {
			if (addr < pwa->wa_vaddr)
				addr = pwa->wa_vaddr;
			len = pwa->wa_eaddr - addr;
			if (pwa->wa_flags & WA_TRAPAFTER)
				ta = 1;
			break;
		}
	}

	mutex_exit(&p->p_lock);

	*paddr = addr;
	*pta = ta;
	if (plen != NULL)
		*plen = len;
	return (rv);
}
610 610
611 611 /*
612 612 * Set up to perform a single-step at user level for the
613 613 * case of a trapafter watchpoint. Called from trap().
614 614 */
void
do_watch_step(caddr_t vaddr, size_t sz, enum seg_rw rw,
    int watchcode, greg_t pc)
{
	register klwp_t *lwp = ttolwp(curthread);
	struct lwp_watch *pw = &lwp->lwp_watch[rw_to_index(rw)];

	/*
	 * Check to see if we are already performing this special
	 * watchpoint single-step. We must not do pr_mappage() twice.
	 */

	/* special check for two read traps on the same instruction */
	if (rw == S_READ && pw->wpaddr != NULL &&
	    !(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize)) {
		ASSERT(lwp->lwp_watchtrap != 0);
		pw++;	/* use the extra S_READ struct */
	}

	if (pw->wpaddr != NULL) {
		/* already set up; just record the trap code if not yet set */
		ASSERT(lwp->lwp_watchtrap != 0);
		ASSERT(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize);
		if (pw->wpcode == 0) {
			pw->wpcode = watchcode;
			pw->wppc = pc;
		}
	} else {
		/* first trap at this address: map the page in and single-step */
		int mapped = pr_mappage(vaddr, sz, rw, 0);
		prstep(lwp, 1);
		lwp->lwp_watchtrap = 1;
		pw->wpaddr = vaddr;
		pw->wpsize = sz;
		pw->wpcode = watchcode;
		pw->wpmapped = mapped;	/* remember whether to pr_unmappage() */
		pw->wppc = pc;
	}
}
652 652
653 653 /*
654 654 * Undo the effects of do_watch_step().
655 655 * Called from trap() after the single-step is finished.
656 656 * Also called from issig_forreal() and stop() with a NULL
657 657 * argument to avoid having these things set more than once.
658 658 */
int
undo_watch_step(k_siginfo_t *sip)
{
	register klwp_t *lwp = ttolwp(curthread);
	int fault = 0;

	if (lwp->lwp_watchtrap) {
		struct lwp_watch *pw = lwp->lwp_watch;
		int i;

		/* 4 slots: exec, write, read, plus the extra S_READ slot */
		for (i = 0; i < 4; i++, pw++) {
			if (pw->wpaddr == NULL)
				continue;
			if (pw->wpmapped)
				pr_unmappage(pw->wpaddr, pw->wpsize, S_rw[i],
				    0);
			if (pw->wpcode != 0) {
				if (sip != NULL) {
					/* fill in the deferred SIGTRAP info */
					sip->si_signo = SIGTRAP;
					sip->si_code = pw->wpcode;
					sip->si_addr = pw->wpaddr;
					sip->si_trapafter = 1;
					sip->si_pc = (caddr_t)pw->wppc;
				}
				fault = FLTWATCH;
				pw->wpcode = 0;
			}
			/* reset the slot for the next watchpoint step */
			pw->wpaddr = NULL;
			pw->wpsize = 0;
			pw->wpmapped = 0;
		}
		lwp->lwp_watchtrap = 0;
	}

	return (fault);
}
695 695
696 696 /*
697 697 * Handle a watchpoint that occurs while doing copyin()
698 698 * or copyout() in a system call.
699 699 * Return non-zero if the fault or signal is cleared
700 700 * by a debugger while the lwp is stopped.
701 701 */
static int
sys_watchpoint(caddr_t addr, int watchcode, int ta)
{
	extern greg_t getuserpc(void);	/* XXX header file */
	k_sigset_t smask;
	register proc_t *p = ttoproc(curthread);
	register klwp_t *lwp = ttolwp(curthread);
	register sigqueue_t *sqp;
	int rval;

	/* assert no locks are held */
	/* ASSERT(curthread->t_nlocks == 0); */

	/* build the SIGTRAP siginfo describing the watchpoint hit */
	sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = watchcode;
	sqp->sq_info.si_addr = addr;
	sqp->sq_info.si_trapafter = ta;
	sqp->sq_info.si_pc = (caddr_t)getuserpc();

	mutex_enter(&p->p_lock);

	/* this will be tested and cleared by the caller */
	lwp->lwp_sysabort = 0;

	if (prismember(&p->p_fltmask, FLTWATCH)) {
		/* a /proc tracer is tracing FLTWATCH: stop for it */
		lwp->lwp_curflt = (uchar_t)FLTWATCH;
		lwp->lwp_siginfo = sqp->sq_info;
		stop(PR_FAULTED, FLTWATCH);
		if (lwp->lwp_curflt == 0) {
			/* the debugger cleared the fault while we were stopped */
			mutex_exit(&p->p_lock);
			kmem_free(sqp, sizeof (sigqueue_t));
			return (1);
		}
		lwp->lwp_curflt = 0;
	}

	/*
	 * post the SIGTRAP signal.
	 * Block all other signals so we only stop showing SIGTRAP.
	 */
	if (signal_is_blocked(curthread, SIGTRAP) ||
	    sigismember(&p->p_ignore, SIGTRAP)) {
		/* SIGTRAP is blocked or ignored, forget the rest. */
		mutex_exit(&p->p_lock);
		kmem_free(sqp, sizeof (sigqueue_t));
		return (0);
	}
	sigdelq(p, curthread, SIGTRAP);
	sigaddqa(p, curthread, sqp);
	schedctl_finish_sigblock(curthread);
	smask = curthread->t_hold;
	sigfillset(&curthread->t_hold);
	sigdiffset(&curthread->t_hold, &cantmask);
	sigdelset(&curthread->t_hold, SIGTRAP);
	mutex_exit(&p->p_lock);

	/* take the signal; nonzero return means it was cleared by a debugger */
	rval = ((ISSIG_FAST(curthread, lwp, p, FORREAL))? 0 : 1);

	/* restore the original signal mask */
	mutex_enter(&p->p_lock);
	curthread->t_hold = smask;
	mutex_exit(&p->p_lock);

	return (rval);
}
768 768
769 769 /*
770 770 * Wrappers for the copyin()/copyout() functions to deal
771 771 * with watchpoints that fire while in system calls.
772 772 */
773 773
774 774 static int
775 775 watch_xcopyin(const void *uaddr, void *kaddr, size_t count)
776 776 {
777 777 klwp_t *lwp = ttolwp(curthread);
778 778 caddr_t watch_uaddr = (caddr_t)uaddr;
779 779 caddr_t watch_kaddr = (caddr_t)kaddr;
780 780 int error = 0;
781 781 label_t ljb;
782 782 size_t part;
783 783 int mapped;
784 784
785 785 while (count && error == 0) {
786 786 int watchcode;
787 787 caddr_t vaddr;
788 788 size_t len;
789 789 int ta;
790 790
791 791 if ((part = PAGESIZE -
792 792 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
793 793 part = count;
794 794
795 795 if (!pr_is_watchpage(watch_uaddr, S_READ))
796 796 watchcode = 0;
797 797 else {
798 798 vaddr = watch_uaddr;
799 799 watchcode = pr_is_watchpoint(&vaddr, &ta,
800 800 part, &len, S_READ);
801 801 if (watchcode && ta == 0)
802 802 part = vaddr - watch_uaddr;
803 803 }
804 804
805 805 /*
806 806 * Copy the initial part, up to a watched address, if any.
807 807 */
808 808 if (part != 0) {
809 809 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
810 810 if (on_fault(&ljb))
811 811 error = EFAULT;
812 812 else
813 813 copyin_noerr(watch_uaddr, watch_kaddr, part);
814 814 no_fault();
815 815 if (mapped)
816 816 pr_unmappage(watch_uaddr, part, S_READ, 1);
817 817 watch_uaddr += part;
818 818 watch_kaddr += part;
819 819 count -= part;
820 820 }
821 821 /*
822 822 * If trapafter was specified, then copy through the
823 823 * watched area before taking the watchpoint trap.
824 824 */
825 825 while (count && watchcode && ta && len > part && error == 0) {
826 826 len -= part;
827 827 if ((part = PAGESIZE) > count)
828 828 part = count;
829 829 if (part > len)
830 830 part = len;
831 831 mapped = pr_mappage(watch_uaddr, part, S_READ, 1);
832 832 if (on_fault(&ljb))
833 833 error = EFAULT;
834 834 else
835 835 copyin_noerr(watch_uaddr, watch_kaddr, part);
836 836 no_fault();
837 837 if (mapped)
838 838 pr_unmappage(watch_uaddr, part, S_READ, 1);
839 839 watch_uaddr += part;
840 840 watch_kaddr += part;
841 841 count -= part;
842 842 }
843 843
844 844 error:
845 845 /* if we hit a watched address, do the watchpoint logic */
846 846 if (watchcode &&
847 847 (!sys_watchpoint(vaddr, watchcode, ta) ||
848 848 lwp->lwp_sysabort)) {
849 849 lwp->lwp_sysabort = 0;
850 850 error = EFAULT;
851 851 break;
852 852 }
853 853 }
854 854
855 855 return (error);
856 856 }
857 857
858 858 static int
859 859 watch_copyin(const void *kaddr, void *uaddr, size_t count)
860 860 {
861 861 return (watch_xcopyin(kaddr, uaddr, count) ? -1 : 0);
862 862 }
863 863
864 864
865 865 static int
866 866 watch_xcopyout(const void *kaddr, void *uaddr, size_t count)
867 867 {
868 868 klwp_t *lwp = ttolwp(curthread);
869 869 caddr_t watch_uaddr = (caddr_t)uaddr;
870 870 caddr_t watch_kaddr = (caddr_t)kaddr;
871 871 int error = 0;
872 872 label_t ljb;
873 873
874 874 while (count && error == 0) {
875 875 int watchcode;
876 876 caddr_t vaddr;
877 877 size_t part;
878 878 size_t len;
879 879 int ta;
880 880 int mapped;
881 881
882 882 if ((part = PAGESIZE -
883 883 (((uintptr_t)uaddr) & PAGEOFFSET)) > count)
884 884 part = count;
885 885
886 886 if (!pr_is_watchpage(watch_uaddr, S_WRITE))
887 887 watchcode = 0;
888 888 else {
889 889 vaddr = watch_uaddr;
890 890 watchcode = pr_is_watchpoint(&vaddr, &ta,
891 891 part, &len, S_WRITE);
892 892 if (watchcode) {
893 893 if (ta == 0)
894 894 part = vaddr - watch_uaddr;
895 895 else {
896 896 len += vaddr - watch_uaddr;
897 897 if (part > len)
898 898 part = len;
899 899 }
900 900 }
901 901 }
902 902
903 903 /*
904 904 * Copy the initial part, up to a watched address, if any.
905 905 */
906 906 if (part != 0) {
907 907 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
908 908 if (on_fault(&ljb))
909 909 error = EFAULT;
910 910 else
911 911 copyout_noerr(watch_kaddr, watch_uaddr, part);
912 912 no_fault();
913 913 if (mapped)
914 914 pr_unmappage(watch_uaddr, part, S_WRITE, 1);
915 915 watch_uaddr += part;
916 916 watch_kaddr += part;
917 917 count -= part;
918 918 }
919 919
920 920 /*
921 921 * If trapafter was specified, then copy through the
922 922 * watched area before taking the watchpoint trap.
923 923 */
924 924 while (count && watchcode && ta && len > part && error == 0) {
925 925 len -= part;
926 926 if ((part = PAGESIZE) > count)
927 927 part = count;
928 928 if (part > len)
929 929 part = len;
930 930 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1);
931 931 if (on_fault(&ljb))
932 932 error = EFAULT;
933 933 else
934 934 copyout_noerr(watch_kaddr, watch_uaddr, part);
935 935 no_fault();
936 936 if (mapped)
937 937 pr_unmappage(watch_uaddr, part, S_WRITE, 1);
938 938 watch_uaddr += part;
939 939 watch_kaddr += part;
940 940 count -= part;
941 941 }
942 942
943 943 /* if we hit a watched address, do the watchpoint logic */
944 944 if (watchcode &&
945 945 (!sys_watchpoint(vaddr, watchcode, ta) ||
946 946 lwp->lwp_sysabort)) {
947 947 lwp->lwp_sysabort = 0;
948 948 error = EFAULT;
949 949 break;
950 950 }
951 951 }
952 952
953 953 return (error);
954 954 }
955 955
956 956 static int
957 957 watch_copyout(const void *kaddr, void *uaddr, size_t count)
958 958 {
959 959 return (watch_xcopyout(kaddr, uaddr, count) ? -1 : 0);
960 960 }
961 961
/*
 * copyinstr() wrapper with watched-page handling: copy a NUL-terminated
 * string from user space, page chunk by page chunk.  Note that here
 * 'uaddr' itself advances through the loop (unlike the xcopyin/xcopyout
 * wrappers, which keep a separate cursor).
 */
static int
watch_copyinstr(
	const char *uaddr,
	char *kaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		/* bound this chunk to the page containing uaddr */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage((caddr_t)uaddr, S_READ))
			watchcode = 0;
		else {
			vaddr = (caddr_t)uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_READ);
			if (watchcode) {
				if (ta == 0)
					part = vaddr - uaddr;
				else {
					len += vaddr - uaddr;
					if (part > len)
						part = len;
				}
			}
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* ENAMETOOLONG from a partial chunk just means "keep going" */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* short copy or terminating NUL: the string is complete */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1075 1075
/*
 * Copyops replacement for copyoutstr(): copy a null-terminated string
 * from kernel address 'kaddr' to user address 'uaddr', at most
 * 'maxlength' bytes including the terminator, honoring any watched
 * areas the traced process has established.  On return, *lencopied
 * (if non-NULL) holds the number of bytes copied.  Returns 0 on
 * success, ENAMETOOLONG if the string did not fit, or EFAULT on a
 * fault or when the watchpoint logic aborts the system call.
 */
static int
watch_copyoutstr(
	const char *kaddr,
	char *uaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;			/* bytes still to be copied */
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;		/* bytes to attempt this pass */
		size_t len;		/* remaining length of watched area */
		size_t size;		/* bytes actually copied this pass */
		int ta;			/* nonzero if WA_TRAPAFTER set */
		int mapped;

		/* Never cross a page boundary in a single pass. */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage(uaddr, S_WRITE)) {
			watchcode = 0;
		} else {
			vaddr = uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			/* trap-before: copy only up to the watched address */
			if (watchcode && ta == 0)
				part = vaddr - uaddr;
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/*
			 * Exhausting 'part' is only a real overflow when
			 * the whole destination buffer has been consumed.
			 */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* Stop once the terminator was copied or the copy fell short. */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}
1182 1182
typedef int (*fuword_func)(const void *, void *);

/*
 * Generic form of watch_fuword8(), watch_fuword16(), etc.
 * Fetch a 'size'-byte word from user address 'addr' into 'dst' via the
 * supplied *_noerr primitive, honoring watchpoints.  Returns 0 on
 * success, or -1 if the access faulted or the watchpoint logic aborted
 * the current system call.
 */
static int
watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
		if (watchcode == 0 || ta != 0) {
			/* no watchpoint, or WA_TRAPAFTER: perform the fetch */
			mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				(*func)(addr, dst);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, size, S_READ, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* trap-before watchpoint was serviced; retry the access */
	}

	return (rv);
}
1226 1226
/*
 * Copyops replacement for fuword8(): watchpoint-aware 8-bit fetch.
 */
static int
watch_fuword8(const void *addr, uint8_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword8_noerr,
	    sizeof (*dst)));
}
1233 1233
/*
 * Copyops replacement for fuword16(): watchpoint-aware 16-bit fetch.
 */
static int
watch_fuword16(const void *addr, uint16_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword16_noerr,
	    sizeof (*dst)));
}
1240 1240
/*
 * Copyops replacement for fuword32(): watchpoint-aware 32-bit fetch.
 */
static int
watch_fuword32(const void *addr, uint32_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
	    sizeof (*dst)));
}
1247 1247
#ifdef _LP64
/*
 * Copyops replacement for fuword64(): watchpoint-aware 64-bit fetch.
 */
static int
watch_fuword64(const void *addr, uint64_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword64_noerr,
	    sizeof (*dst)));
}
#endif
1256 1256
1257 1257
/*
 * Copyops replacement for suword8(): store an 8-bit value at user
 * address 'addr', honoring watchpoints.  Returns 0 on success, or -1
 * if the store faulted or the watchpoint logic aborted the current
 * system call.
 */
static int
watch_suword8(void *addr, uint8_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* no watchpoint, or WA_TRAPAFTER: perform the store */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword8_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* trap-before watchpoint was serviced; retry the access */
	}

	return (rv);
}
1299 1299
/*
 * Copyops replacement for suword16(): store a 16-bit value at user
 * address 'addr', honoring watchpoints.  Returns 0 on success, or -1
 * if the store faulted or the watchpoint logic aborted the current
 * system call.
 */
static int
watch_suword16(void *addr, uint16_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* no watchpoint, or WA_TRAPAFTER: perform the store */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword16_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* trap-before watchpoint was serviced; retry the access */
	}

	return (rv);
}
1341 1341
/*
 * Copyops replacement for suword32(): store a 32-bit value at user
 * address 'addr', honoring watchpoints.  Returns 0 on success, or -1
 * if the store faulted or the watchpoint logic aborted the current
 * system call.
 */
static int
watch_suword32(void *addr, uint32_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* no watchpoint, or WA_TRAPAFTER: perform the store */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword32_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* trap-before watchpoint was serviced; retry the access */
	}

	return (rv);
}
1383 1383
#ifdef _LP64
/*
 * Copyops replacement for suword64(): store a 64-bit value at user
 * address 'addr', honoring watchpoints.  Returns 0 on success, or -1
 * if the store faulted or the watchpoint logic aborted the current
 * system call.
 */
static int
watch_suword64(void *addr, uint64_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			/* no watchpoint, or WA_TRAPAFTER: perform the store */
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword64_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
		/* trap-before watchpoint was serviced; retry the access */
	}

	return (rv);
}
#endif /* _LP64 */
1427 1427
1428 1428 /*
1429 1429 * Check for watched addresses in the given address space.
1430 1430 * Return 1 if this is true, otherwise 0.
1431 1431 */
1432 1432 static int
1433 1433 pr_is_watched(caddr_t base, size_t len, int rw)
1434 1434 {
1435 1435 caddr_t saddr = (caddr_t)((uintptr_t)base & (uintptr_t)PAGEMASK);
1436 1436 caddr_t eaddr = base + len;
1437 1437 caddr_t paddr;
1438 1438
1439 1439 for (paddr = saddr; paddr < eaddr; paddr += PAGESIZE) {
1440 1440 if (pr_is_watchpage(paddr, rw))
1441 1441 return (1);
1442 1442 }
1443 1443
1444 1444 return (0);
1445 1445 }
1446 1446
/*
 * Wrapper for the physio() function.
 * Splits one uio operation with multiple iovecs into uio operations with
 * only one iovecs to do the watchpoint handling separately for each iovecs.
 *
 * Kernel-space uios cannot contain watched areas and are handed straight
 * to default_physio().  Returns 0 on success, an error from
 * default_physio(), or EFAULT when the watchpoint logic aborts the call.
 */
static int
watch_physio(int (*strat)(struct buf *), struct buf *bp, dev_t dev,
    int rw, void (*mincnt)(struct buf *), struct uio *uio)
{
	struct uio auio;		/* single-iovec clone of *uio */
	struct iovec *iov;
	caddr_t base;
	size_t len;
	int seg_rw;
	int error = 0;

	if (uio->uio_segflg == UIO_SYSSPACE)
		return (default_physio(strat, bp, dev, rw, mincnt, uio));

	/* a device read writes user memory, and vice versa */
	seg_rw = (rw == B_READ) ? S_WRITE : S_READ;

	while (uio->uio_iovcnt > 0) {
		if (uio->uio_resid == 0) {
			/*
			 * Make sure to return the uio structure with the
			 * same values as default_physio() does.
			 */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}

		iov = uio->uio_iov;
		len = MIN(iov->iov_len, uio->uio_resid);

		/* describe just this one iovec in auio */
		auio.uio_iovcnt = 1;
		auio.uio_iov = iov;
		auio.uio_resid = len;
		auio.uio_loffset = uio->uio_loffset;
		auio.uio_llimit = uio->uio_llimit;
		auio.uio_fmode = uio->uio_fmode;
		auio.uio_extflg = uio->uio_extflg;
		auio.uio_segflg = uio->uio_segflg;

		base = iov->iov_base;

		if (!pr_is_watched(base, len, seg_rw)) {
			/*
			 * The given memory references don't cover a
			 * watched page.
			 */
			error = default_physio(strat, bp, dev, rw, mincnt,
			    &auio);

			/* Update uio with values from auio. */
			len -= auio.uio_resid;
			uio->uio_resid -= len;
			uio->uio_loffset += len;

			/*
			 * Return if an error occurred or not all data
			 * was copied.
			 */
			if (auio.uio_resid || error)
				break;
			uio->uio_iov++;
			uio->uio_iovcnt--;
		} else {
			int mapped, watchcode, ta;
			caddr_t vaddr = base;
			klwp_t *lwp = ttolwp(curthread);

			watchcode = pr_is_watchpoint(&vaddr, &ta, len,
			    NULL, seg_rw);

			if (watchcode == 0 || ta != 0) {
				/*
				 * Do the io if the given memory references
				 * don't cover a watched area (watchcode=0)
				 * or if WA_TRAPAFTER was specified.
				 */
				mapped = pr_mappage(base, len, seg_rw, 1);
				error = default_physio(strat, bp, dev, rw,
				    mincnt, &auio);
				if (mapped)
					pr_unmappage(base, len, seg_rw, 1);

				len -= auio.uio_resid;
				uio->uio_resid -= len;
				uio->uio_loffset += len;
			}

			/*
			 * If we hit a watched address, do the watchpoint logic.
			 */
			if (watchcode &&
			    (!sys_watchpoint(vaddr, watchcode, ta) ||
			    lwp->lwp_sysabort)) {
				lwp->lwp_sysabort = 0;
				return (EFAULT);
			}

			/*
			 * Check for errors from default_physio().
			 */
			if (watchcode == 0 || ta != 0) {
				if (auio.uio_resid || error)
					break;
				uio->uio_iov++;
				uio->uio_iovcnt--;
			}
		}
	}

	return (error);
}
1563 1563
1564 1564 int
1565 1565 wa_compare(const void *a, const void *b)
1566 1566 {
1567 1567 const watched_area_t *pa = a;
1568 1568 const watched_area_t *pb = b;
1569 1569
1570 1570 if (pa->wa_vaddr < pb->wa_vaddr)
1571 1571 return (-1);
1572 1572 else if (pa->wa_vaddr > pb->wa_vaddr)
1573 1573 return (1);
1574 1574 else
1575 1575 return (0);
1576 1576 }
1577 1577
1578 1578 int
1579 1579 wp_compare(const void *a, const void *b)
1580 1580 {
1581 1581 const watched_page_t *pa = a;
1582 1582 const watched_page_t *pb = b;
1583 1583
1584 1584 if (pa->wp_vaddr < pb->wp_vaddr)
1585 1585 return (-1);
1586 1586 else if (pa->wp_vaddr > pb->wp_vaddr)
1587 1587 return (1);
1588 1588 else
1589 1589 return (0);
1590 1590 }
1591 1591
1592 1592 /*
1593 1593 * Given an address range, finds the first watched area which overlaps some or
1594 1594 * all of the range.
1595 1595 */
1596 1596 watched_area_t *
1597 1597 pr_find_watched_area(proc_t *p, watched_area_t *pwa, avl_index_t *where)
1598 1598 {
1599 1599 caddr_t vaddr = pwa->wa_vaddr;
1600 1600 caddr_t eaddr = pwa->wa_eaddr;
1601 1601 watched_area_t *wap;
1602 1602 avl_index_t real_where;
1603 1603
1604 1604 /* First, check if there is an exact match. */
1605 1605 wap = avl_find(&p->p_warea, pwa, &real_where);
1606 1606
1607 1607
1608 1608 /* Check to see if we overlap with the previous area. */
1609 1609 if (wap == NULL) {
1610 1610 wap = avl_nearest(&p->p_warea, real_where, AVL_BEFORE);
1611 1611 if (wap != NULL &&
1612 1612 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1613 1613 wap = NULL;
1614 1614 }
1615 1615
1616 1616 /* Try the next area. */
1617 1617 if (wap == NULL) {
1618 1618 wap = avl_nearest(&p->p_warea, real_where, AVL_AFTER);
1619 1619 if (wap != NULL &&
1620 1620 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr))
1621 1621 wap = NULL;
1622 1622 }
1623 1623
1624 1624 if (where)
1625 1625 *where = real_where;
1626 1626
1627 1627 return (wap);
1628 1628 }
1629 1629
/*
 * Turn on watchpoint handling for thread t: mark the thread TP_WATCHPT
 * and install the watchpoint-aware copy operations vector.
 */
void
watch_enable(kthread_id_t t)
{
	t->t_proc_flag |= TP_WATCHPT;
	install_copyops(t, &watch_copyops);
}
1636 1636
/*
 * Turn off watchpoint handling for thread t: clear TP_WATCHPT and
 * remove the installed copy operations vector.
 */
void
watch_disable(kthread_id_t t)
{
	t->t_proc_flag &= ~TP_WATCHPT;
	remove_copyops(t);
}
1643 1643
1644 1644 int
1645 1645 copyin_nowatch(const void *uaddr, void *kaddr, size_t len)
1646 1646 {
1647 1647 int watched, ret;
1648 1648
1649 1649 watched = watch_disable_addr(uaddr, len, S_READ);
1650 1650 ret = copyin(uaddr, kaddr, len);
1651 1651 if (watched)
1652 1652 watch_enable_addr(uaddr, len, S_READ);
1653 1653
1654 1654 return (ret);
1655 1655 }
1656 1656
1657 1657 int
1658 1658 copyout_nowatch(const void *kaddr, void *uaddr, size_t len)
1659 1659 {
1660 1660 int watched, ret;
1661 1661
1662 1662 watched = watch_disable_addr(uaddr, len, S_WRITE);
1663 1663 ret = copyout(kaddr, uaddr, len);
1664 1664 if (watched)
1665 1665 watch_enable_addr(uaddr, len, S_WRITE);
1666 1666
1667 1667 return (ret);
1668 1668 }
1669 1669
1670 1670 #ifdef _LP64
1671 1671 int
1672 1672 fuword64_nowatch(const void *addr, uint64_t *value)
1673 1673 {
1674 1674 int watched, ret;
1675 1675
1676 1676 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1677 1677 ret = fuword64(addr, value);
1678 1678 if (watched)
1679 1679 watch_enable_addr(addr, sizeof (*value), S_READ);
1680 1680
1681 1681 return (ret);
1682 1682 }
1683 1683 #endif
1684 1684
1685 1685 int
1686 1686 fuword32_nowatch(const void *addr, uint32_t *value)
1687 1687 {
1688 1688 int watched, ret;
1689 1689
1690 1690 watched = watch_disable_addr(addr, sizeof (*value), S_READ);
1691 1691 ret = fuword32(addr, value);
1692 1692 if (watched)
1693 1693 watch_enable_addr(addr, sizeof (*value), S_READ);
1694 1694
1695 1695 return (ret);
1696 1696 }
1697 1697
1698 1698 #ifdef _LP64
1699 1699 int
1700 1700 suword64_nowatch(void *addr, uint64_t value)
1701 1701 {
1702 1702 int watched, ret;
1703 1703
1704 1704 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1705 1705 ret = suword64(addr, value);
1706 1706 if (watched)
1707 1707 watch_enable_addr(addr, sizeof (value), S_WRITE);
1708 1708
1709 1709 return (ret);
1710 1710 }
1711 1711 #endif
1712 1712
1713 1713 int
1714 1714 suword32_nowatch(void *addr, uint32_t value)
1715 1715 {
1716 1716 int watched, ret;
1717 1717
1718 1718 watched = watch_disable_addr(addr, sizeof (value), S_WRITE);
1719 1719 ret = suword32(addr, value);
1720 1720 if (watched)
1721 1721 watch_enable_addr(addr, sizeof (value), S_WRITE);
1722 1722
1723 1723 return (ret);
1724 1724 }
1725 1725
1726 1726 int
1727 1727 watch_disable_addr(const void *addr, size_t len, enum seg_rw rw)
1728 1728 {
1729 1729 if (pr_watch_active(curproc))
1730 1730 return (pr_mappage((caddr_t)addr, len, rw, 1));
1731 1731 return (0);
1732 1732 }
1733 1733
1734 1734 void
1735 1735 watch_enable_addr(const void *addr, size_t len, enum seg_rw rw)
1736 1736 {
1737 1737 if (pr_watch_active(curproc))
1738 1738 pr_unmappage((caddr_t)addr, len, rw, 1);
1739 1739 }
↓ open down ↓ |
1324 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX