1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License, Version 1.0 only 6 * (the "License"). You may not use this file except in compliance 7 * with the License. 8 * 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 10 * or http://www.opensolaris.org/os/licensing. 11 * See the License for the specific language governing permissions 12 * and limitations under the License. 13 * 14 * When distributing Covered Code, include this CDDL HEADER in each 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 16 * If applicable, add the following below this CDDL HEADER, with the 17 * fields enclosed by brackets "[]" replaced with your own identifying 18 * information: Portions Copyright [yyyy] [name of copyright owner] 19 * 20 * CDDL HEADER END 21 */ 22 /* 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 #include <sys/types.h> 30 #include <sys/t_lock.h> 31 #include <sys/param.h> 32 #include <sys/cred.h> 33 #include <sys/debug.h> 34 #include <sys/inline.h> 35 #include <sys/kmem.h> 36 #include <sys/proc.h> 37 #include <sys/regset.h> 38 #include <sys/sysmacros.h> 39 #include <sys/systm.h> 40 #include <sys/prsystm.h> 41 #include <sys/buf.h> 42 #include <sys/signal.h> 43 #include <sys/user.h> 44 #include <sys/cpuvar.h> 45 46 #include <sys/fault.h> 47 #include <sys/syscall.h> 48 #include <sys/procfs.h> 49 #include <sys/cmn_err.h> 50 #include <sys/stack.h> 51 #include <sys/watchpoint.h> 52 #include <sys/copyops.h> 53 #include <sys/schedctl.h> 54 55 #include <sys/mman.h> 56 #include <vm/as.h> 57 #include <vm/seg.h> 58 59 /* 60 * Copy ops vector for watchpoints. 
 */
static int	watch_copyin(const void *, void *, size_t);
static int	watch_xcopyin(const void *, void *, size_t);
static int	watch_copyout(const void *, void *, size_t);
static int	watch_xcopyout(const void *, void *, size_t);
static int	watch_copyinstr(const char *, char *, size_t, size_t *);
static int	watch_copyoutstr(const char *, char *, size_t, size_t *);
static int	watch_fuword8(const void *, uint8_t *);
static int	watch_fuword16(const void *, uint16_t *);
static int	watch_fuword32(const void *, uint32_t *);
static int	watch_suword8(void *, uint8_t);
static int	watch_suword16(void *, uint16_t);
static int	watch_suword32(void *, uint32_t);
static int	watch_physio(int (*)(struct buf *), struct buf *,
	dev_t, int, void (*)(struct buf *), struct uio *);
#ifdef _LP64
static int	watch_fuword64(const void *, uint64_t *);
static int	watch_suword64(void *, uint64_t);
#endif

/*
 * Installed for a process with active watchpoints; each entry wraps the
 * corresponding raw copy routine with watched-page handling.
 */
struct copyops watch_copyops = {
	watch_copyin,
	watch_xcopyin,
	watch_copyout,
	watch_xcopyout,
	watch_copyinstr,
	watch_copyoutstr,
	watch_fuword8,
	watch_fuword16,
	watch_fuword32,
#ifdef _LP64
	watch_fuword64,
#else
	NULL,
#endif
	watch_suword8,
	watch_suword16,
	watch_suword32,
#ifdef _LP64
	watch_suword64,
#else
	NULL,
#endif
	watch_physio
};

/*
 * Map the 'rw' argument to a protection flag.
 */
static int
rw_to_prot(enum seg_rw rw)
{
	switch (rw) {
	case S_EXEC:
		return (PROT_EXEC);
	case S_READ:
		return (PROT_READ);
	case S_WRITE:
		return (PROT_WRITE);
	default:
		return (PROT_NONE);	/* can't happen */
	}
}

/*
 * Map the 'rw' argument to an index into an array of exec/write/read things.
 * The index follows the precedence order: exec .. write .. read
 */
static int
rw_to_index(enum seg_rw rw)
{
	switch (rw) {
	default:	/* default case "can't happen" */
	case S_EXEC:
		return (0);
	case S_WRITE:
		return (1);
	case S_READ:
		return (2);
	}
}

/*
 * Map an index back to a seg_rw.
 * Inverse of rw_to_index(); the 4th slot mirrors the extra S_READ
 * entry in lwp_watch[] (see do_watch_step()/undo_watch_step()).
 */
static enum seg_rw S_rw[4] = {
	S_EXEC,
	S_WRITE,
	S_READ,
	S_READ,
};

#define	X	0
#define	W	1
#define	R	2
#define	sum(a)	(a[X] + a[W] + a[R])

/*
 * Common code for pr_mappage() and pr_unmappage().
 *
 * Walks the watched pages overlapping [addr, addr + size).  With mapin
 * set, temporarily restores enough protection for an access of type 'rw'
 * to succeed, bumping the per-page reference counts (wp_kmap for
 * kernel-only access, wp_umap otherwise) and setting WP_NOWATCH; with
 * mapin clear, drops the references taken by a matching mapin call and
 * reinstates the watched protections once no references remain.
 * Returns the number of watched pages touched; non-zero means the caller
 * must eventually call pr_unmappage().
 */
static int
pr_do_mappage(caddr_t addr, size_t size, int mapin, enum seg_rw rw, int kernel)
{
	proc_t *p = curproc;
	struct as *as = p->p_as;
	char *eaddr = addr + size;
	int prot_rw = rw_to_prot(rw);
	int xrw = rw_to_index(rw);
	int rv = 0;
	struct watched_page *pwp;
	struct watched_page tpw;
	avl_index_t where;
	uint_t prot;

	ASSERT(as != &kas);

startover:
	ASSERT(rv == 0);
	if (avl_numnodes(&as->a_wpage) == 0)
		return (0);

	/*
	 * as->a_wpage can only be changed while the process is totally stopped.
	 * Don't grab p_lock here. Holding p_lock while grabbing the address
	 * space lock leads to deadlocks with the clock thread.
	 *
	 * p_maplock prevents simultaneous execution of this function. Under
	 * normal circumstances, holdwatch() will stop all other threads, so the
	 * lock isn't really needed. But there may be multiple threads within
	 * stop() when SWATCHOK is set, so we need to handle multiple threads
	 * at once. See holdwatch() for the details of this dance.
	 */

	mutex_enter(&p->p_maplock);
	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

	tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
	if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
		pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);

	for (; pwp != NULL && pwp->wp_vaddr < eaddr;
	    pwp = AVL_NEXT(&as->a_wpage, pwp)) {

		/*
		 * If the requested protection has not been
		 * removed, we need not remap this page.
		 */
		prot = pwp->wp_prot;
		if (kernel || (prot & PROT_USER))
			if (prot & prot_rw)
				continue;
		/*
		 * If the requested access does not exist in the page's
		 * original protections, we need not remap this page.
		 * If the page does not exist yet, we can't test it.
		 */
		if ((prot = pwp->wp_oprot) != 0) {
			if (!(kernel || (prot & PROT_USER)))
				continue;
			if (!(prot & prot_rw))
				continue;
		}

		if (mapin) {
			/*
			 * Before mapping the page in, ensure that
			 * all other lwps are held in the kernel.
			 */
			if (p->p_mapcnt == 0) {
				/*
				 * Release as lock while in holdwatch()
				 * in case other threads need to grab it.
				 */
				AS_LOCK_EXIT(as, &as->a_lock);
				mutex_exit(&p->p_maplock);
				if (holdwatch() != 0) {
					/*
					 * We stopped in holdwatch().
					 * Start all over again because the
					 * watched page list may have changed.
					 */
					goto startover;
				}
				mutex_enter(&p->p_maplock);
				AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
			}
			p->p_mapcnt++;
		}

		addr = pwp->wp_vaddr;
		rv++;

		prot = pwp->wp_prot;
		if (mapin) {
			if (kernel)
				pwp->wp_kmap[xrw]++;
			else
				pwp->wp_umap[xrw]++;
			pwp->wp_flags |= WP_NOWATCH;
			if (pwp->wp_kmap[X] + pwp->wp_umap[X])
				/* cannot have exec-only protection */
				prot |= PROT_READ|PROT_EXEC;
			if (pwp->wp_kmap[R] + pwp->wp_umap[R])
				prot |= PROT_READ;
			if (pwp->wp_kmap[W] + pwp->wp_umap[W])
				/* cannot have write-only protection */
				prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
			if (sum(pwp->wp_umap) == 0)
				prot &= ~PROT_USER;
#endif
		} else {
			/* mapout: drop one reference of the given class */
			ASSERT(pwp->wp_flags & WP_NOWATCH);
			if (kernel) {
				ASSERT(pwp->wp_kmap[xrw] != 0);
				--pwp->wp_kmap[xrw];
			} else {
				ASSERT(pwp->wp_umap[xrw] != 0);
				--pwp->wp_umap[xrw];
			}
			if (sum(pwp->wp_kmap) + sum(pwp->wp_umap) == 0)
				pwp->wp_flags &= ~WP_NOWATCH;
			else {
				/*
				 * Other references remain: recompute the
				 * protections they still require.
				 */
				if (pwp->wp_kmap[X] + pwp->wp_umap[X])
					/* cannot have exec-only protection */
					prot |= PROT_READ|PROT_EXEC;
				if (pwp->wp_kmap[R] + pwp->wp_umap[R])
					prot |= PROT_READ;
				if (pwp->wp_kmap[W] + pwp->wp_umap[W])
					/* cannot have write-only protection */
					prot |= PROT_READ|PROT_WRITE;
#if 0	/* damned broken mmu feature! */
				if (sum(pwp->wp_umap) == 0)
					prot &= ~PROT_USER;
#endif
			}
		}


		if (pwp->wp_oprot != 0) {	/* if page exists */
			struct seg *seg;
			uint_t oprot;
			int err, retrycnt = 0;

			/*
			 * Upgrade to the writer lock to change segment
			 * protections; a single IE_RETRY from setprot
			 * is expected and retried once.
			 */
			AS_LOCK_EXIT(as, &as->a_lock);
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
		retry:
			seg = as_segat(as, addr);
			ASSERT(seg != NULL);
			segop_getprot(seg, addr, 0, &oprot);
			if (prot != oprot) {
				err = segop_setprot(seg, addr, PAGESIZE, prot);
				if (err == IE_RETRY) {
					ASSERT(retrycnt == 0);
					retrycnt++;
					goto retry;
				}
			}
			AS_LOCK_EXIT(as, &as->a_lock);
		} else
			AS_LOCK_EXIT(as, &as->a_lock);

		/*
		 * When all pages are mapped back to their normal state,
		 * continue the other lwps.
		 */
		if (!mapin) {
			ASSERT(p->p_mapcnt > 0);
			p->p_mapcnt--;
			if (p->p_mapcnt == 0) {
				mutex_exit(&p->p_maplock);
				mutex_enter(&p->p_lock);
				continuelwps(p);
				mutex_exit(&p->p_lock);
				mutex_enter(&p->p_maplock);
			}
		}

		AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	}

	AS_LOCK_EXIT(as, &as->a_lock);
	mutex_exit(&p->p_maplock);

	return (rv);
}

/*
 * Restore the original page protections on an address range.
 * If 'kernel' is non-zero, just do it for the kernel.
 * pr_mappage() returns non-zero if it actually changed anything.
 *
 * pr_mappage() and pr_unmappage() must be executed in matched pairs,
 * but pairs may be nested within other pairs. The reference counts
 * sort it all out. See pr_do_mappage(), above.
 */
static int
pr_mappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	return (pr_do_mappage(addr, size, 1, rw, kernel));
}

/*
 * Set the modified page protections on a watched page.
 * Inverse of pr_mappage().
 * Needs to be called only if pr_mappage() returned non-zero.
 */
static void
pr_unmappage(const caddr_t addr, size_t size, enum seg_rw rw, int kernel)
{
	(void) pr_do_mappage(addr, size, 0, rw, kernel);
}

/*
 * Function called by an lwp after it resumes from stop().
 * Applies the deferred protection changes queued on the p_wprot list
 * (pages flagged WP_SETPROT) and frees watched_page entries that no
 * longer cover any watched area.
 */
void
setallwatch(void)
{
	proc_t *p = curproc;
	struct as *as = curproc->p_as;
	struct watched_page *pwp, *next;
	struct seg *seg;
	caddr_t vaddr;
	uint_t prot;
	int err, retrycnt;

	if (p->p_wprot == NULL)
		return;

	ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));

	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);

	pwp = p->p_wprot;
	while (pwp != NULL) {

		vaddr = pwp->wp_vaddr;
		retrycnt = 0;
	retry:
		ASSERT(pwp->wp_flags & WP_SETPROT);
		/*
		 * Skip pages currently mapped in (WP_NOWATCH) or no
		 * longer backed by a segment; a single IE_RETRY from
		 * setprot is expected and retried once.
		 */
		if ((seg = as_segat(as, vaddr)) != NULL &&
		    !(pwp->wp_flags & WP_NOWATCH)) {
			prot = pwp->wp_prot;
			err = segop_setprot(seg, vaddr, PAGESIZE, prot);
			if (err == IE_RETRY) {
				ASSERT(retrycnt == 0);
				retrycnt++;
				goto retry;
			}
		}

		next = pwp->wp_list;

		if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
			/*
			 * No watched areas remain in this page.
			 * Free the watched_page structure.
			 */
			avl_remove(&as->a_wpage, pwp);
			kmem_free(pwp, sizeof (struct watched_page));
		} else {
			pwp->wp_flags &= ~WP_SETPROT;
		}

		pwp = next;
	}
	p->p_wprot = NULL;

	AS_LOCK_EXIT(as, &as->a_lock);
}



/*
 * Must be called with as lock held.
 * Returns non-zero if 'addr' lies in a watched page of 'as' whose
 * current protections would make a user access of type 'rw' fail.
 */
int
pr_is_watchpage_as(caddr_t addr, enum seg_rw rw, struct as *as)
{
	register struct watched_page *pwp;
	struct watched_page tpw;
	uint_t prot;
	int rv = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		return (0);
	}

	/*
	 * as->a_wpage can only be modified while the process is totally
	 * stopped. We need, and should use, no locks here.
	 */
	if (as != &kas && avl_numnodes(&as->a_wpage) != 0) {
		tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
		pwp = avl_find(&as->a_wpage, &tpw, NULL);
		if (pwp != NULL) {
			ASSERT(addr >= pwp->wp_vaddr &&
			    addr < pwp->wp_vaddr + PAGESIZE);
			if (pwp->wp_oprot != 0) {
				/*
				 * The access fails unless both PROT_USER
				 * and the matching permission survive.
				 */
				prot = pwp->wp_prot;
				switch (rw) {
				case S_READ:
					rv = ((prot & (PROT_USER|PROT_READ))
					    != (PROT_USER|PROT_READ));
					break;
				case S_WRITE:
					rv = ((prot & (PROT_USER|PROT_WRITE))
					    != (PROT_USER|PROT_WRITE));
					break;
				case S_EXEC:
					rv = ((prot & (PROT_USER|PROT_EXEC))
					    != (PROT_USER|PROT_EXEC));
					break;
				default:
					/* can't happen! */
					break;
				}
			}
		}
	}

	return (rv);
}


/*
 * trap() calls here to determine if a fault is in a watched page.
 * We return nonzero if this is true and the load/store would fail.
 */
int
pr_is_watchpage(caddr_t addr, enum seg_rw rw)
{
	struct as *as = curproc->p_as;
	int rv;

	if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
		return (0);

	AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
	rv = pr_is_watchpage_as(addr, rw, as);
	AS_LOCK_EXIT(as, &as->a_lock);

	return (rv);
}



/*
 * trap() calls here to determine if a fault is a watchpoint.
 * Returns 0 or one of TRAP_RWATCH/TRAP_WWATCH/TRAP_XWATCH.  On a hit,
 * *paddr is advanced to the start of the watched overlap, *pta reports
 * whether WA_TRAPAFTER applies, and *plen (if non-NULL) receives the
 * remaining length of the watched area from *paddr.
 */
int
pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
	enum seg_rw rw)
{
	proc_t *p = curproc;
	caddr_t addr = *paddr;
	caddr_t eaddr = addr + size;
	register struct watched_area *pwa;
	struct watched_area twa;
	int rv = 0;
	int ta = 0;
	size_t len = 0;

	switch (rw) {
	case S_READ:
	case S_WRITE:
	case S_EXEC:
		break;
	default:
		*pta = 0;
		return (0);
	}

	/*
	 * p->p_warea is protected by p->p_lock.
	 */
	mutex_enter(&p->p_lock);

	/* BEGIN CSTYLED */
	/*
	 * This loop is somewhat complicated because the fault region can span
	 * multiple watched areas. For example:
	 *
	 *           addr              eaddr
	 *            +-----------------+
	 *            |  fault region   |
	 *    +-------+--------+----+---+------------+
	 *    | prot not right |    |  prot correct  |
	 *    +----------------+    +----------------+
	 *    wa_vaddr    wa_eaddr
	 *                          wa_vaddr    wa_eaddr
	 *
	 * We start at the area greater than or equal to the starting address.
	 * As long as some portion of the fault region overlaps the current
	 * area, we continue checking permissions until we find an appropriate
	 * match.
	 */
	/* END CSTYLED */
	twa.wa_vaddr = addr;
	twa.wa_eaddr = eaddr;

	for (pwa = pr_find_watched_area(p, &twa, NULL);
	    pwa != NULL && eaddr > pwa->wa_vaddr && addr < pwa->wa_eaddr;
	    pwa = AVL_NEXT(&p->p_warea, pwa)) {

		switch (rw) {
		case S_READ:
			if (pwa->wa_flags & WA_READ)
				rv = TRAP_RWATCH;
			break;
		case S_WRITE:
			if (pwa->wa_flags & WA_WRITE)
				rv = TRAP_WWATCH;
			break;
		case S_EXEC:
			if (pwa->wa_flags & WA_EXEC)
				rv = TRAP_XWATCH;
			break;
		default:
			/* can't happen */
			break;
		}

		/*
		 * If protections didn't match, check the next watched
		 * area
		 */
		if (rv != 0) {
			/* hit: clamp addr/len to the watched overlap */
			if (addr < pwa->wa_vaddr)
				addr = pwa->wa_vaddr;
			len = pwa->wa_eaddr - addr;
			if (pwa->wa_flags & WA_TRAPAFTER)
				ta = 1;
			break;
		}
	}

	mutex_exit(&p->p_lock);

	*paddr = addr;
	*pta = ta;
	if (plen != NULL)
		*plen = len;
	return (rv);
}

/*
 * Set up to perform a single-step at user level for the
 * case of a trapafter watchpoint. Called from trap().
 * Records the watched range/code in the lwp_watch[] slot for 'rw' so the
 * trap can be reported after the instruction completes.
 */
void
do_watch_step(caddr_t vaddr, size_t sz, enum seg_rw rw,
	int watchcode, greg_t pc)
{
	register klwp_t *lwp = ttolwp(curthread);
	struct lwp_watch *pw = &lwp->lwp_watch[rw_to_index(rw)];

	/*
	 * Check to see if we are already performing this special
	 * watchpoint single-step. We must not do pr_mappage() twice.
	 */

	/* special check for two read traps on the same instruction */
	if (rw == S_READ && pw->wpaddr != NULL &&
	    !(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize)) {
		ASSERT(lwp->lwp_watchtrap != 0);
		pw++;	/* use the extra S_READ struct */
	}

	if (pw->wpaddr != NULL) {
		/* already stepping over this range; just record the code */
		ASSERT(lwp->lwp_watchtrap != 0);
		ASSERT(pw->wpaddr <= vaddr && vaddr < pw->wpaddr + pw->wpsize);
		if (pw->wpcode == 0) {
			pw->wpcode = watchcode;
			pw->wppc = pc;
		}
	} else {
		int mapped = pr_mappage(vaddr, sz, rw, 0);
		prstep(lwp, 1);
		lwp->lwp_watchtrap = 1;
		pw->wpaddr = vaddr;
		pw->wpsize = sz;
		pw->wpcode = watchcode;
		pw->wpmapped = mapped;
		pw->wppc = pc;
	}
}

/*
 * Undo the effects of do_watch_step().
 * Called from trap() after the single-step is finished.
 * Also called from issig_forreal() and stop() with a NULL
 * argument to avoid having these things set more than once.
658 */ 659 int 660 undo_watch_step(k_siginfo_t *sip) 661 { 662 register klwp_t *lwp = ttolwp(curthread); 663 int fault = 0; 664 665 if (lwp->lwp_watchtrap) { 666 struct lwp_watch *pw = lwp->lwp_watch; 667 int i; 668 669 for (i = 0; i < 4; i++, pw++) { 670 if (pw->wpaddr == NULL) 671 continue; 672 if (pw->wpmapped) 673 pr_unmappage(pw->wpaddr, pw->wpsize, S_rw[i], 674 0); 675 if (pw->wpcode != 0) { 676 if (sip != NULL) { 677 sip->si_signo = SIGTRAP; 678 sip->si_code = pw->wpcode; 679 sip->si_addr = pw->wpaddr; 680 sip->si_trapafter = 1; 681 sip->si_pc = (caddr_t)pw->wppc; 682 } 683 fault = FLTWATCH; 684 pw->wpcode = 0; 685 } 686 pw->wpaddr = NULL; 687 pw->wpsize = 0; 688 pw->wpmapped = 0; 689 } 690 lwp->lwp_watchtrap = 0; 691 } 692 693 return (fault); 694 } 695 696 /* 697 * Handle a watchpoint that occurs while doing copyin() 698 * or copyout() in a system call. 699 * Return non-zero if the fault or signal is cleared 700 * by a debugger while the lwp is stopped. 701 */ 702 static int 703 sys_watchpoint(caddr_t addr, int watchcode, int ta) 704 { 705 extern greg_t getuserpc(void); /* XXX header file */ 706 k_sigset_t smask; 707 register proc_t *p = ttoproc(curthread); 708 register klwp_t *lwp = ttolwp(curthread); 709 register sigqueue_t *sqp; 710 int rval; 711 712 /* assert no locks are held */ 713 /* ASSERT(curthread->t_nlocks == 0); */ 714 715 sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP); 716 sqp->sq_info.si_signo = SIGTRAP; 717 sqp->sq_info.si_code = watchcode; 718 sqp->sq_info.si_addr = addr; 719 sqp->sq_info.si_trapafter = ta; 720 sqp->sq_info.si_pc = (caddr_t)getuserpc(); 721 722 mutex_enter(&p->p_lock); 723 724 /* this will be tested and cleared by the caller */ 725 lwp->lwp_sysabort = 0; 726 727 if (prismember(&p->p_fltmask, FLTWATCH)) { 728 lwp->lwp_curflt = (uchar_t)FLTWATCH; 729 lwp->lwp_siginfo = sqp->sq_info; 730 stop(PR_FAULTED, FLTWATCH); 731 if (lwp->lwp_curflt == 0) { 732 mutex_exit(&p->p_lock); 733 kmem_free(sqp, sizeof (sigqueue_t)); 734 return 
(1); 735 } 736 lwp->lwp_curflt = 0; 737 } 738 739 /* 740 * post the SIGTRAP signal. 741 * Block all other signals so we only stop showing SIGTRAP. 742 */ 743 if (signal_is_blocked(curthread, SIGTRAP) || 744 sigismember(&p->p_ignore, SIGTRAP)) { 745 /* SIGTRAP is blocked or ignored, forget the rest. */ 746 mutex_exit(&p->p_lock); 747 kmem_free(sqp, sizeof (sigqueue_t)); 748 return (0); 749 } 750 sigdelq(p, curthread, SIGTRAP); 751 sigaddqa(p, curthread, sqp); 752 schedctl_finish_sigblock(curthread); 753 smask = curthread->t_hold; 754 sigfillset(&curthread->t_hold); 755 sigdiffset(&curthread->t_hold, &cantmask); 756 sigdelset(&curthread->t_hold, SIGTRAP); 757 mutex_exit(&p->p_lock); 758 759 rval = ((ISSIG_FAST(curthread, lwp, p, FORREAL))? 0 : 1); 760 761 /* restore the original signal mask */ 762 mutex_enter(&p->p_lock); 763 curthread->t_hold = smask; 764 mutex_exit(&p->p_lock); 765 766 return (rval); 767 } 768 769 /* 770 * Wrappers for the copyin()/copyout() functions to deal 771 * with watchpoints that fire while in system calls. 772 */ 773 774 static int 775 watch_xcopyin(const void *uaddr, void *kaddr, size_t count) 776 { 777 klwp_t *lwp = ttolwp(curthread); 778 caddr_t watch_uaddr = (caddr_t)uaddr; 779 caddr_t watch_kaddr = (caddr_t)kaddr; 780 int error = 0; 781 label_t ljb; 782 size_t part; 783 int mapped; 784 785 while (count && error == 0) { 786 int watchcode; 787 caddr_t vaddr; 788 size_t len; 789 int ta; 790 791 if ((part = PAGESIZE - 792 (((uintptr_t)uaddr) & PAGEOFFSET)) > count) 793 part = count; 794 795 if (!pr_is_watchpage(watch_uaddr, S_READ)) 796 watchcode = 0; 797 else { 798 vaddr = watch_uaddr; 799 watchcode = pr_is_watchpoint(&vaddr, &ta, 800 part, &len, S_READ); 801 if (watchcode && ta == 0) 802 part = vaddr - watch_uaddr; 803 } 804 805 /* 806 * Copy the initial part, up to a watched address, if any. 
807 */ 808 if (part != 0) { 809 mapped = pr_mappage(watch_uaddr, part, S_READ, 1); 810 if (on_fault(&ljb)) 811 error = EFAULT; 812 else 813 copyin_noerr(watch_uaddr, watch_kaddr, part); 814 no_fault(); 815 if (mapped) 816 pr_unmappage(watch_uaddr, part, S_READ, 1); 817 watch_uaddr += part; 818 watch_kaddr += part; 819 count -= part; 820 } 821 /* 822 * If trapafter was specified, then copy through the 823 * watched area before taking the watchpoint trap. 824 */ 825 while (count && watchcode && ta && len > part && error == 0) { 826 len -= part; 827 if ((part = PAGESIZE) > count) 828 part = count; 829 if (part > len) 830 part = len; 831 mapped = pr_mappage(watch_uaddr, part, S_READ, 1); 832 if (on_fault(&ljb)) 833 error = EFAULT; 834 else 835 copyin_noerr(watch_uaddr, watch_kaddr, part); 836 no_fault(); 837 if (mapped) 838 pr_unmappage(watch_uaddr, part, S_READ, 1); 839 watch_uaddr += part; 840 watch_kaddr += part; 841 count -= part; 842 } 843 844 error: 845 /* if we hit a watched address, do the watchpoint logic */ 846 if (watchcode && 847 (!sys_watchpoint(vaddr, watchcode, ta) || 848 lwp->lwp_sysabort)) { 849 lwp->lwp_sysabort = 0; 850 error = EFAULT; 851 break; 852 } 853 } 854 855 return (error); 856 } 857 858 static int 859 watch_copyin(const void *kaddr, void *uaddr, size_t count) 860 { 861 return (watch_xcopyin(kaddr, uaddr, count) ? 
-1 : 0); 862 } 863 864 865 static int 866 watch_xcopyout(const void *kaddr, void *uaddr, size_t count) 867 { 868 klwp_t *lwp = ttolwp(curthread); 869 caddr_t watch_uaddr = (caddr_t)uaddr; 870 caddr_t watch_kaddr = (caddr_t)kaddr; 871 int error = 0; 872 label_t ljb; 873 874 while (count && error == 0) { 875 int watchcode; 876 caddr_t vaddr; 877 size_t part; 878 size_t len; 879 int ta; 880 int mapped; 881 882 if ((part = PAGESIZE - 883 (((uintptr_t)uaddr) & PAGEOFFSET)) > count) 884 part = count; 885 886 if (!pr_is_watchpage(watch_uaddr, S_WRITE)) 887 watchcode = 0; 888 else { 889 vaddr = watch_uaddr; 890 watchcode = pr_is_watchpoint(&vaddr, &ta, 891 part, &len, S_WRITE); 892 if (watchcode) { 893 if (ta == 0) 894 part = vaddr - watch_uaddr; 895 else { 896 len += vaddr - watch_uaddr; 897 if (part > len) 898 part = len; 899 } 900 } 901 } 902 903 /* 904 * Copy the initial part, up to a watched address, if any. 905 */ 906 if (part != 0) { 907 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1); 908 if (on_fault(&ljb)) 909 error = EFAULT; 910 else 911 copyout_noerr(watch_kaddr, watch_uaddr, part); 912 no_fault(); 913 if (mapped) 914 pr_unmappage(watch_uaddr, part, S_WRITE, 1); 915 watch_uaddr += part; 916 watch_kaddr += part; 917 count -= part; 918 } 919 920 /* 921 * If trapafter was specified, then copy through the 922 * watched area before taking the watchpoint trap. 
923 */ 924 while (count && watchcode && ta && len > part && error == 0) { 925 len -= part; 926 if ((part = PAGESIZE) > count) 927 part = count; 928 if (part > len) 929 part = len; 930 mapped = pr_mappage(watch_uaddr, part, S_WRITE, 1); 931 if (on_fault(&ljb)) 932 error = EFAULT; 933 else 934 copyout_noerr(watch_kaddr, watch_uaddr, part); 935 no_fault(); 936 if (mapped) 937 pr_unmappage(watch_uaddr, part, S_WRITE, 1); 938 watch_uaddr += part; 939 watch_kaddr += part; 940 count -= part; 941 } 942 943 /* if we hit a watched address, do the watchpoint logic */ 944 if (watchcode && 945 (!sys_watchpoint(vaddr, watchcode, ta) || 946 lwp->lwp_sysabort)) { 947 lwp->lwp_sysabort = 0; 948 error = EFAULT; 949 break; 950 } 951 } 952 953 return (error); 954 } 955 956 static int 957 watch_copyout(const void *kaddr, void *uaddr, size_t count) 958 { 959 return (watch_xcopyout(kaddr, uaddr, count) ? -1 : 0); 960 } 961 962 static int 963 watch_copyinstr( 964 const char *uaddr, 965 char *kaddr, 966 size_t maxlength, 967 size_t *lencopied) 968 { 969 klwp_t *lwp = ttolwp(curthread); 970 size_t resid; 971 int error = 0; 972 label_t ljb; 973 974 if ((resid = maxlength) == 0) 975 return (ENAMETOOLONG); 976 977 while (resid && error == 0) { 978 int watchcode; 979 caddr_t vaddr; 980 size_t part; 981 size_t len; 982 size_t size; 983 int ta; 984 int mapped; 985 986 if ((part = PAGESIZE - 987 (((uintptr_t)uaddr) & PAGEOFFSET)) > resid) 988 part = resid; 989 990 if (!pr_is_watchpage((caddr_t)uaddr, S_READ)) 991 watchcode = 0; 992 else { 993 vaddr = (caddr_t)uaddr; 994 watchcode = pr_is_watchpoint(&vaddr, &ta, 995 part, &len, S_READ); 996 if (watchcode) { 997 if (ta == 0) 998 part = vaddr - uaddr; 999 else { 1000 len += vaddr - uaddr; 1001 if (part > len) 1002 part = len; 1003 } 1004 } 1005 } 1006 1007 /* 1008 * Copy the initial part, up to a watched address, if any. 
		 */
		if (part != 0) {
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* ENAMETOOLONG from a partial chunk isn't final */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage((caddr_t)uaddr, part, S_READ, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyinstr_noerr(uaddr, kaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)uaddr, part, S_READ, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* stop once the NUL terminator has been copied */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}

static int
watch_copyoutstr(
	const char *kaddr,
	char *uaddr,
	size_t maxlength,
	size_t *lencopied)
{
	klwp_t *lwp = ttolwp(curthread);
	size_t resid;
	int error = 0;
	label_t ljb;

	if ((resid = maxlength) == 0)
		return (ENAMETOOLONG);

	while (resid && error == 0) {
		int watchcode;
		caddr_t vaddr;
		size_t part;
		size_t len;
		size_t size;
		int ta;
		int mapped;

		/* uaddr itself advances here, so its page offset is current */
		if ((part = PAGESIZE -
		    (((uintptr_t)uaddr) & PAGEOFFSET)) > resid)
			part = resid;

		if (!pr_is_watchpage(uaddr, S_WRITE)) {
			watchcode = 0;
		} else {
			vaddr = uaddr;
			watchcode = pr_is_watchpoint(&vaddr, &ta,
			    part, &len, S_WRITE);
			if (watchcode && ta == 0)
				part = vaddr - uaddr;
		}

		/*
		 * Copy the initial part, up to a watched address, if any.
		 */
		if (part != 0) {
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			/* ENAMETOOLONG from a partial chunk isn't final */
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
			if (error != 0 || (watchcode &&
			    (uaddr < vaddr || kaddr[-1] == '\0')))
				break;	/* didn't reach the watched area */
		}

		/*
		 * If trapafter was specified, then copy through the
		 * watched area before taking the watchpoint trap.
		 */
		while (resid && watchcode && ta && len > part && error == 0 &&
		    size == part && kaddr[-1] != '\0') {
			len -= part;
			if ((part = PAGESIZE) > resid)
				part = resid;
			if (part > len)
				part = len;
			mapped = pr_mappage(uaddr, part, S_WRITE, 1);
			if (on_fault(&ljb))
				error = EFAULT;
			else
				error = copyoutstr_noerr(kaddr, uaddr, part,
				    &size);
			no_fault();
			if (mapped)
				pr_unmappage(uaddr, part, S_WRITE, 1);
			uaddr += size;
			kaddr += size;
			resid -= size;
			if (error == ENAMETOOLONG && resid > 0)
				error = 0;
		}

		/* if we hit a watched address, do the watchpoint logic */
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			error = EFAULT;
			break;
		}

		/* stop once the NUL terminator has been copied */
		if (error == 0 && part != 0 &&
		    (size < part || kaddr[-1] == '\0'))
			break;
	}

	if (error != EFAULT && lencopied)
		*lencopied = maxlength - resid;
	return (error);
}

typedef	int (*fuword_func)(const void *, void *);

/*
 * Generic form of watch_fuword8(), watch_fuword16(), etc.
 */
static int
watch_fuword(const void *addr, void *dst, fuword_func func, size_t size)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	/*
	 * Loop at most twice: once to take a trap-before watchpoint,
	 * and once more to perform the fetch after the debugger/signal
	 * handling in sys_watchpoint() has run.
	 */
	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, size, NULL, S_READ);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, size, S_READ, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				(*func)(addr, dst);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, size, S_READ, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}

static int
watch_fuword8(const void *addr, uint8_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword8_noerr,
	    sizeof (*dst)));
}

static int
watch_fuword16(const void *addr, uint16_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword16_noerr,
	    sizeof (*dst)));
}

static int
watch_fuword32(const void *addr, uint32_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword32_noerr,
	    sizeof (*dst)));
}

#ifdef _LP64
static int
watch_fuword64(const void *addr, uint64_t *dst)
{
	return (watch_fuword(addr, dst, (fuword_func)fuword64_noerr,
	    sizeof (*dst)));
}
#endif


/*
 * Store a user-level byte, honoring watchpoints.
 * Same two-pass structure as watch_fuword(), for S_WRITE.
 */
static int
watch_suword8(void *addr, uint8_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword8_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}

/*
 * Store a user-level halfword, honoring watchpoints.
 */
static int
watch_suword16(void *addr, uint16_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword16_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}

/*
 * Store a user-level word, honoring watchpoints.
 */
static int
watch_suword32(void *addr, uint32_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword32_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}

#ifdef _LP64
/*
 * Store a user-level doubleword, honoring watchpoints.
 */
static int
watch_suword64(void *addr, uint64_t value)
{
	klwp_t *lwp = ttolwp(curthread);
	int watchcode;
	caddr_t vaddr;
	int mapped;
	int rv = 0;
	int ta;
	label_t ljb;

	for (;;) {

		vaddr = (caddr_t)addr;
		watchcode = pr_is_watchpoint(&vaddr, &ta, sizeof (value), NULL,
		    S_WRITE);
		if (watchcode == 0 || ta != 0) {
			mapped = pr_mappage((caddr_t)addr, sizeof (value),
			    S_WRITE, 1);
			if (on_fault(&ljb))
				rv = -1;
			else
				suword64_noerr(addr, value);
			no_fault();
			if (mapped)
				pr_unmappage((caddr_t)addr, sizeof (value),
				    S_WRITE, 1);
		}
		if (watchcode &&
		    (!sys_watchpoint(vaddr, watchcode, ta) ||
		    lwp->lwp_sysabort)) {
			lwp->lwp_sysabort = 0;
			rv = -1;
			break;
		}
		if (watchcode == 0 || ta != 0)
			break;
	}

	return (rv);
}
#endif /* _LP64 */

/*
 * Check for watched addresses in the given address space.
 * Return 1 if this is true, otherwise 0.
 */
static int
pr_is_watched(caddr_t base, size_t len, int rw)
{
	caddr_t saddr = (caddr_t)((uintptr_t)base & (uintptr_t)PAGEMASK);
	caddr_t eaddr = base + len;
	caddr_t paddr;

	for (paddr = saddr; paddr < eaddr; paddr += PAGESIZE) {
		if (pr_is_watchpage(paddr, rw))
			return (1);
	}

	return (0);
}

/*
 * Wrapper for the physio() function.
 * Splits one uio operation with multiple iovecs into uio operations with
 * only one iovec to do the watchpoint handling separately for each iovec.
1451 */ 1452 static int 1453 watch_physio(int (*strat)(struct buf *), struct buf *bp, dev_t dev, 1454 int rw, void (*mincnt)(struct buf *), struct uio *uio) 1455 { 1456 struct uio auio; 1457 struct iovec *iov; 1458 caddr_t base; 1459 size_t len; 1460 int seg_rw; 1461 int error = 0; 1462 1463 if (uio->uio_segflg == UIO_SYSSPACE) 1464 return (default_physio(strat, bp, dev, rw, mincnt, uio)); 1465 1466 seg_rw = (rw == B_READ) ? S_WRITE : S_READ; 1467 1468 while (uio->uio_iovcnt > 0) { 1469 if (uio->uio_resid == 0) { 1470 /* 1471 * Make sure to return the uio structure with the 1472 * same values as default_physio() does. 1473 */ 1474 uio->uio_iov++; 1475 uio->uio_iovcnt--; 1476 continue; 1477 } 1478 1479 iov = uio->uio_iov; 1480 len = MIN(iov->iov_len, uio->uio_resid); 1481 1482 auio.uio_iovcnt = 1; 1483 auio.uio_iov = iov; 1484 auio.uio_resid = len; 1485 auio.uio_loffset = uio->uio_loffset; 1486 auio.uio_llimit = uio->uio_llimit; 1487 auio.uio_fmode = uio->uio_fmode; 1488 auio.uio_extflg = uio->uio_extflg; 1489 auio.uio_segflg = uio->uio_segflg; 1490 1491 base = iov->iov_base; 1492 1493 if (!pr_is_watched(base, len, seg_rw)) { 1494 /* 1495 * The given memory references don't cover a 1496 * watched page. 1497 */ 1498 error = default_physio(strat, bp, dev, rw, mincnt, 1499 &auio); 1500 1501 /* Update uio with values from auio. */ 1502 len -= auio.uio_resid; 1503 uio->uio_resid -= len; 1504 uio->uio_loffset += len; 1505 1506 /* 1507 * Return if an error occurred or not all data 1508 * was copied. 1509 */ 1510 if (auio.uio_resid || error) 1511 break; 1512 uio->uio_iov++; 1513 uio->uio_iovcnt--; 1514 } else { 1515 int mapped, watchcode, ta; 1516 caddr_t vaddr = base; 1517 klwp_t *lwp = ttolwp(curthread); 1518 1519 watchcode = pr_is_watchpoint(&vaddr, &ta, len, 1520 NULL, seg_rw); 1521 1522 if (watchcode == 0 || ta != 0) { 1523 /* 1524 * Do the io if the given memory references 1525 * don't cover a watched area (watchcode=0) 1526 * or if WA_TRAPAFTER was specified. 
1527 */ 1528 mapped = pr_mappage(base, len, seg_rw, 1); 1529 error = default_physio(strat, bp, dev, rw, 1530 mincnt, &auio); 1531 if (mapped) 1532 pr_unmappage(base, len, seg_rw, 1); 1533 1534 len -= auio.uio_resid; 1535 uio->uio_resid -= len; 1536 uio->uio_loffset += len; 1537 } 1538 1539 /* 1540 * If we hit a watched address, do the watchpoint logic. 1541 */ 1542 if (watchcode && 1543 (!sys_watchpoint(vaddr, watchcode, ta) || 1544 lwp->lwp_sysabort)) { 1545 lwp->lwp_sysabort = 0; 1546 return (EFAULT); 1547 } 1548 1549 /* 1550 * Check for errors from default_physio(). 1551 */ 1552 if (watchcode == 0 || ta != 0) { 1553 if (auio.uio_resid || error) 1554 break; 1555 uio->uio_iov++; 1556 uio->uio_iovcnt--; 1557 } 1558 } 1559 } 1560 1561 return (error); 1562 } 1563 1564 int 1565 wa_compare(const void *a, const void *b) 1566 { 1567 const watched_area_t *pa = a; 1568 const watched_area_t *pb = b; 1569 1570 if (pa->wa_vaddr < pb->wa_vaddr) 1571 return (-1); 1572 else if (pa->wa_vaddr > pb->wa_vaddr) 1573 return (1); 1574 else 1575 return (0); 1576 } 1577 1578 int 1579 wp_compare(const void *a, const void *b) 1580 { 1581 const watched_page_t *pa = a; 1582 const watched_page_t *pb = b; 1583 1584 if (pa->wp_vaddr < pb->wp_vaddr) 1585 return (-1); 1586 else if (pa->wp_vaddr > pb->wp_vaddr) 1587 return (1); 1588 else 1589 return (0); 1590 } 1591 1592 /* 1593 * Given an address range, finds the first watched area which overlaps some or 1594 * all of the range. 1595 */ 1596 watched_area_t * 1597 pr_find_watched_area(proc_t *p, watched_area_t *pwa, avl_index_t *where) 1598 { 1599 caddr_t vaddr = pwa->wa_vaddr; 1600 caddr_t eaddr = pwa->wa_eaddr; 1601 watched_area_t *wap; 1602 avl_index_t real_where; 1603 1604 /* First, check if there is an exact match. */ 1605 wap = avl_find(&p->p_warea, pwa, &real_where); 1606 1607 1608 /* Check to see if we overlap with the previous area. 
*/ 1609 if (wap == NULL) { 1610 wap = avl_nearest(&p->p_warea, real_where, AVL_BEFORE); 1611 if (wap != NULL && 1612 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr)) 1613 wap = NULL; 1614 } 1615 1616 /* Try the next area. */ 1617 if (wap == NULL) { 1618 wap = avl_nearest(&p->p_warea, real_where, AVL_AFTER); 1619 if (wap != NULL && 1620 (vaddr >= wap->wa_eaddr || eaddr <= wap->wa_vaddr)) 1621 wap = NULL; 1622 } 1623 1624 if (where) 1625 *where = real_where; 1626 1627 return (wap); 1628 } 1629 1630 void 1631 watch_enable(kthread_id_t t) 1632 { 1633 t->t_proc_flag |= TP_WATCHPT; 1634 install_copyops(t, &watch_copyops); 1635 } 1636 1637 void 1638 watch_disable(kthread_id_t t) 1639 { 1640 t->t_proc_flag &= ~TP_WATCHPT; 1641 remove_copyops(t); 1642 } 1643 1644 int 1645 copyin_nowatch(const void *uaddr, void *kaddr, size_t len) 1646 { 1647 int watched, ret; 1648 1649 watched = watch_disable_addr(uaddr, len, S_READ); 1650 ret = copyin(uaddr, kaddr, len); 1651 if (watched) 1652 watch_enable_addr(uaddr, len, S_READ); 1653 1654 return (ret); 1655 } 1656 1657 int 1658 copyout_nowatch(const void *kaddr, void *uaddr, size_t len) 1659 { 1660 int watched, ret; 1661 1662 watched = watch_disable_addr(uaddr, len, S_WRITE); 1663 ret = copyout(kaddr, uaddr, len); 1664 if (watched) 1665 watch_enable_addr(uaddr, len, S_WRITE); 1666 1667 return (ret); 1668 } 1669 1670 #ifdef _LP64 1671 int 1672 fuword64_nowatch(const void *addr, uint64_t *value) 1673 { 1674 int watched, ret; 1675 1676 watched = watch_disable_addr(addr, sizeof (*value), S_READ); 1677 ret = fuword64(addr, value); 1678 if (watched) 1679 watch_enable_addr(addr, sizeof (*value), S_READ); 1680 1681 return (ret); 1682 } 1683 #endif 1684 1685 int 1686 fuword32_nowatch(const void *addr, uint32_t *value) 1687 { 1688 int watched, ret; 1689 1690 watched = watch_disable_addr(addr, sizeof (*value), S_READ); 1691 ret = fuword32(addr, value); 1692 if (watched) 1693 watch_enable_addr(addr, sizeof (*value), S_READ); 1694 1695 return 
(ret); 1696 } 1697 1698 #ifdef _LP64 1699 int 1700 suword64_nowatch(void *addr, uint64_t value) 1701 { 1702 int watched, ret; 1703 1704 watched = watch_disable_addr(addr, sizeof (value), S_WRITE); 1705 ret = suword64(addr, value); 1706 if (watched) 1707 watch_enable_addr(addr, sizeof (value), S_WRITE); 1708 1709 return (ret); 1710 } 1711 #endif 1712 1713 int 1714 suword32_nowatch(void *addr, uint32_t value) 1715 { 1716 int watched, ret; 1717 1718 watched = watch_disable_addr(addr, sizeof (value), S_WRITE); 1719 ret = suword32(addr, value); 1720 if (watched) 1721 watch_enable_addr(addr, sizeof (value), S_WRITE); 1722 1723 return (ret); 1724 } 1725 1726 int 1727 watch_disable_addr(const void *addr, size_t len, enum seg_rw rw) 1728 { 1729 if (pr_watch_active(curproc)) 1730 return (pr_mappage((caddr_t)addr, len, rw, 1)); 1731 return (0); 1732 } 1733 1734 void 1735 watch_enable_addr(const void *addr, size_t len, enum seg_rw rw) 1736 { 1737 if (pr_watch_active(curproc)) 1738 pr_unmappage((caddr_t)addr, len, rw, 1); 1739 }