Print this page
patch lower-case-segops
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs.  The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync.  Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- The AS_BUSY flag was used solely by xhat.


 165         struct as *as = p->p_as;
 166         char *eaddr = addr + size;
 167         int prot_rw = rw_to_prot(rw);
 168         int xrw = rw_to_index(rw);
 169         int rv = 0;
 170         struct watched_page *pwp;
 171         struct watched_page tpw;
 172         avl_index_t where;
 173         uint_t prot;
 174 
 175         ASSERT(as != &kas);
 176 
 177 startover:
 178         ASSERT(rv == 0);
 179         if (avl_numnodes(&as->a_wpage) == 0)
 180                 return (0);
 181 
 182         /*
 183          * as->a_wpage can only be changed while the process is totally stopped.
 184          * Don't grab p_lock here.  Holding p_lock while grabbing the address
 185          * space lock leads to deadlocks with the clock thread.  Note that if an
 186          * as_fault() is servicing a fault to a watched page on behalf of an
 187          * XHAT provider, watchpoint will be temporarily cleared (and wp_prot
 188          * will be set to wp_oprot).  Since this is done while holding as writer
 189          * lock, we need to grab as lock (reader lock is good enough).
 190          *
 191          * p_maplock prevents simultaneous execution of this function.  Under
 192          * normal circumstances, holdwatch() will stop all other threads, so the
 193          * lock isn't really needed.  But there may be multiple threads within
 194          * stop() when SWATCHOK is set, so we need to handle multiple threads
 195          * at once.  See holdwatch() for the details of this dance.
 196          */
 197 
 198         mutex_enter(&p->p_maplock);
 199         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 200 
 201         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 202         if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
 203                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
 204 
 205         for (; pwp != NULL && pwp->wp_vaddr < eaddr;
 206                 pwp = AVL_NEXT(&as->a_wpage, pwp)) {
 207 
 208                 /*
 209                  * If the requested protection has not been


 294                                         /* cannot have write-only protection */
 295                                         prot |= PROT_READ|PROT_WRITE;
 296 #if 0   /* damned broken mmu feature! */
 297                                 if (sum(pwp->wp_umap) == 0)
 298                                         prot &= ~PROT_USER;
 299 #endif
 300                         }
 301                 }
 302 
 303 
 304                 if (pwp->wp_oprot != 0) {    /* if page exists */
 305                         struct seg *seg;
 306                         uint_t oprot;
 307                         int err, retrycnt = 0;
 308 
 309                         AS_LOCK_EXIT(as, &as->a_lock);
 310                         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 311                 retry:
 312                         seg = as_segat(as, addr);
 313                         ASSERT(seg != NULL);
 314                         SEGOP_GETPROT(seg, addr, 0, &oprot);
 315                         if (prot != oprot) {
 316                                 err = SEGOP_SETPROT(seg, addr, PAGESIZE, prot);
 317                                 if (err == IE_RETRY) {
 318                                         ASSERT(retrycnt == 0);
 319                                         retrycnt++;
 320                                         goto retry;
 321                                 }
 322                         }
 323                         AS_LOCK_EXIT(as, &as->a_lock);
 324                 } else
 325                         AS_LOCK_EXIT(as, &as->a_lock);
 326 
 327                 /*
 328                  * When all pages are mapped back to their normal state,
 329                  * continue the other lwps.
 330                  */
 331                 if (!mapin) {
 332                         ASSERT(p->p_mapcnt > 0);
 333                         p->p_mapcnt--;
 334                         if (p->p_mapcnt == 0) {
 335                                 mutex_exit(&p->p_maplock);
 336                                 mutex_enter(&p->p_lock);


 389         uint_t prot;
 390         int err, retrycnt;
 391 
 392         if (p->p_wprot == NULL)
 393                 return;
 394 
 395         ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
 396 
 397         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 398 
 399         pwp = p->p_wprot;
 400         while (pwp != NULL) {
 401 
 402                 vaddr = pwp->wp_vaddr;
 403                 retrycnt = 0;
 404         retry:
 405                 ASSERT(pwp->wp_flags & WP_SETPROT);
 406                 if ((seg = as_segat(as, vaddr)) != NULL &&
 407                     !(pwp->wp_flags & WP_NOWATCH)) {
 408                         prot = pwp->wp_prot;
 409                         err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
 410                         if (err == IE_RETRY) {
 411                                 ASSERT(retrycnt == 0);
 412                                 retrycnt++;
 413                                 goto retry;
 414                         }
 415                 }
 416 
 417                 next = pwp->wp_list;
 418 
 419                 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
 420                         /*
 421                          * No watched areas remain in this page.
 422                          * Free the watched_page structure.
 423                          */
 424                         avl_remove(&as->a_wpage, pwp);
 425                         kmem_free(pwp, sizeof (struct watched_page));
 426                 } else {
 427                         pwp->wp_flags &= ~WP_SETPROT;
 428                 }
 429 


 487                 }
 488         }
 489 
 490         return (rv);
 491 }
 492 
 493 
 494 /*
 495  * trap() calls here to determine if a fault is in a watched page.
 496  * We return nonzero if this is true and the load/store would fail.
 497  */
 498 int
 499 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
 500 {
 501         struct as *as = curproc->p_as;
 502         int rv;
 503 
 504         if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
 505                 return (0);
 506 
 507         /* Grab the lock because of XHAT (see comment in pr_mappage()) */
 508         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 509         rv = pr_is_watchpage_as(addr, rw, as);
 510         AS_LOCK_EXIT(as, &as->a_lock);
 511 
 512         return (rv);
 513 }
 514 
 515 
 516 
 517 /*
 518  * trap() calls here to determine if a fault is a watchpoint.
 519  */
 520 int
 521 pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
 522         enum seg_rw rw)
 523 {
 524         proc_t *p = curproc;
 525         caddr_t addr = *paddr;
 526         caddr_t eaddr = addr + size;
 527         register struct watched_area *pwa;




 165         struct as *as = p->p_as;
 166         char *eaddr = addr + size;
 167         int prot_rw = rw_to_prot(rw);
 168         int xrw = rw_to_index(rw);
 169         int rv = 0;
 170         struct watched_page *pwp;
 171         struct watched_page tpw;
 172         avl_index_t where;
 173         uint_t prot;
 174 
 175         ASSERT(as != &kas);
 176 
 177 startover:
 178         ASSERT(rv == 0);
 179         if (avl_numnodes(&as->a_wpage) == 0)
 180                 return (0);
 181 
 182         /*
 183          * as->a_wpage can only be changed while the process is totally stopped.
 184          * Don't grab p_lock here.  Holding p_lock while grabbing the address
 185          * space lock leads to deadlocks with the clock thread.




 186          *
 187          * p_maplock prevents simultaneous execution of this function.  Under
 188          * normal circumstances, holdwatch() will stop all other threads, so the
 189          * lock isn't really needed.  But there may be multiple threads within
 190          * stop() when SWATCHOK is set, so we need to handle multiple threads
 191          * at once.  See holdwatch() for the details of this dance.
 192          */
 193 
 194         mutex_enter(&p->p_maplock);
 195         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 196 
 197         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
 198         if ((pwp = avl_find(&as->a_wpage, &tpw, &where)) == NULL)
 199                 pwp = avl_nearest(&as->a_wpage, where, AVL_AFTER);
 200 
 201         for (; pwp != NULL && pwp->wp_vaddr < eaddr;
 202                 pwp = AVL_NEXT(&as->a_wpage, pwp)) {
 203 
 204                 /*
 205                  * If the requested protection has not been


 290                                         /* cannot have write-only protection */
 291                                         prot |= PROT_READ|PROT_WRITE;
 292 #if 0   /* damned broken mmu feature! */
 293                                 if (sum(pwp->wp_umap) == 0)
 294                                         prot &= ~PROT_USER;
 295 #endif
 296                         }
 297                 }
 298 
 299 
 300                 if (pwp->wp_oprot != 0) {    /* if page exists */
 301                         struct seg *seg;
 302                         uint_t oprot;
 303                         int err, retrycnt = 0;
 304 
 305                         AS_LOCK_EXIT(as, &as->a_lock);
 306                         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 307                 retry:
 308                         seg = as_segat(as, addr);
 309                         ASSERT(seg != NULL);
 310                         segop_getprot(seg, addr, 0, &oprot);
 311                         if (prot != oprot) {
 312                                 err = segop_setprot(seg, addr, PAGESIZE, prot);
 313                                 if (err == IE_RETRY) {
 314                                         ASSERT(retrycnt == 0);
 315                                         retrycnt++;
 316                                         goto retry;
 317                                 }
 318                         }
 319                         AS_LOCK_EXIT(as, &as->a_lock);
 320                 } else
 321                         AS_LOCK_EXIT(as, &as->a_lock);
 322 
 323                 /*
 324                  * When all pages are mapped back to their normal state,
 325                  * continue the other lwps.
 326                  */
 327                 if (!mapin) {
 328                         ASSERT(p->p_mapcnt > 0);
 329                         p->p_mapcnt--;
 330                         if (p->p_mapcnt == 0) {
 331                                 mutex_exit(&p->p_maplock);
 332                                 mutex_enter(&p->p_lock);


 385         uint_t prot;
 386         int err, retrycnt;
 387 
 388         if (p->p_wprot == NULL)
 389                 return;
 390 
 391         ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
 392 
 393         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 394 
 395         pwp = p->p_wprot;
 396         while (pwp != NULL) {
 397 
 398                 vaddr = pwp->wp_vaddr;
 399                 retrycnt = 0;
 400         retry:
 401                 ASSERT(pwp->wp_flags & WP_SETPROT);
 402                 if ((seg = as_segat(as, vaddr)) != NULL &&
 403                     !(pwp->wp_flags & WP_NOWATCH)) {
 404                         prot = pwp->wp_prot;
 405                         err = segop_setprot(seg, vaddr, PAGESIZE, prot);
 406                         if (err == IE_RETRY) {
 407                                 ASSERT(retrycnt == 0);
 408                                 retrycnt++;
 409                                 goto retry;
 410                         }
 411                 }
 412 
 413                 next = pwp->wp_list;
 414 
 415                 if (pwp->wp_read + pwp->wp_write + pwp->wp_exec == 0) {
 416                         /*
 417                          * No watched areas remain in this page.
 418                          * Free the watched_page structure.
 419                          */
 420                         avl_remove(&as->a_wpage, pwp);
 421                         kmem_free(pwp, sizeof (struct watched_page));
 422                 } else {
 423                         pwp->wp_flags &= ~WP_SETPROT;
 424                 }
 425 


 483                 }
 484         }
 485 
 486         return (rv);
 487 }
 488 
 489 
 490 /*
 491  * trap() calls here to determine if a fault is in a watched page.
 492  * We return nonzero if this is true and the load/store would fail.
 493  */
 494 int
 495 pr_is_watchpage(caddr_t addr, enum seg_rw rw)
 496 {
 497         struct as *as = curproc->p_as;
 498         int rv;
 499 
 500         if ((as == &kas) || avl_numnodes(&as->a_wpage) == 0)
 501                 return (0);
 502 

 503         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 504         rv = pr_is_watchpage_as(addr, rw, as);
 505         AS_LOCK_EXIT(as, &as->a_lock);
 506 
 507         return (rv);
 508 }
 509 
 510 
 511 
 512 /*
 513  * trap() calls here to determine if a fault is a watchpoint.
 514  */
 515 int
 516 pr_is_watchpoint(caddr_t *paddr, int *pta, size_t size, size_t *plen,
 517         enum seg_rw rw)
 518 {
 519         proc_t *p = curproc;
 520         caddr_t addr = *paddr;
 521         caddr_t eaddr = addr + size;
 522         register struct watched_area *pwa;