patch as-lock-macro-simplification
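This page shows the same hunks twice: first with the old locking macros, then with the simplified ones. In the old code every caller passes the address-space lock explicitly (AS_LOCK_ENTER(as, &as->a_lock, RW_READER) ... AS_LOCK_EXIT(as, &as->a_lock)); since that argument is always &as->a_lock, the simplified macros drop it and derive the lock from the struct as argument instead. The header change itself is not part of this page, so the definitions below are only a sketch reconstructed from the call sites, not the actual <vm/as.h> diff (the AS_LOCK_OLD_STYLE guard is purely illustrative):

        #include <sys/rwlock.h>         /* krwlock_t, rw_enter(), rw_exit() */

        /*
         * Sketch only: reconstructed from the call sites in this patch;
         * the real header change may differ in detail.
         */
        #ifdef  AS_LOCK_OLD_STYLE
        /* Before: the caller names the lock, even though it is always &as->a_lock. */
        #define AS_LOCK_ENTER(as, lock, type)   rw_enter((lock), (type))
        #define AS_LOCK_EXIT(as, lock)          rw_exit((lock))
        #else
        /* After: the macro finds the lock inside the struct as itself. */
        #define AS_LOCK_ENTER(as, type)         rw_enter(&(as)->a_lock, (type))
        #define AS_LOCK_EXIT(as)                rw_exit(&(as)->a_lock)
        #endif

With the simplified form, call sites such as mmpagelock() below shrink from AS_LOCK_ENTER(as, &as->a_lock, RW_READER) to AS_LOCK_ENTER(as, RW_READER), with no change in locking behavior.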
Old version (three-argument AS_LOCK_ENTER / two-argument AS_LOCK_EXIT):

 271         } else
 272                 error = uiomove(va + pageoff, nbytes, rw, uio);
 273 
 274         if (devload)
 275                 hat_unload(kas.a_hat, mm_map, PAGESIZE, HAT_UNLOAD_UNLOCK);
 276         else if (pp)
 277                 hat_kpm_mapout(pp, NULL, va);
 278         else
 279                 hat_kpm_mapout_pfn(pfn);
 280 
 281         mutex_exit(&mm_lock);
 282         return (error);
 283 }
 284 
 285 static int
 286 mmpagelock(struct as *as, caddr_t va)
 287 {
 288         struct seg *seg;
 289         int i;
 290 
 291         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 292         seg = as_segat(as, va);
 293         i = (seg != NULL)? SEGOP_CAPABLE(seg, S_CAPABILITY_NOMINFLT) : 0;
 294         AS_LOCK_EXIT(as, &as->a_lock);
 295 
 296         return (i);
 297 }
 298 
 299 #ifdef  __sparc
 300 
 301 #define NEED_LOCK_KVADDR(kva)   mmpagelock(&kas, kva)
 302 
 303 #else   /* __i386, __amd64 */
 304 
 305 #define NEED_LOCK_KVADDR(va)    0
 306 
 307 #endif  /* __sparc */
 308 
 309 /*ARGSUSED3*/
 310 static int
 311 mmrw(dev_t dev, struct uio *uio, enum uio_rw rw, cred_t *cred)
 312 {
 313         pfn_t v;
 314         struct iovec *iov;

        [... lines 315-478 omitted ...]

 479                                 return (EIO);
 480                         mem_vtop.m_as = p->p_as;
 481                 }
 482 
 483                 mutex_enter(&pidlock);
 484                 for (p = practive; p != NULL; p = p->p_next) {
 485                         if (p->p_as == mem_vtop.m_as) {
 486                                 pid = p->p_pid;
 487                                 break;
 488                         }
 489                 }
 490                 mutex_exit(&pidlock);
 491                 if (p == NULL)
 492                         return (EIO);
 493                 p = sprlock(pid);
 494                 if (p == NULL)
 495                         return (EIO);
 496                 as = p->p_as;
 497                 if (as == mem_vtop.m_as) {
 498                         mutex_exit(&p->p_lock);
 499                         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
 500                         for (seg = AS_SEGFIRST(as); seg != NULL;
 501                             seg = AS_SEGNEXT(as, seg))
 502                                 if ((uintptr_t)mem_vtop.m_va -
 503                                     (uintptr_t)seg->s_base < seg->s_size)
 504                                         break;
 505                         if (seg != NULL)
 506                                 pfn = hat_getpfnum(as->a_hat, mem_vtop.m_va);
 507                         AS_LOCK_EXIT(as, &as->a_lock);
 508                         mutex_enter(&p->p_lock);
 509                 }
 510                 sprunlock(p);
 511         }
 512         mem_vtop.m_pfn = pfn;
 513         if (pfn == PFN_INVALID)
 514                 return (EIO);
 515 
 516         if (get_udatamodel() == DATAMODEL_NATIVE) {
 517                 if (copyout(&mem_vtop, (void *)data, sizeof (mem_vtop_t)))
 518                         return (EFAULT);
 519         }
 520 #ifdef _SYSCALL32
 521         else {
 522                 vtop32.m_pfn = mem_vtop.m_pfn;
 523                 if (copyout(&vtop32, (void *)data, sizeof (mem_vtop32_t)))
 524                         return (EFAULT);
 525         }
 526 #endif
 527 

New version (simplified AS_LOCK_ENTER / AS_LOCK_EXIT):

 271         } else
 272                 error = uiomove(va + pageoff, nbytes, rw, uio);
 273 
 274         if (devload)
 275                 hat_unload(kas.a_hat, mm_map, PAGESIZE, HAT_UNLOAD_UNLOCK);
 276         else if (pp)
 277                 hat_kpm_mapout(pp, NULL, va);
 278         else
 279                 hat_kpm_mapout_pfn(pfn);
 280 
 281         mutex_exit(&mm_lock);
 282         return (error);
 283 }
 284 
 285 static int
 286 mmpagelock(struct as *as, caddr_t va)
 287 {
 288         struct seg *seg;
 289         int i;
 290 
 291         AS_LOCK_ENTER(as, RW_READER);
 292         seg = as_segat(as, va);
 293         i = (seg != NULL)? SEGOP_CAPABLE(seg, S_CAPABILITY_NOMINFLT) : 0;
 294         AS_LOCK_EXIT(as);
 295 
 296         return (i);
 297 }
 298 
 299 #ifdef  __sparc
 300 
 301 #define NEED_LOCK_KVADDR(kva)   mmpagelock(&kas, kva)
 302 
 303 #else   /* __i386, __amd64 */
 304 
 305 #define NEED_LOCK_KVADDR(va)    0
 306 
 307 #endif  /* __sparc */
 308 
 309 /*ARGSUSED3*/
 310 static int
 311 mmrw(dev_t dev, struct uio *uio, enum uio_rw rw, cred_t *cred)
 312 {
 313         pfn_t v;
 314         struct iovec *iov;

        [... lines 315-478 omitted ...]

 479                                 return (EIO);
 480                         mem_vtop.m_as = p->p_as;
 481                 }
 482 
 483                 mutex_enter(&pidlock);
 484                 for (p = practive; p != NULL; p = p->p_next) {
 485                         if (p->p_as == mem_vtop.m_as) {
 486                                 pid = p->p_pid;
 487                                 break;
 488                         }
 489                 }
 490                 mutex_exit(&pidlock);
 491                 if (p == NULL)
 492                         return (EIO);
 493                 p = sprlock(pid);
 494                 if (p == NULL)
 495                         return (EIO);
 496                 as = p->p_as;
 497                 if (as == mem_vtop.m_as) {
 498                         mutex_exit(&p->p_lock);
 499                         AS_LOCK_ENTER(as, RW_READER);
 500                         for (seg = AS_SEGFIRST(as); seg != NULL;
 501                             seg = AS_SEGNEXT(as, seg))
 502                                 if ((uintptr_t)mem_vtop.m_va -
 503                                     (uintptr_t)seg->s_base < seg->s_size)
 504                                         break;
 505                         if (seg != NULL)
 506                                 pfn = hat_getpfnum(as->a_hat, mem_vtop.m_va);
 507                         AS_LOCK_EXIT(as);
 508                         mutex_enter(&p->p_lock);
 509                 }
 510                 sprunlock(p);
 511         }
 512         mem_vtop.m_pfn = pfn;
 513         if (pfn == PFN_INVALID)
 514                 return (EIO);
 515 
 516         if (get_udatamodel() == DATAMODEL_NATIVE) {
 517                 if (copyout(&mem_vtop, (void *)data, sizeof (mem_vtop_t)))
 518                         return (EFAULT);
 519         }
 520 #ifdef _SYSCALL32
 521         else {
 522                 vtop32.m_pfn = mem_vtop.m_pfn;
 523                 if (copyout(&vtop32, (void *)data, sizeof (mem_vtop32_t)))
 524                         return (EFAULT);
 525         }
 526 #endif
 527