patch as-lock-macro-simplification
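This patch updates /proc call sites for the simplified address-space lock macros: since the lock is always the as's own a_lock, callers no longer pass &as->a_lock explicitly. A minimal sketch of the assumed before/after macro shapes (the authoritative definitions live in the vm/as.h header; exact spelling there may differ):

        /* Old form: the caller names both the as and its lock. */
        #define AS_LOCK_ENTER(as, lock, type)   rw_enter((lock), (type))
        #define AS_LOCK_EXIT(as, lock)          rw_exit((lock))
        #define AS_WRITE_HELD(as, lock)         RW_WRITE_HELD((lock))
        #define AS_LOCK_HELD(as, lock)          RW_LOCK_HELD((lock))

        /* New form: the macro derives the lock from the as itself. */
        #define AS_LOCK_ENTER(as, type)         rw_enter(&(as)->a_lock, (type))
        #define AS_LOCK_EXIT(as)                rw_exit(&(as)->a_lock)
        #define AS_WRITE_HELD(as)               RW_WRITE_HELD(&(as)->a_lock)
        #define AS_LOCK_HELD(as)                RW_LOCK_HELD(&(as)->a_lock)

Each hunk below is the corresponding mechanical rewrite of a call site to the shorter form; the lock taken and the locking semantics are unchanged.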

@@ -1374,11 +1374,11 @@
 prnsegs(struct as *as, int reserved)
 {
         int n = 0;
         struct seg *seg;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
                 caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
                 caddr_t saddr, naddr;
                 void *tmp = NULL;

@@ -1617,11 +1617,11 @@
         struct seg *brkseg, *stkseg;
         struct vnode *vp;
         struct vattr vattr;
         uint_t prot;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         /*
          * Request an initial buffer size that doesn't waste memory
          * if the address space has only a small number of segments.
          */

@@ -1728,11 +1728,11 @@
         struct seg *brkseg, *stkseg;
         struct vnode *vp;
         struct vattr vattr;
         uint_t prot;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         /*
          * Request an initial buffer size that doesn't waste memory
          * if the address space has only a small number of segments.
          */

@@ -1838,11 +1838,11 @@
 prpdsize(struct as *as)
 {
         struct seg *seg;
         size_t size;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         if ((seg = AS_SEGFIRST(as)) == NULL)
                 return (0);
 
         size = sizeof (prpageheader_t);

@@ -1868,11 +1868,11 @@
 prpdsize32(struct as *as)
 {
         struct seg *seg;
         size_t size;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         if ((seg = AS_SEGFIRST(as)) == NULL)
                 return (0);
 
         size = sizeof (prpageheader32_t);

@@ -1907,19 +1907,19 @@
         prasmap_t *pmp;
         struct seg *seg;
         int error;
 
 again:
-        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
+        AS_LOCK_ENTER(as, RW_WRITER);
 
         if ((seg = AS_SEGFIRST(as)) == NULL) {
-                AS_LOCK_EXIT(as, &as->a_lock);
+                AS_LOCK_EXIT(as);
                 return (0);
         }
         size = prpdsize(as);
         if (uiop->uio_resid < size) {
-                AS_LOCK_EXIT(as, &as->a_lock);
+                AS_LOCK_EXIT(as);
                 return (E2BIG);
         }
 
         buf = kmem_zalloc(size, KM_SLEEP);
         php = (prpageheader_t *)buf;

@@ -1963,11 +1963,11 @@
                          * EINTR so that this thread can be dislodged if
                          * a latent bug causes us to spin indefinitely.
                          */
                         if (next > (uintptr_t)buf + size) {
                                 pr_getprot_done(&tmp);
-                                AS_LOCK_EXIT(as, &as->a_lock);
+                                AS_LOCK_EXIT(as);
 
                                 kmem_free(buf, size);
 
                                 if (ISSIG(curthread, JUSTLOOKING))
                                         return (EINTR);

@@ -2032,11 +2032,11 @@
                         pmp = (prasmap_t *)next;
                 }
                 ASSERT(tmp == NULL);
         } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
 
-        AS_LOCK_EXIT(as, &as->a_lock);
+        AS_LOCK_EXIT(as);
 
         ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
         error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
         kmem_free(buf, size);
 

@@ -2054,19 +2054,19 @@
         prasmap32_t *pmp;
         struct seg *seg;
         int error;
 
 again:
-        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
+        AS_LOCK_ENTER(as, RW_WRITER);
 
         if ((seg = AS_SEGFIRST(as)) == NULL) {
-                AS_LOCK_EXIT(as, &as->a_lock);
+                AS_LOCK_EXIT(as);
                 return (0);
         }
         size = prpdsize32(as);
         if (uiop->uio_resid < size) {
-                AS_LOCK_EXIT(as, &as->a_lock);
+                AS_LOCK_EXIT(as);
                 return (E2BIG);
         }
 
         buf = kmem_zalloc(size, KM_SLEEP);
         php = (prpageheader32_t *)buf;

@@ -2110,11 +2110,11 @@
                          * EINTR so that this thread can be dislodged if
                          * a latent bug causes us to spin indefinitely.
                          */
                         if (next > (uintptr_t)buf + size) {
                                 pr_getprot_done(&tmp);
-                                AS_LOCK_EXIT(as, &as->a_lock);
+                                AS_LOCK_EXIT(as);
 
                                 kmem_free(buf, size);
 
                                 if (ISSIG(curthread, JUSTLOOKING))
                                         return (EINTR);

@@ -2179,11 +2179,11 @@
                         pmp = (prasmap32_t *)next;
                 }
                 ASSERT(tmp == NULL);
         } while ((seg = AS_SEGNEXT(as, seg)) != NULL);
 
-        AS_LOCK_EXIT(as, &as->a_lock);
+        AS_LOCK_EXIT(as);
 
         ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
         error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
         kmem_free(buf, size);
 

@@ -2334,16 +2334,16 @@
                 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
                         psp->pr_size = 0;
                         psp->pr_rssize = 0;
                 } else {
                         mutex_exit(&p->p_lock);
-                        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
+                        AS_LOCK_ENTER(as, RW_READER);
                         psp->pr_size = btopr(as->a_resvsize) *
                             (PAGESIZE / 1024);
                         psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024);
                         psp->pr_pctmem = rm_pctmemory(as);
-                        AS_LOCK_EXIT(as, &as->a_lock);
+                        AS_LOCK_EXIT(as);
                         mutex_enter(&p->p_lock);
                 }
         }
 }
 

@@ -2467,17 +2467,17 @@
                 if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
                         psp->pr_size = 0;
                         psp->pr_rssize = 0;
                 } else {
                         mutex_exit(&p->p_lock);
-                        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
+                        AS_LOCK_ENTER(as, RW_READER);
                         psp->pr_size = (size32_t)
                             (btopr(as->a_resvsize) * (PAGESIZE / 1024));
                         psp->pr_rssize = (size32_t)
                             (rm_asrss(as) * (PAGESIZE / 1024));
                         psp->pr_pctmem = rm_pctmemory(as);
-                        AS_LOCK_EXIT(as, &as->a_lock);
+                        AS_LOCK_EXIT(as);
                         mutex_enter(&p->p_lock);
                 }
         }
 
         /*

@@ -3311,11 +3311,11 @@
 
         if (as == NULL || avl_numnodes(&as->a_wpage) == 0)
                 return;
 
         ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
-        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
+        AS_LOCK_ENTER(as, RW_WRITER);
 
         pwp = avl_first(&as->a_wpage);
 
         cookie = NULL;
         while ((pwp = avl_destroy_nodes(&as->a_wpage, &cookie)) != NULL) {

@@ -3340,11 +3340,11 @@
         }
 
         avl_destroy(&as->a_wpage);
         p->p_wprot = NULL;
 
-        AS_LOCK_EXIT(as, &as->a_lock);
+        AS_LOCK_EXIT(as);
 }
 
 /*
  * Insert a watched area into the list of watched pages.
  * If oflags is zero then we are adding a new watched area.

@@ -3374,11 +3374,11 @@
                 pwp = kmem_zalloc(sizeof (struct watched_page), KM_SLEEP);
                 pwp->wp_list = newpwp;
                 newpwp = pwp;
         }
 
-        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
+        AS_LOCK_ENTER(as, RW_WRITER);
 
         /*
          * Search for an existing watched page to contain the watched area.
          * If none is found, grab a new one from the available list
          * and insert it in the active list, keeping the list sorted

@@ -3389,11 +3389,11 @@
         else
                 pwp_tree = &as->a_wpage;
 
 again:
         if (avl_numnodes(pwp_tree) > prnwatch) {
-                AS_LOCK_EXIT(as, &as->a_lock);
+                AS_LOCK_EXIT(as);
                 while (newpwp != NULL) {
                         pwp = newpwp->wp_list;
                         kmem_free(newpwp, sizeof (struct watched_page));
                         newpwp = pwp;
                 }

@@ -3462,11 +3462,11 @@
          * it over again with the virtual address of the next page.
          */
         if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr)
                 goto again;
 
-        AS_LOCK_EXIT(as, &as->a_lock);
+        AS_LOCK_EXIT(as);
 
         /*
          * Free any pages we may have over-allocated
          */
         while (newpwp != NULL) {

@@ -3489,11 +3489,11 @@
         struct watched_page *pwp;
         struct watched_page tpw;
         avl_tree_t *tree;
         avl_index_t where;
 
-        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
+        AS_LOCK_ENTER(as, RW_WRITER);
 
         if (p->p_flag & SVFWAIT)
                 tree = &p->p_wpage;
         else
                 tree = &as->a_wpage;

@@ -3554,11 +3554,11 @@
                 }
 
                 pwp = AVL_NEXT(tree, pwp);
         }
 
-        AS_LOCK_EXIT(as, &as->a_lock);
+        AS_LOCK_EXIT(as);
 }
 
 /*
  * Return the original protections for the specified page.
  */

@@ -3566,11 +3566,11 @@
 getwatchprot(struct as *as, caddr_t addr, uint_t *prot)
 {
         struct watched_page *pwp;
         struct watched_page tpw;
 
-        ASSERT(AS_LOCK_HELD(as, &as->a_lock));
+        ASSERT(AS_LOCK_HELD(as));
 
         tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
         if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL)
                 *prot = pwp->wp_oprot;
 }

@@ -3853,11 +3853,11 @@
                 void *data;
         } s;
 
         s.data = seg->s_data;
 
-        ASSERT(AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(AS_WRITE_HELD(as));
         ASSERT(saddr >= seg->s_base && saddr < eaddr);
         ASSERT(eaddr <= seg->s_base + seg->s_size);
 
         /*
          * Don't include MAP_NORESERVE pages in the address range

@@ -3967,11 +3967,11 @@
 static ssize_t
 pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr)
 {
         ssize_t pagesize, hatsize;
 
-        ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
+        ASSERT(AS_WRITE_HELD(seg->s_as));
         ASSERT(IS_P2ALIGNED(saddr, PAGESIZE));
         ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE));
         ASSERT(saddr < eaddr);
 
         pagesize = hatsize = hat_getpagesize(seg->s_as->a_hat, saddr);

@@ -4007,11 +4007,11 @@
         struct seg *brkseg, *stkseg;
         struct vnode *vp;
         struct vattr vattr;
         uint_t prot;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         /*
          * Request an initial buffer size that doesn't waste memory
          * if the address space has only a small number of segments.
          */

@@ -4191,11 +4191,11 @@
         struct seg *brkseg, *stkseg;
         struct vnode *vp;
         struct vattr vattr;
         uint_t prot;
 
-        ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
+        ASSERT(as != &kas && AS_WRITE_HELD(as));
 
         /*
          * Request an initial buffer size that doesn't waste memory
          * if the address space has only a small number of segments.
          */