Patch: as-lock-macro-simplification

Simplifies the address-space (as) lock macros throughout the procfs code:
AS_LOCK_ENTER/AS_LOCK_EXIT and the AS_*_HELD assertion macros drop the
redundant second argument (&as->a_lock), since the lock is always derived
from the as pointer itself. No behavior change; mechanical conversion of
every call site from the two-argument to the one-argument macro form.
*** 1374,1384 ****
prnsegs(struct as *as, int reserved)
{
int n = 0;
struct seg *seg;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
caddr_t saddr, naddr;
void *tmp = NULL;
--- 1374,1384 ----
prnsegs(struct as *as, int reserved)
{
int n = 0;
struct seg *seg;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
caddr_t eaddr = seg->s_base + pr_getsegsize(seg, reserved);
caddr_t saddr, naddr;
void *tmp = NULL;
*** 1617,1627 ****
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
--- 1617,1627 ----
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
*** 1728,1738 ****
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
--- 1728,1738 ----
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
*** 1838,1848 ****
prpdsize(struct as *as)
{
struct seg *seg;
size_t size;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
if ((seg = AS_SEGFIRST(as)) == NULL)
return (0);
size = sizeof (prpageheader_t);
--- 1838,1848 ----
prpdsize(struct as *as)
{
struct seg *seg;
size_t size;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
if ((seg = AS_SEGFIRST(as)) == NULL)
return (0);
size = sizeof (prpageheader_t);
*** 1868,1878 ****
prpdsize32(struct as *as)
{
struct seg *seg;
size_t size;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
if ((seg = AS_SEGFIRST(as)) == NULL)
return (0);
size = sizeof (prpageheader32_t);
--- 1868,1878 ----
prpdsize32(struct as *as)
{
struct seg *seg;
size_t size;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
if ((seg = AS_SEGFIRST(as)) == NULL)
return (0);
size = sizeof (prpageheader32_t);
*** 1907,1925 ****
prasmap_t *pmp;
struct seg *seg;
int error;
again:
! AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
if ((seg = AS_SEGFIRST(as)) == NULL) {
! AS_LOCK_EXIT(as, &as->a_lock);
return (0);
}
size = prpdsize(as);
if (uiop->uio_resid < size) {
! AS_LOCK_EXIT(as, &as->a_lock);
return (E2BIG);
}
buf = kmem_zalloc(size, KM_SLEEP);
php = (prpageheader_t *)buf;
--- 1907,1925 ----
prasmap_t *pmp;
struct seg *seg;
int error;
again:
! AS_LOCK_ENTER(as, RW_WRITER);
if ((seg = AS_SEGFIRST(as)) == NULL) {
! AS_LOCK_EXIT(as);
return (0);
}
size = prpdsize(as);
if (uiop->uio_resid < size) {
! AS_LOCK_EXIT(as);
return (E2BIG);
}
buf = kmem_zalloc(size, KM_SLEEP);
php = (prpageheader_t *)buf;
*** 1963,1973 ****
* EINTR so that this thread can be dislodged if
* a latent bug causes us to spin indefinitely.
*/
if (next > (uintptr_t)buf + size) {
pr_getprot_done(&tmp);
! AS_LOCK_EXIT(as, &as->a_lock);
kmem_free(buf, size);
if (ISSIG(curthread, JUSTLOOKING))
return (EINTR);
--- 1963,1973 ----
* EINTR so that this thread can be dislodged if
* a latent bug causes us to spin indefinitely.
*/
if (next > (uintptr_t)buf + size) {
pr_getprot_done(&tmp);
! AS_LOCK_EXIT(as);
kmem_free(buf, size);
if (ISSIG(curthread, JUSTLOOKING))
return (EINTR);
*** 2032,2042 ****
pmp = (prasmap_t *)next;
}
ASSERT(tmp == NULL);
} while ((seg = AS_SEGNEXT(as, seg)) != NULL);
! AS_LOCK_EXIT(as, &as->a_lock);
ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
kmem_free(buf, size);
--- 2032,2042 ----
pmp = (prasmap_t *)next;
}
ASSERT(tmp == NULL);
} while ((seg = AS_SEGNEXT(as, seg)) != NULL);
! AS_LOCK_EXIT(as);
ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
kmem_free(buf, size);
*** 2054,2072 ****
prasmap32_t *pmp;
struct seg *seg;
int error;
again:
! AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
if ((seg = AS_SEGFIRST(as)) == NULL) {
! AS_LOCK_EXIT(as, &as->a_lock);
return (0);
}
size = prpdsize32(as);
if (uiop->uio_resid < size) {
! AS_LOCK_EXIT(as, &as->a_lock);
return (E2BIG);
}
buf = kmem_zalloc(size, KM_SLEEP);
php = (prpageheader32_t *)buf;
--- 2054,2072 ----
prasmap32_t *pmp;
struct seg *seg;
int error;
again:
! AS_LOCK_ENTER(as, RW_WRITER);
if ((seg = AS_SEGFIRST(as)) == NULL) {
! AS_LOCK_EXIT(as);
return (0);
}
size = prpdsize32(as);
if (uiop->uio_resid < size) {
! AS_LOCK_EXIT(as);
return (E2BIG);
}
buf = kmem_zalloc(size, KM_SLEEP);
php = (prpageheader32_t *)buf;
*** 2110,2120 ****
* EINTR so that this thread can be dislodged if
* a latent bug causes us to spin indefinitely.
*/
if (next > (uintptr_t)buf + size) {
pr_getprot_done(&tmp);
! AS_LOCK_EXIT(as, &as->a_lock);
kmem_free(buf, size);
if (ISSIG(curthread, JUSTLOOKING))
return (EINTR);
--- 2110,2120 ----
* EINTR so that this thread can be dislodged if
* a latent bug causes us to spin indefinitely.
*/
if (next > (uintptr_t)buf + size) {
pr_getprot_done(&tmp);
! AS_LOCK_EXIT(as);
kmem_free(buf, size);
if (ISSIG(curthread, JUSTLOOKING))
return (EINTR);
*** 2179,2189 ****
pmp = (prasmap32_t *)next;
}
ASSERT(tmp == NULL);
} while ((seg = AS_SEGNEXT(as, seg)) != NULL);
! AS_LOCK_EXIT(as, &as->a_lock);
ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
kmem_free(buf, size);
--- 2179,2189 ----
pmp = (prasmap32_t *)next;
}
ASSERT(tmp == NULL);
} while ((seg = AS_SEGNEXT(as, seg)) != NULL);
! AS_LOCK_EXIT(as);
ASSERT((uintptr_t)pmp <= (uintptr_t)buf + size);
error = uiomove(buf, (caddr_t)pmp - buf, UIO_READ, uiop);
kmem_free(buf, size);
*** 2334,2349 ****
if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
psp->pr_size = 0;
psp->pr_rssize = 0;
} else {
mutex_exit(&p->p_lock);
! AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
psp->pr_size = btopr(as->a_resvsize) *
(PAGESIZE / 1024);
psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024);
psp->pr_pctmem = rm_pctmemory(as);
! AS_LOCK_EXIT(as, &as->a_lock);
mutex_enter(&p->p_lock);
}
}
}
--- 2334,2349 ----
if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
psp->pr_size = 0;
psp->pr_rssize = 0;
} else {
mutex_exit(&p->p_lock);
! AS_LOCK_ENTER(as, RW_READER);
psp->pr_size = btopr(as->a_resvsize) *
(PAGESIZE / 1024);
psp->pr_rssize = rm_asrss(as) * (PAGESIZE / 1024);
psp->pr_pctmem = rm_pctmemory(as);
! AS_LOCK_EXIT(as);
mutex_enter(&p->p_lock);
}
}
}
*** 2467,2483 ****
if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
psp->pr_size = 0;
psp->pr_rssize = 0;
} else {
mutex_exit(&p->p_lock);
! AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
psp->pr_size = (size32_t)
(btopr(as->a_resvsize) * (PAGESIZE / 1024));
psp->pr_rssize = (size32_t)
(rm_asrss(as) * (PAGESIZE / 1024));
psp->pr_pctmem = rm_pctmemory(as);
! AS_LOCK_EXIT(as, &as->a_lock);
mutex_enter(&p->p_lock);
}
}
/*
--- 2467,2483 ----
if ((p->p_flag & SSYS) || (as = p->p_as) == &kas) {
psp->pr_size = 0;
psp->pr_rssize = 0;
} else {
mutex_exit(&p->p_lock);
! AS_LOCK_ENTER(as, RW_READER);
psp->pr_size = (size32_t)
(btopr(as->a_resvsize) * (PAGESIZE / 1024));
psp->pr_rssize = (size32_t)
(rm_asrss(as) * (PAGESIZE / 1024));
psp->pr_pctmem = rm_pctmemory(as);
! AS_LOCK_EXIT(as);
mutex_enter(&p->p_lock);
}
}
/*
*** 3311,3321 ****
if (as == NULL || avl_numnodes(&as->a_wpage) == 0)
return;
ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
! AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
pwp = avl_first(&as->a_wpage);
cookie = NULL;
while ((pwp = avl_destroy_nodes(&as->a_wpage, &cookie)) != NULL) {
--- 3311,3321 ----
if (as == NULL || avl_numnodes(&as->a_wpage) == 0)
return;
ASSERT(MUTEX_NOT_HELD(&curproc->p_lock));
! AS_LOCK_ENTER(as, RW_WRITER);
pwp = avl_first(&as->a_wpage);
cookie = NULL;
while ((pwp = avl_destroy_nodes(&as->a_wpage, &cookie)) != NULL) {
*** 3340,3350 ****
}
avl_destroy(&as->a_wpage);
p->p_wprot = NULL;
! AS_LOCK_EXIT(as, &as->a_lock);
}
/*
* Insert a watched area into the list of watched pages.
* If oflags is zero then we are adding a new watched area.
--- 3340,3350 ----
}
avl_destroy(&as->a_wpage);
p->p_wprot = NULL;
! AS_LOCK_EXIT(as);
}
/*
* Insert a watched area into the list of watched pages.
* If oflags is zero then we are adding a new watched area.
*** 3374,3384 ****
pwp = kmem_zalloc(sizeof (struct watched_page), KM_SLEEP);
pwp->wp_list = newpwp;
newpwp = pwp;
}
! AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
/*
* Search for an existing watched page to contain the watched area.
* If none is found, grab a new one from the available list
* and insert it in the active list, keeping the list sorted
--- 3374,3384 ----
pwp = kmem_zalloc(sizeof (struct watched_page), KM_SLEEP);
pwp->wp_list = newpwp;
newpwp = pwp;
}
! AS_LOCK_ENTER(as, RW_WRITER);
/*
* Search for an existing watched page to contain the watched area.
* If none is found, grab a new one from the available list
* and insert it in the active list, keeping the list sorted
*** 3389,3399 ****
else
pwp_tree = &as->a_wpage;
again:
if (avl_numnodes(pwp_tree) > prnwatch) {
! AS_LOCK_EXIT(as, &as->a_lock);
while (newpwp != NULL) {
pwp = newpwp->wp_list;
kmem_free(newpwp, sizeof (struct watched_page));
newpwp = pwp;
}
--- 3389,3399 ----
else
pwp_tree = &as->a_wpage;
again:
if (avl_numnodes(pwp_tree) > prnwatch) {
! AS_LOCK_EXIT(as);
while (newpwp != NULL) {
pwp = newpwp->wp_list;
kmem_free(newpwp, sizeof (struct watched_page));
newpwp = pwp;
}
*** 3462,3472 ****
* it over again with the virtual address of the next page.
*/
if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr)
goto again;
! AS_LOCK_EXIT(as, &as->a_lock);
/*
* Free any pages we may have over-allocated
*/
while (newpwp != NULL) {
--- 3462,3472 ----
* it over again with the virtual address of the next page.
*/
if ((vaddr = pwp->wp_vaddr + PAGESIZE) < eaddr)
goto again;
! AS_LOCK_EXIT(as);
/*
* Free any pages we may have over-allocated
*/
while (newpwp != NULL) {
*** 3489,3499 ****
struct watched_page *pwp;
struct watched_page tpw;
avl_tree_t *tree;
avl_index_t where;
! AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
if (p->p_flag & SVFWAIT)
tree = &p->p_wpage;
else
tree = &as->a_wpage;
--- 3489,3499 ----
struct watched_page *pwp;
struct watched_page tpw;
avl_tree_t *tree;
avl_index_t where;
! AS_LOCK_ENTER(as, RW_WRITER);
if (p->p_flag & SVFWAIT)
tree = &p->p_wpage;
else
tree = &as->a_wpage;
*** 3554,3564 ****
}
pwp = AVL_NEXT(tree, pwp);
}
! AS_LOCK_EXIT(as, &as->a_lock);
}
/*
* Return the original protections for the specified page.
*/
--- 3554,3564 ----
}
pwp = AVL_NEXT(tree, pwp);
}
! AS_LOCK_EXIT(as);
}
/*
* Return the original protections for the specified page.
*/
*** 3566,3576 ****
getwatchprot(struct as *as, caddr_t addr, uint_t *prot)
{
struct watched_page *pwp;
struct watched_page tpw;
! ASSERT(AS_LOCK_HELD(as, &as->a_lock));
tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL)
*prot = pwp->wp_oprot;
}
--- 3566,3576 ----
getwatchprot(struct as *as, caddr_t addr, uint_t *prot)
{
struct watched_page *pwp;
struct watched_page tpw;
! ASSERT(AS_LOCK_HELD(as));
tpw.wp_vaddr = (caddr_t)((uintptr_t)addr & (uintptr_t)PAGEMASK);
if ((pwp = avl_find(&as->a_wpage, &tpw, NULL)) != NULL)
*prot = pwp->wp_oprot;
}
*** 3853,3863 ****
void *data;
} s;
s.data = seg->s_data;
! ASSERT(AS_WRITE_HELD(as, &as->a_lock));
ASSERT(saddr >= seg->s_base && saddr < eaddr);
ASSERT(eaddr <= seg->s_base + seg->s_size);
/*
* Don't include MAP_NORESERVE pages in the address range
--- 3853,3863 ----
void *data;
} s;
s.data = seg->s_data;
! ASSERT(AS_WRITE_HELD(as));
ASSERT(saddr >= seg->s_base && saddr < eaddr);
ASSERT(eaddr <= seg->s_base + seg->s_size);
/*
* Don't include MAP_NORESERVE pages in the address range
*** 3967,3977 ****
static ssize_t
pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr)
{
ssize_t pagesize, hatsize;
! ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
ASSERT(IS_P2ALIGNED(saddr, PAGESIZE));
ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE));
ASSERT(saddr < eaddr);
pagesize = hatsize = hat_getpagesize(seg->s_as->a_hat, saddr);
--- 3967,3977 ----
static ssize_t
pr_getpagesize(struct seg *seg, caddr_t saddr, caddr_t *naddrp, caddr_t eaddr)
{
ssize_t pagesize, hatsize;
! ASSERT(AS_WRITE_HELD(seg->s_as));
ASSERT(IS_P2ALIGNED(saddr, PAGESIZE));
ASSERT(IS_P2ALIGNED(eaddr, PAGESIZE));
ASSERT(saddr < eaddr);
pagesize = hatsize = hat_getpagesize(seg->s_as->a_hat, saddr);
*** 4007,4017 ****
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
--- 4007,4017 ----
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
*** 4191,4201 ****
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as, &as->a_lock));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/
--- 4191,4201 ----
struct seg *brkseg, *stkseg;
struct vnode *vp;
struct vattr vattr;
uint_t prot;
! ASSERT(as != &kas && AS_WRITE_HELD(as));
/*
* Request an initial buffer size that doesn't waste memory
* if the address space has only a small number of segments.
*/