Print this page
const-ify segment ops structures (make them const)
There is no reason to keep the segment ops structures writable.


 456         seg = avl_find(&as->a_segtree, &addr, &where);
 457 
 458         if (seg == NULL)
 459                 seg = avl_nearest(&as->a_segtree, where, AVL_AFTER);
 460 
 461         if (seg == NULL)
 462                 seg = avl_last(&as->a_segtree);
 463 
 464         if (seg != NULL) {
 465                 caddr_t base = seg->s_base;
 466 
 467                 /*
 468                  * If top of seg is below the requested address, then
 469                  * the insertion point is at the end of the linked list,
 470                  * and seg points to the tail of the list.  Otherwise,
 471                  * the insertion point is immediately before seg.
 472                  */
 473                 if (base + seg->s_size > addr) {
 474                         if (addr >= base || eaddr > base) {
 475 #ifdef __sparc
 476                                 extern struct seg_ops segnf_ops;
 477 
 478                                 /*
 479                                  * no-fault segs must disappear if overlaid.
 480                                  * XXX need new segment type so
 481                                  * we don't have to check s_ops
 482                                  */
 483                                 if (seg->s_ops == &segnf_ops) {
 484                                         seg_unmap(seg);
 485                                         goto again;
 486                                 }
 487 #endif
 488                                 return (-1);    /* overlapping segment */
 489                         }
 490                 }
 491         }
 492         as->a_seglast = newseg;
 493         avl_insert(&as->a_segtree, newseg, where);
 494 
 495 #ifdef VERIFY_SEGLIST
 496         as_verify(as);


1988  * -1 is returned.
1989  *
1990  * NOTE: This routine is not correct when base+len overflows caddr_t.
1991  */
1992 int
1993 as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
1994     caddr_t addr)
1995 {
1996 
        /*
         * Thin wrapper: an unaligned gap request is the aligned one with the
         * three trailing arguments zeroed.  (Their names are not visible in
         * this excerpt -- presumably alignment/redzone/offset; confirm
         * against the as_gap_aligned() definition.)
         */
1997         return (as_gap_aligned(as, minlen, basep, lenp, flags, addr, 0, 0, 0));
1998 }
1999 
2000 /*
2001  * Return the next range within [base, base + len) that is backed
2002  * with "real memory".  Skip holes and non-seg_vn segments.
2003  * We're lazy and only return one segment at a time.
2004  */
2005 int
2006 as_memory(struct as *as, caddr_t *basep, size_t *lenp)
2007 {
2008         extern struct seg_ops segspt_shmops;    /* needs a header file */
2009         struct seg *seg;
2010         caddr_t addr, eaddr;
2011         caddr_t segend;
2012 
2013         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2014 
2015         addr = *basep;
2016         eaddr = addr + *lenp;
2017 
2018         seg = as_findseg(as, addr, 0);
2019         if (seg != NULL)
2020                 addr = MAX(seg->s_base, addr);
2021 
2022         for (;;) {
2023                 if (seg == NULL || addr >= eaddr || eaddr <= seg->s_base) {
2024                         AS_LOCK_EXIT(as, &as->a_lock);
2025                         return (EINVAL);
2026                 }
2027 
2028                 if (seg->s_ops == &segvn_ops) {


2465  * as expected by the caller.  Save pointers to per segment shadow lists at
2466  * the tail of plist so that they can be used during as_pageunlock().
2467  */
2468 static int
2469 as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp,
2470     caddr_t addr, size_t size, enum seg_rw rw)
2471 {
2472         caddr_t sv_addr = addr;
2473         size_t sv_size = size;
2474         struct seg *sv_seg = seg;
2475         ulong_t segcnt = 1;
2476         ulong_t cnt;
2477         size_t ssize;
2478         pgcnt_t npages = btop(size);
2479         page_t **plist;
2480         page_t **pl;
2481         int error;
2482         caddr_t eaddr;
2483         faultcode_t fault_err = 0;
2484         pgcnt_t pl_off;
2485         extern struct seg_ops segspt_shmops;
2486 
2487         ASSERT(AS_LOCK_HELD(as, &as->a_lock));
2488         ASSERT(seg != NULL);
2489         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2490         ASSERT(addr + size > seg->s_base + seg->s_size);
2491         ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2492         ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2493 
2494         /*
2495          * Count the number of segments covered by the range we are about to
2496          * lock. The segment count is used to size the shadow list we return
2497          * back to the caller.
2498          */
2499         for (; size != 0; size -= ssize, addr += ssize) {
2500                 if (addr >= seg->s_base + seg->s_size) {
2501 
2502                         seg = AS_SEGNEXT(as, seg);
2503                         if (seg == NULL || addr != seg->s_base) {
2504                                 AS_LOCK_EXIT(as, &as->a_lock);
2505                                 return (EFAULT);




 456         seg = avl_find(&as->a_segtree, &addr, &where);
 457 
 458         if (seg == NULL)
 459                 seg = avl_nearest(&as->a_segtree, where, AVL_AFTER);
 460 
 461         if (seg == NULL)
 462                 seg = avl_last(&as->a_segtree);
 463 
 464         if (seg != NULL) {
 465                 caddr_t base = seg->s_base;
 466 
 467                 /*
 468                  * If top of seg is below the requested address, then
 469                  * the insertion point is at the end of the linked list,
 470                  * and seg points to the tail of the list.  Otherwise,
 471                  * the insertion point is immediately before seg.
 472                  */
 473                 if (base + seg->s_size > addr) {
 474                         if (addr >= base || eaddr > base) {
 475 #ifdef __sparc
 476                                 extern const struct seg_ops segnf_ops;
 477 
 478                                 /*
 479                                  * no-fault segs must disappear if overlaid.
 480                                  * XXX need new segment type so
 481                                  * we don't have to check s_ops
 482                                  */
 483                                 if (seg->s_ops == &segnf_ops) {
 484                                         seg_unmap(seg);
 485                                         goto again;
 486                                 }
 487 #endif
 488                                 return (-1);    /* overlapping segment */
 489                         }
 490                 }
 491         }
 492         as->a_seglast = newseg;
 493         avl_insert(&as->a_segtree, newseg, where);
 494 
 495 #ifdef VERIFY_SEGLIST
 496         as_verify(as);


1988  * -1 is returned.
1989  *
1990  * NOTE: This routine is not correct when base+len overflows caddr_t.
1991  */
1992 int
1993 as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp, uint_t flags,
1994     caddr_t addr)
1995 {
1996 
        /*
         * Thin wrapper: an unaligned gap request is the aligned one with the
         * three trailing arguments zeroed.  (Their names are not visible in
         * this excerpt -- presumably alignment/redzone/offset; confirm
         * against the as_gap_aligned() definition.)
         */
1997         return (as_gap_aligned(as, minlen, basep, lenp, flags, addr, 0, 0, 0));
1998 }
1999 
2000 /*
2001  * Return the next range within [base, base + len) that is backed
2002  * with "real memory".  Skip holes and non-seg_vn segments.
2003  * We're lazy and only return one segment at a time.
2004  */
2005 int
2006 as_memory(struct as *as, caddr_t *basep, size_t *lenp)
2007 {
2008         extern const struct seg_ops segspt_shmops;      /* needs a header file */
2009         struct seg *seg;
2010         caddr_t addr, eaddr;
2011         caddr_t segend;
2012 
2013         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
2014 
2015         addr = *basep;
2016         eaddr = addr + *lenp;
2017 
2018         seg = as_findseg(as, addr, 0);
2019         if (seg != NULL)
2020                 addr = MAX(seg->s_base, addr);
2021 
2022         for (;;) {
2023                 if (seg == NULL || addr >= eaddr || eaddr <= seg->s_base) {
2024                         AS_LOCK_EXIT(as, &as->a_lock);
2025                         return (EINVAL);
2026                 }
2027 
2028                 if (seg->s_ops == &segvn_ops) {


2465  * as expected by the caller.  Save pointers to per segment shadow lists at
2466  * the tail of plist so that they can be used during as_pageunlock().
2467  */
2468 static int
2469 as_pagelock_segs(struct as *as, struct seg *seg, struct page ***ppp,
2470     caddr_t addr, size_t size, enum seg_rw rw)
2471 {
2472         caddr_t sv_addr = addr;
2473         size_t sv_size = size;
2474         struct seg *sv_seg = seg;
2475         ulong_t segcnt = 1;
2476         ulong_t cnt;
2477         size_t ssize;
2478         pgcnt_t npages = btop(size);
2479         page_t **plist;
2480         page_t **pl;
2481         int error;
2482         caddr_t eaddr;
2483         faultcode_t fault_err = 0;
2484         pgcnt_t pl_off;
2485         extern const struct seg_ops segspt_shmops;
2486 
2487         ASSERT(AS_LOCK_HELD(as, &as->a_lock));
2488         ASSERT(seg != NULL);
2489         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
2490         ASSERT(addr + size > seg->s_base + seg->s_size);
2491         ASSERT(IS_P2ALIGNED(size, PAGESIZE));
2492         ASSERT(IS_P2ALIGNED(addr, PAGESIZE));
2493 
2494         /*
2495          * Count the number of segments covered by the range we are about to
2496          * lock. The segment count is used to size the shadow list we return
2497          * back to the caller.
2498          */
2499         for (; size != 0; size -= ssize, addr += ssize) {
2500                 if (addr >= seg->s_base + seg->s_size) {
2501 
2502                         seg = AS_SEGNEXT(as, seg);
2503                         if (seg == NULL || addr != seg->s_base) {
2504                                 AS_LOCK_EXIT(as, &as->a_lock);
2505                                 return (EFAULT);