Print this page
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENODEV, handle NULL getmemid segop function pointer as
"return ENODEV" shorthand.
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs.  The VM used the xhat code
to keep the CPU's and Zulu's page tables in-sync.  Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat

@@ -57,11 +57,10 @@
 #include <sys/debug.h>
 #include <sys/tnf_probe.h>
 #include <sys/vtrace.h>
 
 #include <vm/hat.h>
-#include <vm/xhat.h>
 #include <vm/as.h>
 #include <vm/seg.h>
 #include <vm/seg_vn.h>
 #include <vm/seg_dev.h>
 #include <vm/seg_kmem.h>

@@ -472,11 +471,11 @@
                  * the insertion point is immediately before seg.
                  */
                 if (base + seg->s_size > addr) {
                         if (addr >= base || eaddr > base) {
 #ifdef __sparc
-                                extern struct seg_ops segnf_ops;
+                                extern const struct seg_ops segnf_ops;
 
                                 /*
                                  * no-fault segs must disappear if overlaid.
                                  * XXX need new segment type so
                                  * we don't have to check s_ops

@@ -669,12 +668,10 @@
 
         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
         as->a_hat = hat_alloc(as);      /* create hat for default system mmu */
         AS_LOCK_EXIT(as, &as->a_lock);
 
-        as->a_xhat = NULL;
-
         return (as);
 }
 
 /*
  * Free an address space data structure.

@@ -685,11 +682,11 @@
 void
 as_free(struct as *as)
 {
         struct hat *hat = as->a_hat;
         struct seg *seg, *next;
-        int called = 0;
+        boolean_t free_started = B_FALSE;
 
 top:
         /*
          * Invoke ALL callbacks. as_do_callbacks will do one callback
          * per call, and not return (-1) until the callback has completed.

@@ -697,28 +694,23 @@
          */
         mutex_enter(&as->a_contents);
         while (as->a_callbacks && as_do_callbacks(as, AS_ALL_EVENT, 0, 0))
                 ;
 
-        /* This will prevent new XHATs from attaching to as */
-        if (!called)
-                AS_SETBUSY(as);
         mutex_exit(&as->a_contents);
         AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 
-        if (!called) {
-                called = 1;
+        if (!free_started) {
+                free_started = B_TRUE;
                 hat_free_start(hat);
-                if (as->a_xhat != NULL)
-                        xhat_free_start_all(as);
         }
         for (seg = AS_SEGFIRST(as); seg != NULL; seg = next) {
                 int err;
 
                 next = AS_SEGNEXT(as, seg);
 retry:
-                err = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
+                err = segop_unmap(seg, seg->s_base, seg->s_size);
                 if (err == EAGAIN) {
                         mutex_enter(&as->a_contents);
                         if (as->a_callbacks) {
                                 AS_LOCK_EXIT(as, &as->a_lock);
                         } else if (!AS_ISNOUNMAPWAIT(as)) {

@@ -757,12 +749,10 @@
                          */
                         ASSERT(err == 0);
                 }
         }
         hat_free_end(hat);
-        if (as->a_xhat != NULL)
-                xhat_free_end_all(as);
         AS_LOCK_EXIT(as, &as->a_lock);
 
         /* /proc stuff */
         ASSERT(avl_numnodes(&as->a_wpage) == 0);
         if (as->a_objectdir) {

@@ -792,18 +782,10 @@
         newas->a_userlimit = as->a_userlimit;
         newas->a_proc = forkedproc;
 
         AS_LOCK_ENTER(newas, &newas->a_lock, RW_WRITER);
 
-        /* This will prevent new XHATs from attaching */
-        mutex_enter(&as->a_contents);
-        AS_SETBUSY(as);
-        mutex_exit(&as->a_contents);
-        mutex_enter(&newas->a_contents);
-        AS_SETBUSY(newas);
-        mutex_exit(&newas->a_contents);
-
         (void) hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_SRD);
 
         for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
 
                 if (seg->s_flags & S_PURGE) {

@@ -813,27 +795,21 @@
 
                 newseg = seg_alloc(newas, seg->s_base, seg->s_size);
                 if (newseg == NULL) {
                         AS_LOCK_EXIT(newas, &newas->a_lock);
                         as_setwatch(as);
-                        mutex_enter(&as->a_contents);
-                        AS_CLRBUSY(as);
-                        mutex_exit(&as->a_contents);
                         AS_LOCK_EXIT(as, &as->a_lock);
                         as_free(newas);
                         return (-1);
                 }
-                if ((error = SEGOP_DUP(seg, newseg)) != 0) {
+                if ((error = segop_dup(seg, newseg)) != 0) {
                         /*
                          * We call seg_free() on the new seg
                          * because the segment is not set up
                          * completely; i.e. it has no ops.
                          */
                         as_setwatch(as);
-                        mutex_enter(&as->a_contents);
-                        AS_CLRBUSY(as);
-                        mutex_exit(&as->a_contents);
                         AS_LOCK_EXIT(as, &as->a_lock);
                         seg_free(newseg);
                         AS_LOCK_EXIT(newas, &newas->a_lock);
                         as_free(newas);
                         return (error);

@@ -841,22 +817,14 @@
                 newas->a_size += seg->s_size;
         }
         newas->a_resvsize = as->a_resvsize - purgesize;
 
         error = hat_dup(as->a_hat, newas->a_hat, NULL, 0, HAT_DUP_ALL);
-        if (as->a_xhat != NULL)
-                error |= xhat_dup_all(as, newas, NULL, 0, HAT_DUP_ALL);
 
-        mutex_enter(&newas->a_contents);
-        AS_CLRBUSY(newas);
-        mutex_exit(&newas->a_contents);
         AS_LOCK_EXIT(newas, &newas->a_lock);
 
         as_setwatch(as);
-        mutex_enter(&as->a_contents);
-        AS_CLRBUSY(as);
-        mutex_exit(&as->a_contents);
         AS_LOCK_EXIT(as, &as->a_lock);
         if (error != 0) {
                 as_free(newas);
                 return (error);
         }

@@ -878,39 +846,28 @@
         faultcode_t res = 0;
         caddr_t addrsav;
         struct seg *segsav;
         int as_lock_held;
         klwp_t *lwp = ttolwp(curthread);
-        int is_xhat = 0;
         int holding_wpage = 0;
-        extern struct seg_ops   segdev_ops;
 
 
 
-        if (as->a_hat != hat) {
-                /* This must be an XHAT then */
-                is_xhat = 1;
-
-                if ((type != F_INVAL) || (as == &kas))
-                        return (FC_NOSUPPORT);
-        }
-
 retry:
-        if (!is_xhat) {
                 /*
-                 * Indicate that the lwp is not to be stopped while waiting
-                 * for a pagefault.  This is to avoid deadlock while debugging
-                 * a process via /proc over NFS (in particular).
+         * Indicate that the lwp is not to be stopped while waiting for a
+         * pagefault.  This is to avoid deadlock while debugging a process
+         * via /proc over NFS (in particular).
                  */
                 if (lwp != NULL)
                         lwp->lwp_nostop++;
 
                 /*
-                 * same length must be used when we softlock and softunlock.
-                 * We don't support softunlocking lengths less than
-                 * the original length when there is largepage support.
-                 * See seg_dev.c for more comments.
+         * same length must be used when we softlock and softunlock.  We
+         * don't support softunlocking lengths less than the original length
+         * when there is largepage support.  See seg_dev.c for more
+         * comments.
                  */
                 switch (type) {
 
                 case F_SOFTLOCK:
                         CPU_STATS_ADD_K(vm, softlock, 1);

@@ -929,11 +886,10 @@
                         if (as == &kas)
                                 CPU_STATS_ADDQ(CPU, vm, kernel_asflt, 1);
                         CPU_STATS_EXIT_K();
                         break;
                 }
-        }
 
         /* Kernel probe */
         TNF_PROBE_3(address_fault, "vm pagefault", /* CSTYLED */,
             tnf_opaque, address,        addr,
             tnf_fault_type,     fault_type,     type,

@@ -950,39 +906,19 @@
          * filesystem, and then no-one will be able to exec new commands,
          * as exec'ing requires the write lock on the as.
          */
         if (as == &kas && segkmap && segkmap->s_base <= raddr &&
             raddr + size < segkmap->s_base + segkmap->s_size) {
-                /*
-                 * if (as==&kas), this can't be XHAT: we've already returned
-                 * FC_NOSUPPORT.
-                 */
                 seg = segkmap;
                 as_lock_held = 0;
         } else {
                 AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
-                if (is_xhat && avl_numnodes(&as->a_wpage) != 0) {
-                        /*
-                         * Grab and hold the writers' lock on the as
-                         * if the fault is to a watched page.
-                         * This will keep CPUs from "peeking" at the
-                         * address range while we're temporarily boosting
-                         * the permissions for the XHAT device to
-                         * resolve the fault in the segment layer.
-                         *
-                         * We could check whether faulted address
-                         * is within a watched page and only then grab
-                         * the writer lock, but this is simpler.
-                         */
-                        AS_LOCK_EXIT(as, &as->a_lock);
-                        AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
-                }
 
                 seg = as_segat(as, raddr);
                 if (seg == NULL) {
                         AS_LOCK_EXIT(as, &as->a_lock);
-                        if ((lwp != NULL) && (!is_xhat))
+                        if (lwp != NULL)
                                 lwp->lwp_nostop--;
                         return (FC_NOMAP);
                 }
 
                 as_lock_held = 1;

@@ -1002,39 +938,20 @@
                 if (raddr + rsize > seg->s_base + seg->s_size)
                         ssize = seg->s_base + seg->s_size - raddr;
                 else
                         ssize = rsize;
 
-                if (!is_xhat || (seg->s_ops != &segdev_ops)) {
-
-                        if (is_xhat && avl_numnodes(&as->a_wpage) != 0 &&
-                            pr_is_watchpage_as(raddr, rw, as)) {
-                                /*
-                                 * Handle watch pages.  If we're faulting on a
-                                 * watched page from an X-hat, we have to
-                                 * restore the original permissions while we
-                                 * handle the fault.
-                                 */
-                                as_clearwatch(as);
-                                holding_wpage = 1;
-                        }
-
-                        res = SEGOP_FAULT(hat, seg, raddr, ssize, type, rw);
+                res = segop_fault(hat, seg, raddr, ssize, type, rw);
 
                         /* Restore watchpoints */
                         if (holding_wpage) {
                                 as_setwatch(as);
                                 holding_wpage = 0;
                         }
 
                         if (res != 0)
                                 break;
-                } else {
-                        /* XHAT does not support seg_dev */
-                        res = FC_NOSUPPORT;
-                        break;
-                }
         }
 
         /*
          * If we were SOFTLOCKing and encountered a failure,
          * we must SOFTUNLOCK the range we already did. (Maybe we

@@ -1053,17 +970,17 @@
                          */
                         if (raddr > seg->s_base + seg->s_size)
                                 ssize = seg->s_base + seg->s_size - addrsav;
                         else
                                 ssize = raddr - addrsav;
-                        (void) SEGOP_FAULT(hat, seg, addrsav, ssize,
+                        (void) segop_fault(hat, seg, addrsav, ssize,
                             F_SOFTUNLOCK, S_OTHER);
                 }
         }
         if (as_lock_held)
                 AS_LOCK_EXIT(as, &as->a_lock);
-        if ((lwp != NULL) && (!is_xhat))
+        if (lwp != NULL)
                 lwp->lwp_nostop--;
 
         /*
          * If the lower levels returned EDEADLK for a fault,
          * It means that we should retry the fault.  Let's wait

@@ -1123,11 +1040,11 @@
                         if (seg == NULL || raddr != seg->s_base) {
                                 res = FC_NOMAP;
                                 break;
                         }
                 }
-                res = SEGOP_FAULTA(seg, raddr);
+                res = segop_faulta(seg, raddr);
                 if (res != 0)
                         break;
         }
         AS_LOCK_EXIT(as, &as->a_lock);
         if (lwp != NULL)

@@ -1213,11 +1130,11 @@
                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
                         ssize = seg->s_base + seg->s_size - raddr;
                 else
                         ssize = rsize;
 retry:
-                error = SEGOP_SETPROT(seg, raddr, ssize, prot);
+                error = segop_setprot(seg, raddr, ssize, prot);
 
                 if (error == IE_NOMEM) {
                         error = EAGAIN;
                         break;
                 }

@@ -1364,11 +1281,11 @@
                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
                         ssize = seg->s_base + seg->s_size - raddr;
                 else
                         ssize = rsize;
 
-                error = SEGOP_CHECKPROT(seg, raddr, ssize, prot);
+                error = segop_checkprot(seg, raddr, ssize, prot);
                 if (error != 0)
                         break;
         }
         as_setwatch(as);
         AS_LOCK_EXIT(as, &as->a_lock);

@@ -1430,11 +1347,11 @@
                 if (!SEG_IS_DEVNULL_MAPPING(seg) &&
                     !SEG_IS_PARTIAL_RESV(seg))
                         rsize = ssize;
 
 retry:
-                err = SEGOP_UNMAP(seg, raddr, ssize);
+                err = segop_unmap(seg, raddr, ssize);
                 if (err == EAGAIN) {
                         /*
                          * Memory is currently locked.  It must be unlocked
                          * before this operation can succeed through a retry.
                          * The possible reasons for locked memory and

@@ -1868,11 +1785,11 @@
         next_seg = NULL;
         seg = AS_SEGFIRST(as);
         while (seg != NULL) {
                 next_seg = AS_SEGNEXT(as, seg);
                 if (seg->s_flags & S_PURGE)
-                        SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
+                        segop_unmap(seg, seg->s_base, seg->s_size);
                 seg = next_seg;
         }
         AS_LOCK_EXIT(as, &as->a_lock);
 
         mutex_enter(&as->a_contents);

@@ -2086,11 +2003,11 @@
  * We're lazy and only return one segment at a time.
  */
 int
 as_memory(struct as *as, caddr_t *basep, size_t *lenp)
 {
-        extern struct seg_ops segspt_shmops;    /* needs a header file */
+        extern const struct seg_ops segspt_shmops;      /* needs a header file */
         struct seg *seg;
         caddr_t addr, eaddr;
         caddr_t segend;
 
         AS_LOCK_ENTER(as, &as->a_lock, RW_READER);

@@ -2139,77 +2056,10 @@
         AS_LOCK_EXIT(as, &as->a_lock);
         return (0);
 }
 
 /*
- * Swap the pages associated with the address space as out to
- * secondary storage, returning the number of bytes actually
- * swapped.
- *
- * The value returned is intended to correlate well with the process's
- * memory requirements.  Its usefulness for this purpose depends on
- * how well the segment-level routines do at returning accurate
- * information.
- */
-size_t
-as_swapout(struct as *as)
-{
-        struct seg *seg;
-        size_t swpcnt = 0;
-
-        /*
-         * Kernel-only processes have given up their address
-         * spaces.  Of course, we shouldn't be attempting to
-         * swap out such processes in the first place...
-         */
-        if (as == NULL)
-                return (0);
-
-        AS_LOCK_ENTER(as, &as->a_lock, RW_READER);
-
-        /* Prevent XHATs from attaching */
-        mutex_enter(&as->a_contents);
-        AS_SETBUSY(as);
-        mutex_exit(&as->a_contents);
-
-
-        /*
-         * Free all mapping resources associated with the address
-         * space.  The segment-level swapout routines capitalize
-         * on this unmapping by scavanging pages that have become
-         * unmapped here.
-         */
-        hat_swapout(as->a_hat);
-        if (as->a_xhat != NULL)
-                xhat_swapout_all(as);
-
-        mutex_enter(&as->a_contents);
-        AS_CLRBUSY(as);
-        mutex_exit(&as->a_contents);
-
-        /*
-         * Call the swapout routines of all segments in the address
-         * space to do the actual work, accumulating the amount of
-         * space reclaimed.
-         */
-        for (seg = AS_SEGFIRST(as); seg != NULL; seg = AS_SEGNEXT(as, seg)) {
-                struct seg_ops *ov = seg->s_ops;
-
-                /*
-                 * We have to check to see if the seg has
-                 * an ops vector because the seg may have
-                 * been in the middle of being set up when
-                 * the process was picked for swapout.
-                 */
-                if ((ov != NULL) && (ov->swapout != NULL))
-                        swpcnt += SEGOP_SWAPOUT(seg);
-        }
-        AS_LOCK_EXIT(as, &as->a_lock);
-        return (swpcnt);
-}
-
-/*
  * Determine whether data from the mappings in interval [addr, addr + size)
  * are in the primary memory (core) cache.
  */
 int
 as_incore(struct as *as, caddr_t addr,

@@ -2247,11 +2097,11 @@
                 }
                 if ((raddr + rsize) > (seg->s_base + seg->s_size))
                         ssize = seg->s_base + seg->s_size - raddr;
                 else
                         ssize = rsize;
-                *sizep += isize = SEGOP_INCORE(seg, raddr, ssize, vec);
+                *sizep += isize = segop_incore(seg, raddr, ssize, vec);
                 if (isize != ssize) {
                         error = -1;
                         break;
                 }
                 vec += btopr(ssize);

@@ -2273,11 +2123,11 @@
         while (bt_range(bitmap, &pos1, &pos2, end_pos)) {
                 size = ptob((pos2 - pos1));
                 range_start = (caddr_t)((uintptr_t)addr +
                     ptob(pos1 - position));
 
-                (void) SEGOP_LOCKOP(seg, range_start, size, attr, MC_UNLOCK,
+                (void) segop_lockop(seg, range_start, size, attr, MC_UNLOCK,
                     (ulong_t *)NULL, (size_t)NULL);
                 pos1 = pos2;
         }
 }
 

@@ -2369,11 +2219,11 @@
                                 AS_LOCK_EXIT(as, &as->a_lock);
                                 return (EAGAIN);
                 }
 
                 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
-                        error = SEGOP_LOCKOP(seg, seg->s_base,
+                        error = segop_lockop(seg, seg->s_base,
                             seg->s_size, attr, MC_LOCK, mlock_map, pos);
                         if (error != 0)
                                 break;
                         pos += seg_pages(seg);
                 }

@@ -2398,11 +2248,11 @@
                 mutex_enter(&as->a_contents);
                 AS_CLRPGLCK(as);
                 mutex_exit(&as->a_contents);
 
                 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
-                        error = SEGOP_LOCKOP(seg, seg->s_base,
+                        error = segop_lockop(seg, seg->s_base,
                             seg->s_size, attr, MC_UNLOCK, NULL, 0);
                         if (error != 0)
                                 break;
                 }
 

@@ -2476,22 +2326,22 @@
                 /*
                  * Synchronize cached data from mappings with backing
                  * objects.
                  */
                 case MC_SYNC:
-                        if (error = SEGOP_SYNC(seg, raddr, ssize,
+                        if (error = segop_sync(seg, raddr, ssize,
                             attr, (uint_t)arg)) {
                                 AS_LOCK_EXIT(as, &as->a_lock);
                                 return (error);
                         }
                         break;
 
                 /*
                  * Lock pages in memory.
                  */
                 case MC_LOCK:
-                        if (error = SEGOP_LOCKOP(seg, raddr, ssize,
+                        if (error = segop_lockop(seg, raddr, ssize,
                             attr, func, mlock_map, pos)) {
                                 as_unlockerr(as, attr, mlock_map, initraddr,
                                     initrsize - rsize + ssize);
                                 kmem_free(mlock_map, mlock_size *
                                     sizeof (ulong_t));

@@ -2502,19 +2352,19 @@
 
                 /*
                  * Unlock mapped pages.
                  */
                 case MC_UNLOCK:
-                        (void) SEGOP_LOCKOP(seg, raddr, ssize, attr, func,
+                        (void) segop_lockop(seg, raddr, ssize, attr, func,
                             (ulong_t *)NULL, (size_t)NULL);
                         break;
 
                 /*
                  * Store VM advise for mapped pages in segment layer.
                  */
                 case MC_ADVISE:
-                        error = SEGOP_ADVISE(seg, raddr, ssize, (uint_t)arg);
+                        error = segop_advise(seg, raddr, ssize, (uint_t)arg);
 
                         /*
                          * Check for regular errors and special retry error
                          */
                         if (error) {

@@ -2546,16 +2396,11 @@
                                 }
                         }
                         break;
 
                 case MC_INHERIT_ZERO:
-                        if (seg->s_ops->inherit == NULL) {
-                                error = ENOTSUP;
-                        } else {
-                                error = SEGOP_INHERIT(seg, raddr, ssize,
-                                    SEGP_INH_ZERO);
-                        }
+                        error = segop_inherit(seg, raddr, ssize, SEGP_INH_ZERO);
                         if (error != 0) {
                                 AS_LOCK_EXIT(as, &as->a_lock);
                                 return (error);
                         }
                         break;

@@ -2635,11 +2480,11 @@
         page_t **pl;
         int error;
         caddr_t eaddr;
         faultcode_t fault_err = 0;
         pgcnt_t pl_off;
-        extern struct seg_ops segspt_shmops;
+        extern const struct seg_ops segspt_shmops;
 
         ASSERT(AS_LOCK_HELD(as, &as->a_lock));
         ASSERT(seg != NULL);
         ASSERT(addr >= seg->s_base && addr < seg->s_base + seg->s_size);
         ASSERT(addr + size > seg->s_base + seg->s_size);

@@ -2664,11 +2509,11 @@
                          * will most likely support pagelock.
                          */
                         if (seg->s_ops == &segvn_ops) {
                                 vnode_t *vp;
 
-                                if (SEGOP_GETVP(seg, addr, &vp) != 0 ||
+                                if (segop_getvp(seg, addr, &vp) != 0 ||
                                     vp != NULL) {
                                         AS_LOCK_EXIT(as, &as->a_lock);
                                         goto slow;
                                 }
                         } else if (seg->s_ops != &segspt_shmops) {

@@ -2702,11 +2547,11 @@
                         ssize = seg->s_base + seg->s_size - addr;
                 } else {
                         ssize = size;
                 }
                 pl = &plist[npages + cnt];
-                error = SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
+                error = segop_pagelock(seg, addr, ssize, (page_t ***)pl,
                     L_PAGELOCK, rw);
                 if (error) {
                         break;
                 }
                 ASSERT(plist[npages + cnt] != NULL);

@@ -2745,11 +2590,11 @@
                 } else {
                         ssize = eaddr - addr;
                 }
                 pl = &plist[npages + cnt];
                 ASSERT(*pl != NULL);
-                (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
+                (void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
                     L_PAGEUNLOCK, rw);
         }
 
         AS_LOCK_EXIT(as, &as->a_lock);
 

@@ -2820,11 +2665,11 @@
             "seg_lock_1_start: raddr %p rsize %ld", raddr, rsize);
 
         /*
          * try to lock pages and pass back shadow list
          */
-        err = SEGOP_PAGELOCK(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
+        err = segop_pagelock(seg, raddr, rsize, ppp, L_PAGELOCK, rw);
 
         TRACE_0(TR_FAC_PHYSIO, TR_PHYSIO_SEG_LOCK_END, "seg_lock_1_end");
 
         AS_LOCK_EXIT(as, &as->a_lock);
 

@@ -2883,11 +2728,11 @@
                 } else {
                         ssize = eaddr - addr;
                 }
                 pl = &plist[npages + cnt];
                 ASSERT(*pl != NULL);
-                (void) SEGOP_PAGELOCK(seg, addr, ssize, (page_t ***)pl,
+                (void) segop_pagelock(seg, addr, ssize, (page_t ***)pl,
                     L_PAGEUNLOCK, rw);
         }
         ASSERT(cnt > 0);
         AS_LOCK_EXIT(as, &as->a_lock);
 

@@ -2929,11 +2774,11 @@
         TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEG_UNLOCK_START,
             "seg_unlock_start: raddr %p rsize %ld", raddr, rsize);
 
         ASSERT(raddr >= seg->s_base && raddr < seg->s_base + seg->s_size);
         if (raddr + rsize <= seg->s_base + seg->s_size) {
-                SEGOP_PAGELOCK(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
+                segop_pagelock(seg, raddr, rsize, &pp, L_PAGEUNLOCK, rw);
         } else {
                 as_pageunlock_segs(as, seg, raddr, rsize, pp, rw);
                 return;
         }
         AS_LOCK_EXIT(as, &as->a_lock);

@@ -2984,11 +2829,11 @@
                 } else {
                         ssize = rsize;
                 }
 
 retry:
-                error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
+                error = segop_setpagesize(seg, raddr, ssize, szc);
 
                 if (error == IE_NOMEM) {
                         error = EAGAIN;
                         break;
                 }

@@ -3063,11 +2908,11 @@
         AS_LOCK_EXIT(as, &as->a_lock);
         return (error);
 }
 
 /*
- * as_iset3_default_lpsize() just calls SEGOP_SETPAGESIZE() on all segments
+ * as_iset3_default_lpsize() just calls segop_setpagesize() on all segments
  * in its chunk where s_szc is less than the szc we want to set.
  */
 static int
 as_iset3_default_lpsize(struct as *as, caddr_t raddr, size_t rsize, uint_t szc,
     int *retry)

@@ -3095,16 +2940,16 @@
                 } else {
                         ssize = rsize;
                 }
 
                 if (szc > seg->s_szc) {
-                        error = SEGOP_SETPAGESIZE(seg, raddr, ssize, szc);
+                        error = segop_setpagesize(seg, raddr, ssize, szc);
                         /* Only retry on EINVAL segments that have no vnode. */
                         if (error == EINVAL) {
                                 vnode_t *vp = NULL;
-                                if ((SEGOP_GETTYPE(seg, raddr) & MAP_SHARED) &&
-                                    (SEGOP_GETVP(seg, raddr, &vp) != 0 ||
+                                if ((segop_gettype(seg, raddr) & MAP_SHARED) &&
+                                    (segop_getvp(seg, raddr, &vp) != 0 ||
                                     vp == NULL)) {
                                         *retry = 1;
                                 } else {
                                         *retry = 0;
                                 }

@@ -3343,11 +3188,11 @@
                 as_setwatch(as);
                 AS_LOCK_EXIT(as, &as->a_lock);
                 return (ENOMEM);
         }
         if (seg->s_ops == &segvn_ops) {
-                rtype = SEGOP_GETTYPE(seg, addr);
+                rtype = segop_gettype(seg, addr);
                 rflags = rtype & (MAP_TEXT | MAP_INITDATA);
                 rtype = rtype & (MAP_SHARED | MAP_PRIVATE);
                 segvn = 1;
         } else {
                 segvn = 0;

@@ -3361,11 +3206,11 @@
                         if (seg == NULL || raddr != seg->s_base) {
                                 error = ENOMEM;
                                 break;
                         }
                         if (seg->s_ops == &segvn_ops) {
-                                stype = SEGOP_GETTYPE(seg, raddr);
+                                stype = segop_gettype(seg, raddr);
                                 sflags = stype & (MAP_TEXT | MAP_INITDATA);
                                 stype &= (MAP_SHARED | MAP_PRIVATE);
                                 if (segvn && (rflags != sflags ||
                                     rtype != stype)) {
                                         /*

@@ -3475,22 +3320,22 @@
                 retrycnt = 0;
         retry:
                 vaddr = pwp->wp_vaddr;
                 if (pwp->wp_oprot != 0 ||       /* already set up */
                     (seg = as_segat(as, vaddr)) == NULL ||
-                    SEGOP_GETPROT(seg, vaddr, 0, &prot) != 0)
+                    segop_getprot(seg, vaddr, 0, &prot) != 0)
                         continue;
 
                 pwp->wp_oprot = prot;
                 if (pwp->wp_read)
                         prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
                 if (pwp->wp_write)
                         prot &= ~PROT_WRITE;
                 if (pwp->wp_exec)
                         prot &= ~(PROT_READ|PROT_WRITE|PROT_EXEC);
                 if (!(pwp->wp_flags & WP_NOWATCH) && prot != pwp->wp_oprot) {
-                        err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
+                        err = segop_setprot(seg, vaddr, PAGESIZE, prot);
                         if (err == IE_RETRY) {
                                 pwp->wp_oprot = 0;
                                 ASSERT(retrycnt == 0);
                                 retrycnt++;
                                 goto retry;

@@ -3525,11 +3370,11 @@
                 if (pwp->wp_oprot == 0 ||       /* not set up */
                     (seg = as_segat(as, vaddr)) == NULL)
                         continue;
 
                 if ((prot = pwp->wp_oprot) != pwp->wp_prot) {
-                        err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, prot);
+                        err = segop_setprot(seg, vaddr, PAGESIZE, prot);
                         if (err == IE_RETRY) {
                                 ASSERT(retrycnt == 0);
                                 retrycnt++;
                                 goto retry;
                         }

@@ -3579,11 +3424,11 @@
                         seg = as_segat(as, vaddr);
                         if (seg == NULL) {
                                 panic("as_setwatchprot: no seg");
                                 /*NOTREACHED*/
                         }
-                        err = SEGOP_SETPROT(seg, vaddr, PAGESIZE, wprot);
+                        err = segop_setprot(seg, vaddr, PAGESIZE, wprot);
                         if (err == IE_RETRY) {
                                 ASSERT(retrycnt == 0);
                                 retrycnt++;
                                 goto retry;
                         }

@@ -3626,11 +3471,11 @@
                         if (prot != pwp->wp_prot) {
                         retry:
                                 seg = as_segat(as, pwp->wp_vaddr);
                                 if (seg == NULL)
                                         continue;
-                                err = SEGOP_SETPROT(seg, pwp->wp_vaddr,
+                                err = segop_setprot(seg, pwp->wp_vaddr,
                                     PAGESIZE, prot);
                                 if (err == IE_RETRY) {
                                         ASSERT(retrycnt == 0);
                                         retrycnt++;
                                         goto retry;

@@ -3675,18 +3520,11 @@
         seg = as_segat(as, addr);
         if (seg == NULL) {
                 AS_LOCK_EXIT(as, &as->a_lock);
                 return (EFAULT);
         }
-        /*
-         * catch old drivers which may not support getmemid
-         */
-        if (seg->s_ops->getmemid == NULL) {
-                AS_LOCK_EXIT(as, &as->a_lock);
-                return (ENODEV);
-        }
 
-        sts = SEGOP_GETMEMID(seg, addr, memidp);
+        sts = segop_getmemid(seg, addr, memidp);
 
         AS_LOCK_EXIT(as, &as->a_lock);
         return (sts);
 }