const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function that
returns (hopefully) ENODEV, treat a NULL getmemid segop function pointer as
shorthand for "return ENODEV".
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, treat a NULL capable segop function pointer as shorthand for "no
capabilities supported".
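The same shape works here (again a sketch, assuming a hypothetical
segop_capable() dispatch wrapper):

int
segop_capable(struct seg *seg, segcapability_t capability)
{
        /* a driver without a capable op supports no optional capabilities */
        if (seg->s_ops->capable == NULL)
                return (0);

        return (seg->s_ops->capable(seg, capability));
}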
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
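That is, the inherit dispatch already degrades gracefully on its own, so the
per-driver stub adds nothing.  Roughly (sketch, assuming segop_inherit()
returns ENOTSUP for a missing op, which is what seg_inherit_notsup returned):

int
segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
{
        /* NULL op: this driver does not support inheritance control */
        if (seg->s_ops->inherit == NULL)
                return (ENOTSUP);

        return (seg->s_ops->inherit(seg, addr, len, op));
}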
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL accomplishes the same thing in most cases,
since calling through a NULL pointer panics as well.  In the remaining
cases, a NULL function pointer results in the proper error code being
returned.
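For ops where calling into a driver that lacks them is a programmer error,
the dispatch can simply call through the pointer; a NULL op then panics on
dereference, which is all the bad-op stubs ever did.  Sketch, using a
hypothetical segop_dup() wrapper:

int
segop_dup(struct seg *seg, struct seg *newseg)
{
        /* no NULL check: a NULL dup op panics, just like the old bad-op */
        return (seg->s_ops->dup(seg, newseg));
}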
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still there, and in theory it runs when we get
*extremely* low on memory.  In practice, it never runs since the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
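The counters live in the per-CPU "vm" kstats, so expect one line per CPU per
statistic; on any modern system they should all be zero.  Sample output
(hypothetical; CPU count and ordering will vary):

cpu:0:vm:swapin	0
cpu:0:vm:swapout	0
cpu:1:vm:swapin	0
cpu:1:vm:swapout	0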

@@ -73,11 +73,10 @@
 #include <sys/bitmap.h>
 
 /*
  * Private seg op routines
  */
-static void     segkp_badop(void);
 static void     segkp_dump(struct seg *seg);
 static int      segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
                         uint_t prot);
 static int      segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
 static int      segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,

@@ -92,14 +91,10 @@
 static int      segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
                         size_t len, struct segkp_data *kpd, uint_t flags);
 static int      segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
                         size_t len, struct segkp_data *kpd, uint_t flags);
 static struct   segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
-static int      segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
-static lgrp_mem_policy_info_t   *segkp_getpolicy(struct seg *seg,
-    caddr_t addr);
-static int      segkp_capable(struct seg *seg, segcapability_t capability);
 
 /*
  * Lock used to protect the hash table(s) and caches.
  */
 static kmutex_t segkp_lock;

@@ -107,12 +102,10 @@
 /*
  * The segkp caches
  */
 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
 
-#define SEGKP_BADOP(t)  (t(*)())segkp_badop
-
 /*
  * When there are fewer than red_minavail bytes left on the stack,
  * segkp_map_red() will map in the redzone (if called).  5000 seems
  * to work reasonably well...
  */

@@ -143,45 +136,19 @@
 uint32_t        red_ndoubles;
 
 pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
 pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
 
-static struct   seg_ops segkp_ops = {
-        SEGKP_BADOP(int),               /* dup */
-        SEGKP_BADOP(int),               /* unmap */
-        SEGKP_BADOP(void),              /* free */
-        segkp_fault,
-        SEGKP_BADOP(faultcode_t),       /* faulta */
-        SEGKP_BADOP(int),               /* setprot */
-        segkp_checkprot,
-        segkp_kluster,
-        SEGKP_BADOP(size_t),            /* swapout */
-        SEGKP_BADOP(int),               /* sync */
-        SEGKP_BADOP(size_t),            /* incore */
-        SEGKP_BADOP(int),               /* lockop */
-        SEGKP_BADOP(int),               /* getprot */
-        SEGKP_BADOP(u_offset_t),                /* getoffset */
-        SEGKP_BADOP(int),               /* gettype */
-        SEGKP_BADOP(int),               /* getvp */
-        SEGKP_BADOP(int),               /* advise */
-        segkp_dump,                     /* dump */
-        segkp_pagelock,                 /* pagelock */
-        SEGKP_BADOP(int),               /* setpgsz */
-        segkp_getmemid,                 /* getmemid */
-        segkp_getpolicy,                /* getpolicy */
-        segkp_capable,                  /* capable */
-        seg_inherit_notsup              /* inherit */
+static const struct seg_ops segkp_ops = {
+        .fault          = segkp_fault,
+        .checkprot      = segkp_checkprot,
+        .kluster        = segkp_kluster,
+        .dump           = segkp_dump,
+        .pagelock       = segkp_pagelock,
 };
 
 
-static void
-segkp_badop(void)
-{
-        panic("segkp_badop");
-        /*NOTREACHED*/
-}
-
 static void segkpinit_mem_config(struct seg *);
 
 static uint32_t segkp_indel;
 
 /*

@@ -758,19 +725,15 @@
 /*
  * segkp_map_red() will check the current frame pointer against the
  * stack base.  If the amount of stack remaining is questionable
  * (less than red_minavail), then segkp_map_red() will map in the redzone
  * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
- * _only_ be called when:
- *
- *   - it is safe to sleep on page_create_va().
- *   - the caller is non-swappable.
+ * _only_ be called when it is safe to sleep on page_create_va().
  *
  * It is up to the caller to remember whether segkp_map_red() successfully
  * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
- * time.  Note that the caller must _remain_ non-swappable until after
- * calling segkp_unmap_red().
+ * time.
  *
  * Currently, this routine is only called from pagefault() (which necessarily
  * satisfies the above conditions).
  */
 #if defined(STACK_GROWTH_DOWN)

@@ -780,12 +743,10 @@
         uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
 #ifndef _LP64
         caddr_t stkbase;
 #endif
 
-        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
-
         /*
          * Optimize for the common case where we simply return.
          */
         if ((curthread->t_red_pp == NULL) &&
             (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))

@@ -882,11 +843,10 @@
         page_t *pp;
         caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
             (uintptr_t)PAGEMASK) - PAGESIZE);
 
         ASSERT(curthread->t_red_pp != NULL);
-        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 
         /*
          * Because we locked the mapping down, we can't simply rely
          * on page_destroy() to clean everything up;  we need to call
          * hat_unload() to explicitly unlock the mapping resources.

@@ -1397,31 +1357,10 @@
     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 {
         return (ENOTSUP);
 }
 
-/*ARGSUSED*/
-static int
-segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
-{
-        return (ENODEV);
-}
-
-/*ARGSUSED*/
-static lgrp_mem_policy_info_t   *
-segkp_getpolicy(struct seg *seg, caddr_t addr)
-{
-        return (NULL);
-}
-
-/*ARGSUSED*/
-static int
-segkp_capable(struct seg *seg, segcapability_t capability)
-{
-        return (0);
-}
-
 #include <sys/mem_config.h>
 
 /*ARGSUSED*/
 static void
 segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)