const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
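As a reduced userland sketch (the types here are stand-ins, not the kernel's): const-ifying the table lets the compiler place it in read-only data, so a stray write faults instead of silently corrupting a function pointer.

struct seg;

/* stand-in for a segment driver's table of entry points */
struct seg_ops {
	int	(*dup)(struct seg *);
	int	(*unmap)(struct seg *);
};

static int
my_dup(struct seg *seg)
{
	(void) seg;
	return (0);
}

/*
 * const: the table lands in .rodata and can never be patched at
 * runtime -- which is fine, since nothing should ever modify an
 * ops table anyway.
 */
static const struct seg_ops my_ops = {
	.dup	= my_dup,
	.unmap	= NULL,		/* unmap not supported */
};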
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as shorthand for
"no capabilities supported".
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Leaving the function pointer NULL accomplishes the same thing in most
cases, since calling through a NULL pointer also panics.  In the remaining
cases, a NULL function pointer results in the proper error code being
returned to the caller.
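In sketch form (the wrapper names are illustrative, not the actual illumos code), the dispatch layer gets both behaviors for free:

/*
 * Mandatory op: no NULL check.  Calling through a NULL pointer
 * traps and panics, which is all the old badop stubs did anyway.
 */
static int
segop_dup(struct seg *seg, struct seg *newseg)
{
	return (seg->s_ops->dup(seg, newseg));
}

/*
 * Optional op: a NULL pointer means "not supported", so return a
 * real error code instead of bringing down the system.
 */
static int
segop_setpgsz(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
	if (seg->s_ops->setpgsz == NULL)
		return (ENOTSUP);

	return (seg->s_ops->setpgsz(seg, addr, len, szc));
}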
use C99 initializers in segment ops structures
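Beyond readability, designated initializers zero every member you do not name, so unsupported ops become NULL without a single explicit entry. A generic example:

struct point {
	int	x;
	int	y;
	int	z;
};

/* positional: members must appear in declaration order, all of them */
static struct point a = { 1, 2, 0 };

/* designated (C99): name only what you need; z is implicitly 0 */
static struct point b = { .x = 1, .y = 2 };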
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still there, and in theory it runs when we get *extremely*
low on memory.  In practice, it never runs, since the definition of
low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
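The vm kstat is per-CPU, so expect one swapin and one swapout line per CPU. On a healthy system the counters sit at zero; illustrative output from a hypothetical two-CPU machine:

cpu:0:vm:swapin	0
cpu:0:vm:swapout	0
cpu:1:vm:swapin	0
cpu:1:vm:swapout	0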


Before (seg_map.c):

/*
 * Private seg op routines.
 */
static void	segmap_free(struct seg *seg);
faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
static int	segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
static int	segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t *protv);
static u_offset_t	segmap_getoffset(struct seg *seg, caddr_t addr);
static int	segmap_gettype(struct seg *seg, caddr_t addr);
static int	segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static void	segmap_dump(struct seg *seg);
static int	segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***ppp, enum lock_type type,
			enum seg_rw rw);
static void	segmap_badop(void);
static int	segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
static lgrp_mem_policy_info_t	*segmap_getpolicy(struct seg *seg,
    caddr_t addr);
static int	segmap_capable(struct seg *seg, segcapability_t capability);

/* segkpm support */
static caddr_t	segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
			struct smap *, enum seg_rw);
struct smap	*get_smap_kpm(caddr_t, page_t **);

#define	SEGMAP_BADOP(t)	(t(*)())segmap_badop

static struct seg_ops segmap_ops = {
	SEGMAP_BADOP(int),	/* dup */
	SEGMAP_BADOP(int),	/* unmap */
	segmap_free,
	segmap_fault,
	segmap_faulta,
	SEGMAP_BADOP(int),	/* setprot */
	segmap_checkprot,
	segmap_kluster,
	SEGMAP_BADOP(size_t),	/* swapout */
	SEGMAP_BADOP(int),	/* sync */
	SEGMAP_BADOP(size_t),	/* incore */
	SEGMAP_BADOP(int),	/* lockop */
	segmap_getprot,
	segmap_getoffset,
	segmap_gettype,
	segmap_getvp,
	SEGMAP_BADOP(int),	/* advise */
	segmap_dump,
	segmap_pagelock,	/* pagelock */
	SEGMAP_BADOP(int),	/* setpgsz */
	segmap_getmemid,	/* getmemid */
	segmap_getpolicy,	/* getpolicy */
	segmap_capable,		/* capable */
	seg_inherit_notsup	/* inherit */
};

/*
 * Private segmap routines.
 */
static void	segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum seg_rw rw, struct smap *smp);
static void	segmap_smapadd(struct smap *smp);
static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
			u_offset_t off, int hashid);
static void	segmap_hashout(struct smap *smp);


/*
 * Statistics for segmap operations.
 *
 * No explicit locking to protect these stats.
 */
struct segmapcnt segmapcnt = {
	{ "fault",		KSTAT_DATA_ULONG },


...

	/* XXX - This doesn't make any sense */
	*vpp = smd->smd_sm->sm_vp;
	return (0);
}

/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segmap we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

static void
segmap_badop()
{
	panic("segmap_badop");
	/*NOTREACHED*/
}

/*
 * Special private segmap operations
 */

/*
 * Add smap to the appropriate free list.
 */
static void
segmap_smapadd(struct smap *smp)
{
	struct smfree *sm;
	struct smap *smpfreelist;
	struct sm_freeq *releq;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));

	if (smp->sm_refcnt != 0) {
		panic("segmap_smapadd");
		/*NOTREACHED*/
	}


...

		}
		addr += MAXBSIZE;
	}
}

/*ARGSUSED*/
static int
segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

static int
segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
	memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
	return (0);
}

/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segmap_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}

/*ARGSUSED*/
static int
segmap_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}


#ifdef	SEGKPM_SUPPORT

/*
 * segkpm support routines
 */

static caddr_t
segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
	struct smap *smp, enum seg_rw rw)
{
	caddr_t base;
	page_t	*pp;
	int	newpage = 0;
	struct kpme	*kpme;

	ASSERT(smp->sm_refcnt > 0);




After (seg_map.c):

/*
 * Private seg op routines.
 */
static void	segmap_free(struct seg *seg);
faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum fault_type type, enum seg_rw rw);
static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
static int	segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t prot);
static int	segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
static int	segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
			uint_t *protv);
static u_offset_t	segmap_getoffset(struct seg *seg, caddr_t addr);
static int	segmap_gettype(struct seg *seg, caddr_t addr);
static int	segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
static void	segmap_dump(struct seg *seg);
static int	segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***ppp, enum lock_type type,
			enum seg_rw rw);
static int	segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);

/* segkpm support */
static caddr_t	segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
			struct smap *, enum seg_rw);
struct smap	*get_smap_kpm(caddr_t, page_t **);

static const struct seg_ops segmap_ops = {
	.free		= segmap_free,
	.fault		= segmap_fault,
	.faulta		= segmap_faulta,
	.checkprot	= segmap_checkprot,
	.kluster	= segmap_kluster,
	.getprot	= segmap_getprot,
	.getoffset	= segmap_getoffset,
	.gettype	= segmap_gettype,
	.getvp		= segmap_getvp,
	.dump		= segmap_dump,
	.pagelock	= segmap_pagelock,
	.getmemid	= segmap_getmemid,
};

/*
 * Private segmap routines.
 */
static void	segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum seg_rw rw, struct smap *smp);
static void	segmap_smapadd(struct smap *smp);
static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
			u_offset_t off, int hashid);
static void	segmap_hashout(struct smap *smp);


/*
 * Statistics for segmap operations.
 *
 * No explicit locking to protect these stats.
 */
struct segmapcnt segmapcnt = {
	{ "fault",		KSTAT_DATA_ULONG },


...

	/* XXX - This doesn't make any sense */
	*vpp = smd->smd_sm->sm_vp;
	return (0);
}

/*
 * Check to see if it makes sense to do kluster/read ahead to
 * addr + delta relative to the mapping at addr.  We assume here
 * that delta is a signed PAGESIZE'd multiple (which can be negative).
 *
 * For segmap we always "approve" of this action from our standpoint.
 */
/*ARGSUSED*/
static int
segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	return (0);
}

/*
 * Special private segmap operations
 */

/*
 * Add smap to the appropriate free list.
 */
static void
segmap_smapadd(struct smap *smp)
{
	struct smfree *sm;
	struct smap *smpfreelist;
	struct sm_freeq *releq;

	ASSERT(MUTEX_HELD(SMAPMTX(smp)));

	if (smp->sm_refcnt != 0) {
		panic("segmap_smapadd");
		/*NOTREACHED*/
	}


...

		}
		addr += MAXBSIZE;
	}
}

/*ARGSUSED*/
static int
segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

static int
segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	struct segmap_data *smd = (struct segmap_data *)seg->s_data;

	memidp->val[0] = (uintptr_t)smd->smd_sm->sm_vp;
	memidp->val[1] = smd->smd_sm->sm_off + (uintptr_t)(addr - seg->s_base);
	return (0);
}


#ifdef	SEGKPM_SUPPORT

/*
 * segkpm support routines
 */

static caddr_t
segmap_pagecreate_kpm(struct seg *seg, vnode_t *vp, u_offset_t off,
	struct smap *smp, enum seg_rw rw)
{
	caddr_t base;
	page_t	*pp;
	int	newpage = 0;
	struct kpme	*kpme;

	ASSERT(smp->sm_refcnt > 0);