Print this page
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL will accomplish the same thing in most
cases.  In other cases, a NULL function pointer will result in the
proper error code being returned.


  58 #include <sys/dumphdr.h>
  59 #include <sys/debug.h>
  60 #include <sys/vtrace.h>
  61 #include <sys/stack.h>
  62 #include <sys/atomic.h>
  63 #include <sys/archsystm.h>
  64 #include <sys/lgrp.h>
  65 
  66 #include <vm/as.h>
  67 #include <vm/seg.h>
  68 #include <vm/seg_kp.h>
  69 #include <vm/seg_kmem.h>
  70 #include <vm/anon.h>
  71 #include <vm/page.h>
  72 #include <vm/hat.h>
  73 #include <sys/bitmap.h>
  74 
  75 /*
  76  * Private seg op routines
  77  */
  78 static void     segkp_badop(void);
  79 static void     segkp_dump(struct seg *seg);
  80 static int      segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
  81                         uint_t prot);
  82 static int      segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
  83 static int      segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
  84                         struct page ***page, enum lock_type type,
  85                         enum seg_rw rw);
  86 static void     segkp_insert(struct seg *seg, struct segkp_data *kpd);
  87 static void     segkp_delete(struct seg *seg, struct segkp_data *kpd);
  88 static caddr_t  segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
  89                         struct segkp_data **tkpd, struct anon_map *amp);
  90 static void     segkp_release_internal(struct seg *seg,
  91                         struct segkp_data *kpd, size_t len);
  92 static int      segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
  93                         size_t len, struct segkp_data *kpd, uint_t flags);
  94 static int      segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
  95                         size_t len, struct segkp_data *kpd, uint_t flags);
  96 static struct   segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
  97 static int      segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
  98 static lgrp_mem_policy_info_t   *segkp_getpolicy(struct seg *seg,
  99     caddr_t addr);
 100 static int      segkp_capable(struct seg *seg, segcapability_t capability);
 101 
 102 /*
 103  * Lock used to protect the hash table(s) and caches.
 104  */
 105 static kmutex_t segkp_lock;
 106 
 107 /*
 108  * The segkp caches
 109  */
 110 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
 111 
 112 #define SEGKP_BADOP(t)  (t(*)())segkp_badop
 113 
 114 /*
 115  * When there are fewer than red_minavail bytes left on the stack,
 116  * segkp_map_red() will map in the redzone (if called).  5000 seems
 117  * to work reasonably well...
 118  */
 119 long            red_minavail = 5000;
 120 
 121 /*
 122  * will be set to 1 for 32 bit x86 systems only, in startup.c
 123  */
 124 int     segkp_fromheap = 0;
 125 ulong_t *segkp_bitmap;
 126 
 127 /*
 128  * If segkp_map_red() is called with the redzone already mapped and
 129  * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 130  * then the stack situation has become quite serious;  if much more stack
 131  * is consumed, we have the potential of scrogging the next thread/LWP
 132  * structure.  To help debug the "can't happen" panics which may
 133  * result from this condition, we record hrestime and the calling thread
 134  * in red_deep_hires and red_deep_thread respectively.
 135  */
 136 #define RED_DEEP_THRESHOLD      2000
 137 
 138 hrtime_t        red_deep_hires;
 139 kthread_t       *red_deep_thread;
 140 
 141 uint32_t        red_nmapped;
 142 uint32_t        red_closest = UINT_MAX;
 143 uint32_t        red_ndoubles;
 144 
 145 pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
 146 pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
 147 
  148 static struct   seg_ops segkp_ops = {
              /*
               * Segment-operations vector for segkp.  Entries marked with
               * SEGKP_BADOP are unsupported for this segment driver: the
               * macro casts segkp_badop() to the required function type,
               * so invoking any of them panics the system.
               */
  149         .dup            = SEGKP_BADOP(int),
  150         .unmap          = SEGKP_BADOP(int),
  151         .free           = SEGKP_BADOP(void),
  152         .fault          = segkp_fault,
  153         .faulta         = SEGKP_BADOP(faultcode_t),
  154         .setprot        = SEGKP_BADOP(int),
  155         .checkprot      = segkp_checkprot,
  156         .kluster        = segkp_kluster,
  157         .sync           = SEGKP_BADOP(int),
  158         .incore         = SEGKP_BADOP(size_t),
  159         .lockop         = SEGKP_BADOP(int),
  160         .getprot        = SEGKP_BADOP(int),
  161         .getoffset      = SEGKP_BADOP(u_offset_t),
  162         .gettype        = SEGKP_BADOP(int),
  163         .getvp          = SEGKP_BADOP(int),
  164         .advise         = SEGKP_BADOP(int),
  165         .dump           = segkp_dump,
  166         .pagelock       = segkp_pagelock,
  167         .setpagesize    = SEGKP_BADOP(int),
  168         .getmemid       = segkp_getmemid,
  169         .getpolicy      = segkp_getpolicy,
  170         .capable        = segkp_capable,
  171         .inherit        = seg_inherit_notsup,
  172 };
 173 
 174 
  175 static void
  176 segkp_badop(void)
  177 {
              /*
               * Common target for all unsupported seg ops (installed via
               * the SEGKP_BADOP cast); unconditionally panics.
               */
  178         panic("segkp_badop");
  179         /*NOTREACHED*/
  180 }
 181 
 182 static void segkpinit_mem_config(struct seg *);
 183 
 184 static uint32_t segkp_indel;
 185 
 186 /*
 187  * Allocate the segment specific private data struct and fill it in
 188  * with the per kp segment mutex, anon ptr. array and hash table.
 189  */
 190 int
 191 segkp_create(struct seg *seg)
 192 {
 193         struct segkp_segdata *kpsd;
 194         size_t  np;
 195 
 196         ASSERT(seg != NULL && seg->s_as == &kas);
 197         ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
 198 
 199         if (seg->s_size & PAGEOFFSET) {
 200                 panic("Bad segkp size");




  58 #include <sys/dumphdr.h>
  59 #include <sys/debug.h>
  60 #include <sys/vtrace.h>
  61 #include <sys/stack.h>
  62 #include <sys/atomic.h>
  63 #include <sys/archsystm.h>
  64 #include <sys/lgrp.h>
  65 
  66 #include <vm/as.h>
  67 #include <vm/seg.h>
  68 #include <vm/seg_kp.h>
  69 #include <vm/seg_kmem.h>
  70 #include <vm/anon.h>
  71 #include <vm/page.h>
  72 #include <vm/hat.h>
  73 #include <sys/bitmap.h>
  74 
  75 /*
  76  * Private seg op routines
  77  */

  78 static void     segkp_dump(struct seg *seg);
  79 static int      segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
  80                         uint_t prot);
  81 static int      segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
  82 static int      segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
  83                         struct page ***page, enum lock_type type,
  84                         enum seg_rw rw);
  85 static void     segkp_insert(struct seg *seg, struct segkp_data *kpd);
  86 static void     segkp_delete(struct seg *seg, struct segkp_data *kpd);
  87 static caddr_t  segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
  88                         struct segkp_data **tkpd, struct anon_map *amp);
  89 static void     segkp_release_internal(struct seg *seg,
  90                         struct segkp_data *kpd, size_t len);
  91 static int      segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
  92                         size_t len, struct segkp_data *kpd, uint_t flags);
  93 static int      segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
  94                         size_t len, struct segkp_data *kpd, uint_t flags);
  95 static struct   segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
  96 static int      segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
  97 static lgrp_mem_policy_info_t   *segkp_getpolicy(struct seg *seg,
  98     caddr_t addr);
  99 static int      segkp_capable(struct seg *seg, segcapability_t capability);
 100 
 101 /*
 102  * Lock used to protect the hash table(s) and caches.
 103  */
 104 static kmutex_t segkp_lock;
 105 
 106 /*
 107  * The segkp caches
 108  */
 109 static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
 110 


 111 /*
 112  * When there are fewer than red_minavail bytes left on the stack,
 113  * segkp_map_red() will map in the redzone (if called).  5000 seems
 114  * to work reasonably well...
 115  */
 116 long            red_minavail = 5000;
 117 
 118 /*
 119  * will be set to 1 for 32 bit x86 systems only, in startup.c
 120  */
 121 int     segkp_fromheap = 0;
 122 ulong_t *segkp_bitmap;
 123 
 124 /*
 125  * If segkp_map_red() is called with the redzone already mapped and
 126  * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 127  * then the stack situation has become quite serious;  if much more stack
 128  * is consumed, we have the potential of scrogging the next thread/LWP
 129  * structure.  To help debug the "can't happen" panics which may
 130  * result from this condition, we record hrestime and the calling thread
 131  * in red_deep_hires and red_deep_thread respectively.
 132  */
 133 #define RED_DEEP_THRESHOLD      2000
 134 
 135 hrtime_t        red_deep_hires;
 136 kthread_t       *red_deep_thread;
 137 
 138 uint32_t        red_nmapped;
 139 uint32_t        red_closest = UINT_MAX;
 140 uint32_t        red_ndoubles;
 141 
 142 pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
 143 pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
 144 
  145 static struct   seg_ops segkp_ops = {
              /*
               * Segment-operations vector for segkp.  Unsupported ops are
               * deliberately left NULL rather than pointing at a bad-op
               * stub: per the commit description, a NULL pointer either
               * produces the same panic or lets the proper error code be
               * returned to the caller.
               */



  146         .fault          = segkp_fault,


  147         .checkprot      = segkp_checkprot,
  148         .kluster        = segkp_kluster,








  149         .dump           = segkp_dump,
  150         .pagelock       = segkp_pagelock,

  151         .getmemid       = segkp_getmemid,
  152         .getpolicy      = segkp_getpolicy,
  153         .capable        = segkp_capable,
  154         .inherit        = seg_inherit_notsup,
  155 };
 156 







 157 
 158 static void segkpinit_mem_config(struct seg *);
 159 
 160 static uint32_t segkp_indel;
 161 
 162 /*
 163  * Allocate the segment specific private data struct and fill it in
 164  * with the per kp segment mutex, anon ptr. array and hash table.
 165  */
 166 int
 167 segkp_create(struct seg *seg)
 168 {
 169         struct segkp_segdata *kpsd;
 170         size_t  np;
 171 
 172         ASSERT(seg != NULL && seg->s_as == &kas);
 173         ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));
 174 
 175         if (seg->s_size & PAGEOFFSET) {
 176                 panic("Bad segkp size");