6154 const-ify segment ops structures


 * If segkp_map_red() is called with the redzone already mapped and
 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure.  To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define RED_DEEP_THRESHOLD      2000

hrtime_t        red_deep_hires;
kthread_t       *red_deep_thread;

uint32_t        red_nmapped;
uint32_t        red_closest = UINT_MAX;
uint32_t        red_ndoubles;

pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
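
For orientation, a hedged sketch of the recording the comment above describes.
This is not the actual segkp_map_red() body; redzone_mapped and stack_left are
hypothetical locals, and gethrtime() as the timestamp source is an assumption:

/*
 * Illustrative sketch only.  When the redzone is already mapped and
 * fewer than RED_DEEP_THRESHOLD bytes of stack remain, record when it
 * happened and which thread was running, so the evidence survives into
 * a crash dump.
 */
if (redzone_mapped && stack_left < RED_DEEP_THRESHOLD) {
        red_deep_hires = gethrtime();   /* when we got this deep */
        red_deep_thread = curthread;    /* who got us here */
}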
-static struct   seg_ops segkp_ops = {
+static const struct seg_ops segkp_ops = {
        .dup            = SEGKP_BADOP(int),
        .unmap          = SEGKP_BADOP(int),
        .free           = SEGKP_BADOP(void),
        .fault          = segkp_fault,
        .faulta         = SEGKP_BADOP(faultcode_t),
        .setprot        = SEGKP_BADOP(int),
        .checkprot      = segkp_checkprot,
        .kluster        = segkp_kluster,
        .swapout        = SEGKP_BADOP(size_t),
        .sync           = SEGKP_BADOP(int),
        .incore         = SEGKP_BADOP(size_t),
        .lockop         = SEGKP_BADOP(int),
        .getprot        = SEGKP_BADOP(int),
        .getoffset      = SEGKP_BADOP(u_offset_t),
        .gettype        = SEGKP_BADOP(int),
        .getvp          = SEGKP_BADOP(int),
        .advise         = SEGKP_BADOP(int),
        .dump           = segkp_dump,
        .pagelock       = segkp_pagelock,
        .setpagesize    = SEGKP_BADOP(int),
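
Most slots point at SEGKP_BADOP(type) rather than being left NULL.  The real
macro is defined in the seg_kp header; the following is only a hypothetical
rendering of the pattern (demo_badop and DEMO_BADOP are made-up names):

/*
 * Hypothetical sketch: every operation segkp does not support is wired
 * to a stub that fails loudly, cast to the function type the slot
 * expects, so a stray call panics with a useful message instead of
 * dereferencing a NULL pointer.
 */
static int
demo_badop(void)
{
        panic("segkp bad op called");
        /*NOTREACHED*/
        return (0);
}
#define DEMO_BADOP(type)        ((type (*)())demo_badop)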
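
What the const qualifier buys, sketched with a toy ops table (toy_ops,
op_fault, and toy_fault are made-up names, not from the webrev): the
initialized vector can be placed in read-only data, and code that tries
to repoint an entry is rejected at compile time.

struct toy_ops {
        int     (*op_fault)(void);
};

static int
toy_fault(void)
{
        return (0);
}

/* const: the table can live in .rodata and cannot be tampered with. */
static const struct toy_ops toy_ops = {
        .op_fault = toy_fault,
};

/*
 * toy_ops.op_fault = NULL;
 * ...would now fail to compile ("assignment of member 'op_fault' in
 * read-only object").
 */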