121 * If segkp_map_red() is called with the redzone already mapped and
122 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
123 * then the stack situation has become quite serious; if much more stack
124 * is consumed, we have the potential of scrogging the next thread/LWP
125 * structure. To help debug the "can't happen" panics which may
126 * result from this condition, we record hrestime and the calling thread
127 * in red_deep_hires and red_deep_thread respectively.
128 */
129 #define RED_DEEP_THRESHOLD 2000 /* bytes of stack headroom below which a red-zone hit is "deep" and gets recorded */
130
131 hrtime_t red_deep_hires; /* hrestime at the most recent deep red-zone hit (see block comment above) */
132 kthread_t *red_deep_thread; /* thread that took that deep red-zone hit */
133
134 uint32_t red_nmapped; /* NOTE(review): presumably counts red-zone mappings -- confirm in segkp_map_red() */
135 uint32_t red_closest = UINT_MAX; /* NOTE(review): presumably the smallest stack headroom observed; starts at "infinity" -- confirm */
136 uint32_t red_ndoubles; /* NOTE(review): presumably counts calls with the red zone already mapped -- confirm */
137
138 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
139 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
140
141 static const struct seg_ops segkp_ops = { /* const: ops vtable is never written after init; keeps it in rodata */
142 .fault = segkp_fault,
143 .checkprot = segkp_checkprot,
144 .kluster = segkp_kluster,
145 .dump = segkp_dump,
146 .pagelock = segkp_pagelock,
147 };
148
149
150 static void segkpinit_mem_config(struct seg *); /* registers memory-config callbacks for this segment -- see definition below */
151
152 static uint32_t segkp_indel; /* NOTE(review): presumably nonzero while a memory delete ("in-delete") is pending -- confirm */
153
154 /*
155 * Allocate the segment specific private data struct and fill it in
156 * with the per kp segment mutex, anon ptr. array and hash table.
157 */
158 int
159 segkp_create(struct seg *seg)
160 {
161 struct segkp_segdata *kpsd;
|
121 * If segkp_map_red() is called with the redzone already mapped and
122 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
123 * then the stack situation has become quite serious; if much more stack
124 * is consumed, we have the potential of scrogging the next thread/LWP
125 * structure. To help debug the "can't happen" panics which may
126 * result from this condition, we record hrestime and the calling thread
127 * in red_deep_hires and red_deep_thread respectively.
128 */
129 #define RED_DEEP_THRESHOLD 2000 /* bytes of stack headroom below which a red-zone hit is "deep" and gets recorded */
130
131 hrtime_t red_deep_hires; /* hrestime at the most recent deep red-zone hit (see block comment above) */
132 kthread_t *red_deep_thread; /* thread that took that deep red-zone hit */
133
134 uint32_t red_nmapped; /* NOTE(review): presumably counts red-zone mappings -- confirm in segkp_map_red() */
135 uint32_t red_closest = UINT_MAX; /* NOTE(review): presumably the smallest stack headroom observed; starts at "infinity" -- confirm */
136 uint32_t red_ndoubles; /* NOTE(review): presumably counts calls with the red zone already mapped -- confirm */
137
138 pgcnt_t anon_segkp_pages_locked; /* See vm/anon.h */
139 pgcnt_t anon_segkp_pages_resv; /* anon reserved by seg_kp */
140
141 static const struct seg_ops segkp_ops = { /* read-only ops vtable for the kp segment driver */
142 .fault = segkp_fault,
143 .checkprot = segkp_checkprot,
144 .kluster = segkp_kluster,
145 .dump = segkp_dump,
146 .pagelock = segkp_pagelock,
147 };
148
149
150 static void segkpinit_mem_config(struct seg *); /* registers memory-config callbacks for this segment -- see definition below */
151
152 static uint32_t segkp_indel; /* NOTE(review): presumably nonzero while a memory delete ("in-delete") is pending -- confirm */
153
154 /*
155 * Allocate the segment specific private data struct and fill it in
156 * with the per kp segment mutex, anon ptr. array and hash table.
157 */
158 int
159 segkp_create(struct seg *seg)
160 {
161 struct segkp_segdata *kpsd;
|