 * with less than RED_DEEP_THRESHOLD bytes available on the stack,
 * then the stack situation has become quite serious; if much more stack
 * is consumed, we have the potential of scrogging the next thread/LWP
 * structure. To help debug the "can't happen" panics which may
 * result from this condition, we record hrestime and the calling thread
 * in red_deep_hires and red_deep_thread respectively.
 */
#define	RED_DEEP_THRESHOLD	2000

hrtime_t	red_deep_hires;
kthread_t	*red_deep_thread;
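
/*
 * A minimal sketch (an assumption, not the actual red-zone fault path)
 * of how these breadcrumbs might be recorded; `stack_left' is a
 * hypothetical name for the bytes remaining above the stack base:
 *
 *	if (stack_left < RED_DEEP_THRESHOLD) {
 *		red_deep_hires = gethrtime();
 *		red_deep_thread = curthread;
 *	}
 */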

uint32_t	red_nmapped;		/* # of times a red zone page was mapped */
uint32_t	red_closest = UINT_MAX;	/* closest observed approach to stack base */
uint32_t	red_ndoubles;		/* red zone hit while a red page was mapped */

pgcnt_t anon_segkp_pages_locked;	/* See vm/anon.h */
pgcnt_t anon_segkp_pages_resv;		/* anon reserved by seg_kp */

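/*
 * Entry points with no sensible segkp implementation are wired to
 * SEGKP_BADOP(); the macro (defined earlier in this file, its exact
 * form assumed here) casts segkp_badop() below to the appropriate
 * function-pointer type, so any stray call panics immediately.
 */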
static struct seg_ops segkp_ops = {
	.dup		= SEGKP_BADOP(int),
	.unmap		= SEGKP_BADOP(int),
	.free		= SEGKP_BADOP(void),
	.fault		= segkp_fault,
	.faulta		= SEGKP_BADOP(faultcode_t),
	.setprot	= SEGKP_BADOP(int),
	.checkprot	= segkp_checkprot,
	.kluster	= segkp_kluster,
	.sync		= SEGKP_BADOP(int),
	.incore		= SEGKP_BADOP(size_t),
	.lockop		= SEGKP_BADOP(int),
	.getprot	= SEGKP_BADOP(int),
	.getoffset	= SEGKP_BADOP(u_offset_t),
	.gettype	= SEGKP_BADOP(int),
	.getvp		= SEGKP_BADOP(int),
	.advise		= SEGKP_BADOP(int),
	.dump		= segkp_dump,
	.pagelock	= segkp_pagelock,
	.setpagesize	= SEGKP_BADOP(int),
	.getmemid	= segkp_getmemid,
	.getpolicy	= segkp_getpolicy,
	.capable	= segkp_capable,
	.inherit	= seg_inherit_notsup,
};


static void
segkp_badop(void)
{
	panic("segkp_badop");
	/*NOTREACHED*/
}

static void segkpinit_mem_config(struct seg *);

static uint32_t segkp_indel;	/* nonzero while a memory delete is pending */

/*
 * Allocate the segment-specific private data struct and fill it in
 * with the per-kp-segment mutex, anon pointer array, and hash table.
 */
int
segkp_create(struct seg *seg)