202
/* Page status bits for segop_incore */
#define SEG_PAGE_INCORE		0x01	/* VA has a page backing it */
#define SEG_PAGE_LOCKED		0x02	/* VA has a page that is locked */
#define SEG_PAGE_HASCOW		0x04	/* VA has a page with a copy-on-write */
#define SEG_PAGE_SOFTLOCK	0x08	/* VA has a page with softlock held */
#define SEG_PAGE_VNODEBACKED	0x10	/* Segment is backed by a vnode */
#define SEG_PAGE_ANON		0x20	/* VA has an anonymous page */
#define SEG_PAGE_VNODE		0x40	/* VA has a vnode page backing it */
211
/*
 * seg_page(seg, addr): index of the page containing virtual address
 * 'addr', counted from the base of segment 'seg'.  Caller must ensure
 * addr lies within the segment.
 */
#define seg_page(seg, addr) \
	(((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
214
/*
 * seg_pages(seg): number of pages spanned by segment 'seg'; a partial
 * trailing page is rounded up to a whole page.
 */
#define seg_pages(seg) \
	(((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
217
/*
 * Negative return codes used only inside the seg layer; they are
 * never exposed to callers outside it.
 */
#define IE_NOMEM	-1	/* internal to seg layer */
#define IE_RETRY	-2	/* internal to seg layer */
#define IE_REATTACH	-3	/* internal to seg layer */
221
/* Values for segop_inherit */
#define SEGP_INH_ZERO	0x01
224
225 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
226
/* Delay/retry factors for seg_p_mem_config_pre_del */
#define SEGP_PREDEL_DELAY_FACTOR	4
/*
 * As a workaround to being unable to purge the pagelock
 * cache during a DR delete memory operation, we use
 * a stall threshold that is twice the maximum seen
 * during testing.  This workaround will be removed
 * when a suitable fix is found.
 */
#define SEGP_STALL_SECONDS	25
#define SEGP_STALL_THRESHOLD \
	(SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
239
240 #ifdef VMDEBUG
241
242 uint_t seg_page(struct seg *, caddr_t);
|
202
/* Page status bits for segop_incore */
#define SEG_PAGE_INCORE		0x01	/* VA has a page backing it */
#define SEG_PAGE_LOCKED		0x02	/* VA has a page that is locked */
#define SEG_PAGE_HASCOW		0x04	/* VA has a page with a copy-on-write */
#define SEG_PAGE_SOFTLOCK	0x08	/* VA has a page with softlock held */
#define SEG_PAGE_VNODEBACKED	0x10	/* Segment is backed by a vnode */
#define SEG_PAGE_ANON		0x20	/* VA has an anonymous page */
#define SEG_PAGE_VNODE		0x40	/* VA has a vnode page backing it */
211
/*
 * seg_page(seg, addr): index of the page containing virtual address
 * 'addr', counted from the base of segment 'seg'.  Caller must ensure
 * addr lies within the segment.
 */
#define seg_page(seg, addr) \
	(((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
214
/*
 * seg_pages(seg): number of pages spanned by segment 'seg'; a partial
 * trailing page is rounded up to a whole page.
 */
#define seg_pages(seg) \
	(((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
217
/*
 * Negative return codes used only inside the seg layer; they are
 * never exposed to callers outside it.
 */
#define IE_NOMEM	-1	/* internal to seg layer */
#define IE_RETRY	-2	/* internal to seg layer */
#define IE_REATTACH	-3	/* internal to seg layer */
221
/* Values for segop_inherit */
#define SEGP_INH_ZERO	0x01
224
225 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
226
/* Delay/retry factors for seg_p_mem_config_pre_del */
#define SEGP_PREDEL_DELAY_FACTOR	4
/*
 * As a workaround to being unable to purge the pagelock
 * cache during a DR delete memory operation, we use
 * a stall threshold that is twice the maximum seen
 * during testing.  This workaround will be removed
 * when a suitable fix is found.
 */
#define SEGP_STALL_SECONDS	25
#define SEGP_STALL_THRESHOLD \
	(SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
239
240 #ifdef VMDEBUG
241
242 uint_t seg_page(struct seg *, caddr_t);
|