193 */
194 #define SEGP_FORCE_WIRED 0x1 /* skip check against seg_pwindow */
195 #define SEGP_AMP 0x2 /* anon map's pcache entry */
196 #define SEGP_PSHIFT 0x4 /* addr pgsz shift for hash function */
197
198 /*
199 * Return values for seg_pinsert and seg_pinsert_check functions.
200 */
201 #define SEGP_SUCCESS 0 /* seg_pinsert() succeeded */
202 #define SEGP_FAIL 1 /* seg_pinsert() failed */
203
204 /* Page status bits for segop_incore */
/* NOTE(review): distinct powers of two — presumably OR-ed together per page; confirm against incore implementations. */
205 #define SEG_PAGE_INCORE 0x01 /* VA has a page backing it */
206 #define SEG_PAGE_LOCKED 0x02 /* VA has a page that is locked */
207 #define SEG_PAGE_HASCOW 0x04 /* VA has a page with a copy-on-write */
208 #define SEG_PAGE_SOFTLOCK 0x08 /* VA has a page with softlock held */
209 #define SEG_PAGE_VNODEBACKED 0x10 /* Segment is backed by a vnode */
210 #define SEG_PAGE_ANON 0x20 /* VA has an anonymous page */
211 #define SEG_PAGE_VNODE 0x40 /* VA has a vnode page backing it */
212
/*
 * SEGOP_*() macros: each expands to an indirect call through the
 * segment's operations vector (seg->s_ops), dispatching to the owning
 * segment driver's implementation of that operation.
 */
213 #define SEGOP_DUP(s, n) (*(s)->s_ops->dup)((s), (n))
214 #define SEGOP_UNMAP(s, a, l) (*(s)->s_ops->unmap)((s), (a), (l))
215 #define SEGOP_FREE(s) (*(s)->s_ops->free)((s))
216 #define SEGOP_FAULT(h, s, a, l, t, rw) \
217 (*(s)->s_ops->fault)((h), (s), (a), (l), (t), (rw))
218 #define SEGOP_FAULTA(s, a) (*(s)->s_ops->faulta)((s), (a))
219 #define SEGOP_SETPROT(s, a, l, p) (*(s)->s_ops->setprot)((s), (a), (l), (p))
220 #define SEGOP_CHECKPROT(s, a, l, p) (*(s)->s_ops->checkprot)((s), (a), (l), (p))
221 #define SEGOP_KLUSTER(s, a, d) (*(s)->s_ops->kluster)((s), (a), (d))
222 #define SEGOP_SWAPOUT(s) (*(s)->s_ops->swapout)((s))
223 #define SEGOP_SYNC(s, a, l, atr, f) \
224 (*(s)->s_ops->sync)((s), (a), (l), (atr), (f))
225 #define SEGOP_INCORE(s, a, l, v) (*(s)->s_ops->incore)((s), (a), (l), (v))
226 #define SEGOP_LOCKOP(s, a, l, atr, op, b, p) \
227 (*(s)->s_ops->lockop)((s), (a), (l), (atr), (op), (b), (p))
228 #define SEGOP_GETPROT(s, a, l, p) (*(s)->s_ops->getprot)((s), (a), (l), (p))
229 #define SEGOP_GETOFFSET(s, a) (*(s)->s_ops->getoffset)((s), (a))
230 #define SEGOP_GETTYPE(s, a) (*(s)->s_ops->gettype)((s), (a))
231 #define SEGOP_GETVP(s, a, vpp) (*(s)->s_ops->getvp)((s), (a), (vpp))
232 #define SEGOP_ADVISE(s, a, l, b) (*(s)->s_ops->advise)((s), (a), (l), (b))
233 #define SEGOP_DUMP(s) (*(s)->s_ops->dump)((s))
234 #define SEGOP_PAGELOCK(s, a, l, p, t, rw) \
235 (*(s)->s_ops->pagelock)((s), (a), (l), (p), (t), (rw))
236 #define SEGOP_SETPAGESIZE(s, a, l, szc) \
237 (*(s)->s_ops->setpagesize)((s), (a), (l), (szc))
238 #define SEGOP_GETMEMID(s, a, mp) (*(s)->s_ops->getmemid)((s), (a), (mp))
239 #define SEGOP_GETPOLICY(s, a) (*(s)->s_ops->getpolicy)((s), (a))
240 #define SEGOP_CAPABLE(s, c) (*(s)->s_ops->capable)((s), (c))
241 #define SEGOP_INHERIT(s, a, l, b) (*(s)->s_ops->inherit)((s), (a), (l), (b))
242
/*
 * seg_page(): page index of 'addr' within the segment — the byte
 * offset from s_base, scaled down by PAGESHIFT.
 */
243 #define seg_page(seg, addr) \
244 (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
245
/*
 * seg_pages(): number of pages spanned by the segment; adding
 * PAGEOFFSET before the shift rounds a partial trailing page up.
 */
246 #define seg_pages(seg) \
247 (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
248
249 #define IE_NOMEM -1 /* internal to seg layer */
250 #define IE_RETRY -2 /* internal to seg layer */
251 #define IE_REATTACH -3 /* internal to seg layer */
252
253 /* Values for SEGOP_INHERIT */
254 #define SEGP_INH_ZERO 0x01
255
/* Stock inherit op for drivers without inheritance support — presumably fails with ENOTSUP; confirm in the implementation. */
256 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
257
258 /* Delay/retry factors for seg_p_mem_config_pre_del */
259 #define SEGP_PREDEL_DELAY_FACTOR 4
260 /*
261 * As a workaround to being unable to purge the pagelock
262 * cache during a DR delete memory operation, we use
263 * a stall threshold that is twice the maximum seen
264 * during testing. This workaround will be removed
265 * when a suitable fix is found.
266 */
267 #define SEGP_STALL_SECONDS 25
268 #define SEGP_STALL_THRESHOLD \
269 (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
270
271 #ifdef VMDEBUG
272
/*
 * NOTE(review): seg_page/seg_pages are also defined as function-like
 * macros earlier in this header; these prototypes are only usable where
 * those macros have been #undef'd — confirm against the VMDEBUG build.
 */
273 uint_t seg_page(struct seg *, caddr_t);
274 uint_t seg_pages(struct seg *);
275
276 #endif /* VMDEBUG */
277
/* NOTE(review): semantics not visible here — presumably reports whether the segment's zone association may change; confirm in the implementation. */
278 boolean_t seg_can_change_zones(struct seg *);
/* NOTE(review): presumably returns the swap space reserved by this segment; confirm in the implementation. */
279 size_t seg_swresv(struct seg *);
280
281 #endif /* _KERNEL */
282
283 #ifdef __cplusplus
284 }
285 #endif
286
287 #endif /* _VM_SEG_H */
|
193 */
194 #define SEGP_FORCE_WIRED 0x1 /* skip check against seg_pwindow */
195 #define SEGP_AMP 0x2 /* anon map's pcache entry */
196 #define SEGP_PSHIFT 0x4 /* addr pgsz shift for hash function */
197
198 /*
199 * Return values for seg_pinsert and seg_pinsert_check functions.
200 */
201 #define SEGP_SUCCESS 0 /* seg_pinsert() succeeded */
202 #define SEGP_FAIL 1 /* seg_pinsert() failed */
203
204 /* Page status bits for segop_incore */
/* NOTE(review): distinct powers of two — presumably OR-ed together per page; confirm against incore implementations. */
205 #define SEG_PAGE_INCORE 0x01 /* VA has a page backing it */
206 #define SEG_PAGE_LOCKED 0x02 /* VA has a page that is locked */
207 #define SEG_PAGE_HASCOW 0x04 /* VA has a page with a copy-on-write */
208 #define SEG_PAGE_SOFTLOCK 0x08 /* VA has a page with softlock held */
209 #define SEG_PAGE_VNODEBACKED 0x10 /* Segment is backed by a vnode */
210 #define SEG_PAGE_ANON 0x20 /* VA has an anonymous page */
211 #define SEG_PAGE_VNODE 0x40 /* VA has a vnode page backing it */
212
/*
 * seg_page(): page index of 'addr' within the segment — the byte
 * offset from s_base, scaled down by PAGESHIFT.
 */
213 #define seg_page(seg, addr) \
214 (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
215
/*
 * seg_pages(): number of pages spanned by the segment; adding
 * PAGEOFFSET before the shift rounds a partial trailing page up.
 */
216 #define seg_pages(seg) \
217 (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
218
219 #define IE_NOMEM -1 /* internal to seg layer */
220 #define IE_RETRY -2 /* internal to seg layer */
221 #define IE_REATTACH -3 /* internal to seg layer */
222
223 /* Values for SEGOP_INHERIT */
224 #define SEGP_INH_ZERO 0x01
225
/* Stock inherit op for drivers without inheritance support — presumably fails with ENOTSUP; confirm in the implementation. */
226 int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
227
228 /* Delay/retry factors for seg_p_mem_config_pre_del */
229 #define SEGP_PREDEL_DELAY_FACTOR 4
230 /*
231 * As a workaround to being unable to purge the pagelock
232 * cache during a DR delete memory operation, we use
233 * a stall threshold that is twice the maximum seen
234 * during testing. This workaround will be removed
235 * when a suitable fix is found.
236 */
237 #define SEGP_STALL_SECONDS 25
238 #define SEGP_STALL_THRESHOLD \
239 (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)
240
241 #ifdef VMDEBUG
242
/*
 * NOTE(review): seg_page/seg_pages are also defined as function-like
 * macros earlier in this header; these prototypes are only usable where
 * those macros have been #undef'd — confirm against the VMDEBUG build.
 */
243 uint_t seg_page(struct seg *, caddr_t);
244 uint_t seg_pages(struct seg *);
245
246 #endif /* VMDEBUG */
247
/* NOTE(review): semantics not visible here — presumably reports whether the segment's zone association may change; confirm in the implementation. */
248 boolean_t seg_can_change_zones(struct seg *);
/* NOTE(review): presumably returns the swap space reserved by this segment; confirm in the implementation. */
249 size_t seg_swresv(struct seg *);
250
251 /* segop wrappers */
/*
 * Function replacements for the former SEGOP_*() macros: each wrapper
 * presumably dispatches through the segment's ops vector (seg->s_ops)
 * on the caller's behalf — confirm against the wrapper implementations.
 * Using real functions instead of macros gives type checking and a
 * single dispatch point (e.g. for tracing/DTrace probes).
 */
252 extern int segop_dup(struct seg *, struct seg *);
253 extern int segop_unmap(struct seg *, caddr_t, size_t);
254 extern void segop_free(struct seg *);
255 extern faultcode_t segop_fault(struct hat *, struct seg *, caddr_t, size_t,
256 enum fault_type, enum seg_rw);
257 extern faultcode_t segop_faulta(struct seg *, caddr_t);
258 extern int segop_setprot(struct seg *, caddr_t, size_t, uint_t);
259 extern int segop_checkprot(struct seg *, caddr_t, size_t, uint_t);
260 extern int segop_kluster(struct seg *, caddr_t, ssize_t);
261 extern size_t segop_swapout(struct seg *);
262 extern int segop_sync(struct seg *, caddr_t, size_t, int, uint_t);
263 extern size_t segop_incore(struct seg *, caddr_t, size_t, char *);
264 extern int segop_lockop(struct seg *, caddr_t, size_t, int, int, ulong_t *,
265 size_t);
266 extern int segop_getprot(struct seg *, caddr_t, size_t, uint_t *);
267 extern u_offset_t segop_getoffset(struct seg *, caddr_t);
268 extern int segop_gettype(struct seg *, caddr_t);
269 extern int segop_getvp(struct seg *, caddr_t, struct vnode **);
270 extern int segop_advise(struct seg *, caddr_t, size_t, uint_t);
271 extern void segop_dump(struct seg *);
272 extern int segop_pagelock(struct seg *, caddr_t, size_t, struct page ***,
273 enum lock_type, enum seg_rw);
274 extern int segop_setpagesize(struct seg *, caddr_t, size_t, uint_t);
275 extern int segop_getmemid(struct seg *, caddr_t, memid_t *);
276 extern struct lgrp_mem_policy_info *segop_getpolicy(struct seg *, caddr_t);
277 extern int segop_capable(struct seg *, segcapability_t);
278 extern int segop_inherit(struct seg *, caddr_t, size_t, uint_t);
279
280 #endif /* _KERNEL */
281
282 #ifdef __cplusplus
283 }
284 #endif
285
286 #endif /* _VM_SEG_H */
|