Instead of using SEGOP_* macros, define full-fledged segop_*() functions.
This will allow us to do sanity checking, or even implement stub
functionality, in one place instead of duplicating it wherever these
wrappers are used.
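The idea, as a minimal sketch (the function body and the VERIFY sanity
check are illustrative assumptions, not necessarily the actual
implementation): each macro becomes a thin real function in the seg
layer's C file, and that function is the single place for checks or
stubs.

/*
 * Illustrative sketch only: a real wrapper function gives one central
 * place where sanity checks or stub behaviour can live.
 */
int
segop_dup(struct seg *seg, struct seg *newseg)
{
        VERIFY(seg->s_ops->dup != NULL);        /* sanity check lives here */

        return (seg->s_ops->dup(seg, newseg));
}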


Before (vm/seg.h):

 */
#define SEGP_FORCE_WIRED        0x1     /* skip check against seg_pwindow */
#define SEGP_AMP                0x2     /* anon map's pcache entry */
#define SEGP_PSHIFT             0x4     /* addr pgsz shift for hash function */

/*
 * Return values for seg_pinsert and seg_pinsert_check functions.
 */
#define SEGP_SUCCESS            0       /* seg_pinsert() succeeded */
#define SEGP_FAIL               1       /* seg_pinsert() failed */

/* Page status bits for segop_incore */
#define SEG_PAGE_INCORE         0x01    /* VA has a page backing it */
#define SEG_PAGE_LOCKED         0x02    /* VA has a page that is locked */
#define SEG_PAGE_HASCOW         0x04    /* VA has a page with a copy-on-write */
#define SEG_PAGE_SOFTLOCK       0x08    /* VA has a page with softlock held */
#define SEG_PAGE_VNODEBACKED    0x10    /* Segment is backed by a vnode */
#define SEG_PAGE_ANON           0x20    /* VA has an anonymous page */
#define SEG_PAGE_VNODE          0x40    /* VA has a vnode page backing it */

#define SEGOP_DUP(s, n)             (*(s)->s_ops->dup)((s), (n))
#define SEGOP_UNMAP(s, a, l)        (*(s)->s_ops->unmap)((s), (a), (l))
#define SEGOP_FREE(s)               (*(s)->s_ops->free)((s))
#define SEGOP_FAULT(h, s, a, l, t, rw) \
                (*(s)->s_ops->fault)((h), (s), (a), (l), (t), (rw))
#define SEGOP_FAULTA(s, a)          (*(s)->s_ops->faulta)((s), (a))
#define SEGOP_SETPROT(s, a, l, p)   (*(s)->s_ops->setprot)((s), (a), (l), (p))
#define SEGOP_CHECKPROT(s, a, l, p) (*(s)->s_ops->checkprot)((s), (a), (l), (p))
#define SEGOP_KLUSTER(s, a, d)      (*(s)->s_ops->kluster)((s), (a), (d))
#define SEGOP_SYNC(s, a, l, atr, f) \
                (*(s)->s_ops->sync)((s), (a), (l), (atr), (f))
#define SEGOP_INCORE(s, a, l, v)    (*(s)->s_ops->incore)((s), (a), (l), (v))
#define SEGOP_LOCKOP(s, a, l, atr, op, b, p) \
                (*(s)->s_ops->lockop)((s), (a), (l), (atr), (op), (b), (p))
#define SEGOP_GETPROT(s, a, l, p)   (*(s)->s_ops->getprot)((s), (a), (l), (p))
#define SEGOP_GETOFFSET(s, a)       (*(s)->s_ops->getoffset)((s), (a))
#define SEGOP_GETTYPE(s, a)         (*(s)->s_ops->gettype)((s), (a))
#define SEGOP_GETVP(s, a, vpp)      (*(s)->s_ops->getvp)((s), (a), (vpp))
#define SEGOP_ADVISE(s, a, l, b)    (*(s)->s_ops->advise)((s), (a), (l), (b))
#define SEGOP_DUMP(s)               (*(s)->s_ops->dump)((s))
#define SEGOP_PAGELOCK(s, a, l, p, t, rw) \
                (*(s)->s_ops->pagelock)((s), (a), (l), (p), (t), (rw))
#define SEGOP_SETPAGESIZE(s, a, l, szc) \
                (*(s)->s_ops->setpagesize)((s), (a), (l), (szc))
#define SEGOP_GETMEMID(s, a, mp)    (*(s)->s_ops->getmemid)((s), (a), (mp))
#define SEGOP_GETPOLICY(s, a)       (*(s)->s_ops->getpolicy)((s), (a))
#define SEGOP_CAPABLE(s, c)         (*(s)->s_ops->capable)((s), (c))
#define SEGOP_INHERIT(s, a, l, b)   (*(s)->s_ops->inherit)((s), (a), (l), (b))

#define seg_page(seg, addr) \
        (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)

#define seg_pages(seg) \
        (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)

#define IE_NOMEM        -1      /* internal to seg layer */
#define IE_RETRY        -2      /* internal to seg layer */
#define IE_REATTACH     -3      /* internal to seg layer */

/* Values for SEGOP_INHERIT */
#define SEGP_INH_ZERO   0x01

int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);

/* Delay/retry factors for seg_p_mem_config_pre_del */
#define SEGP_PREDEL_DELAY_FACTOR        4
/*
 * As a workaround to being unable to purge the pagelock
 * cache during a DR delete memory operation, we use
 * a stall threshold that is twice the maximum seen
 * during testing.  This workaround will be removed
 * when a suitable fix is found.
 */
#define SEGP_STALL_SECONDS      25
#define SEGP_STALL_THRESHOLD \
        (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)

#ifdef VMDEBUG

uint_t  seg_page(struct seg *, caddr_t);
uint_t  seg_pages(struct seg *);

#endif  /* VMDEBUG */

boolean_t       seg_can_change_zones(struct seg *);
size_t          seg_swresv(struct seg *);

#endif  /* _KERNEL */

#ifdef  __cplusplus
}
#endif

#endif  /* _VM_SEG_H */


After (vm/seg.h; the SEGOP_* macros are gone, and segop_*() prototypes
appear near the end of the header):

 */
#define SEGP_FORCE_WIRED        0x1     /* skip check against seg_pwindow */
#define SEGP_AMP                0x2     /* anon map's pcache entry */
#define SEGP_PSHIFT             0x4     /* addr pgsz shift for hash function */

/*
 * Return values for seg_pinsert and seg_pinsert_check functions.
 */
#define SEGP_SUCCESS            0       /* seg_pinsert() succeeded */
#define SEGP_FAIL               1       /* seg_pinsert() failed */

/* Page status bits for segop_incore */
#define SEG_PAGE_INCORE         0x01    /* VA has a page backing it */
#define SEG_PAGE_LOCKED         0x02    /* VA has a page that is locked */
#define SEG_PAGE_HASCOW         0x04    /* VA has a page with a copy-on-write */
#define SEG_PAGE_SOFTLOCK       0x08    /* VA has a page with softlock held */
#define SEG_PAGE_VNODEBACKED    0x10    /* Segment is backed by a vnode */
#define SEG_PAGE_ANON           0x20    /* VA has an anonymous page */
#define SEG_PAGE_VNODE          0x40    /* VA has a vnode page backing it */
#define seg_page(seg, addr) \
        (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)

#define seg_pages(seg) \
        (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)

#define IE_NOMEM        -1      /* internal to seg layer */
#define IE_RETRY        -2      /* internal to seg layer */
#define IE_REATTACH     -3      /* internal to seg layer */

/* Values for SEGOP_INHERIT */
#define SEGP_INH_ZERO   0x01

int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);

/* Delay/retry factors for seg_p_mem_config_pre_del */
#define SEGP_PREDEL_DELAY_FACTOR        4
/*
 * As a workaround to being unable to purge the pagelock
 * cache during a DR delete memory operation, we use
 * a stall threshold that is twice the maximum seen
 * during testing.  This workaround will be removed
 * when a suitable fix is found.
 */
#define SEGP_STALL_SECONDS      25
#define SEGP_STALL_THRESHOLD \
        (SEGP_STALL_SECONDS * SEGP_PREDEL_DELAY_FACTOR)

#ifdef VMDEBUG

uint_t  seg_page(struct seg *, caddr_t);
uint_t  seg_pages(struct seg *);

#endif  /* VMDEBUG */

boolean_t       seg_can_change_zones(struct seg *);
size_t          seg_swresv(struct seg *);

/* segop wrappers */
int segop_dup(struct seg *, struct seg *);
int segop_unmap(struct seg *, caddr_t, size_t);
void segop_free(struct seg *);
faultcode_t segop_fault(struct hat *, struct seg *, caddr_t, size_t,
    enum fault_type, enum seg_rw);
faultcode_t segop_faulta(struct seg *, caddr_t);
int segop_setprot(struct seg *, caddr_t, size_t, uint_t);
int segop_checkprot(struct seg *, caddr_t, size_t, uint_t);
int segop_kluster(struct seg *, caddr_t, ssize_t);
int segop_sync(struct seg *, caddr_t, size_t, int, uint_t);
size_t segop_incore(struct seg *, caddr_t, size_t, char *);
int segop_lockop(struct seg *, caddr_t, size_t, int, int, ulong_t *, size_t);
int segop_getprot(struct seg *, caddr_t, size_t, uint_t *);
u_offset_t segop_getoffset(struct seg *, caddr_t);
int segop_gettype(struct seg *, caddr_t);
int segop_getvp(struct seg *, caddr_t, struct vnode **);
int segop_advise(struct seg *, caddr_t, size_t, uint_t);
void segop_dump(struct seg *);
int segop_pagelock(struct seg *, caddr_t, size_t, struct page ***,
    enum lock_type, enum seg_rw);
int segop_setpagesize(struct seg *, caddr_t, size_t, uint_t);
int segop_getmemid(struct seg *, caddr_t, memid_t *);
struct lgrp_mem_policy_info *segop_getpolicy(struct seg *, caddr_t);
int segop_capable(struct seg *, segcapability_t);
int segop_inherit(struct seg *, caddr_t, size_t, uint_t);

#endif  /* _KERNEL */

#ifdef  __cplusplus
}
#endif

#endif  /* _VM_SEG_H */
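Call sites then change from the macro form to an ordinary function
call; a hypothetical caller (variable names invented for illustration):

/* before */
err = SEGOP_DUP(seg, newseg);

/* after */
err = segop_dup(seg, newseg);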