Print this page
const-ify the segment ops structures
There is no reason to keep the segment ops structures writable.
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
instead using SEGOP_* macros, define full-fledged segop_* functions
This will allow us to do some sanity checking or even implement stub
functionality in one place instead of duplicating it wherever these wrappers
are used.
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

*** 104,114 **** size_t s_size; /* size in bytes */ uint_t s_szc; /* max page size code */ uint_t s_flags; /* flags for segment, see below */ struct as *s_as; /* containing address space */ avl_node_t s_tree; /* AVL tree links to segs in this as */ ! struct seg_ops *s_ops; /* ops vector: see below */ void *s_data; /* private data for instance */ kmutex_t s_pmtx; /* protects seg's pcache list */ pcache_link_t s_phead; /* head of seg's pcache list */ } seg_t; --- 104,114 ---- size_t s_size; /* size in bytes */ uint_t s_szc; /* max page size code */ uint_t s_flags; /* flags for segment, see below */ struct as *s_as; /* containing address space */ avl_node_t s_tree; /* AVL tree links to segs in this as */ ! const struct seg_ops *s_ops; /* ops vector: see below */ void *s_data; /* private data for instance */ kmutex_t s_pmtx; /* protects seg's pcache list */ pcache_link_t s_phead; /* head of seg's pcache list */ } seg_t;
*** 122,132 **** enum fault_type, enum seg_rw); faultcode_t (*faulta)(struct seg *, caddr_t); int (*setprot)(struct seg *, caddr_t, size_t, uint_t); int (*checkprot)(struct seg *, caddr_t, size_t, uint_t); int (*kluster)(struct seg *, caddr_t, ssize_t); - size_t (*swapout)(struct seg *); int (*sync)(struct seg *, caddr_t, size_t, int, uint_t); size_t (*incore)(struct seg *, caddr_t, size_t, char *); int (*lockop)(struct seg *, caddr_t, size_t, int, int, ulong_t *, size_t); int (*getprot)(struct seg *, caddr_t, size_t, uint_t *); --- 122,131 ----
*** 208,262 **** #define SEG_PAGE_SOFTLOCK 0x08 /* VA has a page with softlock held */ #define SEG_PAGE_VNODEBACKED 0x10 /* Segment is backed by a vnode */ #define SEG_PAGE_ANON 0x20 /* VA has an anonymous page */ #define SEG_PAGE_VNODE 0x40 /* VA has a vnode page backing it */ - #define SEGOP_DUP(s, n) (*(s)->s_ops->dup)((s), (n)) - #define SEGOP_UNMAP(s, a, l) (*(s)->s_ops->unmap)((s), (a), (l)) - #define SEGOP_FREE(s) (*(s)->s_ops->free)((s)) - #define SEGOP_FAULT(h, s, a, l, t, rw) \ - (*(s)->s_ops->fault)((h), (s), (a), (l), (t), (rw)) - #define SEGOP_FAULTA(s, a) (*(s)->s_ops->faulta)((s), (a)) - #define SEGOP_SETPROT(s, a, l, p) (*(s)->s_ops->setprot)((s), (a), (l), (p)) - #define SEGOP_CHECKPROT(s, a, l, p) (*(s)->s_ops->checkprot)((s), (a), (l), (p)) - #define SEGOP_KLUSTER(s, a, d) (*(s)->s_ops->kluster)((s), (a), (d)) - #define SEGOP_SWAPOUT(s) (*(s)->s_ops->swapout)((s)) - #define SEGOP_SYNC(s, a, l, atr, f) \ - (*(s)->s_ops->sync)((s), (a), (l), (atr), (f)) - #define SEGOP_INCORE(s, a, l, v) (*(s)->s_ops->incore)((s), (a), (l), (v)) - #define SEGOP_LOCKOP(s, a, l, atr, op, b, p) \ - (*(s)->s_ops->lockop)((s), (a), (l), (atr), (op), (b), (p)) - #define SEGOP_GETPROT(s, a, l, p) (*(s)->s_ops->getprot)((s), (a), (l), (p)) - #define SEGOP_GETOFFSET(s, a) (*(s)->s_ops->getoffset)((s), (a)) - #define SEGOP_GETTYPE(s, a) (*(s)->s_ops->gettype)((s), (a)) - #define SEGOP_GETVP(s, a, vpp) (*(s)->s_ops->getvp)((s), (a), (vpp)) - #define SEGOP_ADVISE(s, a, l, b) (*(s)->s_ops->advise)((s), (a), (l), (b)) - #define SEGOP_DUMP(s) (*(s)->s_ops->dump)((s)) - #define SEGOP_PAGELOCK(s, a, l, p, t, rw) \ - (*(s)->s_ops->pagelock)((s), (a), (l), (p), (t), (rw)) - #define SEGOP_SETPAGESIZE(s, a, l, szc) \ - (*(s)->s_ops->setpagesize)((s), (a), (l), (szc)) - #define SEGOP_GETMEMID(s, a, mp) (*(s)->s_ops->getmemid)((s), (a), (mp)) - #define SEGOP_GETPOLICY(s, a) (*(s)->s_ops->getpolicy)((s), (a)) - #define SEGOP_CAPABLE(s, c) (*(s)->s_ops->capable)((s), (c)) - #define 
SEGOP_INHERIT(s, a, l, b) (*(s)->s_ops->inherit)((s), (a), (l), (b)) - #define seg_page(seg, addr) \ (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT) #define seg_pages(seg) \ (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT) #define IE_NOMEM -1 /* internal to seg layer */ #define IE_RETRY -2 /* internal to seg layer */ #define IE_REATTACH -3 /* internal to seg layer */ ! /* Values for SEGOP_INHERIT */ #define SEGP_INH_ZERO 0x01 - int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t); - /* Delay/retry factors for seg_p_mem_config_pre_del */ #define SEGP_PREDEL_DELAY_FACTOR 4 /* * As a workaround to being unable to purge the pagelock * cache during a DR delete memory operation, we use --- 207,229 ---- #define SEG_PAGE_SOFTLOCK 0x08 /* VA has a page with softlock held */ #define SEG_PAGE_VNODEBACKED 0x10 /* Segment is backed by a vnode */ #define SEG_PAGE_ANON 0x20 /* VA has an anonymous page */ #define SEG_PAGE_VNODE 0x40 /* VA has a vnode page backing it */ #define seg_page(seg, addr) \ (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT) #define seg_pages(seg) \ (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT) #define IE_NOMEM -1 /* internal to seg layer */ #define IE_RETRY -2 /* internal to seg layer */ #define IE_REATTACH -3 /* internal to seg layer */ ! /* Values for segop_inherit */ #define SEGP_INH_ZERO 0x01 /* Delay/retry factors for seg_p_mem_config_pre_del */ #define SEGP_PREDEL_DELAY_FACTOR 4 /* * As a workaround to being unable to purge the pagelock * cache during a DR delete memory operation, we use
*** 276,285 **** --- 243,277 ---- #endif /* VMDEBUG */ boolean_t seg_can_change_zones(struct seg *); size_t seg_swresv(struct seg *); + /* segop wrappers */ + int segop_dup(struct seg *, struct seg *); + int segop_unmap(struct seg *, caddr_t, size_t); + void segop_free(struct seg *); + faultcode_t segop_fault(struct hat *, struct seg *, caddr_t, size_t, enum fault_type, enum seg_rw); + faultcode_t segop_faulta(struct seg *, caddr_t); + int segop_setprot(struct seg *, caddr_t, size_t, uint_t); + int segop_checkprot(struct seg *, caddr_t, size_t, uint_t); + int segop_kluster(struct seg *, caddr_t, ssize_t); + int segop_sync(struct seg *, caddr_t, size_t, int, uint_t); + size_t segop_incore(struct seg *, caddr_t, size_t, char *); + int segop_lockop(struct seg *, caddr_t, size_t, int, int, ulong_t *, size_t ); + int segop_getprot(struct seg *, caddr_t, size_t, uint_t *); + u_offset_t segop_getoffset(struct seg *, caddr_t); + int segop_gettype(struct seg *, caddr_t); + int segop_getvp(struct seg *, caddr_t, struct vnode **); + int segop_advise(struct seg *, caddr_t, size_t, uint_t); + void segop_dump(struct seg *); + int segop_pagelock(struct seg *, caddr_t, size_t, struct page ***, enum lock_type, enum seg_rw); + int segop_setpagesize(struct seg *, caddr_t, size_t, uint_t); + int segop_getmemid(struct seg *, caddr_t, memid_t *); + struct lgrp_mem_policy_info *segop_getpolicy(struct seg *, caddr_t); + int segop_capable(struct seg *, segcapability_t); + int segop_inherit(struct seg *, caddr_t, size_t, uint_t); + #endif /* _KERNEL */ #ifdef __cplusplus } #endif