Print this page
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
instead of using SEGOP_* macros, define full-fledged segop_* functions
This will allow us to do some sanity checking or even implement stub
functionality in one place instead of duplicating it wherever these wrappers
are used.
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated: the thresholds were tuned for machines with a tiny fraction
of the RAM found in modern systems, so they are effectively never reached.
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/seg.h
          +++ new/usr/src/uts/common/vm/seg.h
↓ open down ↓ 98 lines elided ↑ open up ↑
  99   99          struct pcache_link      *p_lprev;
 100  100  } pcache_link_t;
 101  101  
 102  102  typedef struct seg {
 103  103          caddr_t s_base;                 /* base virtual address */
 104  104          size_t  s_size;                 /* size in bytes */
 105  105          uint_t  s_szc;                  /* max page size code */
 106  106          uint_t  s_flags;                /* flags for segment, see below */
 107  107          struct  as *s_as;               /* containing address space */
 108  108          avl_node_t s_tree;              /* AVL tree links to segs in this as */
 109      -        struct  seg_ops *s_ops;         /* ops vector: see below */
      109 +        const struct seg_ops *s_ops;    /* ops vector: see below */
 110  110          void *s_data;                   /* private data for instance */
 111  111          kmutex_t s_pmtx;                /* protects seg's pcache list */
 112  112          pcache_link_t s_phead;          /* head of seg's pcache list */
 113  113  } seg_t;
 114  114  
 115  115  #define S_PURGE         (0x01)          /* seg should be purged in as_gap() */
 116  116  
 117  117  struct  seg_ops {
 118  118          int     (*dup)(struct seg *, struct seg *);
 119  119          int     (*unmap)(struct seg *, caddr_t, size_t);
 120  120          void    (*free)(struct seg *);
 121  121          faultcode_t (*fault)(struct hat *, struct seg *, caddr_t, size_t,
 122  122              enum fault_type, enum seg_rw);
 123  123          faultcode_t (*faulta)(struct seg *, caddr_t);
 124  124          int     (*setprot)(struct seg *, caddr_t, size_t, uint_t);
 125  125          int     (*checkprot)(struct seg *, caddr_t, size_t, uint_t);
 126  126          int     (*kluster)(struct seg *, caddr_t, ssize_t);
 127      -        size_t  (*swapout)(struct seg *);
 128  127          int     (*sync)(struct seg *, caddr_t, size_t, int, uint_t);
 129  128          size_t  (*incore)(struct seg *, caddr_t, size_t, char *);
 130  129          int     (*lockop)(struct seg *, caddr_t, size_t, int, int, ulong_t *,
 131  130                          size_t);
 132  131          int     (*getprot)(struct seg *, caddr_t, size_t, uint_t *);
 133  132          u_offset_t      (*getoffset)(struct seg *, caddr_t);
 134  133          int     (*gettype)(struct seg *, caddr_t);
 135  134          int     (*getvp)(struct seg *, caddr_t, struct vnode **);
 136  135          int     (*advise)(struct seg *, caddr_t, size_t, uint_t);
 137  136          void    (*dump)(struct seg *);
↓ open down ↓ 65 lines elided ↑ open up ↑
 203  202  
 204  203  /* Page status bits for segop_incore */
 205  204  #define SEG_PAGE_INCORE         0x01    /* VA has a page backing it */
 206  205  #define SEG_PAGE_LOCKED         0x02    /* VA has a page that is locked */
 207  206  #define SEG_PAGE_HASCOW         0x04    /* VA has a page with a copy-on-write */
 208  207  #define SEG_PAGE_SOFTLOCK       0x08    /* VA has a page with softlock held */
 209  208  #define SEG_PAGE_VNODEBACKED    0x10    /* Segment is backed by a vnode */
 210  209  #define SEG_PAGE_ANON           0x20    /* VA has an anonymous page */
 211  210  #define SEG_PAGE_VNODE          0x40    /* VA has a vnode page backing it */
 212  211  
 213      -#define SEGOP_DUP(s, n)             (*(s)->s_ops->dup)((s), (n))
 214      -#define SEGOP_UNMAP(s, a, l)        (*(s)->s_ops->unmap)((s), (a), (l))
 215      -#define SEGOP_FREE(s)               (*(s)->s_ops->free)((s))
 216      -#define SEGOP_FAULT(h, s, a, l, t, rw) \
 217      -                (*(s)->s_ops->fault)((h), (s), (a), (l), (t), (rw))
 218      -#define SEGOP_FAULTA(s, a)          (*(s)->s_ops->faulta)((s), (a))
 219      -#define SEGOP_SETPROT(s, a, l, p)   (*(s)->s_ops->setprot)((s), (a), (l), (p))
 220      -#define SEGOP_CHECKPROT(s, a, l, p) (*(s)->s_ops->checkprot)((s), (a), (l), (p))
 221      -#define SEGOP_KLUSTER(s, a, d)      (*(s)->s_ops->kluster)((s), (a), (d))
 222      -#define SEGOP_SWAPOUT(s)            (*(s)->s_ops->swapout)((s))
 223      -#define SEGOP_SYNC(s, a, l, atr, f) \
 224      -                (*(s)->s_ops->sync)((s), (a), (l), (atr), (f))
 225      -#define SEGOP_INCORE(s, a, l, v)    (*(s)->s_ops->incore)((s), (a), (l), (v))
 226      -#define SEGOP_LOCKOP(s, a, l, atr, op, b, p) \
 227      -                (*(s)->s_ops->lockop)((s), (a), (l), (atr), (op), (b), (p))
 228      -#define SEGOP_GETPROT(s, a, l, p)   (*(s)->s_ops->getprot)((s), (a), (l), (p))
 229      -#define SEGOP_GETOFFSET(s, a)       (*(s)->s_ops->getoffset)((s), (a))
 230      -#define SEGOP_GETTYPE(s, a)         (*(s)->s_ops->gettype)((s), (a))
 231      -#define SEGOP_GETVP(s, a, vpp)      (*(s)->s_ops->getvp)((s), (a), (vpp))
 232      -#define SEGOP_ADVISE(s, a, l, b)    (*(s)->s_ops->advise)((s), (a), (l), (b))
 233      -#define SEGOP_DUMP(s)               (*(s)->s_ops->dump)((s))
 234      -#define SEGOP_PAGELOCK(s, a, l, p, t, rw) \
 235      -                (*(s)->s_ops->pagelock)((s), (a), (l), (p), (t), (rw))
 236      -#define SEGOP_SETPAGESIZE(s, a, l, szc) \
 237      -                (*(s)->s_ops->setpagesize)((s), (a), (l), (szc))
 238      -#define SEGOP_GETMEMID(s, a, mp)    (*(s)->s_ops->getmemid)((s), (a), (mp))
 239      -#define SEGOP_GETPOLICY(s, a)       (*(s)->s_ops->getpolicy)((s), (a))
 240      -#define SEGOP_CAPABLE(s, c)         (*(s)->s_ops->capable)((s), (c))
 241      -#define SEGOP_INHERIT(s, a, l, b)   (*(s)->s_ops->inherit)((s), (a), (l), (b))
 242      -
 243  212  #define seg_page(seg, addr) \
 244  213          (((uintptr_t)((addr) - (seg)->s_base)) >> PAGESHIFT)
 245  214  
 246  215  #define seg_pages(seg) \
 247  216          (((uintptr_t)((seg)->s_size + PAGEOFFSET)) >> PAGESHIFT)
 248  217  
 249  218  #define IE_NOMEM        -1      /* internal to seg layer */
 250  219  #define IE_RETRY        -2      /* internal to seg layer */
 251  220  #define IE_REATTACH     -3      /* internal to seg layer */
 252  221  
 253      -/* Values for SEGOP_INHERIT */
      222 +/* Values for segop_inherit */
 254  223  #define SEGP_INH_ZERO   0x01
 255  224  
 256      -int seg_inherit_notsup(struct seg *, caddr_t, size_t, uint_t);
 257      -
 258  225  /* Delay/retry factors for seg_p_mem_config_pre_del */
 259  226  #define SEGP_PREDEL_DELAY_FACTOR        4
 260  227  /*
 261  228   * As a workaround to being unable to purge the pagelock
 262  229   * cache during a DR delete memory operation, we use
 263  230   * a stall threshold that is twice the maximum seen
 264  231   * during testing.  This workaround will be removed
 265  232   * when a suitable fix is found.
 266  233   */
 267  234  #define SEGP_STALL_SECONDS      25
↓ open down ↓ 2 lines elided ↑ open up ↑
 270  237  
 271  238  #ifdef VMDEBUG
 272  239  
 273  240  uint_t  seg_page(struct seg *, caddr_t);
 274  241  uint_t  seg_pages(struct seg *);
 275  242  
 276  243  #endif  /* VMDEBUG */
 277  244  
 278  245  boolean_t       seg_can_change_zones(struct seg *);
 279  246  size_t          seg_swresv(struct seg *);
      247 +
      248 +/* segop wrappers */
      249 +int segop_dup(struct seg *, struct seg *);
      250 +int segop_unmap(struct seg *, caddr_t, size_t);
      251 +void segop_free(struct seg *);
      252 +faultcode_t segop_fault(struct hat *, struct seg *, caddr_t, size_t, enum fault_type, enum seg_rw);
      253 +faultcode_t segop_faulta(struct seg *, caddr_t);
      254 +int segop_setprot(struct seg *, caddr_t, size_t, uint_t);
      255 +int segop_checkprot(struct seg *, caddr_t, size_t, uint_t);
      256 +int segop_kluster(struct seg *, caddr_t, ssize_t);
      257 +int segop_sync(struct seg *, caddr_t, size_t, int, uint_t);
      258 +size_t segop_incore(struct seg *, caddr_t, size_t, char *);
      259 +int segop_lockop(struct seg *, caddr_t, size_t, int, int, ulong_t *, size_t );
      260 +int segop_getprot(struct seg *, caddr_t, size_t, uint_t *);
      261 +u_offset_t segop_getoffset(struct seg *, caddr_t);
      262 +int segop_gettype(struct seg *, caddr_t);
      263 +int segop_getvp(struct seg *, caddr_t, struct vnode **);
      264 +int segop_advise(struct seg *, caddr_t, size_t, uint_t);
      265 +void segop_dump(struct seg *);
      266 +int segop_pagelock(struct seg *, caddr_t, size_t, struct page ***, enum lock_type, enum seg_rw);
      267 +int segop_setpagesize(struct seg *, caddr_t, size_t, uint_t);
      268 +int segop_getmemid(struct seg *, caddr_t, memid_t *);
      269 +struct lgrp_mem_policy_info *segop_getpolicy(struct seg *, caddr_t);
      270 +int segop_capable(struct seg *, segcapability_t);
      271 +int segop_inherit(struct seg *, caddr_t, size_t, uint_t);
 280  272  
 281  273  #endif  /* _KERNEL */
 282  274  
 283  275  #ifdef  __cplusplus
 284  276  }
 285  277  #endif
 286  278  
 287  279  #endif  /* _VM_SEG_H */
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX