Print this page
use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle NULL dump segop function pointer as a no-op shorthand.
const-ify make segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle NULL capable segop function pointer as "no capabilities
supported" shorthand.
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/seg_dev.c
          +++ new/usr/src/uts/common/vm/seg_dev.c
↓ open down ↓ 170 lines elided ↑ open up ↑
 171  171  static void     segdev_badop(void);
 172  172  static int      segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
 173  173  static size_t   segdev_incore(struct seg *, caddr_t, size_t, char *);
 174  174  static int      segdev_lockop(struct seg *, caddr_t, size_t, int, int,
 175  175                      ulong_t *, size_t);
 176  176  static int      segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
 177  177  static u_offset_t       segdev_getoffset(struct seg *, caddr_t);
 178  178  static int      segdev_gettype(struct seg *, caddr_t);
 179  179  static int      segdev_getvp(struct seg *, caddr_t, struct vnode **);
 180  180  static int      segdev_advise(struct seg *, caddr_t, size_t, uint_t);
 181      -static void     segdev_dump(struct seg *);
 182  181  static int      segdev_pagelock(struct seg *, caddr_t, size_t,
 183  182                      struct page ***, enum lock_type, enum seg_rw);
 184      -static int      segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
 185  183  static int      segdev_getmemid(struct seg *, caddr_t, memid_t *);
 186      -static lgrp_mem_policy_info_t   *segdev_getpolicy(struct seg *, caddr_t);
 187      -static int      segdev_capable(struct seg *, segcapability_t);
 188  184  
 189  185  /*
 190  186   * XXX  this struct is used by rootnex_map_fault to identify
 191  187   *      the segment it has been passed. So if you make it
 192  188   *      "static" you'll need to fix rootnex_map_fault.
 193  189   */
 194      -struct seg_ops segdev_ops = {
 195      -        segdev_dup,
 196      -        segdev_unmap,
 197      -        segdev_free,
 198      -        segdev_fault,
 199      -        segdev_faulta,
 200      -        segdev_setprot,
 201      -        segdev_checkprot,
 202      -        (int (*)())segdev_badop,        /* kluster */
 203      -        (size_t (*)(struct seg *))NULL, /* swapout */
 204      -        segdev_sync,                    /* sync */
 205      -        segdev_incore,
 206      -        segdev_lockop,                  /* lockop */
 207      -        segdev_getprot,
 208      -        segdev_getoffset,
 209      -        segdev_gettype,
 210      -        segdev_getvp,
 211      -        segdev_advise,
 212      -        segdev_dump,
 213      -        segdev_pagelock,
 214      -        segdev_setpagesize,
 215      -        segdev_getmemid,
 216      -        segdev_getpolicy,
 217      -        segdev_capable,
 218      -        seg_inherit_notsup
      190 +const struct seg_ops segdev_ops = {
      191 +        .dup            = segdev_dup,
      192 +        .unmap          = segdev_unmap,
      193 +        .free           = segdev_free,
      194 +        .fault          = segdev_fault,
      195 +        .faulta         = segdev_faulta,
      196 +        .setprot        = segdev_setprot,
      197 +        .checkprot      = segdev_checkprot,
      198 +        .kluster        = (int (*)())segdev_badop,
      199 +        .sync           = segdev_sync,
      200 +        .incore         = segdev_incore,
      201 +        .lockop         = segdev_lockop,
      202 +        .getprot        = segdev_getprot,
      203 +        .getoffset      = segdev_getoffset,
      204 +        .gettype        = segdev_gettype,
      205 +        .getvp          = segdev_getvp,
      206 +        .advise         = segdev_advise,
      207 +        .pagelock       = segdev_pagelock,
      208 +        .getmemid       = segdev_getmemid,
 219  209  };
 220  210  
 221  211  /*
 222  212   * Private segdev support routines
 223  213   */
 224  214  static struct segdev_data *sdp_alloc(void);
 225  215  
 226  216  static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
 227  217      size_t, enum seg_rw);
 228  218  
↓ open down ↓ 196 lines elided ↑ open up ↑
 425  415  
 426  416          if (error != 0) {
 427  417                  sdp->devmap_data = NULL;
 428  418                  hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
 429  419                      HAT_UNLOAD_UNMAP);
 430  420          } else {
 431  421                  /*
 432  422                   * Mappings of /dev/null don't count towards the VSZ of a
 433  423                   * process.  Mappings of /dev/null have no mapping type.
 434  424                   */
 435      -                if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
      425 +                if ((segop_gettype(seg, seg->s_base) & (MAP_SHARED |
 436  426                      MAP_PRIVATE)) == 0) {
 437  427                          seg->s_as->a_resvsize -= seg->s_size;
 438  428                  }
 439  429          }
 440  430  
 441  431          return (error);
 442  432  }
 443  433  
 444  434  static struct segdev_data *
 445  435  sdp_alloc(void)
↓ open down ↓ 1928 lines elided ↑ open up ↑
2374 2364  segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2375 2365  {
2376 2366          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2377 2367  
2378 2368          ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2379 2369  
2380 2370          return (0);
2381 2371  }
2382 2372  
2383 2373  /*
2384      - * segdev pages are not dumped, so we just return
2385      - */
2386      -/*ARGSUSED*/
2387      -static void
2388      -segdev_dump(struct seg *seg)
2389      -{}
2390      -
2391      -/*
2392 2374   * ddi_segmap_setup:    Used by drivers who wish specify mapping attributes
2393 2375   *                      for a segment.  Called from a drivers segmap(9E)
2394 2376   *                      routine.
2395 2377   */
2396 2378  /*ARGSUSED*/
2397 2379  int
2398 2380  ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2399 2381      off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2400 2382      ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2401 2383  {
↓ open down ↓ 71 lines elided ↑ open up ↑
2473 2455  /*ARGSUSED*/
2474 2456  static int
2475 2457  segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
2476 2458      struct page ***ppp, enum lock_type type, enum seg_rw rw)
2477 2459  {
2478 2460          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
2479 2461              "segdev_pagelock:start");
2480 2462          return (ENOTSUP);
2481 2463  }
2482 2464  
2483      -/*ARGSUSED*/
2484      -static int
2485      -segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
2486      -    uint_t szc)
2487      -{
2488      -        return (ENOTSUP);
2489      -}
2490      -
2491 2465  /*
2492 2466   * devmap_device: Used by devmap framework to establish mapping
2493 2467   *                called by devmap_seup(9F) during map setup time.
2494 2468   */
2495 2469  /*ARGSUSED*/
2496 2470  static int
2497 2471  devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2498 2472      offset_t off, size_t len, uint_t flags)
2499 2473  {
2500 2474          devmap_handle_t *rdhp, *maxdhp;
↓ open down ↓ 1521 lines elided ↑ open up ↑
4022 3996  {
4023 3997          struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4024 3998  
4025 3999          /*
4026 4000           * It looks as if it is always mapped shared
4027 4001           */
4028 4002          TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4029 4003              "segdev_getmemid:start");
4030 4004          memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4031 4005          memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4032      -        return (0);
4033      -}
4034      -
4035      -/*ARGSUSED*/
4036      -static lgrp_mem_policy_info_t *
4037      -segdev_getpolicy(struct seg *seg, caddr_t addr)
4038      -{
4039      -        return (NULL);
4040      -}
4041      -
4042      -/*ARGSUSED*/
4043      -static int
4044      -segdev_capable(struct seg *seg, segcapability_t capability)
4045      -{
4046 4006          return (0);
4047 4007  }
4048 4008  
4049 4009  /*
4050 4010   * ddi_umem_alloc() non-pageable quantum cache max size.
4051 4011   * This is just a SWAG.
4052 4012   */
4053 4013  #define DEVMAP_UMEM_QUANTUM     (8*PAGESIZE)
4054 4014  
4055 4015  /*
↓ open down ↓ 47 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX