Print this page
use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle NULL dump segop function pointer as a no-op shorthand.
const-ify make segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle NULL capable segop function pointer as "no capabilities
supported" shorthand.
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory.  The code is still present and in theory runs when we get *extremely*
low on memory.  In practice, it never runs because the low-on-memory threshold
it uses is antiquated and no longer reached on modern systems. (XXX: spell out
precisely why the threshold is obsolete)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/i86xpv/vm/seg_mf.c
          +++ new/usr/src/uts/i86xpv/vm/seg_mf.c
↓ open down ↓ 91 lines elided ↑ open up ↑
  92   92  struct segmf_data {
  93   93          kmutex_t        lock;
  94   94          struct vnode    *vp;
  95   95          uchar_t         prot;
  96   96          uchar_t         maxprot;
  97   97          size_t          softlockcnt;
  98   98          domid_t         domid;
  99   99          segmf_map_t     *map;
 100  100  };
 101  101  
 102      -static struct seg_ops segmf_ops;
      102 +static const struct seg_ops segmf_ops;
 103  103  
 104  104  static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
 105  105  
 106  106  static struct segmf_data *
 107  107  segmf_data_zalloc(struct seg *seg)
 108  108  {
 109  109          struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
 110  110  
 111  111          mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
 112  112          seg->s_ops = &segmf_ops;
↓ open down ↓ 350 lines elided ↑ open up ↑
 463  463  }
 464  464  
 465  465  /*ARGSUSED*/
 466  466  static int
 467  467  segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
 468  468  {
 469  469          return (0);
 470  470  }
 471  471  
 472  472  /*ARGSUSED*/
 473      -static void
 474      -segmf_dump(struct seg *seg)
 475      -{}
 476      -
 477      -/*ARGSUSED*/
 478  473  static int
 479  474  segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
 480  475      struct page ***ppp, enum lock_type type, enum seg_rw rw)
 481  476  {
 482  477          return (ENOTSUP);
 483  478  }
 484  479  
 485      -/*ARGSUSED*/
 486      -static int
 487      -segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
 488      -{
 489      -        return (ENOTSUP);
 490      -}
 491      -
 492  480  static int
 493  481  segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
 494  482  {
 495  483          struct segmf_data *data = seg->s_data;
 496  484  
 497  485          memid->val[0] = (uintptr_t)VTOCVP(data->vp);
 498  486          memid->val[1] = (uintptr_t)seg_page(seg, addr);
 499  487          return (0);
 500  488  }
 501  489  
 502      -/*ARGSUSED*/
 503      -static lgrp_mem_policy_info_t *
 504      -segmf_getpolicy(struct seg *seg, caddr_t addr)
 505      -{
 506      -        return (NULL);
 507      -}
 508      -
 509      -/*ARGSUSED*/
 510      -static int
 511      -segmf_capable(struct seg *seg, segcapability_t capability)
 512      -{
 513      -        return (0);
 514      -}
 515      -
 516  490  /*
 517  491   * Add a set of contiguous foreign MFNs to the segment. soft-locking them.  The
 518  492   * pre-faulting is necessary due to live migration; in particular we must
 519  493   * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 520  494   * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 521  495   * ioctl()s, we lock them too, as they should be transitory.
 522  496   */
 523  497  int
 524  498  segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
 525  499      pgcnt_t pgcnt, domid_t domid)
↓ open down ↓ 224 lines elided ↑ open up ↑
 750  724          /* save handle for segmf_release_grefs() and mark it as mapped */
 751  725          for (i = 0; i < cnt; i++) {
 752  726                  ASSERT(mapop[i].status == GNTST_okay);
 753  727                  map[i].u.g.g_handle = mapop[i].handle;
 754  728                  map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
 755  729          }
 756  730  
 757  731          return (0);
 758  732  }
 759  733  
 760      -static struct seg_ops segmf_ops = {
 761      -        segmf_dup,
 762      -        segmf_unmap,
 763      -        segmf_free,
 764      -        segmf_fault,
 765      -        segmf_faulta,
 766      -        segmf_setprot,
 767      -        segmf_checkprot,
 768      -        (int (*)())segmf_kluster,
 769      -        (size_t (*)(struct seg *))NULL, /* swapout */
 770      -        segmf_sync,
 771      -        segmf_incore,
 772      -        segmf_lockop,
 773      -        segmf_getprot,
 774      -        segmf_getoffset,
 775      -        segmf_gettype,
 776      -        segmf_getvp,
 777      -        segmf_advise,
 778      -        segmf_dump,
 779      -        segmf_pagelock,
 780      -        segmf_setpagesize,
 781      -        segmf_getmemid,
 782      -        segmf_getpolicy,
 783      -        segmf_capable,
 784      -        seg_inherit_notsup
      734 +static const struct seg_ops segmf_ops = {
      735 +        .dup            = segmf_dup,
      736 +        .unmap          = segmf_unmap,
      737 +        .free           = segmf_free,
      738 +        .fault          = segmf_fault,
      739 +        .faulta         = segmf_faulta,
      740 +        .setprot        = segmf_setprot,
      741 +        .checkprot      = segmf_checkprot,
      742 +        .kluster        = segmf_kluster,
      743 +        .sync           = segmf_sync,
      744 +        .incore         = segmf_incore,
      745 +        .lockop         = segmf_lockop,
      746 +        .getprot        = segmf_getprot,
      747 +        .getoffset      = segmf_getoffset,
      748 +        .gettype        = segmf_gettype,
      749 +        .getvp          = segmf_getvp,
      750 +        .advise         = segmf_advise,
      751 +        .pagelock       = segmf_pagelock,
      752 +        .getmemid       = segmf_getmemid,
 785  753  };
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX