const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function that
just returns (hopefully) ENODEV, handle a NULL getmemid segop function
pointer as "return ENODEV" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as "no capabilities
supported" shorthand.
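The corresponding dispatch-side check might look like this (same wrapper
assumption as above):

    int
    segop_capable(struct seg *seg, segcapability_t cap)
    {
            /* a NULL capable op is shorthand for "no capabilities" */
            if (seg->s_ops->capable == NULL)
                    return (0);

            return (seg->s_ops->capable(seg, cap));
    }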
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
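In other words, both dispatch points already degrade gracefully when the
op is NULL, roughly along these lines (a sketch; exact signatures as
declared for these segops in vm/seg.h):

    lgrp_mem_policy_info_t *
    segop_getpolicy(struct seg *seg, caddr_t addr)
    {
            /* no getpolicy op means the segment has no policy to report */
            if (seg->s_ops->getpolicy == NULL)
                    return (NULL);

            return (seg->s_ops->getpolicy(seg, addr));
    }

    int
    segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
    {
            /* a NULL inherit op behaves just like seg_inherit_notsup() */
            if (seg->s_ops->inherit == NULL)
                    return (ENOTSUP);

            return (seg->s_ops->inherit(seg, addr, len, op));
    }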
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Leaving the function pointer NULL accomplishes the same thing in most
cases.  In the remaining cases, a NULL function pointer results in the
proper error code being returned.
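For a mandatory op such as dup, the dispatch point can simply omit the
NULL check; a driver that leaves the pointer unset then dies with a
NULL-pointer panic just as loudly as the old SEGKP_BADOP() stubs did.
A sketch, under the same wrapper assumption as above:

    int
    segop_dup(struct seg *seg, struct seg *new)
    {
            /*
             * No NULL check here: calling through a NULL .dup pointer
             * panics, much like segkp_badop() used to.
             */
            return (seg->s_ops->dup(seg, new));
    }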
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to
reclaim memory.  The code is still there, and in theory it runs when we
get *extremely* low on memory.  In practice, it never runs, since the
definition of low-on-memory is antiquated. (XXX: define what antiquated
means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

          --- old/usr/src/uts/common/vm/seg_kp.c
          +++ new/usr/src/uts/common/vm/seg_kp.c
... 67 lines elided ...
  68   68  #include <vm/seg_kp.h>
  69   69  #include <vm/seg_kmem.h>
  70   70  #include <vm/anon.h>
  71   71  #include <vm/page.h>
  72   72  #include <vm/hat.h>
  73   73  #include <sys/bitmap.h>
  74   74  
  75   75  /*
  76   76   * Private seg op routines
  77   77   */
  78      -static void     segkp_badop(void);
  79   78  static void     segkp_dump(struct seg *seg);
  80   79  static int      segkp_checkprot(struct seg *seg, caddr_t addr, size_t len,
  81   80                          uint_t prot);
  82   81  static int      segkp_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
  83   82  static int      segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
  84   83                          struct page ***page, enum lock_type type,
  85   84                          enum seg_rw rw);
  86   85  static void     segkp_insert(struct seg *seg, struct segkp_data *kpd);
  87   86  static void     segkp_delete(struct seg *seg, struct segkp_data *kpd);
  88   87  static caddr_t  segkp_get_internal(struct seg *seg, size_t len, uint_t flags,
  89   88                          struct segkp_data **tkpd, struct anon_map *amp);
  90   89  static void     segkp_release_internal(struct seg *seg,
  91   90                          struct segkp_data *kpd, size_t len);
  92   91  static int      segkp_unlock(struct hat *hat, struct seg *seg, caddr_t vaddr,
  93   92                          size_t len, struct segkp_data *kpd, uint_t flags);
  94   93  static int      segkp_load(struct hat *hat, struct seg *seg, caddr_t vaddr,
  95   94                          size_t len, struct segkp_data *kpd, uint_t flags);
  96   95  static struct   segkp_data *segkp_find(struct seg *seg, caddr_t vaddr);
  97      -static int      segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
  98      -static lgrp_mem_policy_info_t   *segkp_getpolicy(struct seg *seg,
  99      -    caddr_t addr);
 100      -static int      segkp_capable(struct seg *seg, segcapability_t capability);
 101   96  
 102   97  /*
 103   98   * Lock used to protect the hash table(s) and caches.
 104   99   */
 105  100  static kmutex_t segkp_lock;
 106  101  
 107  102  /*
 108  103   * The segkp caches
 109  104   */
 110  105  static struct segkp_cache segkp_cache[SEGKP_MAX_CACHE];
 111  106  
 112      -#define SEGKP_BADOP(t)  (t(*)())segkp_badop
 113      -
 114  107  /*
 115  108   * When there are fewer than red_minavail bytes left on the stack,
 116  109   * segkp_map_red() will map in the redzone (if called).  5000 seems
 117  110   * to work reasonably well...
 118  111   */
 119  112  long            red_minavail = 5000;
 120  113  
 121  114  /*
 122  115   * will be set to 1 for 32 bit x86 systems only, in startup.c
 123  116   */
... 14 lines elided ...
 138  131  hrtime_t        red_deep_hires;
 139  132  kthread_t       *red_deep_thread;
 140  133  
 141  134  uint32_t        red_nmapped;
 142  135  uint32_t        red_closest = UINT_MAX;
 143  136  uint32_t        red_ndoubles;
 144  137  
 145  138  pgcnt_t anon_segkp_pages_locked;        /* See vm/anon.h */
 146  139  pgcnt_t anon_segkp_pages_resv;          /* anon reserved by seg_kp */
 147  140  
 148      -static struct   seg_ops segkp_ops = {
 149      -        SEGKP_BADOP(int),               /* dup */
 150      -        SEGKP_BADOP(int),               /* unmap */
 151      -        SEGKP_BADOP(void),              /* free */
 152      -        segkp_fault,
 153      -        SEGKP_BADOP(faultcode_t),       /* faulta */
 154      -        SEGKP_BADOP(int),               /* setprot */
 155      -        segkp_checkprot,
 156      -        segkp_kluster,
 157      -        SEGKP_BADOP(size_t),            /* swapout */
 158      -        SEGKP_BADOP(int),               /* sync */
 159      -        SEGKP_BADOP(size_t),            /* incore */
 160      -        SEGKP_BADOP(int),               /* lockop */
 161      -        SEGKP_BADOP(int),               /* getprot */
 162      -        SEGKP_BADOP(u_offset_t),                /* getoffset */
 163      -        SEGKP_BADOP(int),               /* gettype */
 164      -        SEGKP_BADOP(int),               /* getvp */
 165      -        SEGKP_BADOP(int),               /* advise */
 166      -        segkp_dump,                     /* dump */
 167      -        segkp_pagelock,                 /* pagelock */
 168      -        SEGKP_BADOP(int),               /* setpgsz */
 169      -        segkp_getmemid,                 /* getmemid */
 170      -        segkp_getpolicy,                /* getpolicy */
 171      -        segkp_capable,                  /* capable */
 172      -        seg_inherit_notsup              /* inherit */
      141 +static const struct seg_ops segkp_ops = {
      142 +        .fault          = segkp_fault,
      143 +        .checkprot      = segkp_checkprot,
      144 +        .kluster        = segkp_kluster,
      145 +        .dump           = segkp_dump,
      146 +        .pagelock       = segkp_pagelock,
 173  147  };
 174  148  
 175  149  
 176      -static void
 177      -segkp_badop(void)
 178      -{
 179      -        panic("segkp_badop");
 180      -        /*NOTREACHED*/
 181      -}
 182      -
 183  150  static void segkpinit_mem_config(struct seg *);
 184  151  
 185  152  static uint32_t segkp_indel;
 186  153  
 187  154  /*
 188  155   * Allocate the segment specific private data struct and fill it in
 189  156   * with the per kp segment mutex, anon ptr. array and hash table.
 190  157   */
 191  158  int
 192  159  segkp_create(struct seg *seg)
... 560 lines elided ...
 753  720  
 754  721          vmem_free(SEGKP_VMEM(seg), kpd->kp_base, kpd->kp_len);
 755  722          kmem_free(kpd, sizeof (struct segkp_data));
 756  723  }
 757  724  
 758  725  /*
 759  726   * segkp_map_red() will check the current frame pointer against the
 760  727   * stack base.  If the amount of stack remaining is questionable
 761  728   * (less than red_minavail), then segkp_map_red() will map in the redzone
 762  729   * and return 1.  Otherwise, it will return 0.  segkp_map_red() can
 763      - * _only_ be called when:
 764      - *
 765      - *   - it is safe to sleep on page_create_va().
 766      - *   - the caller is non-swappable.
      730 + * _only_ be called when it is safe to sleep on page_create_va().
 767  731   *
 768  732   * It is up to the caller to remember whether segkp_map_red() successfully
 769  733   * mapped the redzone, and, if so, to call segkp_unmap_red() at a later
 770      - * time.  Note that the caller must _remain_ non-swappable until after
 771      - * calling segkp_unmap_red().
      734 + * time.
 772  735   *
 773  736   * Currently, this routine is only called from pagefault() (which necessarily
 774  737   * satisfies the above conditions).
 775  738   */
 776  739  #if defined(STACK_GROWTH_DOWN)
 777  740  int
 778  741  segkp_map_red(void)
 779  742  {
 780  743          uintptr_t fp = STACK_BIAS + (uintptr_t)getfp();
 781  744  #ifndef _LP64
 782  745          caddr_t stkbase;
 783  746  #endif
 784  747  
 785      -        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 786      -
 787  748          /*
 788  749           * Optimize for the common case where we simply return.
 789  750           */
 790  751          if ((curthread->t_red_pp == NULL) &&
 791  752              (fp - (uintptr_t)curthread->t_stkbase >= red_minavail))
 792  753                  return (0);
 793  754  
 794  755  #if defined(_LP64)
 795  756          /*
 796  757           * XXX  We probably need something better than this.
... 80 lines elided ...
 877  838  }
 878  839  
 879  840  void
 880  841  segkp_unmap_red(void)
 881  842  {
 882  843          page_t *pp;
 883  844          caddr_t red_va = (caddr_t)(((uintptr_t)curthread->t_stkbase &
 884  845              (uintptr_t)PAGEMASK) - PAGESIZE);
 885  846  
 886  847          ASSERT(curthread->t_red_pp != NULL);
 887      -        ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
 888  848  
 889  849          /*
 890  850           * Because we locked the mapping down, we can't simply rely
 891  851           * on page_destroy() to clean everything up;  we need to call
 892  852           * hat_unload() to explicitly unlock the mapping resources.
 893  853           */
 894  854          hat_unload(kas.a_hat, red_va, PAGESIZE, HAT_UNLOAD_UNLOCK);
 895  855  
 896  856          pp = curthread->t_red_pp;
 897  857  
... 492 lines elided ...
1390 1350                  }
1391 1351          }
1392 1352  }
1393 1353  
1394 1354  /*ARGSUSED*/
1395 1355  static int
1396 1356  segkp_pagelock(struct seg *seg, caddr_t addr, size_t len,
1397 1357      struct page ***ppp, enum lock_type type, enum seg_rw rw)
1398 1358  {
1399 1359          return (ENOTSUP);
1400      -}
1401      -
1402      -/*ARGSUSED*/
1403      -static int
1404      -segkp_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
1405      -{
1406      -        return (ENODEV);
1407      -}
1408      -
1409      -/*ARGSUSED*/
1410      -static lgrp_mem_policy_info_t   *
1411      -segkp_getpolicy(struct seg *seg, caddr_t addr)
1412      -{
1413      -        return (NULL);
1414      -}
1415      -
1416      -/*ARGSUSED*/
1417      -static int
1418      -segkp_capable(struct seg *seg, segcapability_t capability)
1419      -{
1420      -        return (0);
1421 1360  }
1422 1361  
1423 1362  #include <sys/mem_config.h>
1424 1363  
1425 1364  /*ARGSUSED*/
1426 1365  static void
1427 1366  segkp_mem_config_post_add(void *arg, pgcnt_t delta_pages)
1428 1367  {}
1429 1368  
1430 1369  /*
... 36 lines elided ...