use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle a NULL dump segop function pointer as a no-op shorthand.
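
Concretely, the shorthand lives in the common segop dispatch, which can treat the NULL as "nothing to do" before calling through the pointer. A minimal sketch, assuming a segop_dump()-style wrapper and the usual seg->s_ops pointer (not necessarily the exact illumos code):

void
segop_dump(struct seg *seg)
{
        /* a NULL dump op simply means there is nothing to dump */
        if (seg->s_ops->dump == NULL)
                return;

        seg->s_ops->dump(seg);
}
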
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle a NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
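
Again, the shorthand is handled in the dispatch path; a sketch (the segop_setpagesize() name and argument list here are assumptions):

int
segop_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
        /* a NULL setpagesize op means the driver cannot change page sizes */
        if (seg->s_ops->setpagesize == NULL)
                return (ENOTSUP);

        return (seg->s_ops->setpagesize(seg, addr, len, szc));
}
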
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENODEV, handle a NULL getmemid segop function pointer as
"return ENODEV" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as "no capabilities
supported" shorthand.
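
A sketch of the corresponding dispatch (again, the wrapper name is an assumption):

int
segop_capable(struct seg *seg, segcapability_t capability)
{
        /* a NULL capable op means "no special capabilities" */
        if (seg->s_ops->capable == NULL)
                return (0);

        return (seg->s_ops->capable(seg, capability));
}
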
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
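
That is, the dispatch already yields ENOTSUP when the inherit pointer is NULL, so the explicit stub buys nothing. A sketch of that check (wrapper name and signature assumed):

int
segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
{
        /* NULL behaves exactly like the seg_inherit_notsup stub did */
        if (seg->s_ops->inherit == NULL)
                return (ENOTSUP);

        return (seg->s_ops->inherit(seg, addr, len, op));
}
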
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL will accomplish the same thing in most
cases.  In the other cases, a NULL pointer will result in the proper error
code being returned.
use C99 initializers in segment ops structures
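
Positional initializers force every slot to be filled in declaration order (hence all the dummies), while designated initializers let a driver name only the ops it actually implements and leave the rest NULL. An illustrative, made-up segfoo example (hypothetical names, also const per the item above):

/* old style: positional, every op listed in order, stubs and all */
static struct seg_ops segfoo_ops_old = {
        segfoo_dup, segfoo_unmap, segfoo_free, segfoo_fault,
        /* ...and so on for every remaining op... */
};

/* C99 designated initializers: anything not named is implicitly NULL */
static const struct seg_ops segfoo_ops = {
        .fault          = segfoo_fault,
        .pagelock       = segfoo_pagelock,
};
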
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is there and in theory it runs when we get *extremely* low
on memory.  In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

To illustrate, here is the relevant part of seg_kpm.c before the cleanup:

 * . Set by default for platforms that don't support kpm large pages or
 *   where large pages cannot be used for other reasons (e.g. there are
 *   only few full associative TLB entries available for large pages).
 *
 * segmap_kpm -- separate on/off switch for segmap using segkpm:
 * . Set by default.
 * . Will be disabled when kpm_enable is zero.
 * . Will be disabled when MAXBSIZE != PAGESIZE.
 * . Can be disabled via /etc/system.
 *
 */
int kpm_enable = 1;
int kpm_smallpages = 0;
int segmap_kpm = 1;

/*
 * Private seg op routines.
 */
faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
                        size_t len, enum fault_type type, enum seg_rw rw);
static void     segkpm_dump(struct seg *);
static void     segkpm_badop(void);
static int      segkpm_notsup(void);
static int      segkpm_capable(struct seg *, segcapability_t);

#define SEGKPM_BADOP(t) (t(*)())segkpm_badop
#define SEGKPM_NOTSUP   (int(*)())segkpm_notsup

static struct seg_ops segkpm_ops = {
        SEGKPM_BADOP(int),      /* dup */
        SEGKPM_BADOP(int),      /* unmap */
        SEGKPM_BADOP(void),     /* free */
        segkpm_fault,
        SEGKPM_BADOP(int),      /* faulta */
        SEGKPM_BADOP(int),      /* setprot */
        SEGKPM_BADOP(int),      /* checkprot */
        SEGKPM_BADOP(int),      /* kluster */
        SEGKPM_BADOP(size_t),   /* swapout */
        SEGKPM_BADOP(int),      /* sync */
        SEGKPM_BADOP(size_t),   /* incore */
        SEGKPM_BADOP(int),      /* lockop */
        SEGKPM_BADOP(int),      /* getprot */
        SEGKPM_BADOP(u_offset_t), /* getoffset */
        SEGKPM_BADOP(int),      /* gettype */
        SEGKPM_BADOP(int),      /* getvp */
        SEGKPM_BADOP(int),      /* advise */
        segkpm_dump,            /* dump */
        SEGKPM_NOTSUP,          /* pagelock */
        SEGKPM_BADOP(int),      /* setpgsz */
        SEGKPM_BADOP(int),      /* getmemid */
        SEGKPM_BADOP(lgrp_mem_policy_info_t *), /* getpolicy */
        segkpm_capable,         /* capable */
        seg_inherit_notsup      /* inherit */
};

/*
 * kpm_pgsz and kpm_pgshft are set by platform layer.
 */
size_t          kpm_pgsz;       /* kpm page size */
uint_t          kpm_pgshft;     /* kpm page shift */
u_offset_t      kpm_pgoff;      /* kpm page offset mask */
uint_t          kpmp2pshft;     /* kpm page to page shift */
pgcnt_t         kpmpnpgs;       /* how many pages per kpm page */


#ifdef  SEGKPM_SUPPORT

int
segkpm_create(struct seg *seg, void *argsp)
{
        struct segkpm_data *skd;
        struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
        ushort_t *p;

[...]
        if ((pp = kpme->kpe_page) == NULL) {
                return;
        }

        if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
                goto retry;

        /*
         * Check if segkpm mapping is not unloaded in the meantime
         */
        if (kpme->kpe_page == NULL) {
                page_unlock(pp);
                return;
        }

        vaddr = hat_kpm_page2va(pp, 1);
        hat_kpm_mapout(pp, kpme, vaddr);
        page_unlock(pp);
}

static void
segkpm_badop()
{
        panic("segkpm_badop");
}

#else   /* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
int segkpm_create(struct seg *seg, void *argsp) { return (0); }

/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
        enum fault_type type, enum seg_rw rw)
{
        return ((faultcode_t)0);
}

/* ARGSUSED */
caddr_t segkpm_create_va(u_offset_t off) { return (NULL); }

/* ARGSUSED */
void segkpm_mapout_validkpme(struct kpme *kpme) {}

static void
segkpm_badop() {}

#endif  /* SEGKPM_SUPPORT */

static int
segkpm_notsup()
{
        return (ENOTSUP);
}

/*
 * segkpm pages are not dumped, so we just return
 */
/*ARGSUSED*/
static void
segkpm_dump(struct seg *seg)
{}

/*
 * We claim to have no special capabilities.
 */
/*ARGSUSED*/
static int
segkpm_capable(struct seg *seg, segcapability_t capability)
{
        return (0);
}

And here is the same part of seg_kpm.c with the changes applied:

 * . Set by default for platforms that don't support kpm large pages or
 *   where large pages cannot be used for other reasons (e.g. there are
 *   only few full associative TLB entries available for large pages).
 *
 * segmap_kpm -- separate on/off switch for segmap using segkpm:
 * . Set by default.
 * . Will be disabled when kpm_enable is zero.
 * . Will be disabled when MAXBSIZE != PAGESIZE.
 * . Can be disabled via /etc/system.
 *
 */
int kpm_enable = 1;
int kpm_smallpages = 0;
int segmap_kpm = 1;

/*
 * Private seg op routines.
 */
faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
                        size_t len, enum fault_type type, enum seg_rw rw);
static int      segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
                        struct page ***page, enum lock_type type,
                        enum seg_rw rw);

static const struct seg_ops segkpm_ops = {
        .fault          = segkpm_fault,
        .pagelock       = segkpm_pagelock,
//#ifndef SEGKPM_SUPPORT
#if 0
#error FIXME: define nop
        .dup            = nop,
        .unmap          = nop,
        .free           = nop,
        .faulta         = nop,
        .setprot        = nop,
        .checkprot      = nop,
        .kluster        = nop,
        .sync           = nop,
        .incore         = nop,
        .lockop         = nop,
        .getprot        = nop,
        .getoffset      = nop,
        .gettype        = nop,
        .getvp          = nop,
        .advise         = nop,
        .getpolicy      = nop,
#endif
};

/*
 * kpm_pgsz and kpm_pgshft are set by platform layer.
 */
size_t          kpm_pgsz;       /* kpm page size */
uint_t          kpm_pgshft;     /* kpm page shift */
u_offset_t      kpm_pgoff;      /* kpm page offset mask */
uint_t          kpmp2pshft;     /* kpm page to page shift */
pgcnt_t         kpmpnpgs;       /* how many pages per kpm page */


#ifdef  SEGKPM_SUPPORT

int
segkpm_create(struct seg *seg, void *argsp)
{
        struct segkpm_data *skd;
        struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
        ushort_t *p;

[...]
        if ((pp = kpme->kpe_page) == NULL) {
                return;
        }

        if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
                goto retry;

        /*
         * Check if segkpm mapping is not unloaded in the meantime
         */
        if (kpme->kpe_page == NULL) {
                page_unlock(pp);
                return;
        }

        vaddr = hat_kpm_page2va(pp, 1);
        hat_kpm_mapout(pp, kpme, vaddr);
        page_unlock(pp);
}

#else   /* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
int segkpm_create(struct seg *seg, void *argsp)
{
        return (0);
}

/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
        enum fault_type type, enum seg_rw rw)
{
        return (0);
}

/* ARGSUSED */
caddr_t segkpm_create_va(u_offset_t off)
{
        return (NULL);
}

/* ARGSUSED */
void segkpm_mapout_validkpme(struct kpme *kpme)
{
}

#endif  /* SEGKPM_SUPPORT */

/* ARGSUSED */
static int
segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
        struct page ***page, enum lock_type type, enum seg_rw rw)
{
        return (ENOTSUP);
}