no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL accomplishes the same thing in most
cases; in the remaining cases, a NULL pointer results in the proper
error code being returned.
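
To see why a NULL pointer is enough: where the dispatch layer checks the op
before calling it, the caller gets a proper error code; where it does not,
calling through NULL still panics, much as the bad-op stub did. A minimal
sketch of such a guard follows. segop_sync() here is a hypothetical wrapper
for illustration, not the actual illumos dispatch macro; seg->s_ops and the
sync signature are assumptions based on the surrounding kernel headers.

/*
 * Hypothetical caller-side guard: treat a NULL op as "not supported".
 */
static int
segop_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
{
	if (seg->s_ops->sync == NULL)
		return (ENOTSUP);	/* proper error code, no panic stub needed */
	return ((*seg->s_ops->sync)(seg, addr, len, attr, flags));
}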

The segkpm driver before the change:

 *   where large pages cannot be used for other reasons (e.g. there are
 *   only a few fully associative TLB entries available for large pages).
 *
 * segmap_kpm -- separate on/off switch for segmap using segkpm:
 * . Set by default.
 * . Will be disabled when kpm_enable is zero.
 * . Will be disabled when MAXBSIZE != PAGESIZE.
 * . Can be disabled via /etc/system.
 *
 */
int kpm_enable = 1;
int kpm_smallpages = 0;
int segmap_kpm = 1;

/*
 * Private seg op routines.
 */
faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum fault_type type, enum seg_rw rw);
static void	segkpm_dump(struct seg *);
static void	segkpm_badop(void);
static int	segkpm_notsup(void);
static int	segkpm_capable(struct seg *, segcapability_t);

#define	SEGKPM_BADOP(t)	(t(*)())segkpm_badop
#define	SEGKPM_NOTSUP	(int(*)())segkpm_notsup

static struct seg_ops segkpm_ops = {
	.dup		= SEGKPM_BADOP(int),
	.unmap		= SEGKPM_BADOP(int),
	.free		= SEGKPM_BADOP(void),
	.fault		= segkpm_fault,
	.faulta		= SEGKPM_BADOP(int),
	.setprot	= SEGKPM_BADOP(int),
	.checkprot	= SEGKPM_BADOP(int),
	.kluster	= SEGKPM_BADOP(int),
	.sync		= SEGKPM_BADOP(int),
	.incore		= SEGKPM_BADOP(size_t),
	.lockop		= SEGKPM_BADOP(int),
	.getprot	= SEGKPM_BADOP(int),
	.getoffset	= SEGKPM_BADOP(u_offset_t),
	.gettype	= SEGKPM_BADOP(int),
	.getvp		= SEGKPM_BADOP(int),
	.advise		= SEGKPM_BADOP(int),
	.dump		= segkpm_dump,
	.pagelock	= SEGKPM_NOTSUP,
	.setpagesize	= SEGKPM_BADOP(int),
	.getmemid	= SEGKPM_BADOP(int),
	.getpolicy	= SEGKPM_BADOP(lgrp_mem_policy_info_t *),
	.capable	= segkpm_capable,
	.inherit	= seg_inherit_notsup,
};

/*
 * kpm_pgsz and kpm_pgshft are set by platform layer.
 */
size_t		kpm_pgsz;	/* kpm page size */
uint_t		kpm_pgshft;	/* kpm page shift */
u_offset_t	kpm_pgoff;	/* kpm page offset mask */
uint_t		kpmp2pshft;	/* kpm page to page shift */
pgcnt_t		kpmpnpgs;	/* how many pages per kpm page */


#ifdef	SEGKPM_SUPPORT

int
segkpm_create(struct seg *seg, void *argsp)
{
	struct segkpm_data *skd;
	struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
	ushort_t *p;

	/* ... */

	if ((pp = kpme->kpe_page) == NULL) {
		return;
	}

	if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
		goto retry;

	/*
	 * Check that the segkpm mapping was not unloaded in the meantime.
	 */
	if (kpme->kpe_page == NULL) {
		page_unlock(pp);
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);
	hat_kpm_mapout(pp, kpme, vaddr);
	page_unlock(pp);
}

static void
segkpm_badop()
{
	panic("segkpm_badop");
}

#else	/* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
int segkpm_create(struct seg *seg, void *argsp) { return (0); }

/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
	enum fault_type type, enum seg_rw rw)
{
	return ((faultcode_t)0);
}

/* ARGSUSED */
caddr_t segkpm_create_va(u_offset_t off) { return (NULL); }

/* ARGSUSED */
void segkpm_mapout_validkpme(struct kpme *kpme) {}

static void
segkpm_badop() {}

#endif	/* SEGKPM_SUPPORT */

static int
segkpm_notsup()
{
	return (ENOTSUP);
}

/*
 * segkpm pages are not dumped, so we just return
 */
/*ARGSUSED*/
static void
segkpm_dump(struct seg *seg)
{}

/*
 * We claim to have no special capabilities.
 */
/*ARGSUSED*/
static int
segkpm_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}

And after the change: the bad-op entries are gone (their seg_ops pointers
are simply left NULL), and pagelock gets a properly typed segkpm_pagelock()
in place of the cast segkpm_notsup() stub:

 *   where large pages cannot be used for other reasons (e.g. there are
 *   only a few fully associative TLB entries available for large pages).
 *
 * segmap_kpm -- separate on/off switch for segmap using segkpm:
 * . Set by default.
 * . Will be disabled when kpm_enable is zero.
 * . Will be disabled when MAXBSIZE != PAGESIZE.
 * . Can be disabled via /etc/system.
 *
 */
int kpm_enable = 1;
int kpm_smallpages = 0;
int segmap_kpm = 1;

/*
 * Private seg op routines.
 */
faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
			size_t len, enum fault_type type, enum seg_rw rw);
static void	segkpm_dump(struct seg *);
static int	segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
			struct page ***page, enum lock_type type,
			enum seg_rw rw);
static int	segkpm_capable(struct seg *, segcapability_t);

static struct seg_ops segkpm_ops = {
	.fault		= segkpm_fault,
	.dump		= segkpm_dump,
	.pagelock	= segkpm_pagelock,
	.capable	= segkpm_capable,
	.inherit	= seg_inherit_notsup,
};

/*
 * kpm_pgsz and kpm_pgshft are set by platform layer.
 */
size_t		kpm_pgsz;	/* kpm page size */
uint_t		kpm_pgshft;	/* kpm page shift */
u_offset_t	kpm_pgoff;	/* kpm page offset mask */
uint_t		kpmp2pshft;	/* kpm page to page shift */
pgcnt_t		kpmpnpgs;	/* how many pages per kpm page */


#ifdef	SEGKPM_SUPPORT

int
segkpm_create(struct seg *seg, void *argsp)
{
	struct segkpm_data *skd;
	struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
	ushort_t *p;

	/* ... */

	if ((pp = kpme->kpe_page) == NULL) {
		return;
	}

	if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
		goto retry;

	/*
	 * Check that the segkpm mapping was not unloaded in the meantime.
	 */
	if (kpme->kpe_page == NULL) {
		page_unlock(pp);
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);
	hat_kpm_mapout(pp, kpme, vaddr);
	page_unlock(pp);
}

#else	/* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
int segkpm_create(struct seg *seg, void *argsp)
{
	return (0);
}

/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
	enum fault_type type, enum seg_rw rw)
{
	return (0);
}

/* ARGSUSED */
caddr_t segkpm_create_va(u_offset_t off)
{
	return (NULL);
}

/* ARGSUSED */
void segkpm_mapout_validkpme(struct kpme *kpme)
{
}

#endif	/* SEGKPM_SUPPORT */

/* ARGSUSED */
static int
segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***page, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*
 * segkpm pages are not dumped, so we just return
 */
/*ARGSUSED*/
static void
segkpm_dump(struct seg *seg)
{
}

/*
 * We claim to have no special capabilities.
 */
/*ARGSUSED*/
static int
segkpm_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
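
As an aside, the segmap_kpm switch described in the header comment is an
ordinary kernel tunable, so the "can be disabled via /etc/system" note
corresponds to a line like the following (shown as an illustrative example
of the documented knob):

set segmap_kpm = 0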