const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
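For reference, a minimal sketch of what the dispatch wrappers do with a missing
op (assumed shape only; the real wrappers in vm_seg.c may differ in detail):

	/* Sketch: a NULL .getpolicy simply means "no explicit policy". */
	lgrp_mem_policy_info_t *
	segop_getpolicy(struct seg *seg, caddr_t addr)
	{
		if (seg->s_ops->getpolicy == NULL)
			return (NULL);
		return (seg->s_ops->getpolicy(seg, addr));
	}

	/*
	 * Sketch: a NULL .inherit already yields ENOTSUP, which is all
	 * seg_inherit_notsup ever did.
	 */
	int
	segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
	{
		if (seg->s_ops->inherit == NULL)
			return (ENOTSUP);
		return (seg->s_ops->inherit(seg, addr, len, op));
	}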
no need for bad-op segment op functions
The segment drivers have a number of bad-op functions that simply panic.
Keeping the function pointer NULL accomplishes the same thing in most cases;
in the remaining cases, a NULL pointer results in the proper error code being
returned.
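The wrapper-side pattern, as a hedged sketch (the ops and fallbacks below are
illustrative, not copied from vm_seg.c):

	/* Optional op: a NULL pointer means there is nothing to do. */
	int
	segop_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
	{
		if (seg->s_ops->advise == NULL)
			return (0);
		return (seg->s_ops->advise(seg, addr, len, behav));
	}

	/*
	 * Mandatory op: a NULL pointer is still a bug, but the panic now
	 * lives in one central check instead of per-driver bad-op stubs.
	 */
	int
	segop_dup(struct seg *seg, struct seg *newseg)
	{
		VERIFY(seg->s_ops->dup != NULL);
		return (seg->s_ops->dup(seg, newseg));
	}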
use lower-case segop_*() calls in place of the SEGOP_*() macros
use C99 initializers in segment ops structures
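The conversion itself is visible in the seg_kmem.c hunk below; the general
effect, shown on a made-up two-op structure (hypothetical names, for
illustration only), is:

	struct example_ops {
		int	(*open)(void);
		int	(*close)(void);
	};

	static int example_open(void) { return (0); }

	/* Old style: positional, order-dependent, NULL slots spelled out. */
	static struct example_ops old_style = { example_open, NULL };

	/*
	 * New style: designated initializers are order-independent, and any
	 * member left out is implicitly zero-initialized, i.e. a NULL
	 * function pointer, which is exactly what the NULL-op handling
	 * above relies on.
	 */
	static const struct example_ops new_style = {
		.open = example_open,
	};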
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory.  The code is still there, and in theory it runs when we get
*extremely* low on memory.  In practice it never runs, because the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout

--- old/usr/src/uts/common/vm/seg_kmem.c
+++ new/usr/src/uts/common/vm/seg_kmem.c
[ 420 lines elided ]
 421  421          if (bop_alloc_chunk(addr, size, align) != (caddr_t)addr)
 422  422                  panic("boot_alloc: bop_alloc_chunk failed");
 423  423  #else
 424  424          if (BOP_ALLOC(bootops, addr, size, align) != addr)
 425  425                  panic("boot_alloc: BOP_ALLOC failed");
 426  426  #endif
 427  427          boot_mapin((caddr_t)addr, size);
 428  428          return (addr);
 429  429  }
 430  430  
 431      -static void
 432      -segkmem_badop()
 433      -{
 434      -        panic("segkmem_badop");
 435      -}
 436      -
 437      -#define SEGKMEM_BADOP(t)        (t(*)())segkmem_badop
 438      -
 439  431  /*ARGSUSED*/
 440  432  static faultcode_t
 441  433  segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
 442  434          enum fault_type type, enum seg_rw rw)
 443  435  {
 444  436          pgcnt_t npages;
 445  437          spgcnt_t pg;
 446  438          page_t *pp;
 447  439          struct vnode *vp = seg->s_data;
 448  440  
[ 1 line elided ]
 450  442  
 451  443          if (seg->s_as != &kas || size > seg->s_size ||
 452  444              addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
 453  445                  panic("segkmem_fault: bad args");
 454  446  
 455  447          /*
 456  448           * If it is one of segkp pages, call segkp_fault.
 457  449           */
 458  450          if (segkp_bitmap && seg == &kvseg &&
 459  451              BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 460      -                return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
      452 +                return (segop_fault(hat, segkp, addr, size, type, rw));
 461  453  
 462  454          if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
 463  455                  return (FC_NOSUPPORT);
 464  456  
 465  457          npages = btopr(size);
 466  458  
 467  459          switch (type) {
 468  460          case F_SOFTLOCK:        /* lock down already-loaded translations */
 469  461                  for (pg = 0; pg < npages; pg++) {
 470  462                          pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
[ 41 lines elided ]
 512  504  
 513  505          if (seg->s_as != &kas || size > seg->s_size ||
 514  506              addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
 515  507                  panic("segkmem_setprot: bad args");
 516  508  
 517  509          /*
 518  510           * If it is one of segkp pages, call segkp.
 519  511           */
 520  512          if (segkp_bitmap && seg == &kvseg &&
 521  513              BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 522      -                return (SEGOP_SETPROT(segkp, addr, size, prot));
      514 +                return (segop_setprot(segkp, addr, size, prot));
 523  515  
 524  516          if (prot == 0)
 525  517                  hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
 526  518          else
 527  519                  hat_chgprot(kas.a_hat, addr, size, prot);
 528  520          return (0);
 529  521  }
 530  522  
 531  523  /*
 532  524   * This is a dummy segkmem function overloaded to call segkp
 533  525   * when segkp is under the heap.
 534  526   */
 535  527  /* ARGSUSED */
 536  528  static int
 537  529  segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
 538  530  {
 539  531          ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 540  532  
 541  533          if (seg->s_as != &kas)
 542      -                segkmem_badop();
      534 +                panic("segkmem badop");
 543  535  
 544  536          /*
 545  537           * If it is one of segkp pages, call into segkp.
 546  538           */
 547  539          if (segkp_bitmap && seg == &kvseg &&
 548  540              BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 549      -                return (SEGOP_CHECKPROT(segkp, addr, size, prot));
      541 +                return (segop_checkprot(segkp, addr, size, prot));
 550  542  
 551      -        segkmem_badop();
      543 +        panic("segkmem badop");
 552  544          return (0);
 553  545  }
 554  546  
 555  547  /*
 556  548   * This is a dummy segkmem function overloaded to call segkp
 557  549   * when segkp is under the heap.
 558  550   */
 559  551  /* ARGSUSED */
 560  552  static int
 561  553  segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
 562  554  {
 563  555          ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 564  556  
 565  557          if (seg->s_as != &kas)
 566      -                segkmem_badop();
      558 +                panic("segkmem badop");
 567  559  
 568  560          /*
 569  561           * If it is one of segkp pages, call into segkp.
 570  562           */
 571  563          if (segkp_bitmap && seg == &kvseg &&
 572  564              BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 573      -                return (SEGOP_KLUSTER(segkp, addr, delta));
      565 +                return (segop_kluster(segkp, addr, delta));
 574  566  
 575      -        segkmem_badop();
      567 +        panic("segkmem badop");
 576  568          return (0);
 577  569  }
 578  570  
 579  571  static void
 580  572  segkmem_xdump_range(void *arg, void *start, size_t size)
 581  573  {
 582  574          struct as *as = arg;
 583  575          caddr_t addr = start;
 584  576          caddr_t addr_end = addr + size;
 585  577  
[ 98 lines elided ]
 684  676          size_t nb;
 685  677          struct vnode *vp = seg->s_data;
 686  678  
 687  679          ASSERT(ppp != NULL);
 688  680  
 689  681          /*
 690  682           * If it is one of segkp pages, call into segkp.
 691  683           */
 692  684          if (segkp_bitmap && seg == &kvseg &&
 693  685              BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 694      -                return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));
      686 +                return (segop_pagelock(segkp, addr, len, ppp, type, rw));
 695  687  
 696  688          npages = btopr(len);
 697  689          nb = sizeof (page_t *) * npages;
 698  690  
 699  691          if (type == L_PAGEUNLOCK) {
 700  692                  pplist = *ppp;
 701  693                  ASSERT(pplist != NULL);
 702  694  
 703  695                  for (pg = 0; pg < npages; pg++) {
 704  696                          pp = pplist[pg];
[ 32 lines elided ]
 737  729   * This is a dummy segkmem function overloaded to call segkp
 738  730   * when segkp is under the heap.
 739  731   */
 740  732  /* ARGSUSED */
 741  733  static int
 742  734  segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
 743  735  {
 744  736          ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
 745  737  
 746  738          if (seg->s_as != &kas)
 747      -                segkmem_badop();
      739 +                panic("segkmem badop");
 748  740  
 749  741          /*
 750  742           * If it is one of segkp pages, call into segkp.
 751  743           */
 752  744          if (segkp_bitmap && seg == &kvseg &&
 753  745              BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
 754      -                return (SEGOP_GETMEMID(segkp, addr, memidp));
      746 +                return (segop_getmemid(segkp, addr, memidp));
 755  747  
 756      -        segkmem_badop();
      748 +        panic("segkmem badop");
 757  749          return (0);
 758  750  }
 759  751  
 760  752  /*ARGSUSED*/
 761      -static lgrp_mem_policy_info_t *
 762      -segkmem_getpolicy(struct seg *seg, caddr_t addr)
 763      -{
 764      -        return (NULL);
 765      -}
 766      -
 767      -/*ARGSUSED*/
 768  753  static int
 769  754  segkmem_capable(struct seg *seg, segcapability_t capability)
 770  755  {
 771  756          if (capability == S_CAPABILITY_NOMINFLT)
 772  757                  return (1);
 773  758          return (0);
 774  759  }
 775  760  
 776      -static struct seg_ops segkmem_ops = {
 777      -        SEGKMEM_BADOP(int),             /* dup */
 778      -        SEGKMEM_BADOP(int),             /* unmap */
 779      -        SEGKMEM_BADOP(void),            /* free */
 780      -        segkmem_fault,
 781      -        SEGKMEM_BADOP(faultcode_t),     /* faulta */
 782      -        segkmem_setprot,
 783      -        segkmem_checkprot,
 784      -        segkmem_kluster,
 785      -        SEGKMEM_BADOP(size_t),          /* swapout */
 786      -        SEGKMEM_BADOP(int),             /* sync */
 787      -        SEGKMEM_BADOP(size_t),          /* incore */
 788      -        SEGKMEM_BADOP(int),             /* lockop */
 789      -        SEGKMEM_BADOP(int),             /* getprot */
 790      -        SEGKMEM_BADOP(u_offset_t),      /* getoffset */
 791      -        SEGKMEM_BADOP(int),             /* gettype */
 792      -        SEGKMEM_BADOP(int),             /* getvp */
 793      -        SEGKMEM_BADOP(int),             /* advise */
 794      -        segkmem_dump,
 795      -        segkmem_pagelock,
 796      -        SEGKMEM_BADOP(int),             /* setpgsz */
 797      -        segkmem_getmemid,
 798      -        segkmem_getpolicy,              /* getpolicy */
 799      -        segkmem_capable,                /* capable */
 800      -        seg_inherit_notsup              /* inherit */
      761 +static const struct seg_ops segkmem_ops = {
      762 +        .fault          = segkmem_fault,
      763 +        .setprot        = segkmem_setprot,
      764 +        .checkprot      = segkmem_checkprot,
      765 +        .kluster        = segkmem_kluster,
      766 +        .dump           = segkmem_dump,
      767 +        .pagelock       = segkmem_pagelock,
      768 +        .getmemid       = segkmem_getmemid,
      769 +        .capable        = segkmem_capable,
 801  770  };
 802  771  
 803  772  int
 804  773  segkmem_zio_create(struct seg *seg)
 805  774  {
 806  775          ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
 807  776          seg->s_ops = &segkmem_ops;
 808  777          seg->s_data = &zvp;
 809  778          kas.a_size += seg->s_size;
 810  779          return (0);
[ 834 lines elided ]