Print this page
patch segpcache-maxwindow-is-useless
use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle NULL dump segop function pointer as a no-op shorthand.
const-ify make segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENODEV, handle NULL getmemid segop function pointer as
"return ENODEV" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle NULL capable segop function pointer as "no capabilities
supported" shorthand.
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
instead of using SEGOP_* macros, define full-fledged segop_* functions
This will allow us to do some sanity checking or even implement stub
functionality in one place instead of duplicating it wherever these wrappers
are used.

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/vm/vm_seg.c
          +++ new/usr/src/uts/common/vm/vm_seg.c
↓ open down ↓ 118 lines elided ↑ open up ↑
 119  119  /*
 120  120   * log2(fraction of pcache to reclaim at a time).
 121  121   */
 122  122  #define P_SHRINK_SHFT           (5)
 123  123  
 124  124  /*
 125  125   * The following variables can be tuned via /etc/system.
 126  126   */
 127  127  
 128  128  int     segpcache_enabled = 1;          /* if 1, shadow lists are cached */
 129      -pgcnt_t segpcache_maxwindow = 0;        /* max # of pages that can be cached */
 130  129  ulong_t segpcache_hashsize_win = 0;     /* # of non wired buckets */
 131  130  ulong_t segpcache_hashsize_wired = 0;   /* # of wired buckets */
 132  131  int     segpcache_reap_sec = 1;         /* reap check rate in secs */
 133  132  clock_t segpcache_reap_ticks = 0;       /* reap interval in ticks */
 134  133  int     segpcache_pcp_maxage_sec = 1;   /* pcp max age in secs */
 135  134  clock_t segpcache_pcp_maxage_ticks = 0; /* pcp max age in ticks */
 136  135  int     segpcache_shrink_shift = P_SHRINK_SHFT; /* log2 reap fraction */
 137  136  pgcnt_t segpcache_maxapurge_bytes = P_MAX_APURGE_BYTES; /* max purge bytes */
 138  137  
 139  138  static kmutex_t seg_pcache_mtx; /* protects seg_pdisabled counter */
↓ open down ↓ 2 lines elided ↑ open up ↑
 142  141  
 143  142  #pragma align 64(pctrl1)
 144  143  #pragma align 64(pctrl2)
 145  144  #pragma align 64(pctrl3)
 146  145  
 147  146  /*
 148  147   * Keep frequently used variables together in one cache line.
 149  148   */
 150  149  static struct p_ctrl1 {
 151  150          uint_t p_disabled;              /* if not 0, caching temporarily off */
 152      -        pgcnt_t p_maxwin;               /* max # of pages that can be cached */
 153  151          size_t p_hashwin_sz;            /* # of non wired buckets */
 154  152          struct seg_phash *p_htabwin;    /* hash table for non wired entries */
 155  153          size_t p_hashwired_sz;          /* # of wired buckets */
 156  154          struct seg_phash_wired *p_htabwired; /* hash table for wired entries */
 157  155          kmem_cache_t *p_kmcache;        /* kmem cache for seg_pcache structs */
 158  156  #ifdef _LP64
 159      -        ulong_t pad[1];
      157 +        ulong_t pad[2];
 160  158  #endif /* _LP64 */
 161  159  } pctrl1;
 162  160  
 163  161  static struct p_ctrl2 {
 164  162          kmutex_t p_mem_mtx;     /* protects window counter and p_halinks */
 165  163          pgcnt_t  p_locked_win;  /* # pages from window */
 166  164          pgcnt_t  p_locked;      /* # of pages cached by pagelock */
 167  165          uchar_t  p_ahcur;       /* current active links for insert/delete */
 168  166          uchar_t  p_athr_on;     /* async reclaim thread is running. */
 169  167          pcache_link_t p_ahhead[2]; /* active buckets linkages */
↓ open down ↓ 4 lines elided ↑ open up ↑
 174  172          ulong_t p_athr_empty_ahb;       /* athread walk stats */
 175  173          ulong_t p_athr_full_ahb;        /* athread walk stats */
 176  174          pgcnt_t p_maxapurge_npages;     /* max pages to purge at a time */
 177  175          int     p_shrink_shft;          /* reap shift factor */
 178  176  #ifdef _LP64
 179  177          ulong_t pad[3];
 180  178  #endif /* _LP64 */
 181  179  } pctrl3;
 182  180  
 183  181  #define seg_pdisabled                   pctrl1.p_disabled
 184      -#define seg_pmaxwindow                  pctrl1.p_maxwin
 185  182  #define seg_phashsize_win               pctrl1.p_hashwin_sz
 186  183  #define seg_phashtab_win                pctrl1.p_htabwin
 187  184  #define seg_phashsize_wired             pctrl1.p_hashwired_sz
 188  185  #define seg_phashtab_wired              pctrl1.p_htabwired
 189  186  #define seg_pkmcache                    pctrl1.p_kmcache
 190  187  #define seg_pmem_mtx                    pctrl2.p_mem_mtx
 191  188  #define seg_plocked_window              pctrl2.p_locked_win
 192  189  #define seg_plocked                     pctrl2.p_locked
 193  190  #define seg_pahcur                      pctrl2.p_ahcur
 194  191  #define seg_pathr_on                    pctrl2.p_athr_on
↓ open down ↓ 3 lines elided ↑ open up ↑
 198  195  #define seg_pathr_full_ahb              pctrl3.p_athr_full_ahb
 199  196  #define seg_pshrink_shift               pctrl3.p_shrink_shft
 200  197  #define seg_pmaxapurge_npages           pctrl3.p_maxapurge_npages
 201  198  
 202  199  #define P_HASHWIN_MASK                  (seg_phashsize_win - 1)
 203  200  #define P_HASHWIRED_MASK                (seg_phashsize_wired - 1)
 204  201  #define P_BASESHIFT                     (6)
 205  202  
 206  203  kthread_t *seg_pasync_thr;
 207  204  
 208      -extern struct seg_ops segvn_ops;
 209      -extern struct seg_ops segspt_shmops;
      205 +extern const struct seg_ops segvn_ops;
      206 +extern const struct seg_ops segspt_shmops;
 210  207  
 211  208  #define IS_PFLAGS_WIRED(flags) ((flags) & SEGP_FORCE_WIRED)
 212  209  #define IS_PCP_WIRED(pcp) IS_PFLAGS_WIRED((pcp)->p_flags)
 213  210  
 214  211  #define LBOLT_DELTA(t)  ((ulong_t)(ddi_get_lbolt() - (t)))
 215  212  
 216  213  #define PCP_AGE(pcp)    LBOLT_DELTA((pcp)->p_lbolt)
 217  214  
 218  215  /*
 219  216   * htag0 argument can be a seg or amp pointer.
↓ open down ↓ 529 lines elided ↑ open up ↑
 749  746  
 750  747          if (seg_pdisabled) {
 751  748                  return (SEGP_FAIL);
 752  749          }
 753  750          ASSERT(seg_phashsize_win != 0);
 754  751  
 755  752          if (IS_PFLAGS_WIRED(flags)) {
 756  753                  return (SEGP_SUCCESS);
 757  754          }
 758  755  
 759      -        if (seg_plocked_window + btop(len) > seg_pmaxwindow) {
 760      -                return (SEGP_FAIL);
 761      -        }
 762      -
 763  756          if (freemem < desfree) {
 764  757                  return (SEGP_FAIL);
 765  758          }
 766  759  
 767  760          return (SEGP_SUCCESS);
 768  761  }
 769  762  
 770  763  #ifdef DEBUG
 771  764  static uint32_t p_insert_mtbf = 0;
 772  765  #endif
↓ open down ↓ 49 lines elided ↑ open up ↑
 822  815  
 823  816          if (seg_pdisabled) {
 824  817                  return (SEGP_FAIL);
 825  818          }
 826  819          ASSERT(seg_phashsize_win != 0);
 827  820  
 828  821          ASSERT((len & PAGEOFFSET) == 0);
 829  822          npages = btop(len);
 830  823          mutex_enter(&seg_pmem_mtx);
 831  824          if (!IS_PFLAGS_WIRED(flags)) {
 832      -                if (seg_plocked_window + npages > seg_pmaxwindow) {
 833      -                        mutex_exit(&seg_pmem_mtx);
 834      -                        return (SEGP_FAIL);
 835      -                }
 836  825                  seg_plocked_window += npages;
 837  826          }
 838  827          seg_plocked += npages;
 839  828          mutex_exit(&seg_pmem_mtx);
 840  829  
 841  830          pcp = kmem_cache_alloc(seg_pkmcache, KM_SLEEP);
 842  831          /*
 843  832           * If amp is not NULL set htag0 to amp otherwise set it to seg.
 844  833           */
 845  834          if (amp == NULL) {
↓ open down ↓ 95 lines elided ↑ open up ↑
 941  930          struct seg_phash *hp;
 942  931          pgcnt_t npages = 0;
 943  932          pgcnt_t npages_window = 0;
 944  933          pgcnt_t npgs_to_purge;
 945  934          pgcnt_t npgs_purged = 0;
 946  935          int hlinks = 0;
 947  936          int hlix;
 948  937          pcache_link_t *hlinkp;
 949  938          pcache_link_t *hlnextp = NULL;
 950  939          int lowmem;
 951      -        int trim;
 952  940  
 953  941          ASSERT(seg_phashsize_win != 0);
 954  942  
 955  943          /*
 956  944           * if the cache is off or empty, return
 957  945           */
 958  946          if (seg_plocked == 0 || (!force && seg_plocked_window == 0)) {
 959  947                  return;
 960  948          }
 961  949  
 962  950          if (!force) {
 963  951                  lowmem = 0;
 964      -                trim = 0;
 965  952                  if (freemem < lotsfree + needfree) {
 966  953                          spgcnt_t fmem = MAX((spgcnt_t)(freemem - needfree), 0);
 967  954                          if (fmem <= 5 * (desfree >> 2)) {
 968  955                                  lowmem = 1;
 969  956                          } else if (fmem <= 7 * (lotsfree >> 3)) {
 970  957                                  if (seg_plocked_window >=
 971  958                                      (availrmem_initial >> 1)) {
 972  959                                          lowmem = 1;
 973  960                                  }
 974  961                          } else if (fmem < lotsfree) {
 975  962                                  if (seg_plocked_window >=
 976  963                                      3 * (availrmem_initial >> 2)) {
 977  964                                          lowmem = 1;
 978  965                                  }
 979  966                          }
 980  967                  }
 981      -                if (seg_plocked_window >= 7 * (seg_pmaxwindow >> 3)) {
 982      -                        trim = 1;
 983      -                }
 984      -                if (!lowmem && !trim) {
      968 +                if (!lowmem) {
 985  969                          return;
 986  970                  }
 987  971                  npgs_to_purge = seg_plocked_window >>
 988  972                      seg_pshrink_shift;
 989  973                  if (lowmem) {
 990  974                          npgs_to_purge = MIN(npgs_to_purge,
 991  975                              MAX(seg_pmaxapurge_npages, desfree));
 992  976                  } else {
 993  977                          npgs_to_purge = MIN(npgs_to_purge,
 994  978                              seg_pmaxapurge_npages);
↓ open down ↓ 107 lines elided ↑ open up ↑
1102 1086                          seg_premove_abuck(hp, 1);
1103 1087                  }
1104 1088                  mutex_exit(&hp->p_hmutex);
1105 1089                  if (npgs_purged >= seg_plocked_window) {
1106 1090                          break;
1107 1091                  }
1108 1092                  if (!force) {
1109 1093                          if (npgs_purged >= npgs_to_purge) {
1110 1094                                  break;
1111 1095                          }
1112      -                        if (!trim && !(seg_pathr_full_ahb & 15)) {
     1096 +                        if (!(seg_pathr_full_ahb & 15)) {
1113 1097                                  ASSERT(lowmem);
1114 1098                                  if (freemem >= lotsfree + needfree) {
1115 1099                                          break;
1116 1100                                  }
1117 1101                          }
1118 1102                  }
1119 1103          }
1120 1104  
1121 1105          if (hlinkp == &seg_pahhead[hlix]) {
1122 1106                  /*
↓ open down ↓ 340 lines elided ↑ open up ↑
1463 1447          seg_phashsize_wired = segpcache_hashsize_wired;
1464 1448          seg_phashtab_wired = kmem_zalloc(
1465 1449              seg_phashsize_wired * sizeof (struct seg_phash_wired), KM_SLEEP);
1466 1450          for (i = 0; i < seg_phashsize_wired; i++) {
1467 1451                  hp = (struct seg_phash *)&seg_phashtab_wired[i];
1468 1452                  hp->p_hnext = (struct seg_pcache *)hp;
1469 1453                  hp->p_hprev = (struct seg_pcache *)hp;
1470 1454                  mutex_init(&hp->p_hmutex, NULL, MUTEX_DEFAULT, NULL);
1471 1455          }
1472 1456  
1473      -        if (segpcache_maxwindow == 0) {
1474      -                if (physmegs < 64) {
1475      -                        /* 3% of memory */
1476      -                        segpcache_maxwindow = availrmem >> 5;
1477      -                } else if (physmegs < 512) {
1478      -                        /* 12% of memory */
1479      -                        segpcache_maxwindow = availrmem >> 3;
1480      -                } else if (physmegs < 1024) {
1481      -                        /* 25% of memory */
1482      -                        segpcache_maxwindow = availrmem >> 2;
1483      -                } else if (physmegs < 2048) {
1484      -                        /* 50% of memory */
1485      -                        segpcache_maxwindow = availrmem >> 1;
1486      -                } else {
1487      -                        /* no limit */
1488      -                        segpcache_maxwindow = (pgcnt_t)-1;
1489      -                }
1490      -        }
1491      -        seg_pmaxwindow = segpcache_maxwindow;
1492 1457          seg_pinit_mem_config();
1493 1458  }
1494 1459  
1495 1460  /*
1496 1461   * called by pageout if memory is low
1497 1462   */
1498 1463  void
1499 1464  seg_preap(void)
1500 1465  {
1501 1466          /*
↓ open down ↓ 127 lines elided ↑ open up ↑
 1629 1594           * as_addseg() will add the segment at the appropriate point
1630 1595           * in the list. It will return -1 if there is overlap with
1631 1596           * an already existing segment.
1632 1597           */
1633 1598          return (as_addseg(as, seg));
1634 1599  }
1635 1600  
1636 1601  /*
1637 1602   * Unmap a segment and free it from its associated address space.
1638 1603   * This should be called by anybody who's finished with a whole segment's
1639      - * mapping.  Just calls SEGOP_UNMAP() on the whole mapping .  It is the
      1604 + * mapping.  Just calls segop_unmap() on the whole mapping.  It is the
 1640 1605   * responsibility of the segment driver to unlink the segment
1641 1606   * from the address space, and to free public and private data structures
1642 1607   * associated with the segment.  (This is typically done by a call to
1643 1608   * seg_free()).
1644 1609   */
1645 1610  void
1646 1611  seg_unmap(struct seg *seg)
1647 1612  {
1648 1613  #ifdef DEBUG
1649 1614          int ret;
1650 1615  #endif /* DEBUG */
1651 1616  
1652 1617          ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1653 1618  
1654 1619          /* Shouldn't have called seg_unmap if mapping isn't yet established */
1655 1620          ASSERT(seg->s_data != NULL);
1656 1621  
1657 1622          /* Unmap the whole mapping */
1658 1623  #ifdef DEBUG
1659      -        ret = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
     1624 +        ret = segop_unmap(seg, seg->s_base, seg->s_size);
1660 1625          ASSERT(ret == 0);
1661 1626  #else
1662      -        SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
     1627 +        segop_unmap(seg, seg->s_base, seg->s_size);
1663 1628  #endif /* DEBUG */
1664 1629  }
1665 1630  
1666 1631  /*
1667 1632   * Free the segment from its associated as. This should only be called
1668 1633   * if a mapping to the segment has not yet been established (e.g., if
1669 1634   * an error occurs in the middle of doing an as_map when the segment
1670 1635   * has already been partially set up) or if it has already been deleted
1671 1636   * (e.g., from a segment driver unmap routine if the unmap applies to the
1672 1637   * entire segment). If the mapping is currently set up then seg_unmap() should
↓ open down ↓ 5 lines elided ↑ open up ↑
1678 1643          register struct as *as = seg->s_as;
1679 1644          struct seg *tseg = as_removeseg(as, seg);
1680 1645  
1681 1646          ASSERT(tseg == seg);
1682 1647  
1683 1648          /*
1684 1649           * If the segment private data field is NULL,
1685 1650           * then segment driver is not attached yet.
1686 1651           */
1687 1652          if (seg->s_data != NULL)
1688      -                SEGOP_FREE(seg);
     1653 +                segop_free(seg);
1689 1654  
1690 1655          mutex_destroy(&seg->s_pmtx);
1691 1656          ASSERT(seg->s_phead.p_lnext == &seg->s_phead);
1692 1657          ASSERT(seg->s_phead.p_lprev == &seg->s_phead);
1693 1658          kmem_cache_free(seg_cache, seg);
1694 1659  }
1695 1660  
1696 1661  /*ARGSUSED*/
1697 1662  static void
1698 1663  seg_p_mem_config_post_add(
↓ open down ↓ 148 lines elided ↑ open up ↑
1847 1812  
1848 1813          if (seg->s_ops == &segvn_ops) {
1849 1814                  svd = (struct segvn_data *)seg->s_data;
1850 1815                  if (svd->type == MAP_PRIVATE && svd->swresv > 0)
1851 1816                          swap = svd->swresv;
1852 1817          }
1853 1818          return (swap);
1854 1819  }
1855 1820  
1856 1821  /*
1857      - * General not supported function for SEGOP_INHERIT
     1822 + * segop wrappers
1858 1823   */
1859      -/* ARGSUSED */
1860 1824  int
1861      -seg_inherit_notsup(struct seg *seg, caddr_t addr, size_t len, uint_t op)
     1825 +segop_dup(struct seg *seg, struct seg *new)
     1826 +{
     1827 +        VERIFY3P(seg->s_ops->dup, !=, NULL);
     1828 +
     1829 +        return (seg->s_ops->dup(seg, new));
     1830 +}
     1831 +
     1832 +int
     1833 +segop_unmap(struct seg *seg, caddr_t addr, size_t len)
     1834 +{
     1835 +        VERIFY3P(seg->s_ops->unmap, !=, NULL);
     1836 +
     1837 +        return (seg->s_ops->unmap(seg, addr, len));
     1838 +}
     1839 +
     1840 +void
     1841 +segop_free(struct seg *seg)
     1842 +{
     1843 +        VERIFY3P(seg->s_ops->free, !=, NULL);
     1844 +
     1845 +        seg->s_ops->free(seg);
     1846 +}
     1847 +
     1848 +faultcode_t
     1849 +segop_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
     1850 +    enum fault_type type, enum seg_rw rw)
     1851 +{
     1852 +        VERIFY3P(seg->s_ops->fault, !=, NULL);
     1853 +
     1854 +        return (seg->s_ops->fault(hat, seg, addr, len, type, rw));
     1855 +}
     1856 +
     1857 +faultcode_t
     1858 +segop_faulta(struct seg *seg, caddr_t addr)
     1859 +{
     1860 +        VERIFY3P(seg->s_ops->faulta, !=, NULL);
     1861 +
     1862 +        return (seg->s_ops->faulta(seg, addr));
     1863 +}
     1864 +
     1865 +int
     1866 +segop_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
     1867 +{
     1868 +        VERIFY3P(seg->s_ops->setprot, !=, NULL);
     1869 +
     1870 +        return (seg->s_ops->setprot(seg, addr, len, prot));
     1871 +}
     1872 +
     1873 +int
     1874 +segop_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
     1875 +{
     1876 +        VERIFY3P(seg->s_ops->checkprot, !=, NULL);
     1877 +
     1878 +        return (seg->s_ops->checkprot(seg, addr, len, prot));
     1879 +}
     1880 +
     1881 +int
     1882 +segop_kluster(struct seg *seg, caddr_t addr, ssize_t d)
     1883 +{
     1884 +        VERIFY3P(seg->s_ops->kluster, !=, NULL);
     1885 +
     1886 +        return (seg->s_ops->kluster(seg, addr, d));
     1887 +}
     1888 +
     1889 +int
     1890 +segop_sync(struct seg *seg, caddr_t addr, size_t len, int atr, uint_t f)
     1891 +{
     1892 +        VERIFY3P(seg->s_ops->sync, !=, NULL);
     1893 +
     1894 +        return (seg->s_ops->sync(seg, addr, len, atr, f));
     1895 +}
     1896 +
     1897 +size_t
     1898 +segop_incore(struct seg *seg, caddr_t addr, size_t len, char *v)
1862 1899  {
1863      -        return (ENOTSUP);
     1900 +        VERIFY3P(seg->s_ops->incore, !=, NULL);
     1901 +
     1902 +        return (seg->s_ops->incore(seg, addr, len, v));
     1903 +}
     1904 +
     1905 +int
     1906 +segop_lockop(struct seg *seg, caddr_t addr, size_t len, int atr, int op,
     1907 +    ulong_t *b, size_t p)
     1908 +{
     1909 +        VERIFY3P(seg->s_ops->lockop, !=, NULL);
     1910 +
     1911 +        return (seg->s_ops->lockop(seg, addr, len, atr, op, b, p));
     1912 +}
     1913 +
     1914 +int
     1915 +segop_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *p)
     1916 +{
     1917 +        VERIFY3P(seg->s_ops->getprot, !=, NULL);
     1918 +
     1919 +        return (seg->s_ops->getprot(seg, addr, len, p));
     1920 +}
     1921 +
     1922 +u_offset_t
     1923 +segop_getoffset(struct seg *seg, caddr_t addr)
     1924 +{
     1925 +        VERIFY3P(seg->s_ops->getoffset, !=, NULL);
     1926 +
     1927 +        return (seg->s_ops->getoffset(seg, addr));
     1928 +}
     1929 +
     1930 +int
     1931 +segop_gettype(struct seg *seg, caddr_t addr)
     1932 +{
     1933 +        VERIFY3P(seg->s_ops->gettype, !=, NULL);
     1934 +
     1935 +        return (seg->s_ops->gettype(seg, addr));
     1936 +}
     1937 +
     1938 +int
     1939 +segop_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
     1940 +{
     1941 +        VERIFY3P(seg->s_ops->getvp, !=, NULL);
     1942 +
     1943 +        return (seg->s_ops->getvp(seg, addr, vpp));
     1944 +}
     1945 +
     1946 +int
     1947 +segop_advise(struct seg *seg, caddr_t addr, size_t len, uint_t b)
     1948 +{
     1949 +        VERIFY3P(seg->s_ops->advise, !=, NULL);
     1950 +
     1951 +        return (seg->s_ops->advise(seg, addr, len, b));
     1952 +}
     1953 +
     1954 +void
     1955 +segop_dump(struct seg *seg)
     1956 +{
     1957 +        if (seg->s_ops->dump == NULL)
     1958 +                return;
     1959 +
     1960 +        seg->s_ops->dump(seg);
     1961 +}
     1962 +
     1963 +int
     1964 +segop_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***page,
     1965 +    enum lock_type type, enum seg_rw rw)
     1966 +{
     1967 +        VERIFY3P(seg->s_ops->pagelock, !=, NULL);
     1968 +
     1969 +        return (seg->s_ops->pagelock(seg, addr, len, page, type, rw));
     1970 +}
     1971 +
     1972 +int
     1973 +segop_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
     1974 +{
     1975 +        if (seg->s_ops->setpagesize == NULL)
     1976 +                return (ENOTSUP);
     1977 +
     1978 +        return (seg->s_ops->setpagesize(seg, addr, len, szc));
     1979 +}
     1980 +
     1981 +int
     1982 +segop_getmemid(struct seg *seg, caddr_t addr, memid_t *mp)
     1983 +{
     1984 +        if (seg->s_ops->getmemid == NULL)
     1985 +                return (ENODEV);
     1986 +
     1987 +        return (seg->s_ops->getmemid(seg, addr, mp));
     1988 +}
     1989 +
     1990 +struct lgrp_mem_policy_info *
     1991 +segop_getpolicy(struct seg *seg, caddr_t addr)
     1992 +{
     1993 +        if (seg->s_ops->getpolicy == NULL)
     1994 +                return (NULL);
     1995 +
     1996 +        return (seg->s_ops->getpolicy(seg, addr));
     1997 +}
     1998 +
     1999 +int
     2000 +segop_capable(struct seg *seg, segcapability_t cap)
     2001 +{
     2002 +        if (seg->s_ops->capable == NULL)
     2003 +                return (0);
     2004 +
     2005 +        return (seg->s_ops->capable(seg, cap));
     2006 +}
     2007 +
     2008 +int
     2009 +segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
     2010 +{
     2011 +        if (seg->s_ops->inherit == NULL)
     2012 +                return (ENOTSUP);
     2013 +
     2014 +        return (seg->s_ops->inherit(seg, addr, len, op));
1864 2015  }
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX