Print this page
patch segpcache-maxwindow-is-useless
use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle NULL dump segop function pointer as a no-op shorthand.
const-ify make segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENODEV, handle NULL getmemid segop function pointer as
"return ENODEV" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle NULL capable segop function pointer as "no capabilities
supported" shorthand.
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
instead of using SEGOP_* macros, define full-fledged segop_* functions
This will allow us to do some sanity checking or even implement stub
functionality in one place instead of duplicating it wherever these wrappers
are used.
@@ -124,11 +124,10 @@
/*
* The following variables can be tuned via /etc/system.
*/
int segpcache_enabled = 1; /* if 1, shadow lists are cached */
-pgcnt_t segpcache_maxwindow = 0; /* max # of pages that can be cached */
ulong_t segpcache_hashsize_win = 0; /* # of non wired buckets */
ulong_t segpcache_hashsize_wired = 0; /* # of wired buckets */
int segpcache_reap_sec = 1; /* reap check rate in secs */
clock_t segpcache_reap_ticks = 0; /* reap interval in ticks */
int segpcache_pcp_maxage_sec = 1; /* pcp max age in secs */
@@ -147,18 +146,17 @@
/*
* Keep frequently used variables together in one cache line.
*/
static struct p_ctrl1 {
uint_t p_disabled; /* if not 0, caching temporarily off */
- pgcnt_t p_maxwin; /* max # of pages that can be cached */
size_t p_hashwin_sz; /* # of non wired buckets */
struct seg_phash *p_htabwin; /* hash table for non wired entries */
size_t p_hashwired_sz; /* # of wired buckets */
struct seg_phash_wired *p_htabwired; /* hash table for wired entries */
kmem_cache_t *p_kmcache; /* kmem cache for seg_pcache structs */
#ifdef _LP64
- ulong_t pad[1];
+ ulong_t pad[2];
#endif /* _LP64 */
} pctrl1;
static struct p_ctrl2 {
kmutex_t p_mem_mtx; /* protects window counter and p_halinks */
@@ -179,11 +177,10 @@
ulong_t pad[3];
#endif /* _LP64 */
} pctrl3;
#define seg_pdisabled pctrl1.p_disabled
-#define seg_pmaxwindow pctrl1.p_maxwin
#define seg_phashsize_win pctrl1.p_hashwin_sz
#define seg_phashtab_win pctrl1.p_htabwin
#define seg_phashsize_wired pctrl1.p_hashwired_sz
#define seg_phashtab_wired pctrl1.p_htabwired
#define seg_pkmcache pctrl1.p_kmcache
@@ -203,12 +200,12 @@
#define P_HASHWIRED_MASK (seg_phashsize_wired - 1)
#define P_BASESHIFT (6)
kthread_t *seg_pasync_thr;
-extern struct seg_ops segvn_ops;
-extern struct seg_ops segspt_shmops;
+extern const struct seg_ops segvn_ops;
+extern const struct seg_ops segspt_shmops;
#define IS_PFLAGS_WIRED(flags) ((flags) & SEGP_FORCE_WIRED)
#define IS_PCP_WIRED(pcp) IS_PFLAGS_WIRED((pcp)->p_flags)
#define LBOLT_DELTA(t) ((ulong_t)(ddi_get_lbolt() - (t)))
@@ -754,14 +751,10 @@
if (IS_PFLAGS_WIRED(flags)) {
return (SEGP_SUCCESS);
}
- if (seg_plocked_window + btop(len) > seg_pmaxwindow) {
- return (SEGP_FAIL);
- }
-
if (freemem < desfree) {
return (SEGP_FAIL);
}
return (SEGP_SUCCESS);
@@ -827,14 +820,10 @@
ASSERT((len & PAGEOFFSET) == 0);
npages = btop(len);
mutex_enter(&seg_pmem_mtx);
if (!IS_PFLAGS_WIRED(flags)) {
- if (seg_plocked_window + npages > seg_pmaxwindow) {
- mutex_exit(&seg_pmem_mtx);
- return (SEGP_FAIL);
- }
seg_plocked_window += npages;
}
seg_plocked += npages;
mutex_exit(&seg_pmem_mtx);
@@ -946,11 +935,10 @@
int hlinks = 0;
int hlix;
pcache_link_t *hlinkp;
pcache_link_t *hlnextp = NULL;
int lowmem;
- int trim;
ASSERT(seg_phashsize_win != 0);
/*
* if the cache is off or empty, return
@@ -959,11 +947,10 @@
return;
}
if (!force) {
lowmem = 0;
- trim = 0;
if (freemem < lotsfree + needfree) {
spgcnt_t fmem = MAX((spgcnt_t)(freemem - needfree), 0);
if (fmem <= 5 * (desfree >> 2)) {
lowmem = 1;
} else if (fmem <= 7 * (lotsfree >> 3)) {
@@ -976,14 +963,11 @@
3 * (availrmem_initial >> 2)) {
lowmem = 1;
}
}
}
- if (seg_plocked_window >= 7 * (seg_pmaxwindow >> 3)) {
- trim = 1;
- }
- if (!lowmem && !trim) {
+ if (!lowmem) {
return;
}
npgs_to_purge = seg_plocked_window >>
seg_pshrink_shift;
if (lowmem) {
@@ -1107,11 +1091,11 @@
}
if (!force) {
if (npgs_purged >= npgs_to_purge) {
break;
}
- if (!trim && !(seg_pathr_full_ahb & 15)) {
+ if (!(seg_pathr_full_ahb & 15)) {
ASSERT(lowmem);
if (freemem >= lotsfree + needfree) {
break;
}
}
@@ -1468,29 +1452,10 @@
hp->p_hnext = (struct seg_pcache *)hp;
hp->p_hprev = (struct seg_pcache *)hp;
mutex_init(&hp->p_hmutex, NULL, MUTEX_DEFAULT, NULL);
}
- if (segpcache_maxwindow == 0) {
- if (physmegs < 64) {
- /* 3% of memory */
- segpcache_maxwindow = availrmem >> 5;
- } else if (physmegs < 512) {
- /* 12% of memory */
- segpcache_maxwindow = availrmem >> 3;
- } else if (physmegs < 1024) {
- /* 25% of memory */
- segpcache_maxwindow = availrmem >> 2;
- } else if (physmegs < 2048) {
- /* 50% of memory */
- segpcache_maxwindow = availrmem >> 1;
- } else {
- /* no limit */
- segpcache_maxwindow = (pgcnt_t)-1;
- }
- }
- seg_pmaxwindow = segpcache_maxwindow;
seg_pinit_mem_config();
}
/*
* called by pageout if memory is low
@@ -1634,11 +1599,11 @@
}
/*
* Unmap a segment and free it from its associated address space.
* This should be called by anybody who's finished with a whole segment's
- * mapping. Just calls SEGOP_UNMAP() on the whole mapping . It is the
+ * mapping. Just calls segop_unmap() on the whole mapping . It is the
* responsibility of the segment driver to unlink the the segment
* from the address space, and to free public and private data structures
* associated with the segment. (This is typically done by a call to
* seg_free()).
*/
@@ -1654,14 +1619,14 @@
/* Shouldn't have called seg_unmap if mapping isn't yet established */
ASSERT(seg->s_data != NULL);
/* Unmap the whole mapping */
#ifdef DEBUG
- ret = SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
+ ret = segop_unmap(seg, seg->s_base, seg->s_size);
ASSERT(ret == 0);
#else
- SEGOP_UNMAP(seg, seg->s_base, seg->s_size);
+ segop_unmap(seg, seg->s_base, seg->s_size);
#endif /* DEBUG */
}
/*
* Free the segment from its associated as. This should only be called
@@ -1683,11 +1648,11 @@
/*
* If the segment private data field is NULL,
* then segment driver is not attached yet.
*/
if (seg->s_data != NULL)
- SEGOP_FREE(seg);
+ segop_free(seg);
mutex_destroy(&seg->s_pmtx);
ASSERT(seg->s_phead.p_lnext == &seg->s_phead);
ASSERT(seg->s_phead.p_lprev == &seg->s_phead);
kmem_cache_free(seg_cache, seg);
@@ -1852,13 +1817,199 @@
}
return (swap);
}
/*
- * General not supported function for SEGOP_INHERIT
+ * segop wrappers
*/
-/* ARGSUSED */
int
-seg_inherit_notsup(struct seg *seg, caddr_t addr, size_t len, uint_t op)
+segop_dup(struct seg *seg, struct seg *new)
+{
+ VERIFY3P(seg->s_ops->dup, !=, NULL);
+
+ return (seg->s_ops->dup(seg, new));
+}
+
+int
+segop_unmap(struct seg *seg, caddr_t addr, size_t len)
{
+ VERIFY3P(seg->s_ops->unmap, !=, NULL);
+
+ return (seg->s_ops->unmap(seg, addr, len));
+}
+
+void
+segop_free(struct seg *seg)
+{
+ VERIFY3P(seg->s_ops->free, !=, NULL);
+
+ seg->s_ops->free(seg);
+}
+
+faultcode_t
+segop_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
+ enum fault_type type, enum seg_rw rw)
+{
+ VERIFY3P(seg->s_ops->fault, !=, NULL);
+
+ return (seg->s_ops->fault(hat, seg, addr, len, type, rw));
+}
+
+faultcode_t
+segop_faulta(struct seg *seg, caddr_t addr)
+{
+ VERIFY3P(seg->s_ops->faulta, !=, NULL);
+
+ return (seg->s_ops->faulta(seg, addr));
+}
+
+int
+segop_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
+{
+ VERIFY3P(seg->s_ops->setprot, !=, NULL);
+
+ return (seg->s_ops->setprot(seg, addr, len, prot));
+}
+
+int
+segop_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
+{
+ VERIFY3P(seg->s_ops->checkprot, !=, NULL);
+
+ return (seg->s_ops->checkprot(seg, addr, len, prot));
+}
+
+int
+segop_kluster(struct seg *seg, caddr_t addr, ssize_t d)
+{
+ VERIFY3P(seg->s_ops->kluster, !=, NULL);
+
+ return (seg->s_ops->kluster(seg, addr, d));
+}
+
+int
+segop_sync(struct seg *seg, caddr_t addr, size_t len, int atr, uint_t f)
+{
+ VERIFY3P(seg->s_ops->sync, !=, NULL);
+
+ return (seg->s_ops->sync(seg, addr, len, atr, f));
+}
+
+size_t
+segop_incore(struct seg *seg, caddr_t addr, size_t len, char *v)
+{
+ VERIFY3P(seg->s_ops->incore, !=, NULL);
+
+ return (seg->s_ops->incore(seg, addr, len, v));
+}
+
+int
+segop_lockop(struct seg *seg, caddr_t addr, size_t len, int atr, int op,
+ ulong_t *b, size_t p)
+{
+ VERIFY3P(seg->s_ops->lockop, !=, NULL);
+
+ return (seg->s_ops->lockop(seg, addr, len, atr, op, b, p));
+}
+
+int
+segop_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *p)
+{
+ VERIFY3P(seg->s_ops->getprot, !=, NULL);
+
+ return (seg->s_ops->getprot(seg, addr, len, p));
+}
+
+u_offset_t
+segop_getoffset(struct seg *seg, caddr_t addr)
+{
+ VERIFY3P(seg->s_ops->getoffset, !=, NULL);
+
+ return (seg->s_ops->getoffset(seg, addr));
+}
+
+int
+segop_gettype(struct seg *seg, caddr_t addr)
+{
+ VERIFY3P(seg->s_ops->gettype, !=, NULL);
+
+ return (seg->s_ops->gettype(seg, addr));
+}
+
+int
+segop_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
+{
+ VERIFY3P(seg->s_ops->getvp, !=, NULL);
+
+ return (seg->s_ops->getvp(seg, addr, vpp));
+}
+
+int
+segop_advise(struct seg *seg, caddr_t addr, size_t len, uint_t b)
+{
+ VERIFY3P(seg->s_ops->advise, !=, NULL);
+
+ return (seg->s_ops->advise(seg, addr, len, b));
+}
+
+void
+segop_dump(struct seg *seg)
+{
+ if (seg->s_ops->dump == NULL)
+ return;
+
+ seg->s_ops->dump(seg);
+}
+
+int
+segop_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***page,
+ enum lock_type type, enum seg_rw rw)
+{
+ VERIFY3P(seg->s_ops->pagelock, !=, NULL);
+
+ return (seg->s_ops->pagelock(seg, addr, len, page, type, rw));
+}
+
+int
+segop_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
+{
+ if (seg->s_ops->setpagesize == NULL)
+ return (ENOTSUP);
+
+ return (seg->s_ops->setpagesize(seg, addr, len, szc));
+}
+
+int
+segop_getmemid(struct seg *seg, caddr_t addr, memid_t *mp)
+{
+ if (seg->s_ops->getmemid == NULL)
+ return (ENODEV);
+
+ return (seg->s_ops->getmemid(seg, addr, mp));
+}
+
+struct lgrp_mem_policy_info *
+segop_getpolicy(struct seg *seg, caddr_t addr)
+{
+ if (seg->s_ops->getpolicy == NULL)
+ return (NULL);
+
+ return (seg->s_ops->getpolicy(seg, addr));
+}
+
+int
+segop_capable(struct seg *seg, segcapability_t cap)
+{
+ if (seg->s_ops->capable == NULL)
+ return (0);
+
+ return (seg->s_ops->capable(seg, cap));
+}
+
+int
+segop_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t op)
+{
+ if (seg->s_ops->inherit == NULL)
return (ENOTSUP);
+
+ return (seg->s_ops->inherit(seg, addr, len, op));
}