Print this page
use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle NULL dump segop function pointer as a no-op shorthand.
const-ify make segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle NULL capable segop function pointer as "no capabilities
supported" shorthand.
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory. The code is there and in theory it runs when we get *extremely* low
on memory. In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
*** 97,107 ****
size_t softlockcnt;
domid_t domid;
segmf_map_t *map;
};
! static struct seg_ops segmf_ops;
static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
static struct segmf_data *
segmf_data_zalloc(struct seg *seg)
--- 97,107 ----
size_t softlockcnt;
domid_t domid;
segmf_map_t *map;
};
! static const struct seg_ops segmf_ops;
static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
static struct segmf_data *
segmf_data_zalloc(struct seg *seg)
*** 468,520 ****
{
return (0);
}
/*ARGSUSED*/
- static void
- segmf_dump(struct seg *seg)
- {}
-
- /*ARGSUSED*/
static int
segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
return (ENOTSUP);
}
- /*ARGSUSED*/
- static int
- segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
- {
- return (ENOTSUP);
- }
-
static int
segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
{
struct segmf_data *data = seg->s_data;
memid->val[0] = (uintptr_t)VTOCVP(data->vp);
memid->val[1] = (uintptr_t)seg_page(seg, addr);
return (0);
}
- /*ARGSUSED*/
- static lgrp_mem_policy_info_t *
- segmf_getpolicy(struct seg *seg, caddr_t addr)
- {
- return (NULL);
- }
-
- /*ARGSUSED*/
- static int
- segmf_capable(struct seg *seg, segcapability_t capability)
- {
- return (0);
- }
-
/*
* Add a set of contiguous foreign MFNs to the segment. soft-locking them. The
* pre-faulting is necessary due to live migration; in particular we must
* return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
* later on a bad MFN. Whilst this isn't necessary for the other MMAP
--- 468,494 ----
*** 755,785 ****
}
return (0);
}
! static struct seg_ops segmf_ops = {
! segmf_dup,
! segmf_unmap,
! segmf_free,
! segmf_fault,
! segmf_faulta,
! segmf_setprot,
! segmf_checkprot,
! (int (*)())segmf_kluster,
! (size_t (*)(struct seg *))NULL, /* swapout */
! segmf_sync,
! segmf_incore,
! segmf_lockop,
! segmf_getprot,
! segmf_getoffset,
! segmf_gettype,
! segmf_getvp,
! segmf_advise,
! segmf_dump,
! segmf_pagelock,
! segmf_setpagesize,
! segmf_getmemid,
! segmf_getpolicy,
! segmf_capable,
! seg_inherit_notsup
};
--- 729,753 ----
}
return (0);
}
! static const struct seg_ops segmf_ops = {
! .dup = segmf_dup,
! .unmap = segmf_unmap,
! .free = segmf_free,
! .fault = segmf_fault,
! .faulta = segmf_faulta,
! .setprot = segmf_setprot,
! .checkprot = segmf_checkprot,
! .kluster = segmf_kluster,
! .sync = segmf_sync,
! .incore = segmf_incore,
! .lockop = segmf_lockop,
! .getprot = segmf_getprot,
! .getoffset = segmf_getoffset,
! .gettype = segmf_gettype,
! .getvp = segmf_getvp,
! .advise = segmf_advise,
! .pagelock = segmf_pagelock,
! .getmemid = segmf_getmemid,
};