Print this page
6147 segop_getpolicy already checks for a NULL op
Reviewed by: Garrett D'Amore <garrett@damore.org>


 483 }
 484 
 485 /*ARGSUSED*/
 486 static int
 487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
 488 {
 489         return (ENOTSUP);
 490 }
 491 
 492 static int
 493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
 494 {
 495         struct segmf_data *data = seg->s_data;
 496 
 497         memid->val[0] = (uintptr_t)VTOCVP(data->vp);
 498         memid->val[1] = (uintptr_t)seg_page(seg, addr);
 499         return (0);
 500 }
 501 
 502 /*ARGSUSED*/
 503 static lgrp_mem_policy_info_t *
 504 segmf_getpolicy(struct seg *seg, caddr_t addr)
 505 {
 506         return (NULL);
 507 }
 508 
 509 /*ARGSUSED*/
 510 static int
 511 segmf_capable(struct seg *seg, segcapability_t capability)
 512 {
 513         return (0);
 514 }
 515 
 516 /*
 517  * Add a set of contiguous foreign MFNs to the segment, soft-locking them.  The
 518  * pre-faulting is necessary due to live migration; in particular we must
 519  * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 520  * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 521  * ioctl()s, we lock them too, as they should be transitory.
 522  */
 523 int
 524 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
 525     pgcnt_t pgcnt, domid_t domid)
 526 {
 527         struct segmf_data *data = seg->s_data;
 528         pgcnt_t base;
 529         faultcode_t fc;


 761         .dup            = segmf_dup,
 762         .unmap          = segmf_unmap,
 763         .free           = segmf_free,
 764         .fault          = segmf_fault,
 765         .faulta         = segmf_faulta,
 766         .setprot        = segmf_setprot,
 767         .checkprot      = segmf_checkprot,
 768         .kluster        = segmf_kluster,
 769         .sync           = segmf_sync,
 770         .incore         = segmf_incore,
 771         .lockop         = segmf_lockop,
 772         .getprot        = segmf_getprot,
 773         .getoffset      = segmf_getoffset,
 774         .gettype        = segmf_gettype,
 775         .getvp          = segmf_getvp,
 776         .advise         = segmf_advise,
 777         .dump           = segmf_dump,
 778         .pagelock       = segmf_pagelock,
 779         .setpagesize    = segmf_setpagesize,
 780         .getmemid       = segmf_getmemid,
 781         .getpolicy      = segmf_getpolicy,
 782         .capable        = segmf_capable,
 783 };


 483 }
 484 
 485 /*ARGSUSED*/
 486 static int
 487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
 488 {
 489         return (ENOTSUP);
 490 }
 491 
 492 static int
 493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
 494 {
 495         struct segmf_data *data = seg->s_data;
 496 
 497         memid->val[0] = (uintptr_t)VTOCVP(data->vp);
 498         memid->val[1] = (uintptr_t)seg_page(seg, addr);
 499         return (0);
 500 }
 501 
 502 /*ARGSUSED*/







 503 static int
 504 segmf_capable(struct seg *seg, segcapability_t capability)
 505 {
 506         return (0);
 507 }
 508 
 509 /*
 510  * Add a set of contiguous foreign MFNs to the segment, soft-locking them.  The
 511  * pre-faulting is necessary due to live migration; in particular we must
 512  * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 513  * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 514  * ioctl()s, we lock them too, as they should be transitory.
 515  */
 516 int
 517 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
 518     pgcnt_t pgcnt, domid_t domid)
 519 {
 520         struct segmf_data *data = seg->s_data;
 521         pgcnt_t base;
 522         faultcode_t fc;


 754         .dup            = segmf_dup,
 755         .unmap          = segmf_unmap,
 756         .free           = segmf_free,
 757         .fault          = segmf_fault,
 758         .faulta         = segmf_faulta,
 759         .setprot        = segmf_setprot,
 760         .checkprot      = segmf_checkprot,
 761         .kluster        = segmf_kluster,
 762         .sync           = segmf_sync,
 763         .incore         = segmf_incore,
 764         .lockop         = segmf_lockop,
 765         .getprot        = segmf_getprot,
 766         .getoffset      = segmf_getoffset,
 767         .gettype        = segmf_gettype,
 768         .getvp          = segmf_getvp,
 769         .advise         = segmf_advise,
 770         .dump           = segmf_dump,
 771         .pagelock       = segmf_pagelock,
 772         .setpagesize    = segmf_setpagesize,
 773         .getmemid       = segmf_getmemid,

 774         .capable        = segmf_capable,
 775 };