Print this page
6151 use NULL setpagesize segop as a shorthand for ENOTSUP


/* segmf_advise(): mapping-advice hints are accepted and ignored; always 0. */
 465 /*ARGSUSED*/
 466 static int
 467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
 468 {
 469         return (0);
 470 }
 471 
/* segmf_dump(): deliberately empty -- this driver contributes nothing here. */
 472 /*ARGSUSED*/
 473 static void
 474 segmf_dump(struct seg *seg)
 475 {}
 476 
/* segmf_pagelock(): page locking is not supported by this segment driver. */
 477 /*ARGSUSED*/
 478 static int
 479 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
 480     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 481 {
 482         return (ENOTSUP);
 483 }
 484 
/*
 * segmf_setpagesize(): large-page resizing is not supported.  (This stub
 * is the one removed by change 6151 in favor of a NULL segop entry.)
 */
 485 /*ARGSUSED*/
 486 static int
 487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
 488 {
 489         return (ENOTSUP);
 490 }
 491 
/*
 * segmf_getmemid(): build a memory identity for the page at addr --
 * val[0] is derived from the segment's backing vnode via VTOCVP, and
 * val[1] is the page index of addr within the segment (seg_page).
 * NOTE(review): VTOCVP's exact semantics are not visible in this
 * listing; confirm what vnode val[0] actually names against the header.
 */
 492 static int
 493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
 494 {
 495         struct segmf_data *data = seg->s_data;
 496 
 497         memid->val[0] = (uintptr_t)VTOCVP(data->vp);
 498         memid->val[1] = (uintptr_t)seg_page(seg, addr);
 499         return (0);
 500 }
 501 
 502 /*
 503  * Add a set of contiguous foreign MFNs to the segment, soft-locking them.  The
 504  * pre-faulting is necessary due to live migration; in particular we must
 505  * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 506  * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 507  * ioctl()s, we lock them too, as they should be transitory.
 508  */
 509 int
 510 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
 511     pgcnt_t pgcnt, domid_t domid)


 745 
/*
 * Segment-driver entry points (pre-6151 version): every op is wired up
 * explicitly, including the pagelock and setpagesize stubs that simply
 * return ENOTSUP.
 */
 746 static struct seg_ops segmf_ops = {
 747         .dup            = segmf_dup,
 748         .unmap          = segmf_unmap,
 749         .free           = segmf_free,
 750         .fault          = segmf_fault,
 751         .faulta         = segmf_faulta,
 752         .setprot        = segmf_setprot,
 753         .checkprot      = segmf_checkprot,
 754         .kluster        = segmf_kluster,
 755         .sync           = segmf_sync,
 756         .incore         = segmf_incore,
 757         .lockop         = segmf_lockop,
 758         .getprot        = segmf_getprot,
 759         .getoffset      = segmf_getoffset,
 760         .gettype        = segmf_gettype,
 761         .getvp          = segmf_getvp,
 762         .advise         = segmf_advise,
 763         .dump           = segmf_dump,
 764         .pagelock       = segmf_pagelock,
 765         .setpagesize    = segmf_setpagesize,
 766         .getmemid       = segmf_getmemid,
 767 };


/* segmf_advise(): mapping-advice hints are accepted and ignored; always 0. */
 465 /*ARGSUSED*/
 466 static int
 467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
 468 {
 469         return (0);
 470 }
 471 
/* segmf_dump(): deliberately empty -- this driver contributes nothing here. */
 472 /*ARGSUSED*/
 473 static void
 474 segmf_dump(struct seg *seg)
 475 {}
 476 
/* segmf_pagelock(): page locking is not supported by this segment driver. */
 477 /*ARGSUSED*/
 478 static int
 479 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
 480     struct page ***ppp, enum lock_type type, enum seg_rw rw)
 481 {
 482         return (ENOTSUP);
 483 }
 484 







/*
 * segmf_getmemid(): build a memory identity for the page at addr --
 * val[0] is derived from the segment's backing vnode via VTOCVP, and
 * val[1] is the page index of addr within the segment (seg_page).
 * NOTE(review): VTOCVP's exact semantics are not visible in this
 * listing; confirm what vnode val[0] actually names against the header.
 */
 485 static int
 486 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
 487 {
 488         struct segmf_data *data = seg->s_data;
 489 
 490         memid->val[0] = (uintptr_t)VTOCVP(data->vp);
 491         memid->val[1] = (uintptr_t)seg_page(seg, addr);
 492         return (0);
 493 }
 494 
 495 /*
 496  * Add a set of contiguous foreign MFNs to the segment, soft-locking them.  The
 497  * pre-faulting is necessary due to live migration; in particular we must
 498  * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
 499  * later on a bad MFN.  Whilst this isn't necessary for the other MMAP
 500  * ioctl()s, we lock them too, as they should be transitory.
 501  */
 502 int
 503 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
 504     pgcnt_t pgcnt, domid_t domid)


 738 
/*
 * Segment-driver entry points (post-6151 version): the .setpagesize slot
 * is intentionally absent and thus NULL -- per the change synopsis, a
 * NULL setpagesize segop is treated as shorthand for ENOTSUP, making the
 * explicit stub unnecessary.
 */
 739 static struct seg_ops segmf_ops = {
 740         .dup            = segmf_dup,
 741         .unmap          = segmf_unmap,
 742         .free           = segmf_free,
 743         .fault          = segmf_fault,
 744         .faulta         = segmf_faulta,
 745         .setprot        = segmf_setprot,
 746         .checkprot      = segmf_checkprot,
 747         .kluster        = segmf_kluster,
 748         .sync           = segmf_sync,
 749         .incore         = segmf_incore,
 750         .lockop         = segmf_lockop,
 751         .getprot        = segmf_getprot,
 752         .getoffset      = segmf_getoffset,
 753         .gettype        = segmf_gettype,
 754         .getvp          = segmf_getvp,
 755         .advise         = segmf_advise,
 756         .dump           = segmf_dump,
 757         .pagelock       = segmf_pagelock,

 758         .getmemid       = segmf_getmemid,
 759 };