82 SEGMF_MAP_EMPTY = 0,
83 SEGMF_MAP_MFN,
84 SEGMF_MAP_GREF
85 } segmf_map_type_t;
86
/*
 * Per-page mapping descriptor: t_type selects which member of the
 * segmf_mu_t union (declared above) is valid for this page —
 * SEGMF_MAP_EMPTY, SEGMF_MAP_MFN, or SEGMF_MAP_GREF.
 */
87 typedef struct segmf_map_s {
88 segmf_map_type_t t_type; /* discriminator for 'u' */
89 segmf_mu_t u; /* MFN or grant-ref state; see union above */
90 } segmf_map_t;
91
/*
 * Per-segment private data, hung off seg->s_data by segmf_data_zalloc().
 */
92 struct segmf_data {
93 kmutex_t lock; /* initialized in segmf_data_zalloc(); guards segment state */
94 struct vnode *vp; /* backing vnode; common vnode exported via VTOCVP() */
95 uchar_t prot; /* current protections (presumably; confirm vs. segmf_create) */
96 uchar_t maxprot; /* maximum allowed protections */
97 size_t softlockcnt; /* NOTE(review): looks like a soft-locked page count — confirm */
98 domid_t domid; /* domain id this segment maps from (cf. segmf_add_mfns) */
99 segmf_map_t *map; /* per-page map array (see segmf_map_t) */
100 };
101
102 static struct seg_ops segmf_ops;
103
104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
105
/*
 * Allocate zeroed per-segment private data, initialize its lock, and
 * wire the segment to the segmf ops vector.  KM_SLEEP allocation, so
 * this never returns NULL.
 */
106 static struct segmf_data *
107 segmf_data_zalloc(struct seg *seg)
108 {
109 struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
110
111 mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
112 seg->s_ops = &segmf_ops;
113 seg->s_data = data;
114 return (data);
115 }
116
117 int
118 segmf_create(struct seg *seg, void *args)
119 {
120 struct segmf_crargs *a = args;
121 struct segmf_data *data;
122 struct as *as = seg->s_as;
453 }
454
455 /*ARGSUSED1*/
/*
 * seg_ops getvp: return the common vnode for the segment's backing
 * vnode.  Always succeeds.
 */
456 static int
457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 {
459 struct segmf_data *data = seg->s_data;
460
461 *vpp = VTOCVP(data->vp);
462 return (0);
463 }
464
465 /*ARGSUSED*/
/*
 * seg_ops advise: madvise-style hints are ignored for this segment
 * type; report success without doing anything.
 */
466 static int
467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 {
469 return (0);
470 }
471
472 /*ARGSUSED*/
/*
 * seg_ops dump: deliberate no-op — this segment contributes no pages
 * to a crash dump.
 */
473 static void
474 segmf_dump(struct seg *seg)
475 {}
476
477 /*ARGSUSED*/
/*
 * seg_ops pagelock: page locking is not supported for foreign-mapped
 * segments.
 */
478 static int
479 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
480 struct page ***ppp, enum lock_type type, enum seg_rw rw)
481 {
482 return (ENOTSUP);
483 }
484
485 /*ARGSUSED*/
/*
 * seg_ops setpagesize: large pages are not supported here.
 */
486 static int
487 segmf_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
488 {
489 return (ENOTSUP);
490 }
491
/*
 * seg_ops getmemid: identify memory at 'addr' by the pair
 * (common vnode of the backing vnode, page index within the segment).
 */
492 static int
493 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
494 {
495 struct segmf_data *data = seg->s_data;
496
497 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
498 memid->val[1] = (uintptr_t)seg_page(seg, addr);
499 return (0);
500 }
501
502 /*ARGSUSED*/
/*
 * seg_ops getpolicy: no lgroup memory-placement policy applies; NULL
 * tells the caller to use the default.
 */
503 static lgrp_mem_policy_info_t *
504 segmf_getpolicy(struct seg *seg, caddr_t addr)
505 {
506 return (NULL);
507 }
508
509 /*ARGSUSED*/
/*
 * seg_ops capable: advertise no optional segment capabilities.
 */
510 static int
511 segmf_capable(struct seg *seg, segcapability_t capability)
512 {
513 return (0);
514 }
515
516 /*
517 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
518 * pre-faulting is necessary due to live migration; in particular we must
519 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
520 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
521 * ioctl()s, we lock them too, as they should be transitory.
522 */
523 int
524 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
525 pgcnt_t pgcnt, domid_t domid)
526 {
527 struct segmf_data *data = seg->s_data;
528 pgcnt_t base;
529 faultcode_t fc;
530 pgcnt_t i;
531 int error = 0;
532
533 if (seg->s_ops != &segmf_ops)
534 return (EINVAL);
535
740 GNTMAP_contains_pte;
741 if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
742 mapop[i].flags |= GNTMAP_readonly;
743 }
744 }
745 e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
746 if ((e != 0) || (mapop[0].status != GNTST_okay)) {
747 return (FC_MAKE_ERR(EFAULT));
748 }
749
750 /* save handle for segmf_release_grefs() and mark it as mapped */
751 for (i = 0; i < cnt; i++) {
752 ASSERT(mapop[i].status == GNTST_okay);
753 map[i].u.g.g_handle = mapop[i].handle;
754 map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
755 }
756
757 return (0);
758 }
759
760 static struct seg_ops segmf_ops = {
761 segmf_dup,
762 segmf_unmap,
763 segmf_free,
764 segmf_fault,
765 segmf_faulta,
766 segmf_setprot,
767 segmf_checkprot,
768 (int (*)())segmf_kluster,
769 (size_t (*)(struct seg *))NULL, /* swapout */
770 segmf_sync,
771 segmf_incore,
772 segmf_lockop,
773 segmf_getprot,
774 segmf_getoffset,
775 segmf_gettype,
776 segmf_getvp,
777 segmf_advise,
778 segmf_dump,
779 segmf_pagelock,
780 segmf_setpagesize,
781 segmf_getmemid,
782 segmf_getpolicy,
783 segmf_capable,
784 seg_inherit_notsup
785 };
|
82 SEGMF_MAP_EMPTY = 0,
83 SEGMF_MAP_MFN,
84 SEGMF_MAP_GREF
85 } segmf_map_type_t;
86
/*
 * Per-page mapping descriptor; t_type says which segmf_mu_t union
 * member is live (SEGMF_MAP_EMPTY, SEGMF_MAP_MFN, or SEGMF_MAP_GREF).
 */
87 typedef struct segmf_map_s {
88 segmf_map_type_t t_type; /* selects the active union member */
89 segmf_mu_t u; /* MFN or grant-ref state; union declared above */
90 } segmf_map_t;
91
/*
 * Private per-segment state; attached to seg->s_data in
 * segmf_data_zalloc().
 */
92 struct segmf_data {
93 kmutex_t lock; /* set up in segmf_data_zalloc(); guards this state */
94 struct vnode *vp; /* backing vnode; common vnode returned by getvp/getmemid */
95 uchar_t prot; /* current protections (presumably; confirm vs. segmf_create) */
96 uchar_t maxprot; /* maximum allowed protections */
97 size_t softlockcnt; /* NOTE(review): appears to count soft-locked pages — confirm */
98 domid_t domid; /* domain id the foreign pages belong to */
99 segmf_map_t *map; /* one segmf_map_t per page of the segment */
100 };
101
102 static const struct seg_ops segmf_ops;
103
104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
105
/*
 * Create the per-segment private data: zeroed KM_SLEEP allocation
 * (cannot fail), lock initialization, and hookup of seg->s_ops /
 * seg->s_data.  Returns the new structure.
 */
106 static struct segmf_data *
107 segmf_data_zalloc(struct seg *seg)
108 {
109 struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
110
111 mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
112 seg->s_ops = &segmf_ops;
113 seg->s_data = data;
114 return (data);
115 }
116
117 int
118 segmf_create(struct seg *seg, void *args)
119 {
120 struct segmf_crargs *a = args;
121 struct segmf_data *data;
122 struct as *as = seg->s_as;
453 }
454
455 /*ARGSUSED1*/
/*
 * seg_ops getvp: hand back the common vnode of the segment's backing
 * vnode; never fails.
 */
456 static int
457 segmf_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
458 {
459 struct segmf_data *data = seg->s_data;
460
461 *vpp = VTOCVP(data->vp);
462 return (0);
463 }
464
465 /*ARGSUSED*/
/*
 * seg_ops advise: advice is meaningless for this segment type; ignore
 * it and succeed.
 */
466 static int
467 segmf_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
468 {
469 return (0);
470 }
471
472 /*ARGSUSED*/
/*
 * seg_ops pagelock: not supported for foreign-mapped segments.
 */
473 static int
474 segmf_pagelock(struct seg *seg, caddr_t addr, size_t len,
475 struct page ***ppp, enum lock_type type, enum seg_rw rw)
476 {
477 return (ENOTSUP);
478 }
479
/*
 * seg_ops getmemid: encode a memory id as (common vnode pointer,
 * page index of 'addr' within the segment).
 */
480 static int
481 segmf_getmemid(struct seg *seg, caddr_t addr, memid_t *memid)
482 {
483 struct segmf_data *data = seg->s_data;
484
485 memid->val[0] = (uintptr_t)VTOCVP(data->vp);
486 memid->val[1] = (uintptr_t)seg_page(seg, addr);
487 return (0);
488 }
489
490 /*
491 * Add a set of contiguous foreign MFNs to the segment, soft-locking them. The
492 * pre-faulting is necessary due to live migration; in particular we must
493 * return an error in response to IOCTL_PRIVCMD_MMAPBATCH rather than faulting
494 * later on a bad MFN. Whilst this isn't necessary for the other MMAP
495 * ioctl()s, we lock them too, as they should be transitory.
496 */
497 int
498 segmf_add_mfns(struct seg *seg, caddr_t addr, mfn_t mfn,
499 pgcnt_t pgcnt, domid_t domid)
500 {
501 struct segmf_data *data = seg->s_data;
502 pgcnt_t base;
503 faultcode_t fc;
504 pgcnt_t i;
505 int error = 0;
506
507 if (seg->s_ops != &segmf_ops)
508 return (EINVAL);
509
714 GNTMAP_contains_pte;
715 if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
716 mapop[i].flags |= GNTMAP_readonly;
717 }
718 }
719 e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
720 if ((e != 0) || (mapop[0].status != GNTST_okay)) {
721 return (FC_MAKE_ERR(EFAULT));
722 }
723
724 /* save handle for segmf_release_grefs() and mark it as mapped */
725 for (i = 0; i < cnt; i++) {
726 ASSERT(mapop[i].status == GNTST_okay);
727 map[i].u.g.g_handle = mapop[i].handle;
728 map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
729 }
730
731 return (0);
732 }
733
/*
 * Segment driver operations vector (designated initializers).  Ops not
 * listed here — swapout, dump, setpagesize, getpolicy, capable,
 * inherit — are implicitly NULL; presumably the seg layer treats a
 * NULL entry as "unsupported/no-op" — confirm against seg_ops callers.
 */
734 static const struct seg_ops segmf_ops = {
735 .dup = segmf_dup,
736 .unmap = segmf_unmap,
737 .free = segmf_free,
738 .fault = segmf_fault,
739 .faulta = segmf_faulta,
740 .setprot = segmf_setprot,
741 .checkprot = segmf_checkprot,
742 .kluster = segmf_kluster,
743 .sync = segmf_sync,
744 .incore = segmf_incore,
745 .lockop = segmf_lockop,
746 .getprot = segmf_getprot,
747 .getoffset = segmf_getoffset,
748 .gettype = segmf_gettype,
749 .getvp = segmf_getvp,
750 .advise = segmf_advise,
751 .pagelock = segmf_pagelock,
752 .getmemid = segmf_getmemid,
753 };
|