Print this page
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.


  82         SEGMF_MAP_EMPTY = 0,
  83         SEGMF_MAP_MFN,
  84         SEGMF_MAP_GREF
  85 } segmf_map_type_t;
  86 
  /* One per-page mapping slot: a type tag plus type-specific payload. */
  87 typedef struct segmf_map_s {
  88         segmf_map_type_t        t_type;        /* SEGMF_MAP_EMPTY/MFN/GREF */
  89         segmf_mu_t              u;             /* payload; u.g.* fields used for grant refs */
  90 } segmf_map_t;
  91 
  /* Per-segment private data, hung off seg->s_data by segmf_data_zalloc(). */
  92 struct segmf_data {
  93         kmutex_t        lock;           /* initialized in segmf_data_zalloc() */
  94         struct vnode    *vp;
  95         uchar_t         prot;           /* current protections — assumed; confirm vs. callers */
  96         uchar_t         maxprot;        /* NOTE(review): presumably max allowed prot — confirm */
  97         size_t          softlockcnt;
  98         domid_t         domid;          /* NOTE(review): presumably peer Xen domain id — confirm */
  99         segmf_map_t     *map;           /* per-page mapping entries */
 100 };
 101 
 102 static struct seg_ops segmf_ops;
 103 
 104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
 105 
 /*
  * Allocate a zeroed segmf_data, initialize its mutex, and install it on
  * `seg' (sets s_ops to the segmf ops vector and s_data to the new
  * structure).  KM_SLEEP: blocks for memory rather than failing, so the
  * returned pointer is always valid.
  */
 106 static struct segmf_data *
 107 segmf_data_zalloc(struct seg *seg)
 108 {
 109         struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
 110 
 111         mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
 112         seg->s_ops = &segmf_ops;
 113         seg->s_data = data;
 114         return (data);
 115 }
 116 
 117 int
 118 segmf_create(struct seg *seg, void *args)
 119 {
 120         struct segmf_crargs *a = args;
 121         struct segmf_data *data;
 122         struct as *as = seg->s_as;


 719                     GNTMAP_contains_pte;
 720                 if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
 721                         mapop[i].flags |= GNTMAP_readonly;
 722                 }
 723         }
 724         e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
 725         if ((e != 0) || (mapop[0].status != GNTST_okay)) {
 726                 return (FC_MAKE_ERR(EFAULT));
 727         }
 728 
 729         /* save handle for segmf_release_grefs() and mark it as mapped */
 730         for (i = 0; i < cnt; i++) {
 731                 ASSERT(mapop[i].status == GNTST_okay);
 732                 map[i].u.g.g_handle = mapop[i].handle;
 733                 map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
 734         }
 735 
 736         return (0);
 737 }
 738 
 /*
  * Segment driver operations vector for segmf; installed on each segment
  * by segmf_data_zalloc().  NOTE(review): nothing here is ever written
  * after initialization, so this structure could be declared const.
  */
 739 static struct seg_ops segmf_ops = {
 740         .dup            = segmf_dup,
 741         .unmap          = segmf_unmap,
 742         .free           = segmf_free,
 743         .fault          = segmf_fault,
 744         .faulta         = segmf_faulta,
 745         .setprot        = segmf_setprot,
 746         .checkprot      = segmf_checkprot,
 747         .kluster        = segmf_kluster,
 748         .sync           = segmf_sync,
 749         .incore         = segmf_incore,
 750         .lockop         = segmf_lockop,
 751         .getprot        = segmf_getprot,
 752         .getoffset      = segmf_getoffset,
 753         .gettype        = segmf_gettype,
 754         .getvp          = segmf_getvp,
 755         .advise         = segmf_advise,
 756         .dump           = segmf_dump,
 757         .pagelock       = segmf_pagelock,
 758         .getmemid       = segmf_getmemid,
 759 };


  82         SEGMF_MAP_EMPTY = 0,
  83         SEGMF_MAP_MFN,
  84         SEGMF_MAP_GREF
  85 } segmf_map_type_t;
  86 
  /* One per-page mapping slot: a type tag plus type-specific payload. */
  87 typedef struct segmf_map_s {
  88         segmf_map_type_t        t_type;        /* SEGMF_MAP_EMPTY/MFN/GREF */
  89         segmf_mu_t              u;             /* payload; u.g.* fields used for grant refs */
  90 } segmf_map_t;
  91 
  /* Per-segment private data, hung off seg->s_data by segmf_data_zalloc(). */
  92 struct segmf_data {
  93         kmutex_t        lock;           /* initialized in segmf_data_zalloc() */
  94         struct vnode    *vp;
  95         uchar_t         prot;           /* current protections — assumed; confirm vs. callers */
  96         uchar_t         maxprot;        /* NOTE(review): presumably max allowed prot — confirm */
  97         size_t          softlockcnt;
  98         domid_t         domid;          /* NOTE(review): presumably peer Xen domain id — confirm */
  99         segmf_map_t     *map;           /* per-page mapping entries */
 100 };
 101 
 102 static const struct seg_ops segmf_ops;
 103 
 104 static int segmf_fault_gref_range(struct seg *seg, caddr_t addr, size_t len);
 105 
 /*
  * Allocate a zeroed segmf_data, initialize its mutex, and install it on
  * `seg' (sets s_ops to the segmf ops vector and s_data to the new
  * structure).  KM_SLEEP: blocks for memory rather than failing, so the
  * returned pointer is always valid.
  */
 106 static struct segmf_data *
 107 segmf_data_zalloc(struct seg *seg)
 108 {
 109         struct segmf_data *data = kmem_zalloc(sizeof (*data), KM_SLEEP);
 110 
 111         mutex_init(&data->lock, "segmf.lock", MUTEX_DEFAULT, NULL);
 112         seg->s_ops = &segmf_ops;
 113         seg->s_data = data;
 114         return (data);
 115 }
 116 
 117 int
 118 segmf_create(struct seg *seg, void *args)
 119 {
 120         struct segmf_crargs *a = args;
 121         struct segmf_data *data;
 122         struct as *as = seg->s_as;


 719                     GNTMAP_contains_pte;
 720                 if (!(map[i].u.g.g_flags & SEGMF_GFLAGS_WR)) {
 721                         mapop[i].flags |= GNTMAP_readonly;
 722                 }
 723         }
 724         e = xen_map_gref(GNTTABOP_map_grant_ref, mapop, cnt, B_TRUE);
 725         if ((e != 0) || (mapop[0].status != GNTST_okay)) {
 726                 return (FC_MAKE_ERR(EFAULT));
 727         }
 728 
 729         /* save handle for segmf_release_grefs() and mark it as mapped */
 730         for (i = 0; i < cnt; i++) {
 731                 ASSERT(mapop[i].status == GNTST_okay);
 732                 map[i].u.g.g_handle = mapop[i].handle;
 733                 map[i].u.g.g_flags |= SEGMF_GFLAGS_MAPPED;
 734         }
 735 
 736         return (0);
 737 }
 738 
 /*
  * Segment driver operations vector for segmf; installed on each segment
  * by segmf_data_zalloc().  Declared const: the vector is never modified
  * after load, so it can live in read-only data.
  */
 739 static const struct seg_ops segmf_ops = {
 740         .dup            = segmf_dup,
 741         .unmap          = segmf_unmap,
 742         .free           = segmf_free,
 743         .fault          = segmf_fault,
 744         .faulta         = segmf_faulta,
 745         .setprot        = segmf_setprot,
 746         .checkprot      = segmf_checkprot,
 747         .kluster        = segmf_kluster,
 748         .sync           = segmf_sync,
 749         .incore         = segmf_incore,
 750         .lockop         = segmf_lockop,
 751         .getprot        = segmf_getprot,
 752         .getoffset      = segmf_getoffset,
 753         .gettype        = segmf_gettype,
 754         .getvp          = segmf_getvp,
 755         .advise         = segmf_advise,
 756         .dump           = segmf_dump,
 757         .pagelock       = segmf_pagelock,
 758         .getmemid       = segmf_getmemid,
 759 };