70
71 /*
72 * Private seg op routines.
73 */
74 static void segmap_free(struct seg *seg);
75 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
76 size_t len, enum fault_type type, enum seg_rw rw);
77 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
78 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
79 uint_t prot);
80 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
81 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
82 uint_t *protv);
83 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr);
84 static int segmap_gettype(struct seg *seg, caddr_t addr);
85 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
86 static void segmap_dump(struct seg *seg);
87 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
88 struct page ***ppp, enum lock_type type,
89 enum seg_rw rw);
90 static void segmap_badop(void);
91 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
92 static lgrp_mem_policy_info_t *segmap_getpolicy(struct seg *seg,
93 caddr_t addr);
94 static int segmap_capable(struct seg *seg, segcapability_t capability);
95
96 /* segkpm support */
97 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
98 struct smap *, enum seg_rw);
99 struct smap *get_smap_kpm(caddr_t, page_t **);
100
101 #define SEGMAP_BADOP(t) (t(*)())segmap_badop
102
103 static struct seg_ops segmap_ops = {
104 .dup = SEGMAP_BADOP(int),
105 .unmap = SEGMAP_BADOP(int),
106 .free = segmap_free,
107 .fault = segmap_fault,
108 .faulta = segmap_faulta,
109 .setprot = SEGMAP_BADOP(int),
110 .checkprot = segmap_checkprot,
111 .kluster = segmap_kluster,
112 .sync = SEGMAP_BADOP(int),
113 .incore = SEGMAP_BADOP(size_t),
114 .lockop = SEGMAP_BADOP(int),
115 .getprot = segmap_getprot,
116 .getoffset = segmap_getoffset,
117 .gettype = segmap_gettype,
118 .getvp = segmap_getvp,
119 .advise = SEGMAP_BADOP(int),
120 .dump = segmap_dump,
121 .pagelock = segmap_pagelock,
122 .setpagesize = SEGMAP_BADOP(int),
123 .getmemid = segmap_getmemid,
124 .getpolicy = segmap_getpolicy,
125 .capable = segmap_capable,
126 .inherit = seg_inherit_notsup,
127 };
128
129 /*
130 * Private segmap routines.
131 */
132 static void segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
133 size_t len, enum seg_rw rw, struct smap *smp);
134 static void segmap_smapadd(struct smap *smp);
135 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
136 u_offset_t off, int hashid);
137 static void segmap_hashout(struct smap *smp);
138
139
140 /*
141 * Statistics for segmap operations.
142 *
889
890 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
891
892 /* XXX - This doesn't make any sense */
893 *vpp = smd->smd_sm->sm_vp;
894 return (0);
895 }
896
897 /*
898 * Check to see if it makes sense to do kluster/read ahead to
899 * addr + delta relative to the mapping at addr. We assume here
900 * that delta is a signed PAGESIZE'd multiple (which can be negative).
901 *
902 * For segmap we always "approve" of this action from our standpoint.
903 */
904 /*ARGSUSED*/
905 static int
906 segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
907 {
908 return (0);
909 }
910
/*
 * Panic handler installed (via SEGMAP_BADOP) for seg_ops entries that
 * must never be invoked on a segmap segment.
 *
 * Fix: the definition used an obsolescent empty parameter list "()",
 * which mismatches the earlier "(void)" prototype and is deprecated in
 * ISO C; declare it "(void)" to match.
 */
static void
segmap_badop(void)
{
	panic("segmap_badop");
	/*NOTREACHED*/
}
917
918 /*
919 * Special private segmap operations
920 */
921
922 /*
923 * Add smap to the appropriate free list.
924 */
925 static void
926 segmap_smapadd(struct smap *smp)
927 {
928 struct smfree *sm;
929 struct smap *smpfreelist;
930 struct sm_freeq *releq;
931
932 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
933
934 if (smp->sm_refcnt != 0) {
935 panic("segmap_smapadd");
|
70
71 /*
72 * Private seg op routines.
73 */
74 static void segmap_free(struct seg *seg);
75 faultcode_t segmap_fault(struct hat *hat, struct seg *seg, caddr_t addr,
76 size_t len, enum fault_type type, enum seg_rw rw);
77 static faultcode_t segmap_faulta(struct seg *seg, caddr_t addr);
78 static int segmap_checkprot(struct seg *seg, caddr_t addr, size_t len,
79 uint_t prot);
80 static int segmap_kluster(struct seg *seg, caddr_t addr, ssize_t);
81 static int segmap_getprot(struct seg *seg, caddr_t addr, size_t len,
82 uint_t *protv);
83 static u_offset_t segmap_getoffset(struct seg *seg, caddr_t addr);
84 static int segmap_gettype(struct seg *seg, caddr_t addr);
85 static int segmap_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp);
86 static void segmap_dump(struct seg *seg);
87 static int segmap_pagelock(struct seg *seg, caddr_t addr, size_t len,
88 struct page ***ppp, enum lock_type type,
89 enum seg_rw rw);
90 static int segmap_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp);
91 static lgrp_mem_policy_info_t *segmap_getpolicy(struct seg *seg,
92 caddr_t addr);
93 static int segmap_capable(struct seg *seg, segcapability_t capability);
94
95 /* segkpm support */
96 static caddr_t segmap_pagecreate_kpm(struct seg *, vnode_t *, u_offset_t,
97 struct smap *, enum seg_rw);
98 struct smap *get_smap_kpm(caddr_t, page_t **);
99
100 static struct seg_ops segmap_ops = {
101 .free = segmap_free,
102 .fault = segmap_fault,
103 .faulta = segmap_faulta,
104 .checkprot = segmap_checkprot,
105 .kluster = segmap_kluster,
106 .getprot = segmap_getprot,
107 .getoffset = segmap_getoffset,
108 .gettype = segmap_gettype,
109 .getvp = segmap_getvp,
110 .dump = segmap_dump,
111 .pagelock = segmap_pagelock,
112 .getmemid = segmap_getmemid,
113 .getpolicy = segmap_getpolicy,
114 .capable = segmap_capable,
115 .inherit = seg_inherit_notsup,
116 };
117
118 /*
119 * Private segmap routines.
120 */
121 static void segmap_unlock(struct hat *hat, struct seg *seg, caddr_t addr,
122 size_t len, enum seg_rw rw, struct smap *smp);
123 static void segmap_smapadd(struct smap *smp);
124 static struct smap *segmap_hashin(struct smap *smp, struct vnode *vp,
125 u_offset_t off, int hashid);
126 static void segmap_hashout(struct smap *smp);
127
128
129 /*
130 * Statistics for segmap operations.
131 *
878
879 ASSERT(seg->s_as && RW_READ_HELD(&seg->s_as->a_lock));
880
881 /* XXX - This doesn't make any sense */
882 *vpp = smd->smd_sm->sm_vp;
883 return (0);
884 }
885
886 /*
887 * Check to see if it makes sense to do kluster/read ahead to
888 * addr + delta relative to the mapping at addr. We assume here
889 * that delta is a signed PAGESIZE'd multiple (which can be negative).
890 *
891 * For segmap we always "approve" of this action from our standpoint.
892 */
893 /*ARGSUSED*/
894 static int
895 segmap_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
896 {
897 return (0);
898 }
899
900 /*
901 * Special private segmap operations
902 */
903
904 /*
905 * Add smap to the appropriate free list.
906 */
907 static void
908 segmap_smapadd(struct smap *smp)
909 {
910 struct smfree *sm;
911 struct smap *smpfreelist;
912 struct sm_freeq *releq;
913
914 ASSERT(MUTEX_HELD(SMAPMTX(smp)));
915
916 if (smp->sm_refcnt != 0) {
917 panic("segmap_smapadd");
|