/*
 * Forward declarations for the segdev segment-driver entry points.
 * Each of these implements one slot of the segdev_ops vector below;
 * all are file-local (static) by design.
 */
166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 enum fault_type, enum seg_rw);
168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
/* Catch-all for seg_ops slots that device segments do not support. */
171 static void segdev_badop(void);
172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 ulong_t *, size_t);
176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 static int segdev_gettype(struct seg *, caddr_t);
179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 static void segdev_dump(struct seg *);
182 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 struct page ***, enum lock_type, enum seg_rw);
184 static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
185 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
186 static int segdev_capable(struct seg *, segcapability_t);
187
188 /*
189 * XXX this struct is used by rootnex_map_fault to identify
190 * the segment it has been passed. So if you make it
191 * "static" you'll need to fix rootnex_map_fault.
192 */
/*
 * Segment-driver operations vector for device (seg_dev) segments,
 * using C99 designated initializers.  The .kluster slot is routed to
 * segdev_badop() through a function-pointer cast: klustering is not a
 * supported operation for device mappings, so reaching it panics
 * rather than silently misbehaving.
 */
193 struct seg_ops segdev_ops = {
194 .dup = segdev_dup,
195 .unmap = segdev_unmap,
196 .free = segdev_free,
197 .fault = segdev_fault,
198 .faulta = segdev_faulta,
199 .setprot = segdev_setprot,
200 .checkprot = segdev_checkprot,
201 .kluster = (int (*)())segdev_badop,
202 .sync = segdev_sync,
203 .incore = segdev_incore,
204 .lockop = segdev_lockop,
205 .getprot = segdev_getprot,
206 .getoffset = segdev_getoffset,
207 .gettype = segdev_gettype,
208 .getvp = segdev_getvp,
209 .advise = segdev_advise,
210 .dump = segdev_dump,
211 .pagelock = segdev_pagelock,
212 .setpagesize = segdev_setpagesize,
213 .getmemid = segdev_getmemid,
214 .capable = segdev_capable,
215 };
216
217 /*
218 * Private segdev support routines
219 */
/*
 * Forward declarations only; the definitions are elsewhere in this
 * file (outside this excerpt).  None of these are seg_ops entry
 * points -- they are internal helpers for fault handling and for the
 * devmap context/softlock bookkeeping.
 */
220 static struct segdev_data *sdp_alloc(void);
221
222 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
223 size_t, enum seg_rw);
224
225 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
226 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
227
228 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
229 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
230
231 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
232 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
233 static void devmap_softlock_rele(devmap_handle_t *);
234 static void devmap_ctx_rele(devmap_handle_t *);
4008 panic("ddi_umem_free: illegal cookie type 0x%x\n",
4009 cp->type);
4010 }
4011
4012 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4013 }
4014
4015
/*
 * segdev_getmemid: fill in a memid_t that identifies the memory backing
 * the page at `addr' within this segment.  val[0] is derived from the
 * segment's vnode (VTOCVP presumably maps it to the common/device vnode
 * -- confirm against spec_vnops usage) and val[1] is the byte offset of
 * `addr' into the mapped object.  Always returns 0.
 */
4016 static int
4017 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4018 {
4019 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4020
4021 /*
4022 * It looks as if it is always mapped shared
4023 */
4024 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4025 "segdev_getmemid:start");
4026 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
/* offset of addr into the mapping = segment offset + delta from base */
4027 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4028 return (0);
4029 }
4030
4031 /*ARGSUSED*/
/*
 * segdev_capable: capability query entry point.  Device segments
 * advertise no optional segment capabilities -- this returns 0 for
 * every capability asked about, regardless of `seg' or `capability'.
 */
4032 static int
4033 segdev_capable(struct seg *seg, segcapability_t capability)
4034 {
4035 return (0);
4036 }
4037
4038 /*
4039 * ddi_umem_alloc() non-pageable quantum cache max size.
4040 * This is just a SWAG.
4041 */
/* 8 pages: a rough tunable guess ("SWAG"), not derived from measurement. */
4042 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4043
4044 /*
4045 * Initialize seg_dev from boot. This routine sets up the trash page
4046 * and creates the umem_np_arena used to back non-pageable memory
4047 * requests.
4048 */
4049 void
4050 segdev_init(void)
4051 {
4052 struct seg kseg;
4053
4054 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
|
/*
 * Forward declarations for the segdev segment-driver entry points.
 * All are file-local (static) implementations of seg_ops slots.
 * NOTE(review): this revision has no segdev_capable() declaration,
 * unlike the other copy of this region in the file -- verify which
 * revision is intended to be current.
 */
166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 enum fault_type, enum seg_rw);
168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
/* Catch-all for seg_ops slots that device segments do not support. */
171 static void segdev_badop(void);
172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 ulong_t *, size_t);
176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 static int segdev_gettype(struct seg *, caddr_t);
179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 static void segdev_dump(struct seg *);
182 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 struct page ***, enum lock_type, enum seg_rw);
184 static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
185 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
186
186
187 /*
188 * XXX this struct is used by rootnex_map_fault to identify
189 * the segment it has been passed. So if you make it
190 * "static" you'll need to fix rootnex_map_fault.
191 */
/*
 * Segment-driver operations vector for device (seg_dev) segments.
 * The .kluster slot is routed to segdev_badop() via a cast: klustering
 * is not supported for device mappings.
 * NOTE(review): this revision has no .capable entry, unlike the other
 * copy of this region in the file -- verify which is current.
 */
192 struct seg_ops segdev_ops = {
193 .dup = segdev_dup,
194 .unmap = segdev_unmap,
195 .free = segdev_free,
196 .fault = segdev_fault,
197 .faulta = segdev_faulta,
198 .setprot = segdev_setprot,
199 .checkprot = segdev_checkprot,
200 .kluster = (int (*)())segdev_badop,
201 .sync = segdev_sync,
202 .incore = segdev_incore,
203 .lockop = segdev_lockop,
204 .getprot = segdev_getprot,
205 .getoffset = segdev_getoffset,
206 .gettype = segdev_gettype,
207 .getvp = segdev_getvp,
208 .advise = segdev_advise,
209 .dump = segdev_dump,
210 .pagelock = segdev_pagelock,
211 .setpagesize = segdev_setpagesize,
212 .getmemid = segdev_getmemid,
213 };
214
215 /*
216 * Private segdev support routines
217 */
/*
 * Forward declarations only; the definitions are elsewhere in this
 * file (outside this excerpt).  Internal helpers for fault handling
 * and for the devmap context/softlock bookkeeping -- not seg_ops
 * entry points.
 */
218 static struct segdev_data *sdp_alloc(void);
219
220 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
221 size_t, enum seg_rw);
222
223 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
224 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
225
226 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
227 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
228
229 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
230 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
231 static void devmap_softlock_rele(devmap_handle_t *);
232 static void devmap_ctx_rele(devmap_handle_t *);
4006 panic("ddi_umem_free: illegal cookie type 0x%x\n",
4007 cp->type);
4008 }
4009
4010 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4011 }
4012
4013
/*
 * segdev_getmemid: fill in a memid_t that identifies the memory backing
 * the page at `addr' within this segment.  val[0] is derived from the
 * segment's vnode (VTOCVP presumably maps it to the common/device vnode
 * -- confirm against spec_vnops usage) and val[1] is the byte offset of
 * `addr' into the mapped object.  Always returns 0.
 */
4014 static int
4015 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4016 {
4017 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4018
4019 /*
4020 * It looks as if it is always mapped shared
4021 */
4022 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4023 "segdev_getmemid:start");
4024 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
/* offset of addr into the mapping = segment offset + delta from base */
4025 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4026 return (0);
4027 }
4028
4029 /*
4030 * ddi_umem_alloc() non-pageable quantum cache max size.
4031 * This is just a SWAG.
4032 */
/* 8 pages: a rough tunable guess ("SWAG"), not derived from measurement. */
4033 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4034
4035 /*
4036 * Initialize seg_dev from boot. This routine sets up the trash page
4037 * and creates the umem_np_arena used to back non-pageable memory
4038 * requests.
4039 */
4040 void
4041 segdev_init(void)
4042 {
4043 struct seg kseg;
4044
4045 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
|