161 * Private seg op routines.
162 */
163 static int segdev_dup(struct seg *, struct seg *);
164 static int segdev_unmap(struct seg *, caddr_t, size_t);
165 static void segdev_free(struct seg *);
166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 enum fault_type, enum seg_rw);
168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
171 static void segdev_badop(void);
172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 ulong_t *, size_t);
176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 static int segdev_gettype(struct seg *, caddr_t);
179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 static void segdev_dump(struct seg *);
182 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 struct page ***, enum lock_type, enum seg_rw);
184 static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
185 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
186 static lgrp_mem_policy_info_t *segdev_getpolicy(struct seg *, caddr_t);
187 static int segdev_capable(struct seg *, segcapability_t);
188
/*
 * XXX this struct is used by rootnex_map_fault to identify
 * the segment it has been passed. So if you make it
 * "static" you'll need to fix rootnex_map_fault.
 *
 * NOTE: this is a positional initializer — each entry must stay in the
 * exact slot order of struct seg_ops.  Slot names are noted per entry.
 */
struct seg_ops segdev_ops = {
	segdev_dup,		/* dup */
	segdev_unmap,		/* unmap */
	segdev_free,		/* free */
	segdev_fault,		/* fault */
	segdev_faulta,		/* faulta */
	segdev_setprot,		/* setprot */
	segdev_checkprot,	/* checkprot */
	(int (*)())segdev_badop, /* kluster */
	(size_t (*)(struct seg *))NULL, /* swapout */
	segdev_sync,		/* sync */
	segdev_incore,		/* incore */
	segdev_lockop,		/* lockop */
	segdev_getprot,		/* getprot */
	segdev_getoffset,	/* getoffset */
	segdev_gettype,		/* gettype */
	segdev_getvp,		/* getvp */
	segdev_advise,		/* advise */
	segdev_dump,		/* dump */
	segdev_pagelock,	/* pagelock */
	segdev_setpagesize,	/* setpagesize */
	segdev_getmemid,	/* getmemid */
	segdev_getpolicy,	/* getpolicy */
	segdev_capable,		/* capable */
	seg_inherit_notsup	/* inherit */
};
220
221 /*
222 * Private segdev support routines
223 */
224 static struct segdev_data *sdp_alloc(void);
225
226 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
227 size_t, enum seg_rw);
228
229 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
230 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
231
232 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
233 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
234
235 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
236 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
237 static void devmap_softlock_rele(devmap_handle_t *);
238 static void devmap_ctx_rele(devmap_handle_t *);
415 /*
416 * Inform the vnode of the new mapping.
417 */
418 /*
	 * It is ok to pass sdp->maxprot to ADDMAP rather than to use
420 * dhp specific maxprot because spec_addmap does not use maxprot.
421 */
422 error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
423 seg->s_as, seg->s_base, seg->s_size,
424 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
425
426 if (error != 0) {
427 sdp->devmap_data = NULL;
428 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
429 HAT_UNLOAD_UNMAP);
430 } else {
431 /*
432 * Mappings of /dev/null don't count towards the VSZ of a
433 * process. Mappings of /dev/null have no mapping type.
434 */
435 if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
436 MAP_PRIVATE)) == 0) {
437 seg->s_as->a_resvsize -= seg->s_size;
438 }
439 }
440
441 return (error);
442 }
443
444 static struct segdev_data *
445 sdp_alloc(void)
446 {
447 struct segdev_data *sdp;
448
449 sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
450 rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
451
452 return (sdp);
453 }
454
455 /*
2364
2365 return (0);
2366 }
2367
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, advise is simply always successful.
 */
/*ARGSUSED*/
static int
segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");

	/* Caller must hold the address space lock. */
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}
2382
/*
 * segdev pages are not dumped, so this is an intentional no-op.
 */
/*ARGSUSED*/
static void
segdev_dump(struct seg *seg)
{}
2390
2391 /*
 * ddi_segmap_setup:	Used by drivers who wish to specify mapping attributes
2393 * for a segment. Called from a drivers segmap(9E)
2394 * routine.
2395 */
2396 /*ARGSUSED*/
2397 int
2398 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2399 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2400 ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2401 {
2402 struct segdev_crargs dev_a;
2403 int (*mapfunc)(dev_t dev, off_t off, int prot);
2404 uint_t hat_attr;
2405 pfn_t pfn;
2406 int error, i;
2407
2408 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2409 "ddi_segmap_setup:start");
2410
2411 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2463 dev_a.hat_attr = hat_attr;
2464 dev_a.hat_flags = 0;
2465 dev_a.devmap_data = NULL;
2466
2467 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2468 as_rangeunlock(as);
2469 return (error);
2470
2471 }
2472
/*
 * Page locking is not supported for segdev segments; always fails
 * with ENOTSUP.
 */
/*ARGSUSED*/
static int
segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
	    "segdev_pagelock:start");
	return (ENOTSUP);
}
2482
/*
 * Changing the page size of a segdev segment is not supported;
 * always fails with ENOTSUP.
 */
/*ARGSUSED*/
static int
segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
    uint_t szc)
{
	return (ENOTSUP);
}
2490
2491 /*
2492 * devmap_device: Used by devmap framework to establish mapping
 *		called by devmap_setup(9F) during map setup time.
2494 */
2495 /*ARGSUSED*/
2496 static int
2497 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2498 offset_t off, size_t len, uint_t flags)
2499 {
2500 devmap_handle_t *rdhp, *maxdhp;
2501 struct segdev_crargs dev_a;
2502 int err;
2503 uint_t maxprot = PROT_ALL;
2504 offset_t offset = 0;
2505 pfn_t pfn;
2506 struct devmap_pmem_cookie *pcp;
2507
2508 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
2509 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
2510 (void *)dhp, (void *)addr, off, len);
4012 panic("ddi_umem_free: illegal cookie type 0x%x\n",
4013 cp->type);
4014 }
4015
4016 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4017 }
4018
4019
4020 static int
4021 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4022 {
4023 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4024
4025 /*
4026 * It looks as if it is always mapped shared
4027 */
4028 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4029 "segdev_getmemid:start");
4030 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4031 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4032 return (0);
4033 }
4034
/*
 * No lgroup memory allocation policy is tracked for segdev segments;
 * always returns NULL.
 */
/*ARGSUSED*/
static lgrp_mem_policy_info_t *
segdev_getpolicy(struct seg *seg, caddr_t addr)
{
	return (NULL);
}
4041
/*
 * segdev segments advertise no optional capabilities; report 0
 * ("not capable") for every query.
 */
/*ARGSUSED*/
static int
segdev_capable(struct seg *seg, segcapability_t capability)
{
	return (0);
}
4048
4049 /*
4050 * ddi_umem_alloc() non-pageable quantum cache max size.
4051 * This is just a SWAG.
4052 */
4053 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4054
4055 /*
4056 * Initialize seg_dev from boot. This routine sets up the trash page
4057 * and creates the umem_np_arena used to back non-pageable memory
4058 * requests.
4059 */
4060 void
4061 segdev_init(void)
4062 {
4063 struct seg kseg;
4064
4065 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
|
161 * Private seg op routines.
162 */
163 static int segdev_dup(struct seg *, struct seg *);
164 static int segdev_unmap(struct seg *, caddr_t, size_t);
165 static void segdev_free(struct seg *);
166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 enum fault_type, enum seg_rw);
168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
171 static void segdev_badop(void);
172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 ulong_t *, size_t);
176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 static int segdev_gettype(struct seg *, caddr_t);
179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 static int segdev_pagelock(struct seg *, caddr_t, size_t,
182 struct page ***, enum lock_type, enum seg_rw);
183 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
184
/*
 * XXX this struct is used by rootnex_map_fault to identify
 * the segment it has been passed. So if you make it
 * "static" you'll need to fix rootnex_map_fault.
 *
 * Members not named below (e.g. kluster is named, but any omitted op)
 * are implicitly zero-initialized to NULL by the designated-initializer
 * rules of C99.
 */
const struct seg_ops segdev_ops = {
	.dup		= segdev_dup,
	.unmap		= segdev_unmap,
	.free		= segdev_free,
	.fault		= segdev_fault,
	.faulta		= segdev_faulta,
	.setprot	= segdev_setprot,
	.checkprot	= segdev_checkprot,
	.kluster	= (int (*)())segdev_badop,
	.sync		= segdev_sync,
	.incore		= segdev_incore,
	.lockop		= segdev_lockop,
	.getprot	= segdev_getprot,
	.getoffset	= segdev_getoffset,
	.gettype	= segdev_gettype,
	.getvp		= segdev_getvp,
	.advise		= segdev_advise,
	.pagelock	= segdev_pagelock,
	.getmemid	= segdev_getmemid,
};
210
211 /*
212 * Private segdev support routines
213 */
214 static struct segdev_data *sdp_alloc(void);
215
216 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
217 size_t, enum seg_rw);
218
219 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
220 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
221
222 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
223 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
224
225 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
226 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
227 static void devmap_softlock_rele(devmap_handle_t *);
228 static void devmap_ctx_rele(devmap_handle_t *);
405 /*
406 * Inform the vnode of the new mapping.
407 */
408 /*
	 * It is ok to pass sdp->maxprot to ADDMAP rather than to use
410 * dhp specific maxprot because spec_addmap does not use maxprot.
411 */
412 error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
413 seg->s_as, seg->s_base, seg->s_size,
414 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
415
416 if (error != 0) {
417 sdp->devmap_data = NULL;
418 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
419 HAT_UNLOAD_UNMAP);
420 } else {
421 /*
422 * Mappings of /dev/null don't count towards the VSZ of a
423 * process. Mappings of /dev/null have no mapping type.
424 */
425 if ((segop_gettype(seg, seg->s_base) & (MAP_SHARED |
426 MAP_PRIVATE)) == 0) {
427 seg->s_as->a_resvsize -= seg->s_size;
428 }
429 }
430
431 return (error);
432 }
433
434 static struct segdev_data *
435 sdp_alloc(void)
436 {
437 struct segdev_data *sdp;
438
439 sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
440 rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
441
442 return (sdp);
443 }
444
445 /*
2354
2355 return (0);
2356 }
2357
/*
 * segdev pages are not in the cache, and thus can't really be controlled.
 * Hence, advise is simply always successful.
 */
/*ARGSUSED*/
static int
segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");

	/* Caller must hold the address space lock. */
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	return (0);
}
2372
2373 /*
 * ddi_segmap_setup:	Used by drivers who wish to specify mapping attributes
2375 * for a segment. Called from a drivers segmap(9E)
2376 * routine.
2377 */
2378 /*ARGSUSED*/
2379 int
2380 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2381 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2382 ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2383 {
2384 struct segdev_crargs dev_a;
2385 int (*mapfunc)(dev_t dev, off_t off, int prot);
2386 uint_t hat_attr;
2387 pfn_t pfn;
2388 int error, i;
2389
2390 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2391 "ddi_segmap_setup:start");
2392
2393 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2445 dev_a.hat_attr = hat_attr;
2446 dev_a.hat_flags = 0;
2447 dev_a.devmap_data = NULL;
2448
2449 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2450 as_rangeunlock(as);
2451 return (error);
2452
2453 }
2454
/*
 * Page locking is not supported for segdev segments; always fails
 * with ENOTSUP.
 */
/*ARGSUSED*/
static int
segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
    struct page ***ppp, enum lock_type type, enum seg_rw rw)
{
	TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
	    "segdev_pagelock:start");
	return (ENOTSUP);
}
2464
2465 /*
2466 * devmap_device: Used by devmap framework to establish mapping
 *		called by devmap_setup(9F) during map setup time.
2468 */
2469 /*ARGSUSED*/
2470 static int
2471 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2472 offset_t off, size_t len, uint_t flags)
2473 {
2474 devmap_handle_t *rdhp, *maxdhp;
2475 struct segdev_crargs dev_a;
2476 int err;
2477 uint_t maxprot = PROT_ALL;
2478 offset_t offset = 0;
2479 pfn_t pfn;
2480 struct devmap_pmem_cookie *pcp;
2481
2482 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
2483 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
2484 (void *)dhp, (void *)addr, off, len);
3986 panic("ddi_umem_free: illegal cookie type 0x%x\n",
3987 cp->type);
3988 }
3989
3990 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
3991 }
3992
3993
3994 static int
3995 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
3996 {
3997 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
3998
3999 /*
4000 * It looks as if it is always mapped shared
4001 */
4002 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4003 "segdev_getmemid:start");
4004 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4005 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4006 return (0);
4007 }
4008
4009 /*
4010 * ddi_umem_alloc() non-pageable quantum cache max size.
4011 * This is just a SWAG.
4012 */
4013 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4014
4015 /*
4016 * Initialize seg_dev from boot. This routine sets up the trash page
4017 * and creates the umem_np_arena used to back non-pageable memory
4018 * requests.
4019 */
4020 void
4021 segdev_init(void)
4022 {
4023 struct seg kseg;
4024
4025 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
|