patch SEGOP_SWAPOUT-delete
--- old/usr/src/uts/common/vm/seg_dev.c
+++ new/usr/src/uts/common/vm/seg_dev.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - segment of a mapped device.
42 42 *
43 43 * This segment driver is used when mapping character special devices.
44 44 */
45 45
46 46 #include <sys/types.h>
47 47 #include <sys/t_lock.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/vtrace.h>
50 50 #include <sys/systm.h>
51 51 #include <sys/vmsystm.h>
52 52 #include <sys/mman.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/vnode.h>
57 57 #include <sys/proc.h>
58 58 #include <sys/conf.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/ddidevmap.h>
61 61 #include <sys/ddi_implfuncs.h>
62 62 #include <sys/lgrp.h>
63 63
64 64 #include <vm/page.h>
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_dev.h>
69 69 #include <vm/seg_kp.h>
70 70 #include <vm/seg_kmem.h>
71 71 #include <vm/vpage.h>
72 72
73 73 #include <sys/sunddi.h>
74 74 #include <sys/esunddi.h>
75 75 #include <sys/fs/snode.h>
76 76
77 77
78 78 #if DEBUG
79 79 int segdev_debug;
80 80 #define DEBUGF(level, args) { if (segdev_debug >= (level)) cmn_err args; }
81 81 #else
82 82 #define DEBUGF(level, args)
83 83 #endif
84 84
85 85 /* Default timeout for devmap context management */
86 86 #define CTX_TIMEOUT_VALUE 0
87 87
88 88 #define HOLD_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
89 89 { mutex_enter(&dhp->dh_lock); }
90 90
91 91 #define RELE_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
92 92 { mutex_exit(&dhp->dh_lock); }
93 93
94 94 #define round_down_p2(a, s) ((a) & ~((s) - 1))
95 95 #define round_up_p2(a, s) (((a) + (s) - 1) & ~((s) - 1))
96 96
97 97 /*
98 98 * VA_PA_ALIGNED checks to see if both VA and PA are on a pgsize boundary
99 99 * VA_PA_PGSIZE_ALIGNED checks to see if VA is aligned with PA w.r.t. pgsize
100 100 */
101 101 #define VA_PA_ALIGNED(uvaddr, paddr, pgsize) \
102 102 (((uvaddr | paddr) & (pgsize - 1)) == 0)
103 103 #define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize) \
104 104 (((uvaddr ^ paddr) & (pgsize - 1)) == 0)
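/*
 * A worked example (illustrative values only, not taken from the code):
 * with pgsize = 0x200000 (2MB), uvaddr = 0x201000 and paddr = 0xa01000
 * share the same offset (0x1000) within a 2MB page, so
 * VA_PA_PGSIZE_ALIGNED is true and a large-page mapping is possible,
 * while VA_PA_ALIGNED is false since neither address is itself on a
 * 2MB boundary. Likewise round_down_p2(0x201000, 0x200000) == 0x200000
 * and round_up_p2(0x201000, 0x200000) == 0x400000.
 */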
105 105
106 106 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
107 107
108 108 #define VTOCVP(vp) (VTOS(vp)->s_commonvp) /* we "know" it's an snode */
109 109
110 110 static struct devmap_ctx *devmapctx_list = NULL;
111 111 static struct devmap_softlock *devmap_slist = NULL;
112 112
113 113 /*
114 114 * mutex, vnode and page for the page of zeros we use for the trash mappings.
115 115 * One trash page is allocated on the first ddi_umem_setup call that uses it.
116 116 * XXX Eventually, we may want to combine this with what segnf does when all
117 117 * hat layers implement HAT_NOFAULT.
118 118 *
119 119 * The trash page is used when the backing store for a userland mapping is
120 120 * removed but the application semantics do not take kindly to a SIGBUS.
121 121 * In that scenario, the application's pages are mapped to some dummy page
122 122 * which returns garbage on reads, and writes go into a common place.
123 123 * (Perfect for NO_FAULT semantics)
124 124 * The device driver is responsible for communicating to the app, via some
125 125 * other mechanism, that such remapping has happened and that the app should
126 126 * take corrective action.
127 127 * We can also use an anonymous memory page, as there is no requirement to
128 128 * keep the page locked; however, this complicates the fault code. RFE.
129 129 */
130 130 static struct vnode trashvp;
131 131 static struct page *trashpp;
132 132
133 133 /* Non-pageable kernel memory is allocated from the umem_np_arena. */
134 134 static vmem_t *umem_np_arena;
135 135
136 136 /* Set the cookie to a value we know will never be a valid umem_cookie */
137 137 #define DEVMAP_DEVMEM_COOKIE ((ddi_umem_cookie_t)0x1)
138 138
139 139 /*
140 140 * Macros to check the type of a devmap handle
141 141 */
142 142 #define cookie_is_devmem(c) \
143 143 ((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)
144 144
145 145 #define cookie_is_pmem(c) \
146 146 ((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)
147 147
148 148 #define cookie_is_kpmem(c) (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
149 149 ((c)->type == KMEM_PAGEABLE))
150 150
151 151 #define dhp_is_devmem(dhp) \
152 152 (cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
153 153
154 154 #define dhp_is_pmem(dhp) \
155 155 (cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
156 156
157 157 #define dhp_is_kpmem(dhp) \
158 158 (cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
159 159
160 160 /*
161 161 * Private seg op routines.
162 162 */
163 163 static int segdev_dup(struct seg *, struct seg *);
164 164 static int segdev_unmap(struct seg *, caddr_t, size_t);
165 165 static void segdev_free(struct seg *);
166 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 167 enum fault_type, enum seg_rw);
168 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
171 171 static void segdev_badop(void);
172 172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 175 ulong_t *, size_t);
176 176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 178 static int segdev_gettype(struct seg *, caddr_t);
179 179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 181 static void segdev_dump(struct seg *);
182 182 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 183 struct page ***, enum lock_type, enum seg_rw);
184 184 static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
185 185 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
186 186 static lgrp_mem_policy_info_t *segdev_getpolicy(struct seg *, caddr_t);
187 187 static int segdev_capable(struct seg *, segcapability_t);
188 188
189 189 /*
190 190 * XXX this struct is used by rootnex_map_fault to identify
191 191 * the segment it has been passed. So if you make it
192 192 * "static" you'll need to fix rootnex_map_fault.
193 193 */
194 194 struct seg_ops segdev_ops = {
195 195 segdev_dup,
196 196 segdev_unmap,
197 197 segdev_free,
198 198 segdev_fault,
199 199 segdev_faulta,
200 200 segdev_setprot,
201 201 segdev_checkprot,
202 202 (int (*)())segdev_badop, /* kluster */
203 - (size_t (*)(struct seg *))NULL, /* swapout */
204 203 segdev_sync, /* sync */
205 204 segdev_incore,
206 205 segdev_lockop, /* lockop */
207 206 segdev_getprot,
208 207 segdev_getoffset,
209 208 segdev_gettype,
210 209 segdev_getvp,
211 210 segdev_advise,
212 211 segdev_dump,
213 212 segdev_pagelock,
214 213 segdev_setpagesize,
215 214 segdev_getmemid,
216 215 segdev_getpolicy,
217 216 segdev_capable,
218 217 };
219 218
220 219 /*
221 220 * Private segdev support routines
222 221 */
223 222 static struct segdev_data *sdp_alloc(void);
224 223
225 224 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
226 225 size_t, enum seg_rw);
227 226
228 227 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
229 228 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
230 229
231 230 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
232 231 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
233 232
234 233 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
235 234 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
236 235 static void devmap_softlock_rele(devmap_handle_t *);
237 236 static void devmap_ctx_rele(devmap_handle_t *);
238 237
239 238 static void devmap_ctxto(void *);
240 239
241 240 static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
242 241 caddr_t addr);
243 242
244 243 static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
245 244 ulong_t *opfn, ulong_t *pagesize);
246 245
247 246 static void free_devmap_handle(devmap_handle_t *dhp);
248 247
249 248 static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
250 249 struct seg *newseg);
251 250
252 251 static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);
253 252
254 253 static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);
255 254
256 255 static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);
257 256
258 257 static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
259 258 offset_t off, size_t len, uint_t flags);
260 259
261 260 static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
262 261 caddr_t addr, size_t *llen, caddr_t *laddr);
263 262
264 263 static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);
265 264
266 265 static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
267 266 static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);
268 267
269 268 static void *devmap_umem_alloc_np(size_t size, size_t flags);
270 269 static void devmap_umem_free_np(void *addr, size_t size);
271 270
272 271 /*
273 272 * routines to lock and unlock underlying segkp segment for
274 273 * KMEM_PAGEABLE type cookies.
275 274 */
276 275 static faultcode_t acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
277 276 static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);
278 277
279 278 /*
280 279 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
281 280 * drivers with devmap_access callbacks
282 281 */
283 282 static int devmap_softlock_enter(struct devmap_softlock *, size_t,
284 283 enum fault_type);
285 284 static void devmap_softlock_exit(struct devmap_softlock *, size_t,
286 285 enum fault_type);
287 286
288 287 static kmutex_t devmapctx_lock;
289 288
290 289 static kmutex_t devmap_slock;
291 290
292 291 /*
293 292 * Initialize the thread callbacks and thread private data.
294 293 */
295 294 static struct devmap_ctx *
296 295 devmap_ctxinit(dev_t dev, ulong_t id)
297 296 {
298 297 struct devmap_ctx *devctx;
299 298 struct devmap_ctx *tmp;
300 299 dev_info_t *dip;
301 300
302 301 tmp = kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);
303 302
304 303 mutex_enter(&devmapctx_lock);
305 304
306 305 dip = e_ddi_hold_devi_by_dev(dev, 0);
307 306 ASSERT(dip != NULL);
308 307 ddi_release_devi(dip);
309 308
310 309 for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
311 310 if ((devctx->dip == dip) && (devctx->id == id))
312 311 break;
313 312
314 313 if (devctx == NULL) {
315 314 devctx = tmp;
316 315 devctx->dip = dip;
317 316 devctx->id = id;
318 317 mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
319 318 cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
320 319 devctx->next = devmapctx_list;
321 320 devmapctx_list = devctx;
322 321 } else
323 322 kmem_free(tmp, sizeof (struct devmap_ctx));
324 323
325 324 mutex_enter(&devctx->lock);
326 325 devctx->refcnt++;
327 326 mutex_exit(&devctx->lock);
328 327 mutex_exit(&devmapctx_lock);
329 328
330 329 return (devctx);
331 330 }
332 331
333 332 /*
334 333 * Timeout callback called if a CPU has not given up the device context
335 334 * within dhp->dh_timeout_length ticks
336 335 */
337 336 static void
338 337 devmap_ctxto(void *data)
339 338 {
340 339 struct devmap_ctx *devctx = data;
341 340
342 341 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
343 342 "devmap_ctxto:timeout expired, devctx=%p", (void *)devctx);
344 343 mutex_enter(&devctx->lock);
345 344 /*
346 345 * Set oncpu = 0 so the next mapping trying to get the device context
347 346 * can get it.
348 347 */
349 348 devctx->oncpu = 0;
350 349 devctx->timeout = 0;
351 350 cv_signal(&devctx->cv);
352 351 mutex_exit(&devctx->lock);
353 352 }
354 353
355 354 /*
356 355 * Create a device segment.
357 356 */
358 357 int
359 358 segdev_create(struct seg *seg, void *argsp)
360 359 {
361 360 struct segdev_data *sdp;
362 361 struct segdev_crargs *a = (struct segdev_crargs *)argsp;
363 362 devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
364 363 int error;
365 364
366 365 /*
367 366 * Since the address space is "write" locked, we
368 367 * don't need the segment lock to protect "segdev" data.
369 368 */
370 369 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
371 370
372 371 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
373 372
374 373 sdp = sdp_alloc();
375 374
376 375 sdp->mapfunc = a->mapfunc;
377 376 sdp->offset = a->offset;
378 377 sdp->prot = a->prot;
379 378 sdp->maxprot = a->maxprot;
380 379 sdp->type = a->type;
381 380 sdp->pageprot = 0;
382 381 sdp->softlockcnt = 0;
383 382 sdp->vpage = NULL;
384 383
385 384 if (sdp->mapfunc == NULL)
386 385 sdp->devmap_data = dhp;
387 386 else
388 387 sdp->devmap_data = dhp = NULL;
389 388
390 389 sdp->hat_flags = a->hat_flags;
391 390 sdp->hat_attr = a->hat_attr;
392 391
393 392 /*
394 393 * Currently, hat_flags supports only HAT_LOAD_NOCONSIST
395 394 */
396 395 ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));
397 396
398 397 /*
399 398 * Hold shadow vnode -- segdev only deals with
400 399 * character (VCHR) devices. We use the common
401 400 * vp to hang pages on.
402 401 */
403 402 sdp->vp = specfind(a->dev, VCHR);
404 403 ASSERT(sdp->vp != NULL);
405 404
406 405 seg->s_ops = &segdev_ops;
407 406 seg->s_data = sdp;
408 407
409 408 while (dhp != NULL) {
410 409 dhp->dh_seg = seg;
411 410 dhp = dhp->dh_next;
412 411 }
413 412
414 413 /*
415 414 * Inform the vnode of the new mapping.
416 415 */
417 416 /*
418 417 * It is ok to pass sdp->maxprot to ADDMAP rather than to use
419 418 * dhp specific maxprot because spec_addmap does not use maxprot.
420 419 */
421 420 error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
422 421 seg->s_as, seg->s_base, seg->s_size,
423 422 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
424 423
425 424 if (error != 0) {
426 425 sdp->devmap_data = NULL;
427 426 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
428 427 HAT_UNLOAD_UNMAP);
429 428 } else {
430 429 /*
431 430 * Mappings of /dev/null don't count towards the VSZ of a
432 431 * process. Mappings of /dev/null have no mapping type.
433 432 */
434 433 if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
435 434 MAP_PRIVATE)) == 0) {
436 435 seg->s_as->a_resvsize -= seg->s_size;
437 436 }
438 437 }
439 438
440 439 return (error);
441 440 }
442 441
443 442 static struct segdev_data *
444 443 sdp_alloc(void)
445 444 {
446 445 struct segdev_data *sdp;
447 446
448 447 sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
449 448 rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
450 449
451 450 return (sdp);
452 451 }
453 452
454 453 /*
455 454 * Duplicate seg and return new segment in newseg.
456 455 */
457 456 static int
458 457 segdev_dup(struct seg *seg, struct seg *newseg)
459 458 {
460 459 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
461 460 struct segdev_data *newsdp;
462 461 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
463 462 size_t npages;
464 463 int ret;
465 464
466 465 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
467 466 "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
468 467
469 468 DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
470 469 (void *)dhp, (void *)seg));
471 470
472 471 /*
473 472 * Since the address space is "write" locked, we
474 473 * don't need the segment lock to protect "segdev" data.
475 474 */
476 475 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
477 476
478 477 newsdp = sdp_alloc();
479 478
480 479 newseg->s_ops = seg->s_ops;
481 480 newseg->s_data = (void *)newsdp;
482 481
483 482 VN_HOLD(sdp->vp);
484 483 newsdp->vp = sdp->vp;
485 484 newsdp->mapfunc = sdp->mapfunc;
486 485 newsdp->offset = sdp->offset;
487 486 newsdp->pageprot = sdp->pageprot;
488 487 newsdp->prot = sdp->prot;
489 488 newsdp->maxprot = sdp->maxprot;
490 489 newsdp->type = sdp->type;
491 490 newsdp->hat_attr = sdp->hat_attr;
492 491 newsdp->hat_flags = sdp->hat_flags;
493 492 newsdp->softlockcnt = 0;
494 493
495 494 /*
496 495 * Initialize per page data if the segment we are
497 496 * dup'ing has per page information.
498 497 */
499 498 npages = seg_pages(newseg);
500 499
501 500 if (sdp->vpage != NULL) {
502 501 size_t nbytes = vpgtob(npages);
503 502
504 503 newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
505 504 bcopy(sdp->vpage, newsdp->vpage, nbytes);
506 505 } else
507 506 newsdp->vpage = NULL;
508 507
509 508 /*
510 509 * duplicate devmap handles
511 510 */
512 511 if (dhp != NULL) {
513 512 ret = devmap_handle_dup(dhp,
514 513 (devmap_handle_t **)&newsdp->devmap_data, newseg);
515 514 if (ret != 0) {
516 515 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
517 516 "segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
518 517 ret, (void *)dhp, (void *)seg);
519 518 DEBUGF(1, (CE_CONT,
520 519 "segdev_dup: ret %x dhp %p seg %p\n",
521 520 ret, (void *)dhp, (void *)seg));
522 521 return (ret);
523 522 }
524 523 }
525 524
526 525 /*
527 526 * Inform the common vnode of the new mapping.
528 527 */
529 528 return (VOP_ADDMAP(VTOCVP(newsdp->vp),
530 529 newsdp->offset, newseg->s_as,
531 530 newseg->s_base, newseg->s_size, newsdp->prot,
532 531 newsdp->maxprot, sdp->type, CRED(), NULL));
533 532 }
534 533
535 534 /*
536 535 * duplicate devmap handles
537 536 */
538 537 static int
539 538 devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
540 539 struct seg *newseg)
541 540 {
542 541 devmap_handle_t *newdhp_save = NULL;
543 542 devmap_handle_t *newdhp = NULL;
544 543 struct devmap_callback_ctl *callbackops;
545 544
546 545 while (dhp != NULL) {
547 546 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
548 547
549 548 /* Need to lock the original dhp while copying if REMAP */
550 549 HOLD_DHP_LOCK(dhp);
551 550 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
552 551 RELE_DHP_LOCK(dhp);
553 552 newdhp->dh_seg = newseg;
554 553 newdhp->dh_next = NULL;
555 554 if (newdhp_save != NULL)
556 555 newdhp_save->dh_next = newdhp;
557 556 else
558 557 *new_dhp = newdhp;
559 558 newdhp_save = newdhp;
560 559
561 560 callbackops = &newdhp->dh_callbackops;
562 561
563 562 if (dhp->dh_softlock != NULL)
564 563 newdhp->dh_softlock = devmap_softlock_init(
565 564 newdhp->dh_dev,
566 565 (ulong_t)callbackops->devmap_access);
567 566 if (dhp->dh_ctx != NULL)
568 567 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
569 568 (ulong_t)callbackops->devmap_access);
570 569
571 570 /*
572 571 * Initialize dh_lock if we want to do remap.
573 572 */
574 573 if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
575 574 mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
576 575 newdhp->dh_flags |= DEVMAP_LOCK_INITED;
577 576 }
578 577
579 578 if (callbackops->devmap_dup != NULL) {
580 579 int ret;
581 580
582 581 /*
583 582 * Call the dup callback so that the driver can
584 583 * duplicate its private data.
585 584 */
586 585 ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
587 586 (devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);
588 587
589 588 if (ret != 0) {
590 589 /*
591 590 * We want to free up this segment as the driver
592 591 * has indicated that we can't dup it. But we
593 592 * don't want to call the driver's devmap_unmap
594 593 * callback function, as the driver does not
595 594 * think this segment exists. The caller of
596 595 * devmap_dup will call seg_free on newseg
597 596 * as it was the caller that allocated the
598 597 * segment.
599 598 */
600 599 DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
601 600 "newdhp %p dhp %p\n", (void *)newdhp,
602 601 (void *)dhp));
603 602 callbackops->devmap_unmap = NULL;
604 603 return (ret);
605 604 }
606 605 }
607 606
608 607 dhp = dhp->dh_next;
609 608 }
610 609
611 610 return (0);
612 611 }
613 612
614 613 /*
615 614 * Split a segment at addr for length len.
616 615 */
617 616 /*ARGSUSED*/
618 617 static int
619 618 segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
620 619 {
621 620 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
622 621 register struct segdev_data *nsdp;
623 622 register struct seg *nseg;
624 623 register size_t opages; /* old segment size in pages */
625 624 register size_t npages; /* new segment size in pages */
626 625 register size_t dpages; /* pages being deleted (unmapped) */
627 626 register size_t nbytes;
628 627 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
629 628 devmap_handle_t *dhpp;
630 629 devmap_handle_t *newdhp;
631 630 struct devmap_callback_ctl *callbackops;
632 631 caddr_t nbase;
633 632 offset_t off;
634 633 ulong_t nsize;
635 634 size_t mlen, sz;
636 635
637 636 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
638 637 "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
639 638 (void *)dhp, (void *)seg, (void *)addr, len);
640 639
641 640 DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
642 641 (void *)dhp, (void *)seg, (void *)addr, len));
643 642
644 643 /*
645 644 * Since the address space is "write" locked, we
646 645 * don't need the segment lock to protect "segdev" data.
647 646 */
648 647 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
649 648
650 649 if ((sz = sdp->softlockcnt) > 0) {
651 650 /*
652 651 * Fail the unmap if pages are SOFTLOCKed through this mapping.
653 652 * softlockcnt is protected from change by the as write lock.
654 653 */
655 654 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
656 655 "segdev_unmap:error softlockcnt = %ld", sz);
657 656 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
658 657 return (EAGAIN);
659 658 }
660 659
661 660 /*
662 661 * Check for bad sizes
663 662 */
664 663 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
665 664 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
666 665 panic("segdev_unmap");
667 666
668 667 if (dhp != NULL) {
669 668 devmap_handle_t *tdhp;
670 669 /*
671 670 * If large page size was used in hat_devload(),
672 671 * the same page size must be used in hat_unload().
673 672 */
674 673 dhpp = tdhp = devmap_find_handle(dhp, addr);
675 674 while (tdhp != NULL) {
676 675 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
677 676 break;
678 677 }
679 678 tdhp = tdhp->dh_next;
680 679 }
681 680 if (tdhp != NULL) { /* found a dhp using large pages */
682 681 size_t slen = len;
683 682 size_t mlen;
684 683 size_t soff;
685 684
686 685 soff = (ulong_t)(addr - dhpp->dh_uvaddr);
687 686 while (slen != 0) {
688 687 mlen = MIN(slen, (dhpp->dh_len - soff));
689 688 hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
690 689 dhpp->dh_len, HAT_UNLOAD_UNMAP);
691 690 dhpp = dhpp->dh_next;
692 691 ASSERT(slen >= mlen);
693 692 slen -= mlen;
694 693 soff = 0;
695 694 }
696 695 } else
697 696 hat_unload(seg->s_as->a_hat, addr, len,
698 697 HAT_UNLOAD_UNMAP);
699 698 } else {
700 699 /*
701 700 * Unload any hardware translations in the range
702 701 * to be taken out.
703 702 */
704 703 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
705 704 }
706 705
707 706 /*
708 707 * get the user offset which will be used in the driver callbacks
709 708 */
710 709 off = sdp->offset + (offset_t)(addr - seg->s_base);
711 710
712 711 /*
713 712 * Inform the vnode of the unmapping.
714 713 */
715 714 ASSERT(sdp->vp != NULL);
716 715 (void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
717 716 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
718 717
719 718 /*
720 719 * Check for entire segment
721 720 */
722 721 if (addr == seg->s_base && len == seg->s_size) {
723 722 seg_free(seg);
724 723 return (0);
725 724 }
726 725
727 726 opages = seg_pages(seg);
728 727 dpages = btop(len);
729 728 npages = opages - dpages;
730 729
731 730 /*
732 731 * Check for beginning of segment
733 732 */
734 733 if (addr == seg->s_base) {
735 734 if (sdp->vpage != NULL) {
736 735 register struct vpage *ovpage;
737 736
738 737 ovpage = sdp->vpage; /* keep pointer to vpage */
739 738
740 739 nbytes = vpgtob(npages);
741 740 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
742 741 bcopy(&ovpage[dpages], sdp->vpage, nbytes);
743 742
744 743 /* free up old vpage */
745 744 kmem_free(ovpage, vpgtob(opages));
746 745 }
747 746
748 747 /*
749 748 * free devmap handles from the beginning of the mapping.
750 749 */
751 750 if (dhp != NULL)
752 751 devmap_handle_unmap_head(dhp, len);
753 752
754 753 sdp->offset += (offset_t)len;
755 754
756 755 seg->s_base += len;
757 756 seg->s_size -= len;
758 757
759 758 return (0);
760 759 }
761 760
762 761 /*
763 762 * Check for end of segment
764 763 */
765 764 if (addr + len == seg->s_base + seg->s_size) {
766 765 if (sdp->vpage != NULL) {
767 766 register struct vpage *ovpage;
768 767
769 768 ovpage = sdp->vpage; /* keep pointer to vpage */
770 769
771 770 nbytes = vpgtob(npages);
772 771 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
773 772 bcopy(ovpage, sdp->vpage, nbytes);
774 773
775 774 /* free up old vpage */
776 775 kmem_free(ovpage, vpgtob(opages));
777 776 }
778 777 seg->s_size -= len;
779 778
780 779 /*
781 780 * free devmap handles from addr to the end of the mapping.
782 781 */
783 782 if (dhp != NULL)
784 783 devmap_handle_unmap_tail(dhp, addr);
785 784
786 785 return (0);
787 786 }
788 787
789 788 /*
790 789 * The section to go is in the middle of the segment,
791 790 * so we have to make it into two segments. nseg is made for
792 791 * the high end while seg is cut down at the low end.
793 792 */
794 793 nbase = addr + len; /* new seg base */
795 794 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
796 795 seg->s_size = addr - seg->s_base; /* shrink old seg */
797 796 nseg = seg_alloc(seg->s_as, nbase, nsize);
798 797 if (nseg == NULL)
799 798 panic("segdev_unmap seg_alloc");
800 799
801 800 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
802 801 "segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
803 802 DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
804 803 (void *)seg, (void *)nseg));
805 804 nsdp = sdp_alloc();
806 805
807 806 nseg->s_ops = seg->s_ops;
808 807 nseg->s_data = (void *)nsdp;
809 808
810 809 VN_HOLD(sdp->vp);
811 810 nsdp->mapfunc = sdp->mapfunc;
812 811 nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
813 812 nsdp->vp = sdp->vp;
814 813 nsdp->pageprot = sdp->pageprot;
815 814 nsdp->prot = sdp->prot;
816 815 nsdp->maxprot = sdp->maxprot;
817 816 nsdp->type = sdp->type;
818 817 nsdp->hat_attr = sdp->hat_attr;
819 818 nsdp->hat_flags = sdp->hat_flags;
820 819 nsdp->softlockcnt = 0;
821 820
822 821 /*
823 822 * Initialize per page data if the segment we are
824 823 * dup'ing has per page information.
825 824 */
826 825 if (sdp->vpage != NULL) {
827 826 /* need to split vpage into two arrays */
828 827 register size_t nnbytes;
829 828 register size_t nnpages;
830 829 register struct vpage *ovpage;
831 830
832 831 ovpage = sdp->vpage; /* keep pointer to vpage */
833 832
834 833 npages = seg_pages(seg); /* seg has shrunk */
835 834 nbytes = vpgtob(npages);
836 835 nnpages = seg_pages(nseg);
837 836 nnbytes = vpgtob(nnpages);
838 837
839 838 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
840 839 bcopy(ovpage, sdp->vpage, nbytes);
841 840
842 841 nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
843 842 bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);
844 843
845 844 /* free up old vpage */
846 845 kmem_free(ovpage, vpgtob(opages));
847 846 } else
848 847 nsdp->vpage = NULL;
849 848
850 849 /*
851 850 * unmap dhps.
852 851 */
853 852 if (dhp == NULL) {
854 853 nsdp->devmap_data = NULL;
855 854 return (0);
856 855 }
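/*
 * A summary of the cases handled by the loop below, in order:
 * - dhp ends exactly at addr: dhp stays with seg; cut the chain here.
 * - dhp lies entirely below addr: dhp stays with seg.
 * - <addr, addr+len> lies strictly inside dhp: split dhp; the head
 *   stays with seg, a new handle for the tail goes to nseg.
 * - <addr, addr+len> covers the tail of dhp: truncate dhp; it stays
 *   with seg and the rest of the chain goes to nseg.
 * - dhp lies entirely inside <addr, addr+len>: unmap and free dhp.
 * - <addr, addr+len> covers the head of dhp: reduce dhp's length;
 *   the remainder goes to nseg.
 * - otherwise dhp lies entirely above addr+len: it goes to nseg.
 */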
857 856 while (dhp != NULL) {
858 857 callbackops = &dhp->dh_callbackops;
859 858 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
860 859 "segdev_unmap: dhp=%p addr=%p", dhp, addr);
861 860 DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
862 861 (void *)dhp, (void *)addr,
863 862 (void *)dhp->dh_uvaddr, dhp->dh_len));
864 863
865 864 if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
866 865 dhpp = dhp->dh_next;
867 866 dhp->dh_next = NULL;
868 867 dhp = dhpp;
869 868 } else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
870 869 dhp = dhp->dh_next;
871 870 } else if (addr > dhp->dh_uvaddr &&
872 871 (addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
873 872 /*
874 873 * <addr, addr+len> is enclosed by dhp.
875 874 * create a newdhp that begins at addr+len and
876 875 * ends at dhp->dh_uvaddr+dhp->dh_len.
877 876 */
878 877 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
879 878 HOLD_DHP_LOCK(dhp);
880 879 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
881 880 RELE_DHP_LOCK(dhp);
882 881 newdhp->dh_seg = nseg;
883 882 newdhp->dh_next = dhp->dh_next;
884 883 if (dhp->dh_softlock != NULL)
885 884 newdhp->dh_softlock = devmap_softlock_init(
886 885 newdhp->dh_dev,
887 886 (ulong_t)callbackops->devmap_access);
888 887 if (dhp->dh_ctx != NULL)
889 888 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
890 889 (ulong_t)callbackops->devmap_access);
891 890 if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
892 891 mutex_init(&newdhp->dh_lock,
893 892 NULL, MUTEX_DEFAULT, NULL);
894 893 }
895 894 if (callbackops->devmap_unmap != NULL)
896 895 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
897 896 off, len, dhp, &dhp->dh_pvtp,
898 897 newdhp, &newdhp->dh_pvtp);
899 898 mlen = len + (addr - dhp->dh_uvaddr);
900 899 devmap_handle_reduce_len(newdhp, mlen);
901 900 nsdp->devmap_data = newdhp;
902 901 /* XX Changing len should recalculate LARGE flag */
903 902 dhp->dh_len = addr - dhp->dh_uvaddr;
904 903 dhpp = dhp->dh_next;
905 904 dhp->dh_next = NULL;
906 905 dhp = dhpp;
907 906 } else if ((addr > dhp->dh_uvaddr) &&
908 907 ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
909 908 mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
910 909 /*
911 910 * <addr, addr+len> spans over dhps.
912 911 */
913 912 if (callbackops->devmap_unmap != NULL)
914 913 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
915 914 off, mlen, (devmap_cookie_t *)dhp,
916 915 &dhp->dh_pvtp, NULL, NULL);
917 916 /* XX Changing len should recalculate LARGE flag */
918 917 dhp->dh_len = addr - dhp->dh_uvaddr;
919 918 dhpp = dhp->dh_next;
920 919 dhp->dh_next = NULL;
921 920 dhp = dhpp;
922 921 nsdp->devmap_data = dhp;
923 922 } else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
924 923 /*
925 924 * dhp is enclosed by <addr, addr+len>.
926 925 */
927 926 dhp->dh_seg = nseg;
928 927 nsdp->devmap_data = dhp;
929 928 dhp = devmap_handle_unmap(dhp);
930 929 nsdp->devmap_data = dhp; /* XX redundant? */
931 930 } else if (((addr + len) > dhp->dh_uvaddr) &&
932 931 ((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
933 932 mlen = addr + len - dhp->dh_uvaddr;
934 933 if (callbackops->devmap_unmap != NULL)
935 934 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
936 935 dhp->dh_uoff, mlen, NULL,
937 936 NULL, dhp, &dhp->dh_pvtp);
938 937 devmap_handle_reduce_len(dhp, mlen);
939 938 nsdp->devmap_data = dhp;
940 939 dhp->dh_seg = nseg;
941 940 dhp = dhp->dh_next;
942 941 } else {
943 942 dhp->dh_seg = nseg;
944 943 dhp = dhp->dh_next;
945 944 }
946 945 }
947 946 return (0);
948 947 }
949 948
950 949 /*
951 950 * Utility function that handles reducing the length of a devmap handle during unmap.
952 951 * Note that this is only used for unmapping the front portion of the handle,
953 952 * i.e., we are bumping the offset/pfn etc. up by len.
954 953 * Do not use if reducing length at the tail.
955 954 */
956 955 static void
957 956 devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
958 957 {
959 958 struct ddi_umem_cookie *cp;
960 959 struct devmap_pmem_cookie *pcp;
961 960 /*
962 961 * adjust devmap handle fields
963 962 */
964 963 ASSERT(len < dhp->dh_len);
965 964
966 965 /* Make sure only page-aligned changes are done */
967 966 ASSERT((len & PAGEOFFSET) == 0);
968 967
969 968 dhp->dh_len -= len;
970 969 dhp->dh_uoff += (offset_t)len;
971 970 dhp->dh_roff += (offset_t)len;
972 971 dhp->dh_uvaddr += len;
973 972 /* Need to grab dhp lock if REMAP */
974 973 HOLD_DHP_LOCK(dhp);
975 974 cp = dhp->dh_cookie;
976 975 if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
977 976 if (cookie_is_devmem(cp)) {
978 977 dhp->dh_pfn += btop(len);
979 978 } else if (cookie_is_pmem(cp)) {
980 979 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
981 980 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
982 981 dhp->dh_roff < ptob(pcp->dp_npages));
983 982 } else {
984 983 ASSERT(dhp->dh_roff < cp->size);
985 984 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
986 985 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
987 986 ASSERT((dhp->dh_cvaddr + len) <=
988 987 (cp->cvaddr + cp->size));
989 988
990 989 dhp->dh_cvaddr += len;
991 990 }
992 991 }
993 992 /* XXX - Should recalculate the DEVMAP_FLAG_LARGE after changes */
994 993 RELE_DHP_LOCK(dhp);
995 994 }
996 995
997 996 /*
998 997 * Free devmap handle, dhp.
999 998 * Return the next devmap handle on the linked list.
1000 999 */
1001 1000 static devmap_handle_t *
1002 1001 devmap_handle_unmap(devmap_handle_t *dhp)
1003 1002 {
1004 1003 struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
1005 1004 struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
1006 1005 devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;
1007 1006
1008 1007 ASSERT(dhp != NULL);
1009 1008
1010 1009 /*
1011 1010 * before we free up dhp, call the driver's devmap_unmap entry point
1012 1011 * to free resources allocated for this dhp.
1013 1012 */
1014 1013 if (callbackops->devmap_unmap != NULL) {
1015 1014 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
1016 1015 dhp->dh_len, NULL, NULL, NULL, NULL);
1017 1016 }
1018 1017
1019 1018 if (dhpp == dhp) { /* releasing first dhp, change sdp data */
1020 1019 sdp->devmap_data = dhp->dh_next;
1021 1020 } else {
1022 1021 while (dhpp->dh_next != dhp) {
1023 1022 dhpp = dhpp->dh_next;
1024 1023 }
1025 1024 dhpp->dh_next = dhp->dh_next;
1026 1025 }
1027 1026 dhpp = dhp->dh_next; /* return value is next dhp in chain */
1028 1027
1029 1028 if (dhp->dh_softlock != NULL)
1030 1029 devmap_softlock_rele(dhp);
1031 1030
1032 1031 if (dhp->dh_ctx != NULL)
1033 1032 devmap_ctx_rele(dhp);
1034 1033
1035 1034 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1036 1035 mutex_destroy(&dhp->dh_lock);
1037 1036 }
1038 1037 kmem_free(dhp, sizeof (devmap_handle_t));
1039 1038
1040 1039 return (dhpp);
1041 1040 }
1042 1041
1043 1042 /*
1044 1043 * Free complete devmap handles from dhp for len bytes
1045 1044 * dhp can be either the first handle or a subsequent handle
1046 1045 */
1047 1046 static void
1048 1047 devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
1049 1048 {
1050 1049 struct devmap_callback_ctl *callbackops;
1051 1050
1052 1051 /*
1053 1052 * free the devmap handles covered by len.
1054 1053 */
1055 1054 while (len >= dhp->dh_len) {
1056 1055 len -= dhp->dh_len;
1057 1056 dhp = devmap_handle_unmap(dhp);
1058 1057 }
1059 1058 if (len != 0) { /* partial unmap at head of first remaining dhp */
1060 1059 callbackops = &dhp->dh_callbackops;
1061 1060
1062 1061 /*
1063 1062 * Call the unmap callback so the driver can make
1064 1063 * adjustments on its private data.
1065 1064 */
1066 1065 if (callbackops->devmap_unmap != NULL)
1067 1066 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
1068 1067 dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
1069 1068 devmap_handle_reduce_len(dhp, len);
1070 1069 }
1071 1070 }
1072 1071
1073 1072 /*
1074 1073 * Free devmap handles to truncate the mapping after addr
1075 1074 * RFE: Simpler to pass in dhp pointing at correct dhp (avoid find again)
1076 1075 * Also could then use the routine in middle unmap case too
1077 1076 */
1078 1077 static void
1079 1078 devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
1080 1079 {
1081 1080 register struct seg *seg = dhp->dh_seg;
1082 1081 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1083 1082 register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
1084 1083 struct devmap_callback_ctl *callbackops;
1085 1084 register devmap_handle_t *dhpp;
1086 1085 size_t maplen;
1087 1086 ulong_t off;
1088 1087 size_t len;
1089 1088
1090 1089 maplen = (size_t)(addr - dhp->dh_uvaddr);
1091 1090 dhph = devmap_find_handle(dhph, addr);
1092 1091
1093 1092 while (dhph != NULL) {
1094 1093 if (maplen == 0) {
1095 1094 dhph = devmap_handle_unmap(dhph);
1096 1095 } else {
1097 1096 callbackops = &dhph->dh_callbackops;
1098 1097 len = dhph->dh_len - maplen;
1099 1098 off = (ulong_t)sdp->offset + (addr - seg->s_base);
1100 1099 /*
1101 1100 * Call the unmap callback so the driver
1102 1101 * can make adjustments on its private data.
1103 1102 */
1104 1103 if (callbackops->devmap_unmap != NULL)
1105 1104 (*callbackops->devmap_unmap)(dhph,
1106 1105 dhph->dh_pvtp, off, len,
1107 1106 (devmap_cookie_t *)dhph,
1108 1107 &dhph->dh_pvtp, NULL, NULL);
1109 1108 /* XXX Reducing len needs to recalculate LARGE flag */
1110 1109 dhph->dh_len = maplen;
1111 1110 maplen = 0;
1112 1111 dhpp = dhph->dh_next;
1113 1112 dhph->dh_next = NULL;
1114 1113 dhph = dhpp;
1115 1114 }
1116 1115 } /* end while */
1117 1116 }
1118 1117
1119 1118 /*
1120 1119 * Free a segment.
1121 1120 */
1122 1121 static void
1123 1122 segdev_free(struct seg *seg)
1124 1123 {
1125 1124 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1126 1125 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1127 1126
1128 1127 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1129 1128 "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1130 1129 DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1131 1130 (void *)dhp, (void *)seg));
1132 1131
1133 1132 /*
1134 1133 * Since the address space is "write" locked, we
1135 1134 * don't need the segment lock to protect "segdev" data.
1136 1135 */
1137 1136 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1138 1137
1139 1138 while (dhp != NULL)
1140 1139 dhp = devmap_handle_unmap(dhp);
1141 1140
1142 1141 VN_RELE(sdp->vp);
1143 1142 if (sdp->vpage != NULL)
1144 1143 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1145 1144
1146 1145 rw_destroy(&sdp->lock);
1147 1146 kmem_free(sdp, sizeof (*sdp));
1148 1147 }
1149 1148
1150 1149 static void
1151 1150 free_devmap_handle(devmap_handle_t *dhp)
1152 1151 {
1153 1152 register devmap_handle_t *dhpp;
1154 1153
1155 1154 /*
1156 1155 * free up devmap handle
1157 1156 */
1158 1157 while (dhp != NULL) {
1159 1158 dhpp = dhp->dh_next;
1160 1159 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1161 1160 mutex_destroy(&dhp->dh_lock);
1162 1161 }
1163 1162
1164 1163 if (dhp->dh_softlock != NULL)
1165 1164 devmap_softlock_rele(dhp);
1166 1165
1167 1166 if (dhp->dh_ctx != NULL)
1168 1167 devmap_ctx_rele(dhp);
1169 1168
1170 1169 kmem_free(dhp, sizeof (devmap_handle_t));
1171 1170 dhp = dhpp;
1172 1171 }
1173 1172 }
1174 1173
1175 1174 /*
1176 1175 * routines to lock and unlock underlying segkp segment for
1177 1176 * KMEM_PAGEABLE type cookies.
1178 1177 * segkp only allows a single pending F_SOFTLOCK;
1179 1178 * we keep track of the number of locks in the cookie so we can
1180 1179 * have multiple pending faults and manage the calls to segkp.
1181 1180 * RFE: if segkp supports either pagelock or can support multiple
1182 1181 * calls to F_SOFTLOCK, then these routines can go away.
1183 1182 * If pagelock, segdev_faultpage can fault on a page-by-page basis,
1184 1183 * which simplifies the code quite a bit.
1185 1184 * If multiple calls are allowed but not partial ranges, then the need for
1186 1185 * cookie->lock and the locked count goes away; code can call as_fault directly
1187 1186 */
1188 1187 static faultcode_t
1189 1188 acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1190 1189 {
1191 1190 int err = 0;
1192 1191 ASSERT(cookie_is_kpmem(cookie));
1193 1192 /*
1194 1193 * Fault in pages in segkp with F_SOFTLOCK.
1195 1194 * We want to hold the lock until all pages have been loaded.
1196 1195 * segkp only allows a single caller to hold SOFTLOCK, so the cookie
1197 1196 * holds a count so we don't call into segkp multiple times
1198 1197 */
1199 1198 mutex_enter(&cookie->lock);
1200 1199
1201 1200 /*
1202 1201 * Check for overflow in locked field
1203 1202 */
1204 1203 if ((UINT32_MAX - cookie->locked) < npages) {
1205 1204 err = FC_MAKE_ERR(ENOMEM);
1206 1205 } else if (cookie->locked == 0) {
1207 1206 /* First time locking */
1208 1207 err = as_fault(kas.a_hat, &kas, cookie->cvaddr,
1209 1208 cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
1210 1209 }
1211 1210 if (!err) {
1212 1211 cookie->locked += npages;
1213 1212 }
1214 1213 mutex_exit(&cookie->lock);
1215 1214 return (err);
1216 1215 }
1217 1216
1218 1217 static void
1219 1218 release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1220 1219 {
1221 1220 mutex_enter(&cookie->lock);
1222 1221 ASSERT(cookie_is_kpmem(cookie));
1223 1222 ASSERT(cookie->locked >= npages);
1224 1223 cookie->locked -= (uint_t)npages;
1225 1224 if (cookie->locked == 0) {
1226 1225 /* Last unlock */
1227 1226 if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
1228 1227 cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE))
1229 1228 panic("segdev releasing kpmem lock %p", (void *)cookie);
1230 1229 }
1231 1230 mutex_exit(&cookie->lock);
1232 1231 }
1233 1232
1234 1233 /*
1235 1234 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
1236 1235 * drivers with devmap_access callbacks
1237 1236 * slock->softlocked basically works like a rw lock
1238 1237 * -ve counts => F_SOFTLOCK in progress
1239 1238 * +ve counts => F_INVAL/F_PROT in progress
1240 1239 * We allow only one F_SOFTLOCK at a time
1241 1240 * but can have multiple pending F_INVAL/F_PROT calls
1242 1241 *
1243 1242 * This routine waits using cv_wait_sig so killing processes is more graceful
1244 1243 * Returns EINTR if coming out of this routine due to a signal, 0 otherwise
1245 1244 */
1246 1245 static int devmap_softlock_enter(
1247 1246 struct devmap_softlock *slock,
1248 1247 size_t npages,
1249 1248 enum fault_type type)
1250 1249 {
1251 1250 if (npages == 0)
1252 1251 return (0);
1253 1252 mutex_enter(&(slock->lock));
1254 1253 switch (type) {
1255 1254 case F_SOFTLOCK :
1256 1255 while (slock->softlocked) {
1257 1256 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1258 1257 /* signalled */
1259 1258 mutex_exit(&(slock->lock));
1260 1259 return (EINTR);
1261 1260 }
1262 1261 }
1263 1262 slock->softlocked -= npages; /* -ve count => locked */
1264 1263 break;
1265 1264 case F_INVAL :
1266 1265 case F_PROT :
1267 1266 while (slock->softlocked < 0)
1268 1267 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1269 1268 /* signalled */
1270 1269 mutex_exit(&(slock->lock));
1271 1270 return (EINTR);
1272 1271 }
1273 1272 slock->softlocked += npages; /* +ve count => f_invals */
1274 1273 break;
1275 1274 default:
1276 1275 ASSERT(0);
1277 1276 }
1278 1277 mutex_exit(&(slock->lock));
1279 1278 return (0);
1280 1279 }
1281 1280
1282 1281 static void devmap_softlock_exit(
1283 1282 struct devmap_softlock *slock,
1284 1283 size_t npages,
1285 1284 enum fault_type type)
1286 1285 {
1287 1286 if (slock == NULL)
1288 1287 return;
1289 1288 mutex_enter(&(slock->lock));
1290 1289 switch (type) {
1291 1290 case F_SOFTLOCK :
1292 1291 ASSERT(-slock->softlocked >= npages);
1293 1292 slock->softlocked += npages; /* -ve count is softlocked */
1294 1293 if (slock->softlocked == 0)
1295 1294 cv_signal(&slock->cv);
1296 1295 break;
1297 1296 case F_INVAL :
1298 1297 case F_PROT:
1299 1298 ASSERT(slock->softlocked >= npages);
1300 1299 slock->softlocked -= npages;
1301 1300 if (slock->softlocked == 0)
1302 1301 cv_signal(&slock->cv);
1303 1302 break;
1304 1303 default:
1305 1304 ASSERT(0);
1306 1305 }
1307 1306 mutex_exit(&(slock->lock));
1308 1307 }
1309 1308
1310 1309 /*
1311 1310 * Do a F_SOFTUNLOCK call over the range requested.
1312 1311 * The range must have already been F_SOFTLOCK'ed.
1313 1312 * The segment lock should be held (but not the segment private lock?).
1314 1313 * The softunlock code below does not adjust for large page sizes; it
1315 1314 * assumes the caller already did any addr/len adjustments for
1316 1315 * pagesize mappings before calling.
1317 1316 */
1318 1317 /*ARGSUSED*/
1319 1318 static void
1320 1319 segdev_softunlock(
1321 1320 struct hat *hat, /* the hat */
1322 1321 struct seg *seg, /* seg_dev of interest */
1323 1322 caddr_t addr, /* base address of range */
1324 1323 size_t len, /* number of bytes */
1325 1324 enum seg_rw rw) /* type of access at fault */
1326 1325 {
1327 1326 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1328 1327 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1329 1328
1330 1329 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
1331 1330 "segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
1332 1331 dhp_head, sdp, addr, len);
1333 1332 DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
1334 1333 "addr %p len %lx\n",
1335 1334 (void *)dhp_head, sdp->softlockcnt, (void *)addr, len));
1336 1335
1337 1336 hat_unlock(hat, addr, len);
1338 1337
1339 1338 if (dhp_head != NULL) {
1340 1339 devmap_handle_t *dhp;
1341 1340 size_t mlen;
1342 1341 size_t tlen = len;
1343 1342 ulong_t off;
1344 1343
1345 1344 dhp = devmap_find_handle(dhp_head, addr);
1346 1345 ASSERT(dhp != NULL);
1347 1346
1348 1347 off = (ulong_t)(addr - dhp->dh_uvaddr);
1349 1348 while (tlen != 0) {
1350 1349 mlen = MIN(tlen, (dhp->dh_len - off));
1351 1350
1352 1351 /*
1353 1352 * unlock segkp memory, locked during F_SOFTLOCK
1354 1353 */
1355 1354 if (dhp_is_kpmem(dhp)) {
1356 1355 release_kpmem_lock(
1357 1356 (struct ddi_umem_cookie *)dhp->dh_cookie,
1358 1357 btopr(mlen));
1359 1358 }
1360 1359
1361 1360 /*
1362 1361 * Do the softlock accounting for devmap_access
1363 1362 */
1364 1363 if (dhp->dh_callbackops.devmap_access != NULL) {
1365 1364 devmap_softlock_exit(dhp->dh_softlock,
1366 1365 btopr(mlen), F_SOFTLOCK);
1367 1366 }
1368 1367
1369 1368 tlen -= mlen;
1370 1369 dhp = dhp->dh_next;
1371 1370 off = 0;
1372 1371 }
1373 1372 }
1374 1373
1375 1374 mutex_enter(&freemem_lock);
1376 1375 ASSERT(sdp->softlockcnt >= btopr(len));
1377 1376 sdp->softlockcnt -= btopr(len);
1378 1377 mutex_exit(&freemem_lock);
1379 1378 if (sdp->softlockcnt == 0) {
1380 1379 /*
1381 1380 * All SOFTLOCKS are gone. Wakeup any waiting
1382 1381 * unmappers so they can try again to unmap.
1383 1382 * Check for waiters first without the mutex
1384 1383 * held so we don't always grab the mutex on
1385 1384 * softunlocks.
1386 1385 */
1387 1386 if (AS_ISUNMAPWAIT(seg->s_as)) {
1388 1387 mutex_enter(&seg->s_as->a_contents);
1389 1388 if (AS_ISUNMAPWAIT(seg->s_as)) {
1390 1389 AS_CLRUNMAPWAIT(seg->s_as);
1391 1390 cv_broadcast(&seg->s_as->a_cv);
1392 1391 }
1393 1392 mutex_exit(&seg->s_as->a_contents);
1394 1393 }
1395 1394 }
1396 1395
1397 1396 }
1398 1397
1399 1398 /*
1400 1399 * Handle fault for a single page.
1401 1400 * Done in a separate routine so we can handle errors more easily.
1402 1401 * This routine is called only from segdev_faultpages()
1403 1402 * when looping over the range of addresses requested. The segment lock is held.
1404 1403 */
1405 1404 static faultcode_t
1406 1405 segdev_faultpage(
1407 1406 struct hat *hat, /* the hat */
1408 1407 struct seg *seg, /* seg_dev of interest */
1409 1408 caddr_t addr, /* address in as */
1410 1409 struct vpage *vpage, /* pointer to vpage for seg, addr */
1411 1410 enum fault_type type, /* type of fault */
1412 1411 enum seg_rw rw, /* type of access at fault */
1413 1412 devmap_handle_t *dhp) /* devmap handle if any for this page */
1414 1413 {
1415 1414 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1416 1415 uint_t prot;
1417 1416 pfn_t pfnum = PFN_INVALID;
1418 1417 u_offset_t offset;
1419 1418 uint_t hat_flags;
1420 1419 dev_info_t *dip;
1421 1420
1422 1421 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
1423 1422 "segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
1424 1423 DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
1425 1424 (void *)dhp, (void *)seg, (void *)addr));
1426 1425
1427 1426 /*
1428 1427 * Initialize protection value for this page.
1429 1428 * If we have per page protection values check it now.
1430 1429 */
1431 1430 if (sdp->pageprot) {
1432 1431 uint_t protchk;
1433 1432
1434 1433 switch (rw) {
1435 1434 case S_READ:
1436 1435 protchk = PROT_READ;
1437 1436 break;
1438 1437 case S_WRITE:
1439 1438 protchk = PROT_WRITE;
1440 1439 break;
1441 1440 case S_EXEC:
1442 1441 protchk = PROT_EXEC;
1443 1442 break;
1444 1443 case S_OTHER:
1445 1444 default:
1446 1445 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1447 1446 break;
1448 1447 }
1449 1448
1450 1449 prot = VPP_PROT(vpage);
1451 1450 if ((prot & protchk) == 0)
1452 1451 return (FC_PROT); /* illegal access type */
1453 1452 } else {
1454 1453 prot = sdp->prot;
1455 1454 /* caller has already done segment level protection check */
1456 1455 }
1457 1456
1458 1457 if (type == F_SOFTLOCK) {
1459 1458 mutex_enter(&freemem_lock);
1460 1459 sdp->softlockcnt++;
1461 1460 mutex_exit(&freemem_lock);
1462 1461 }
1463 1462
1464 1463 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
1465 1464 offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
1466 1465 /*
1467 1466 * In the devmap framework, sdp->mapfunc is set to NULL. We can get
1468 1467 * pfnum from dhp->dh_pfn (at beginning of segment) and offset from
1469 1468 * seg->s_base.
1470 1469 */
1471 1470 if (dhp == NULL) {
1472 1471 /* If segment has devmap_data, then dhp should be non-NULL */
1473 1472 ASSERT(sdp->devmap_data == NULL);
1474 1473 pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
1475 1474 (off_t)offset, prot);
1476 1475 prot |= sdp->hat_attr;
1477 1476 } else {
1478 1477 ulong_t off;
1479 1478 struct ddi_umem_cookie *cp;
1480 1479 struct devmap_pmem_cookie *pcp;
1481 1480
1482 1481 /* ensure the dhp passed in contains addr. */
1483 1482 ASSERT(dhp == devmap_find_handle(
1484 1483 (devmap_handle_t *)sdp->devmap_data, addr));
1485 1484
1486 1485 off = addr - dhp->dh_uvaddr;
1487 1486
1488 1487 /*
1489 1488 * This routine assumes that the caller makes sure that the
1490 1489 * fields in dhp used below are unchanged due to remap during
1491 1490 * this call. Caller does HOLD_DHP_LOCK if needed
1492 1491 */
1493 1492 cp = dhp->dh_cookie;
1494 1493 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1495 1494 pfnum = PFN_INVALID;
1496 1495 } else if (cookie_is_devmem(cp)) {
1497 1496 pfnum = dhp->dh_pfn + btop(off);
1498 1497 } else if (cookie_is_pmem(cp)) {
1499 1498 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
1500 1499 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
1501 1500 dhp->dh_roff < ptob(pcp->dp_npages));
1502 1501 pfnum = page_pptonum(
1503 1502 pcp->dp_pparray[btop(off + dhp->dh_roff)]);
1504 1503 } else {
1505 1504 ASSERT(dhp->dh_roff < cp->size);
1506 1505 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
1507 1506 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
1508 1507 ASSERT((dhp->dh_cvaddr + off) <=
1509 1508 (cp->cvaddr + cp->size));
1510 1509 ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
1511 1510 (cp->cvaddr + cp->size));
1512 1511
1513 1512 switch (cp->type) {
1514 1513 case UMEM_LOCKED :
1515 1514 if (cp->pparray != NULL) {
1516 1515 ASSERT((dhp->dh_roff &
1517 1516 PAGEOFFSET) == 0);
1518 1517 pfnum = page_pptonum(
1519 1518 cp->pparray[btop(off +
1520 1519 dhp->dh_roff)]);
1521 1520 } else {
1522 1521 pfnum = hat_getpfnum(
1523 1522 ((proc_t *)cp->procp)->p_as->a_hat,
1524 1523 cp->cvaddr + off);
1525 1524 }
1526 1525 break;
1527 1526 case UMEM_TRASH :
1528 1527 pfnum = page_pptonum(trashpp);
1529 1528 /*
1530 1529 * We should set hat_flags to HAT_NOFAULT also.
1531 1530 * However, not all hat layers implement this.
1532 1531 */
1533 1532 break;
1534 1533 case KMEM_PAGEABLE:
1535 1534 case KMEM_NON_PAGEABLE:
1536 1535 pfnum = hat_getpfnum(kas.a_hat,
1537 1536 dhp->dh_cvaddr + off);
1538 1537 break;
1539 1538 default :
1540 1539 pfnum = PFN_INVALID;
1541 1540 break;
1542 1541 }
1543 1542 }
1544 1543 prot |= dhp->dh_hat_attr;
1545 1544 }
1546 1545 if (pfnum == PFN_INVALID) {
1547 1546 return (FC_MAKE_ERR(EFAULT));
1548 1547 }
1549 1548 /* prot should already be OR'ed in with hat_attributes if needed */
1550 1549
1551 1550 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
1552 1551 "segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
1553 1552 pfnum, pf_is_memory(pfnum), prot, hat_flags);
1554 1553 DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
1555 1554 "prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));
1556 1555
1557 1556 if (pf_is_memory(pfnum) || (dhp != NULL)) {
1558 1557 /*
1559 1558 * It's not _really_ required here to pass sdp->hat_flags
1560 1559 * to hat_devload even though we do it.
1561 1560 * This is because the hat figures out that DEVMEM mappings
1562 1561 * are non-consistent anyway.
1563 1562 */
1564 1563 hat_devload(hat, addr, PAGESIZE, pfnum,
1565 1564 prot, hat_flags | sdp->hat_flags);
1566 1565 return (0);
1567 1566 }
1568 1567
1569 1568 /*
1570 1569 * Fall through to the case where devmap is not used and we need to call
1571 1570 * up the device tree to set up the mapping
1572 1571 */
1573 1572
1574 1573 dip = VTOS(VTOCVP(sdp->vp))->s_dip;
1575 1574 ASSERT(dip);
1576 1575
1577 1576 /*
1578 1577 * When calling ddi_map_fault, we do not OR in sdp->hat_attr.
1579 1578 * This is because this calls drivers which may not expect
1580 1579 * prot to have any values other than PROT_ALL.
1581 1580 * The root nexus driver has a hack to peek into the segment
1582 1581 * structure and then OR in sdp->hat_attr.
1583 1582 * XX In case the bus_ops interfaces are ever revisited
1584 1583 * we need to fix this. prot should include other hat attributes
1585 1584 */
1586 1585 if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
1587 1586 (uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
1588 1587 return (FC_MAKE_ERR(EFAULT));
1589 1588 }
1590 1589 return (0);
1591 1590 }
1592 1591
1593 1592 static faultcode_t
1594 1593 segdev_fault(
1595 1594 struct hat *hat, /* the hat */
1596 1595 struct seg *seg, /* the seg_dev of interest */
1597 1596 caddr_t addr, /* the address of the fault */
1598 1597 size_t len, /* the length of the range */
1599 1598 enum fault_type type, /* type of fault */
1600 1599 enum seg_rw rw) /* type of access at fault */
1601 1600 {
1602 1601 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1603 1602 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1604 1603 devmap_handle_t *dhp;
1605 1604 struct devmap_softlock *slock = NULL;
1606 1605 ulong_t slpage = 0;
1607 1606 ulong_t off;
1608 1607 caddr_t maddr = addr;
1609 1608 int err;
1610 1609 int err_is_faultcode = 0;
1611 1610
1612 1611 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1613 1612 "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1614 1613 (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1615 1614 DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1616 1615 "addr %p len %lx type %x\n",
1617 1616 (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1618 1617
1619 1618 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1620 1619
1621 1620 /* Handle non-devmap case */
1622 1621 if (dhp_head == NULL)
1623 1622 return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1624 1623
1625 1624 /* Find devmap handle */
1626 1625 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1627 1626 return (FC_NOMAP);
1628 1627
1629 1628 /*
1630 1629 * The seg_dev driver does not implement copy-on-write,
1631 1630 * and always loads translations with the maximal allowed permissions,
1632 1631 * yet we got a fault trying to access the device.
1633 1632 * Servicing the fault is not going to produce a better outcome.
1634 1633 * RFE: if we want devmap_access callbacks to be involved in F_PROT
1635 1634 * faults, then the code below is written for that,
1636 1635 * pending resolution of the following:
1637 1636 * - determine whether the F_INVAL/F_SOFTLOCK syncing
1638 1637 * is needed for F_PROT as well. The code below assumes it is.
1639 1638 * - if a driver sees F_PROT and calls devmap_load with the same type,
1640 1639 * then segdev_faultpages will fail with FC_PROT anyway; we need to
1641 1640 * change that so calls from devmap_load to segdev_faultpages for
1642 1641 * the F_PROT type are retagged to F_INVAL.
1643 1642 * RFE: today we don't have drivers that use devmap and want to handle
1644 1643 * F_PROT calls. The code in segdev_fault* is written to allow
1645 1644 * this case but is not tested. A driver that needs this capability
1646 1645 * should be able to remove the short-circuit case, resolve the
1647 1646 * above issues, and "should" work.
1648 1647 */
1649 1648 if (type == F_PROT) {
1650 1649 return (FC_PROT);
1651 1650 }
1652 1651
1653 1652 /*
1654 1653 * Loop through dhp list calling devmap_access or segdev_faultpages for
1655 1654 * each devmap handle.
1656 1655 * drivers which implement devmap_access can interpose on faults and do
1657 1656 * device-appropriate special actions before calling devmap_load.
1658 1657 */
1659 1658
1660 1659 /*
1661 1660 * Unfortunately, this simple loop has turned out to expose a variety
1662 1661 * of complex problems, which result in the following convoluted code.
1663 1662 *
1664 1663 * First, a desire to handle a serialization of F_SOFTLOCK calls
1665 1664 * to the driver within the framework.
1666 1665 * This results in a dh_softlock structure that is on a per device
1667 1666 * (or device instance) basis and serializes devmap_access calls.
1668 1667 * Ideally we would need to do this for the underlying
1669 1668 * memory/device regions that are being faulted on,
1670 1669 * but those are hard to identify, and harder still with REMAP.
1671 1670 * Second, a desire to serialize F_INVAL (and F_PROT) calls
1672 1671 * w.r.t. F_SOFTLOCK calls to the driver.
1673 1672 * These serializations are to simplify the driver programmer model.
1674 1673 * To support these two features, the code first goes through the
1675 1674 * devmap handles and counts the pages (slpage) that are covered
1676 1675 * by devmap_access callbacks.
1677 1676 * This part ends with a devmap_softlock_enter call
1678 1677 * which allows only one F_SOFTLOCK active on a device instance,
1679 1678 * but multiple F_INVAL/F_PROTs can be active except when a
1680 1679 * F_SOFTLOCK is active
1681 1680 *
1682 1681 * Next, we don't short-circuit the fault code upfront to call
1683 1682 * segdev_softunlock for F_SOFTUNLOCK, because we must use
1684 1683 * the same length when we softlock and softunlock.
1685 1684 *
1686 1685 * - Hat layers may not support softunlocking lengths less than the
1687 1686 * original length when there is large page support.
1688 1687 * - kpmem locking depends on keeping the lengths the same.
1689 1688 * - if drivers handle F_SOFTLOCK, they probably also expect to
1690 1689 * see an F_SOFTUNLOCK of the same length.
1691 1690 * Hence, if extending lengths during softlock,
1692 1691 * softunlock has to make the same adjustments and goes through
1693 1692 * the same loop calling segdev_faultpages/segdev_softunlock
1694 1693 * But some of the synchronization and error handling is different
1695 1694 */
1696 1695
1697 1696 if (type != F_SOFTUNLOCK) {
1698 1697 devmap_handle_t *dhpp = dhp;
1699 1698 size_t slen = len;
1700 1699
1701 1700 /*
1702 1701 * Calculate the count of pages that are:
1703 1702 * a) within the (potentially extended) fault region
1704 1703 * b) AND covered by devmap handle with devmap_access
1705 1704 */
1706 1705 off = (ulong_t)(addr - dhpp->dh_uvaddr);
1707 1706 while (slen != 0) {
1708 1707 size_t mlen;
1709 1708
1710 1709 /*
1711 1710 * Softlocking on a region that allows remap is
1712 1711 * unsupported due to unresolved locking issues
1713 1712 * XXX: unclear what these are?
1714 1713 * One potential issue is that if there is a pending
1715 1714 * softlock, then a remap should not be allowed
1716 1715 * until the unlock is done. This is easily
1717 1716 * fixed by returning error in devmap*remap on
1718 1717 * checking the dh->dh_softlock->softlocked value
1719 1718 */
1720 1719 if ((type == F_SOFTLOCK) &&
1721 1720 (dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
1722 1721 return (FC_NOSUPPORT);
1723 1722 }
1724 1723
1725 1724 mlen = MIN(slen, (dhpp->dh_len - off));
1726 1725 if (dhpp->dh_callbackops.devmap_access) {
1727 1726 size_t llen;
1728 1727 caddr_t laddr;
1729 1728 /*
1730 1729 * use extended length for large page mappings
1731 1730 */
1732 1731 HOLD_DHP_LOCK(dhpp);
1733 1732 if ((sdp->pageprot == 0) &&
1734 1733 (dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
1735 1734 devmap_get_large_pgsize(dhpp,
1736 1735 mlen, maddr, &llen, &laddr);
1737 1736 } else {
1738 1737 llen = mlen;
1739 1738 }
1740 1739 RELE_DHP_LOCK(dhpp);
1741 1740
1742 1741 slpage += btopr(llen);
1743 1742 slock = dhpp->dh_softlock;
1744 1743 }
1745 1744 maddr += mlen;
1746 1745 ASSERT(slen >= mlen);
1747 1746 slen -= mlen;
1748 1747 dhpp = dhpp->dh_next;
1749 1748 off = 0;
1750 1749 }
1751 1750 /*
1752 1751 * Synchronize with other faulting threads and wait till safe;
1753 1752 * devmap_softlock_enter might return due to a signal in cv_wait.
1754 1753 *
1755 1754 * devmap_softlock_enter has to be called outside of the while loop
1756 1755 * to prevent a deadlock if len spans over multiple dhps.
1757 1756 * dh_softlock is based on device instance and if multiple dhps
1758 1757 * use the same device instance, the second dhp's LOCK call
1759 1758 * will hang waiting on the first to complete.
1760 1759 * devmap_setup verifies that slocks in a dhp_chain are same.
1761 1760 * RFE: this deadlock only holds true for F_SOFTLOCK. For
1762 1761 * F_INVAL/F_PROT, since we now allow multiple in parallel,
1763 1762 * we could have done the softlock_enter inside the loop
1764 1763 * and supported multi-dhp mappings with dissimilar devices
1765 1764 */
1766 1765 if (err = devmap_softlock_enter(slock, slpage, type))
1767 1766 return (FC_MAKE_ERR(err));
1768 1767 }
1769 1768
1770 1769 /* reset 'maddr' to the start addr of the range of fault. */
1771 1770 maddr = addr;
1772 1771
1773 1772 /* calculate the offset corresponding to 'addr' in the first dhp. */
1774 1773 off = (ulong_t)(addr - dhp->dh_uvaddr);
1775 1774
1776 1775 /*
1777 1776 * The fault length may span over multiple dhps.
1778 1777 * Loop until the total length is satisfied.
1779 1778 */
1780 1779 while (len != 0) {
1781 1780 size_t llen;
1782 1781 size_t mlen;
1783 1782 caddr_t laddr;
1784 1783
1785 1784 /*
1786 1785 * mlen is the smaller of 'len' and the length
1787 1786 * from addr to the end of mapping defined by dhp.
1788 1787 */
1789 1788 mlen = MIN(len, (dhp->dh_len - off));
1790 1789
1791 1790 HOLD_DHP_LOCK(dhp);
1792 1791 /*
1793 1792 * Pass the extended length and address to devmap_access
1794 1793 * if large pagesize is used for loading address translations.
1795 1794 */
1796 1795 if ((sdp->pageprot == 0) &&
1797 1796 (dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
1798 1797 devmap_get_large_pgsize(dhp, mlen, maddr,
1799 1798 &llen, &laddr);
1800 1799 ASSERT(maddr == addr || laddr == maddr);
1801 1800 } else {
1802 1801 llen = mlen;
1803 1802 laddr = maddr;
1804 1803 }
1805 1804
1806 1805 if (dhp->dh_callbackops.devmap_access != NULL) {
1807 1806 offset_t aoff;
1808 1807
1809 1808 aoff = sdp->offset + (offset_t)(laddr - seg->s_base);
1810 1809
1811 1810 /*
1812 1811 * call driver's devmap_access entry point which will
1813 1812 * call devmap_load/contextmgmt to load the translations
1814 1813 *
1815 1814 * We drop the dhp_lock before calling access so
1816 1815 * drivers can call devmap_*_remap within access
1817 1816 */
1818 1817 RELE_DHP_LOCK(dhp);
1819 1818
1820 1819 err = (*dhp->dh_callbackops.devmap_access)(
1821 1820 dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
1822 1821 } else {
1823 1822 /*
1824 1823 * If there is no devmap_access entry point, load the mappings;
1825 1824 * hold dhp_lock across faultpages if REMAP is allowed.
1826 1825 */
1827 1826 err = segdev_faultpages(hat, seg, laddr, llen,
1828 1827 type, rw, dhp);
1829 1828 err_is_faultcode = 1;
1830 1829 RELE_DHP_LOCK(dhp);
1831 1830 }
1832 1831
1833 1832 if (err) {
1834 1833 if ((type == F_SOFTLOCK) && (maddr > addr)) {
1835 1834 /*
1836 1835 * If not first dhp, use
1837 1836 * segdev_fault(F_SOFTUNLOCK) for prior dhps
1838 1837 * While this is recursion, it is incorrect to
1839 1838 * call just segdev_softunlock if we are using
1840 1839 * either large pages or devmap_access.
1841 1840 * It is more correct to go through the same
1842 1841 * loop as above rather than call
1843 1842 * segdev_softunlock directly:
1844 1843 * it will use the right lengths as well as
1845 1844 * call into the driver devmap_access routines.
1846 1845 */
1847 1846 size_t done = (size_t)(maddr - addr);
1848 1847 (void) segdev_fault(hat, seg, addr, done,
1849 1848 F_SOFTUNLOCK, S_OTHER);
1850 1849 /*
1851 1850 * reduce slpage by number of pages
1852 1851 * released by segdev_softunlock
1853 1852 */
1854 1853 ASSERT(slpage >= btopr(done));
1855 1854 devmap_softlock_exit(slock,
1856 1855 slpage - btopr(done), type);
1857 1856 } else {
1858 1857 devmap_softlock_exit(slock, slpage, type);
1859 1858 }
1860 1859
1861 1860
1862 1861 /*
1863 1862 * Segdev_faultpages() already returns a faultcode,
1864 1863 * hence, the result from segdev_faultpages() should be
1865 1864 * returned directly.
1866 1865 */
1867 1866 if (err_is_faultcode)
1868 1867 return (err);
1869 1868 return (FC_MAKE_ERR(err));
1870 1869 }
1871 1870
1872 1871 maddr += mlen;
1873 1872 ASSERT(len >= mlen);
1874 1873 len -= mlen;
1875 1874 dhp = dhp->dh_next;
1876 1875 off = 0;
1877 1876
1878 1877 ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
1879 1878 }
1880 1879 /*
1881 1880 * release the softlock count at end of fault
1882 1881 * For F_SOFTLOCK this is done in the later F_SOFTUNLOCK
1883 1882 */
1884 1883 if ((type == F_INVAL) || (type == F_PROT))
1885 1884 devmap_softlock_exit(slock, slpage, type);
1886 1885 return (0);
1887 1886 }
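
/*
 * Illustrative sketch (review annotation, not part of this file, kept
 * under #if 0): the devmap_access callback invoked from the loop above,
 * as seen from a driver. A driver with no device-specific context
 * handling can simply forward the fault to devmap_default_access(9F),
 * which ends up back in devmap_load()/segdev_faultpages(). The xx_
 * names are hypothetical.
 */
#if 0
static int
xx_devmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, uint_t type, uint_t rw)
{
	/* device-specific interposition would go here */
	return (devmap_default_access(dhp, pvtp, off, len, type, rw));
}
#endif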
1888 1887
1889 1888 /*
1890 1889 * segdev_faultpages
1891 1890 *
1892 1891 * Used to fault in seg_dev segment pages. Called by segdev_fault or devmap_load.
1893 1892 * This routine assumes that the caller makes sure that the fields
1894 1893 * in dhp used below are not changed due to remap during this call.
1895 1894 * Caller does HOLD_DHP_LOCK if needed.
1896 1895 * This routine returns a faultcode_t as a return value for segdev_fault.
1897 1896 */
1898 1897 static faultcode_t
1899 1898 segdev_faultpages(
1900 1899 struct hat *hat, /* the hat */
1901 1900 struct seg *seg, /* the seg_dev of interest */
1902 1901 caddr_t addr, /* the address of the fault */
1903 1902 size_t len, /* the length of the range */
1904 1903 enum fault_type type, /* type of fault */
1905 1904 enum seg_rw rw, /* type of access at fault */
1906 1905 devmap_handle_t *dhp) /* devmap handle */
1907 1906 {
1908 1907 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1909 1908 register caddr_t a;
1910 1909 struct vpage *vpage;
1911 1910 struct ddi_umem_cookie *kpmem_cookie = NULL;
1912 1911 int err;
1913 1912
1914 1913 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
1915 1914 "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
1916 1915 (void *)dhp, (void *)seg, (void *)addr, len);
1917 1916 DEBUGF(5, (CE_CONT, "segdev_faultpages: "
1918 1917 "dhp %p seg %p addr %p len %lx\n",
1919 1918 (void *)dhp, (void *)seg, (void *)addr, len));
1920 1919
1921 1920 /*
1922 1921 * The seg_dev driver does not implement copy-on-write,
1923 1922 * and always loads translations with the maximal allowed permissions,
1924 1923 * yet we got a fault trying to access the device.
1925 1924 * Servicing the fault is not going to produce a better outcome.
1926 1925 * XXX: if we want to allow devmap_access to handle F_PROT calls,
1927 1926 * this code should be removed to let the normal fault handling
1928 1927 * take care of finding the error.
1929 1928 */
1930 1929 if (type == F_PROT) {
1931 1930 return (FC_PROT);
1932 1931 }
1933 1932
1934 1933 if (type == F_SOFTUNLOCK) {
1935 1934 segdev_softunlock(hat, seg, addr, len, rw);
1936 1935 return (0);
1937 1936 }
1938 1937
1939 1938 /*
1940 1939 * For kernel pageable memory, fault in and lock segkp pages.
1941 1940 * We hold this lock until the completion of this
1942 1941 * fault (INVAL/PROT) or until unlock (SOFTLOCK).
1943 1942 */
1944 1943 if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
1945 1944 kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
1946 1945 if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
1947 1946 return (err);
1948 1947 }
1949 1948
1950 1949 /*
1951 1950 * If we have the same protections for the entire segment,
1952 1951 * ensure that the access being attempted is legitimate.
1953 1952 */
1954 1953 rw_enter(&sdp->lock, RW_READER);
1955 1954 if (sdp->pageprot == 0) {
1956 1955 uint_t protchk;
1957 1956
1958 1957 switch (rw) {
1959 1958 case S_READ:
1960 1959 protchk = PROT_READ;
1961 1960 break;
1962 1961 case S_WRITE:
1963 1962 protchk = PROT_WRITE;
1964 1963 break;
1965 1964 case S_EXEC:
1966 1965 protchk = PROT_EXEC;
1967 1966 break;
1968 1967 case S_OTHER:
1969 1968 default:
1970 1969 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1971 1970 break;
1972 1971 }
1973 1972
1974 1973 if ((sdp->prot & protchk) == 0) {
1975 1974 rw_exit(&sdp->lock);
1976 1975 /* undo kpmem locking */
1977 1976 if (kpmem_cookie != NULL) {
1978 1977 release_kpmem_lock(kpmem_cookie, btopr(len));
1979 1978 }
1980 1979 return (FC_PROT); /* illegal access type */
1981 1980 }
1982 1981 }
1983 1982
1984 1983 /*
1985 1984 * We do a single hat_devload for the range if:
1986 1985 * - the devmap framework is in use (dhp is not NULL),
1987 1986 * - pageprot == 0, i.e., no per-page protection is set, and
1988 1987 * - the pages are device pages, irrespective of large page use.
1989 1988 */
1990 1989 if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) {
1991 1990 pfn_t pfnum;
1992 1991 uint_t hat_flags;
1993 1992
1994 1993 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1995 1994 rw_exit(&sdp->lock);
1996 1995 return (FC_NOMAP);
1997 1996 }
1998 1997
1999 1998 if (type == F_SOFTLOCK) {
2000 1999 mutex_enter(&freemem_lock);
2001 2000 sdp->softlockcnt += btopr(len);
2002 2001 mutex_exit(&freemem_lock);
2003 2002 }
2004 2003
2005 2004 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
2006 2005 pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr));
2007 2006 ASSERT(!pf_is_memory(pfnum));
2008 2007
2009 2008 hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr,
2010 2009 hat_flags | sdp->hat_flags);
2011 2010 rw_exit(&sdp->lock);
2012 2011 return (0);
2013 2012 }
2014 2013
2015 2014 /* Handle cases where we have to loop through fault handling per-page */
2016 2015
2017 2016 if (sdp->vpage == NULL)
2018 2017 vpage = NULL;
2019 2018 else
2020 2019 vpage = &sdp->vpage[seg_page(seg, addr)];
2021 2020
2022 2021 /* loop over the address range handling each fault */
2023 2022 for (a = addr; a < addr + len; a += PAGESIZE) {
2024 2023 if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) {
2025 2024 break;
2026 2025 }
2027 2026 if (vpage != NULL)
2028 2027 vpage++;
2029 2028 }
2030 2029 rw_exit(&sdp->lock);
2031 2030 if (err && (type == F_SOFTLOCK)) { /* error handling for F_SOFTLOCK */
2032 2031 size_t done = (size_t)(a - addr); /* pages faulted successfully */
2033 2032 if (done > 0) {
2034 2033 /* use softunlock for those pages */
2035 2034 segdev_softunlock(hat, seg, addr, done, S_OTHER);
2036 2035 }
2037 2036 if (kpmem_cookie != NULL) {
2038 2037 /* release kpmem lock for rest of pages */
2039 2038 ASSERT(len >= done);
2040 2039 release_kpmem_lock(kpmem_cookie, btopr(len - done));
2041 2040 }
2042 2041 } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2043 2042 /* for non-SOFTLOCK cases, release kpmem */
2044 2043 release_kpmem_lock(kpmem_cookie, btopr(len));
2045 2044 }
2046 2045 return (err);
2047 2046 }
2048 2047
2049 2048 /*
2050 2049 * Asynchronous page fault. We simply do nothing since this
2051 2050 * entry point is not supposed to load up the translation.
2052 2051 */
2053 2052 /*ARGSUSED*/
2054 2053 static faultcode_t
2055 2054 segdev_faulta(struct seg *seg, caddr_t addr)
2056 2055 {
2057 2056 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2058 2057 "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2059 2058 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2060 2059
2061 2060 return (0);
2062 2061 }
2063 2062
2064 2063 static int
2065 2064 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2066 2065 {
2067 2066 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2068 2067 register devmap_handle_t *dhp;
2069 2068 register struct vpage *vp, *evp;
2070 2069 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2071 2070 ulong_t off;
2072 2071 size_t mlen, sz;
2073 2072
2074 2073 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2075 2074 "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2076 2075 (void *)seg, (void *)addr, len, prot);
2077 2076 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2078 2077
2079 2078 if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2080 2079 /*
2081 2080 * Fail the setprot if pages are SOFTLOCKed through this
2082 2081 * mapping.
2083 2082 * Softlockcnt is protected from change by the as read lock.
2084 2083 */
2085 2084 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2086 2085 "segdev_setprot:error softlockcnt=%lx", sz);
2087 2086 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
2088 2087 return (EAGAIN);
2089 2088 }
2090 2089
2091 2090 if (dhp_head != NULL) {
2092 2091 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
2093 2092 return (EINVAL);
2094 2093
2095 2094 /*
2096 2095 * check if the new protections violate maxprot.
2097 2096 */
2098 2097 off = (ulong_t)(addr - dhp->dh_uvaddr);
2099 2098 mlen = len;
2100 2099 while (dhp) {
2101 2100 if ((dhp->dh_maxprot & prot) != prot)
2102 2101 return (EACCES); /* violated maxprot */
2103 2102
2104 2103 if (mlen > (dhp->dh_len - off)) {
2105 2104 mlen -= dhp->dh_len - off;
2106 2105 dhp = dhp->dh_next;
2107 2106 off = 0;
2108 2107 } else
2109 2108 break;
2110 2109 }
2111 2110 } else {
2112 2111 if ((sdp->maxprot & prot) != prot)
2113 2112 return (EACCES);
2114 2113 }
2115 2114
2116 2115 rw_enter(&sdp->lock, RW_WRITER);
2117 2116 if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) {
2118 2117 if (sdp->prot == prot) {
2119 2118 rw_exit(&sdp->lock);
2120 2119 return (0); /* all done */
2121 2120 }
2122 2121 sdp->prot = (uchar_t)prot;
2123 2122 } else {
2124 2123 sdp->pageprot = 1;
2125 2124 if (sdp->vpage == NULL) {
2126 2125 /*
2127 2126 * First time through setting per page permissions,
2128 2127 * initialize all the vpage structures to prot
2129 2128 */
2130 2129 sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)),
2131 2130 KM_SLEEP);
2132 2131 evp = &sdp->vpage[seg_pages(seg)];
2133 2132 for (vp = sdp->vpage; vp < evp; vp++)
2134 2133 VPP_SETPROT(vp, sdp->prot);
2135 2134 }
2136 2135 /*
2137 2136 * Now go change the needed vpages protections.
2138 2137 */
2139 2138 evp = &sdp->vpage[seg_page(seg, addr + len)];
2140 2139 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++)
2141 2140 VPP_SETPROT(vp, prot);
2142 2141 }
2143 2142 rw_exit(&sdp->lock);
2144 2143
2145 2144 if (dhp_head != NULL) {
2146 2145 devmap_handle_t *tdhp;
2147 2146 /*
2148 2147 * If large page size was used in hat_devload(),
2149 2148 * the same page size must be used in hat_unload().
2150 2149 */
2151 2150 dhp = tdhp = devmap_find_handle(dhp_head, addr);
2152 2151 while (tdhp != NULL) {
2153 2152 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
2154 2153 break;
2155 2154 }
2156 2155 tdhp = tdhp->dh_next;
2157 2156 }
2158 2157 if (tdhp) {
2159 2158 size_t slen = len;
2160 2159 size_t mlen;
2161 2160 size_t soff;
2162 2161
2163 2162 soff = (ulong_t)(addr - dhp->dh_uvaddr);
2164 2163 while (slen != 0) {
2165 2164 mlen = MIN(slen, (dhp->dh_len - soff));
2166 2165 hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr,
2167 2166 dhp->dh_len, HAT_UNLOAD);
2168 2167 dhp = dhp->dh_next;
2169 2168 ASSERT(slen >= mlen);
2170 2169 slen -= mlen;
2171 2170 soff = 0;
2172 2171 }
2173 2172 return (0);
2174 2173 }
2175 2174 }
2176 2175
2177 2176 if ((prot & ~PROT_USER) == PROT_NONE) {
2178 2177 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
2179 2178 } else {
2180 2179 /*
2181 2180 * RFE: the segment should keep track of all attributes
2182 2181 * allowing us to remove the deprecated hat_chgprot
2183 2182 * and use hat_chgattr.
2184 2183 */
2185 2184 hat_chgprot(seg->s_as->a_hat, addr, len, prot);
2186 2185 }
2187 2186
2188 2187 return (0);
2189 2188 }
2190 2189
2191 2190 static int
2192 2191 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2193 2192 {
2194 2193 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2195 2194 struct vpage *vp, *evp;
2196 2195
2197 2196 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2198 2197 "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2199 2198 (void *)seg, (void *)addr, len, prot);
2200 2199 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2201 2200
2202 2201 /*
2203 2202 * If the segment protections can be used, simply check against them
2204 2203 */
2205 2204 rw_enter(&sdp->lock, RW_READER);
2206 2205 if (sdp->pageprot == 0) {
2207 2206 register int err;
2208 2207
2209 2208 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2210 2209 rw_exit(&sdp->lock);
2211 2210 return (err);
2212 2211 }
2213 2212
2214 2213 /*
2215 2214 * Have to check down to the vpage level
2216 2215 */
2217 2216 evp = &sdp->vpage[seg_page(seg, addr + len)];
2218 2217 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2219 2218 if ((VPP_PROT(vp) & prot) != prot) {
2220 2219 rw_exit(&sdp->lock);
2221 2220 return (EACCES);
2222 2221 }
2223 2222 }
2224 2223 rw_exit(&sdp->lock);
2225 2224 return (0);
2226 2225 }
2227 2226
2228 2227 static int
2229 2228 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2230 2229 {
2231 2230 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2232 2231 size_t pgno;
2233 2232
2234 2233 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2235 2234 "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2236 2235 (void *)seg, (void *)addr, len, (void *)protv);
2237 2236 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2238 2237
2239 2238 pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2240 2239 if (pgno != 0) {
2241 2240 rw_enter(&sdp->lock, RW_READER);
2242 2241 if (sdp->pageprot == 0) {
2243 2242 do {
2244 2243 protv[--pgno] = sdp->prot;
2245 2244 } while (pgno != 0);
2246 2245 } else {
2247 2246 size_t pgoff = seg_page(seg, addr);
2248 2247
2249 2248 do {
2250 2249 pgno--;
2251 2250 protv[pgno] =
2252 2251 VPP_PROT(&sdp->vpage[pgno + pgoff]);
2253 2252 } while (pgno != 0);
2254 2253 }
2255 2254 rw_exit(&sdp->lock);
2256 2255 }
2257 2256 return (0);
2258 2257 }
2259 2258
2260 2259 static u_offset_t
2261 2260 segdev_getoffset(register struct seg *seg, caddr_t addr)
2262 2261 {
2263 2262 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2264 2263
2265 2264 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2266 2265 "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2267 2266
2268 2267 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2269 2268
2270 2269 return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2271 2270 }
2272 2271
2273 2272 /*ARGSUSED*/
2274 2273 static int
2275 2274 segdev_gettype(register struct seg *seg, caddr_t addr)
2276 2275 {
2277 2276 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2278 2277
2279 2278 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2280 2279 "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2281 2280
2282 2281 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2283 2282
2284 2283 return (sdp->type);
2285 2284 }
2286 2285
2287 2286
2288 2287 /*ARGSUSED*/
2289 2288 static int
2290 2289 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2291 2290 {
2292 2291 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2293 2292
2294 2293 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2295 2294 "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2296 2295
2297 2296 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2298 2297
2299 2298 /*
2300 2299 * Note that this vp is the common_vp of the device, where the
2301 2300 * pages are hung ..
2302 2301 */
2303 2302 *vpp = VTOCVP(sdp->vp);
2304 2303
2305 2304 return (0);
2306 2305 }
2307 2306
2308 2307 static void
2309 2308 segdev_badop(void)
2310 2309 {
2311 2310 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
2312 2311 "segdev_badop:start");
2313 2312 panic("segdev_badop");
2314 2313 /*NOTREACHED*/
2315 2314 }
2316 2315
2317 2316 /*
2318 2317 * segdev pages are not in the cache, and thus can't really be controlled.
2319 2318 * Hence, syncs are simply always successful.
2320 2319 */
2321 2320 /*ARGSUSED*/
2322 2321 static int
2323 2322 segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2324 2323 {
2325 2324 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2326 2325
2327 2326 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2328 2327
2329 2328 return (0);
2330 2329 }
2331 2330
2332 2331 /*
2333 2332 * segdev pages are always "in core".
2334 2333 */
2335 2334 /*ARGSUSED*/
2336 2335 static size_t
2337 2336 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2338 2337 {
2339 2338 size_t v = 0;
2340 2339
2341 2340 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2342 2341
2343 2342 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2344 2343
2345 2344 for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2346 2345 v += PAGESIZE)
2347 2346 *vec++ = 1;
2348 2347 return (v);
2349 2348 }
2350 2349
2351 2350 /*
2352 2351 * segdev pages are not in the cache, and thus can't really be controlled.
2353 2352 * Hence, locks are simply always successful.
2354 2353 */
2355 2354 /*ARGSUSED*/
2356 2355 static int
2357 2356 segdev_lockop(struct seg *seg, caddr_t addr,
2358 2357 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2359 2358 {
2360 2359 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2361 2360
2362 2361 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2363 2362
2364 2363 return (0);
2365 2364 }
2366 2365
2367 2366 /*
2368 2367 * segdev pages are not in the cache, and thus can't really be controlled.
2369 2368 * Hence, advise is simply always successful.
2370 2369 */
2371 2370 /*ARGSUSED*/
2372 2371 static int
2373 2372 segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2374 2373 {
2375 2374 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2376 2375
2377 2376 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2378 2377
2379 2378 return (0);
2380 2379 }
2381 2380
2382 2381 /*
2383 2382 * segdev pages are not dumped, so we just return
2384 2383 */
2385 2384 /*ARGSUSED*/
2386 2385 static void
2387 2386 segdev_dump(struct seg *seg)
2388 2387 {}
2389 2388
2390 2389 /*
2391 2390 * ddi_segmap_setup: Used by drivers that wish to specify mapping
2392 2391 * attributes for a segment. Called from a driver's segmap(9E)
2393 2392 * routine.
2394 2393 */
2395 2394 /*ARGSUSED*/
2396 2395 int
2397 2396 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2398 2397 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2399 2398 ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2400 2399 {
2401 2400 struct segdev_crargs dev_a;
2402 2401 int (*mapfunc)(dev_t dev, off_t off, int prot);
2403 2402 uint_t hat_attr;
2404 2403 pfn_t pfn;
2405 2404 int error, i;
2406 2405
2407 2406 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2408 2407 "ddi_segmap_setup:start");
2409 2408
2410 2409 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2411 2410 return (ENODEV);
2412 2411
2413 2412 /*
2414 2413 * Character devices that support the d_mmap
2415 2414 * interface can only be mmap'ed shared.
2416 2415 */
2417 2416 if ((flags & MAP_TYPE) != MAP_SHARED)
2418 2417 return (EINVAL);
2419 2418
2420 2419 /*
2421 2420 * Check that this region is indeed mappable on this platform.
2422 2421 * Use the mapping function.
2423 2422 */
2424 2423 if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
2425 2424 return (ENXIO);
2426 2425
2427 2426 /*
2428 2427 * Check to ensure that the entire range is
2429 2428 * legal and we are not trying to map in
2430 2429 * more than the device will let us.
2431 2430 */
2432 2431 for (i = 0; i < len; i += PAGESIZE) {
2433 2432 if (i == 0) {
2434 2433 /*
2435 2434 * Save the pfn at offset here. This pfn will be
2436 2435 * used later to get user address.
2437 2436 */
2438 2437 if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
2439 2438 maxprot)) == PFN_INVALID)
2440 2439 return (ENXIO);
2441 2440 } else {
2442 2441 if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
2443 2442 PFN_INVALID)
2444 2443 return (ENXIO);
2445 2444 }
2446 2445 }
2447 2446
2448 2447 as_rangelock(as);
2449 2448 /* Pick an address w/o worrying about any vac alignment constraints. */
2450 2449 error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags);
2451 2450 if (error != 0) {
2452 2451 as_rangeunlock(as);
2453 2452 return (error);
2454 2453 }
2455 2454
2456 2455 dev_a.mapfunc = mapfunc;
2457 2456 dev_a.dev = dev;
2458 2457 dev_a.offset = (offset_t)offset;
2459 2458 dev_a.type = flags & MAP_TYPE;
2460 2459 dev_a.prot = (uchar_t)prot;
2461 2460 dev_a.maxprot = (uchar_t)maxprot;
2462 2461 dev_a.hat_attr = hat_attr;
2463 2462 dev_a.hat_flags = 0;
2464 2463 dev_a.devmap_data = NULL;
2465 2464
2466 2465 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2467 2466 as_rangeunlock(as);
2468 2467 return (error);
2469 2468
2470 2469 }
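
/*
 * Illustrative sketch (review annotation, not part of this file, kept
 * under #if 0): how a driver's segmap(9E) entry point might call
 * ddi_segmap_setup() above. The xx_ names, the access attributes and
 * the register number 0 are hypothetical; a real driver supplies its
 * own.
 */
#if 0
static ddi_device_acc_attr_t xx_acc_attr = {
	DDI_DEVICE_ATTR_V0,	/* devacc_attr_version */
	DDI_STRUCTURE_LE_ACC,	/* devacc_attr_endian_flags */
	DDI_STRICTORDER_ACC	/* devacc_attr_dataorder */
};

static int
xx_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred)
{
	/* map register set 0 with the device access attributes above */
	return (ddi_segmap_setup(dev, off, as, addrp, len, prot,
	    maxprot, flags, cred, &xx_acc_attr, 0));
}
#endif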
2471 2470
2472 2471 /*ARGSUSED*/
2473 2472 static int
2474 2473 segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
2475 2474 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2476 2475 {
2477 2476 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
2478 2477 "segdev_pagelock:start");
2479 2478 return (ENOTSUP);
2480 2479 }
2481 2480
2482 2481 /*ARGSUSED*/
2483 2482 static int
2484 2483 segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
2485 2484 uint_t szc)
2486 2485 {
2487 2486 return (ENOTSUP);
2488 2487 }
2489 2488
2490 2489 /*
2491 2490 * devmap_device: Used by the devmap framework to establish a mapping.
2492 2491 * Called by devmap_setup(9F) during map setup time.
2493 2492 */
2494 2493 /*ARGSUSED*/
2495 2494 static int
2496 2495 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2497 2496 offset_t off, size_t len, uint_t flags)
2498 2497 {
2499 2498 devmap_handle_t *rdhp, *maxdhp;
2500 2499 struct segdev_crargs dev_a;
2501 2500 int err;
2502 2501 uint_t maxprot = PROT_ALL;
2503 2502 offset_t offset = 0;
2504 2503 pfn_t pfn;
2505 2504 struct devmap_pmem_cookie *pcp;
2506 2505
2507 2506 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
2508 2507 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
2509 2508 (void *)dhp, (void *)addr, off, len);
2510 2509
2511 2510 DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n",
2512 2511 (void *)dhp, (void *)addr, off, len));
2513 2512
2514 2513 as_rangelock(as);
2515 2514 if ((flags & MAP_FIXED) == 0) {
2516 2515 offset_t aligned_off;
2517 2516
2518 2517 rdhp = maxdhp = dhp;
2519 2518 while (rdhp != NULL) {
2520 2519 maxdhp = (maxdhp->dh_len > rdhp->dh_len) ?
2521 2520 maxdhp : rdhp;
2522 2521 rdhp = rdhp->dh_next;
2523 2522 maxprot |= dhp->dh_maxprot;
2524 2523 }
2525 2524 offset = maxdhp->dh_uoff - dhp->dh_uoff;
2526 2525
2527 2526 /*
2528 2527 * Use the dhp that has the
2529 2528 * largest len to get user address.
2530 2529 */
2531 2530 /*
2532 2531 * If MAPPING_INVALID, cannot use dh_pfn/dh_cvaddr,
2533 2532 * use 0 which is as good as any other.
2534 2533 */
2535 2534 if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) {
2536 2535 aligned_off = (offset_t)0;
2537 2536 } else if (dhp_is_devmem(maxdhp)) {
2538 2537 aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset;
2539 2538 } else if (dhp_is_pmem(maxdhp)) {
2540 2539 pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie;
2541 2540 pfn = page_pptonum(
2542 2541 pcp->dp_pparray[btop(maxdhp->dh_roff)]);
2543 2542 aligned_off = (offset_t)ptob(pfn) - offset;
2544 2543 } else {
2545 2544 aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr -
2546 2545 offset;
2547 2546 }
2548 2547
2549 2548 /*
2550 2549 * Pick an address aligned to dh_cookie.
2551 2550 * For kernel memory/user memory, the cookie is cvaddr;
2552 2551 * for device memory, the cookie is the physical address.
2553 2552 */
2554 2553 map_addr(addr, len, aligned_off, 1, flags);
2555 2554 if (*addr == NULL) {
2556 2555 as_rangeunlock(as);
2557 2556 return (ENOMEM);
2558 2557 }
2559 2558 } else {
2560 2559 /*
2561 2560 * User-specified address; blow away any previous mappings.
2562 2561 */
2563 2562 (void) as_unmap(as, *addr, len);
2564 2563 }
2565 2564
2566 2565 dev_a.mapfunc = NULL;
2567 2566 dev_a.dev = dhp->dh_dev;
2568 2567 dev_a.type = flags & MAP_TYPE;
2569 2568 dev_a.offset = off;
2570 2569 /*
2571 2570 * sdp->maxprot has the least restrictive protection of all dhps.
2572 2571 */
2573 2572 dev_a.maxprot = maxprot;
2574 2573 dev_a.prot = dhp->dh_prot;
2575 2574 /*
2576 2575 * devmap uses dhp->dh_hat_attr for hat attributes.
2577 2576 */
2578 2577 dev_a.hat_flags = 0;
2579 2578 dev_a.hat_attr = 0;
2580 2579 dev_a.devmap_data = (void *)dhp;
2581 2580
2582 2581 err = as_map(as, *addr, len, segdev_create, &dev_a);
2583 2582 as_rangeunlock(as);
2584 2583 return (err);
2585 2584 }
2586 2585
2587 2586 int
2588 2587 devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
2589 2588 uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t,
2590 2589 size_t, uint_t, uint_t))
2591 2590 {
2592 2591 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2593 2592 struct devmap_ctx *devctx;
2594 2593 int do_timeout = 0;
2595 2594 int ret;
2596 2595
2597 2596 #ifdef lint
2598 2597 pvtp = pvtp;
2599 2598 #endif
2600 2599
2601 2600 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT,
2602 2601 "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx",
2603 2602 (void *)dhp, off, len);
2604 2603 DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n",
2605 2604 (void *)dhp, off, len));
2606 2605
2607 2606 if (ctxmgt == NULL)
2608 2607 return (FC_HWERR);
2609 2608
2610 2609 devctx = dhp->dh_ctx;
2611 2610
2612 2611 /*
2613 2612 * If we are on an MP system with more than one cpu running
2614 2613 * and if a thread on some CPU already has the context, wait
2615 2614 * for it to finish if there is a hysteresis timeout.
2616 2615 *
2617 2616 * We call cv_wait() instead of cv_wait_sig() because
2618 2617 * it does not matter much if it returned due to a signal
2619 2618 * or due to a cv_signal() or cv_broadcast(). In either event
2620 2619 * we need to complete the mapping otherwise the processes
2621 2620 * will die with a SEGV.
2622 2621 */
2623 2622 if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) {
2624 2623 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1,
2625 2624 "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p",
2626 2625 devctx, dhp);
2627 2626 do_timeout = 1;
2628 2627 mutex_enter(&devctx->lock);
2629 2628 while (devctx->oncpu)
2630 2629 cv_wait(&devctx->cv, &devctx->lock);
2631 2630 devctx->oncpu = 1;
2632 2631 mutex_exit(&devctx->lock);
2633 2632 }
2634 2633
2635 2634 /*
2636 2635 * Call the contextmgt callback so that the driver can handle
2637 2636 * the fault.
2638 2637 */
2639 2638 ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw);
2640 2639
2641 2640 /*
2642 2641 * If devmap_access() returned -1, then there was a hardware
2643 2642 * error so we need to convert the return value to something
2644 2643 * that trap() will understand. Otherwise, the return value
2645 2644 * is already a fault code generated by devmap_unload()
2646 2645 * or devmap_load().
2647 2646 */
2648 2647 if (ret) {
2649 2648 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2,
2650 2649 "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p",
2651 2650 ret, dhp, devctx);
2652 2651 DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n",
2653 2652 ret, (void *)dhp));
2654 2653 if (devctx->oncpu) {
2655 2654 mutex_enter(&devctx->lock);
2656 2655 devctx->oncpu = 0;
2657 2656 cv_signal(&devctx->cv);
2658 2657 mutex_exit(&devctx->lock);
2659 2658 }
2660 2659 return (FC_HWERR);
2661 2660 }
2662 2661
2663 2662 /*
2664 2663 * Setup the timeout if we need to
2665 2664 */
2666 2665 if (do_timeout) {
2667 2666 mutex_enter(&devctx->lock);
2668 2667 if (dhp->dh_timeout_length > 0) {
2669 2668 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3,
2670 2669 "devmap_do_ctxmgt:timeout set");
2671 2670 devctx->timeout = timeout(devmap_ctxto,
2672 2671 devctx, dhp->dh_timeout_length);
2673 2672 } else {
2674 2673 /*
2675 2674 * We don't want to wait so set oncpu to
2676 2675 * 0 and wake up anyone waiting.
2677 2676 */
2678 2677 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
2679 2678 "devmap_do_ctxmgt:timeout not set");
2680 2679 devctx->oncpu = 0;
2681 2680 cv_signal(&devctx->cv);
2682 2681 }
2683 2682 mutex_exit(&devctx->lock);
2684 2683 }
2685 2684
2686 2685 return (DDI_SUCCESS);
2687 2686 }
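
/*
 * Illustrative sketch (review annotation, not part of this file, kept
 * under #if 0): devmap_do_ctxmgt() is intended to be called from a
 * driver's devmap_access callback, passing the driver's own
 * context-management routine. The xx_ names are hypothetical; a sketch
 * of xx_ctxmgt itself follows devmap_load() below.
 */
#if 0
static int xx_ctxmgt(devmap_cookie_t, void *, offset_t, size_t,
    uint_t, uint_t);

static int
xx_devmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
    size_t len, uint_t type, uint_t rw)
{
	/* serialize on the mapping context and run xx_ctxmgt */
	return (devmap_do_ctxmgt(dhp, pvtp, off, len, type, rw,
	    xx_ctxmgt));
}
#endif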
2688 2687
2689 2688 /*
2690 2689 * end of mapping
2691 2690 * poff fault_offset |
2692 2691 * base | | |
2693 2692 * | | | |
2694 2693 * V V V V
2695 2694 * +-----------+---------------+-------+---------+-------+
2696 2695 * ^ ^ ^ ^
2697 2696 * |<--- offset--->|<-len->| |
2698 2697 * |<--- dh_len(size of mapping) --->|
2699 2698 * |<-- pg -->|
2700 2699 * -->|rlen|<--
2701 2700 */
2702 2701 static ulong_t
2703 2702 devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
2704 2703 ulong_t *opfn, ulong_t *pagesize)
2705 2704 {
2706 2705 register int level;
2707 2706 ulong_t pg;
2708 2707 ulong_t poff;
2709 2708 ulong_t base;
2710 2709 caddr_t uvaddr;
2711 2710 long rlen;
2712 2711
2713 2712 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
2714 2713 "devmap_roundup:start dhp=%p off=%lx len=%lx",
2715 2714 (void *)dhp, offset, len);
2716 2715 DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
2717 2716 (void *)dhp, offset, len));
2718 2717
2719 2718 /*
2720 2719 * get the max. pagesize that is aligned within the range
2721 2720 * <dh_pfn, dh_pfn+offset>.
2722 2721 *
2723 2722 * The calculations below use the physical address to determine
2724 2723 * the page size to use. The same calculations can use the
2725 2724 * virtual address to determine the page size.
2726 2725 */
2727 2726 base = (ulong_t)ptob(dhp->dh_pfn);
2728 2727 for (level = dhp->dh_mmulevel; level >= 0; level--) {
2729 2728 pg = page_get_pagesize(level);
2730 2729 poff = ((base + offset) & ~(pg - 1));
2731 2730 uvaddr = dhp->dh_uvaddr + (poff - base);
2732 2731 if ((poff >= base) &&
2733 2732 ((poff + pg) <= (base + dhp->dh_len)) &&
2734 2733 VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
2735 2734 break;
2736 2735 }
2737 2736
2738 2737 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
2739 2738 "devmap_roundup: base=%lx poff=%lx dhp=%p",
2740 2739 base, poff, dhp);
2741 2740 DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
2742 2741 base, poff, dhp->dh_pfn));
2743 2742
2744 2743 ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
2745 2744 ASSERT(level >= 0);
2746 2745
2747 2746 *pagesize = pg;
2748 2747 *opfn = dhp->dh_pfn + btop(poff - base);
2749 2748
2750 2749 rlen = len + offset - (poff - base + pg);
2751 2750
2752 2751 ASSERT(rlen < (long)len);
2753 2752
2754 2753 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
2755 2754 "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
2756 2755 (void *)dhp, level, rlen, pagesize, opfn);
2757 2756 DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
2758 2757 "level %x rlen %lx psize %lx opfn %lx\n",
2759 2758 (void *)dhp, level, rlen, *pagesize, *opfn));
2760 2759
2761 2760 return ((ulong_t)((rlen > 0) ? rlen : 0));
2762 2761 }
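
/*
 * Worked example (review annotation, hypothetical numbers): assume
 * base = ptob(dh_pfn) = 0x40000000, dh_len = 0x400000, offset = 0x3000,
 * len = 0x20000, a 4MB mmu level and a 4MB-aligned dh_uvaddr. Then
 * poff = (base + offset) & ~(4MB - 1) = 0x40000000, which satisfies
 * poff >= base, poff + pg <= base + dh_len and VA_PA_ALIGNED, so the
 * routine returns *pagesize = 4MB, *opfn = dh_pfn, and
 * rlen = len + offset - (poff - base + pg) = 0x23000 - 0x400000 < 0,
 * clamped to 0: a single 4MB translation covers the whole fault.
 */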
2763 2762
2764 2763 /*
2765 2764 * find the dhp that contains addr.
2766 2765 */
2767 2766 static devmap_handle_t *
2768 2767 devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
2769 2768 {
2770 2769 devmap_handle_t *dhp;
2771 2770
2772 2771 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
2773 2772 "devmap_find_handle:start");
2774 2773
2775 2774 dhp = dhp_head;
2776 2775 while (dhp) {
2777 2776 if (addr >= dhp->dh_uvaddr &&
2778 2777 addr < (dhp->dh_uvaddr + dhp->dh_len))
2779 2778 return (dhp);
2780 2779 dhp = dhp->dh_next;
2781 2780 }
2782 2781
2783 2782 return ((devmap_handle_t *)NULL);
2784 2783 }
2785 2784
2786 2785 /*
2787 2786 * devmap_unload:
2788 2787 * Marks a segdev segment, or its pages if offset->offset+len
2789 2788 * is not the entire segment, as intercept, and unloads the
2790 2789 * pages in the range offset -> offset+len.
2791 2790 */
2792 2791 int
2793 2792 devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len)
2794 2793 {
2795 2794 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2796 2795 caddr_t addr;
2797 2796 ulong_t size;
2798 2797 ssize_t soff;
2799 2798
2800 2799 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD,
2801 2800 "devmap_unload:start dhp=%p offset=%llx len=%lx",
2802 2801 (void *)dhp, offset, len);
2803 2802 DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n",
2804 2803 (void *)dhp, offset, len));
2805 2804
2806 2805 soff = (ssize_t)(offset - dhp->dh_uoff);
2807 2806 soff = round_down_p2(soff, PAGESIZE);
2808 2807 if (soff < 0 || soff >= dhp->dh_len)
2809 2808 return (FC_MAKE_ERR(EINVAL));
2810 2809
2811 2810 /*
2812 2811 * Address and size must be page aligned. Len is set to the
2813 2812 * number of bytes in the number of pages that are required to
2814 2813 * support len. Offset is set to the byte offset of the first byte
2815 2814 * of the page that contains offset.
2816 2815 */
2817 2816 len = round_up_p2(len, PAGESIZE);
2818 2817
2819 2818 /*
2820 2819 * If len == 0, then calculate the size by getting
2821 2820 * the number of bytes from offset to the end of the segment.
2822 2821 */
2823 2822 if (len == 0)
2824 2823 size = dhp->dh_len - soff;
2825 2824 else {
2826 2825 size = len;
2827 2826 if ((soff + size) > dhp->dh_len)
2828 2827 return (FC_MAKE_ERR(EINVAL));
2829 2828 }
2830 2829
2831 2830 /*
2832 2831 * The address is offset bytes from the base address of
2833 2832 * the dhp.
2834 2833 */
2835 2834 addr = (caddr_t)(soff + dhp->dh_uvaddr);
2836 2835
2837 2836 /*
2838 2837 * If large page size was used in hat_devload(),
2839 2838 * the same page size must be used in hat_unload().
2840 2839 */
2841 2840 if (dhp->dh_flags & DEVMAP_FLAG_LARGE) {
2842 2841 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
2843 2842 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
2844 2843 } else {
2845 2844 hat_unload(dhp->dh_seg->s_as->a_hat, addr, size,
2846 2845 HAT_UNLOAD|HAT_UNLOAD_OTHER);
2847 2846 }
2848 2847
2849 2848 return (0);
2850 2849 }
2851 2850
2852 2851 /*
2853 2852 * calculates the optimal page size that will be used for hat_devload().
2854 2853 */
2855 2854 static void
2856 2855 devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr,
2857 2856 size_t *llen, caddr_t *laddr)
2858 2857 {
2859 2858 ulong_t off;
2860 2859 ulong_t pfn;
2861 2860 ulong_t pgsize;
2862 2861 uint_t first = 1;
2863 2862
2864 2863 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE,
2865 2864 "devmap_get_large_pgsize:start");
2866 2865
2867 2866 /*
2868 2867 * RFE - code only supports large page mappings for devmem.
2869 2868 * This code could be changed in the future if we want to support
2870 2869 * large page mappings for kernel exported memory.
2871 2870 */
2872 2871 ASSERT(dhp_is_devmem(dhp));
2873 2872 ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID));
2874 2873
2875 2874 *llen = 0;
2876 2875 off = (ulong_t)(addr - dhp->dh_uvaddr);
2877 2876 while ((long)len > 0) {
2878 2877 /*
2879 2878 * get the optimal pfn to minimize address translations.
2880 2879 * devmap_roundup() returns residue bytes for next round
2881 2880 * calculations.
2882 2881 */
2883 2882 len = devmap_roundup(dhp, off, len, &pfn, &pgsize);
2884 2883
2885 2884 if (first) {
2886 2885 *laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn);
2887 2886 first = 0;
2888 2887 }
2889 2888
2890 2889 *llen += pgsize;
2891 2890 off = ptob(pfn - dhp->dh_pfn) + pgsize;
2892 2891 }
2893 2892 /* Large page mapping len/addr cover more range than original fault */
2894 2893 ASSERT(*llen >= len && *laddr <= addr);
2895 2894 ASSERT((*laddr + *llen) >= (addr + len));
2896 2895 }
2897 2896
2898 2897 /*
2899 2898 * Initialize the devmap_softlock structure.
2900 2899 */
2901 2900 static struct devmap_softlock *
2902 2901 devmap_softlock_init(dev_t dev, ulong_t id)
2903 2902 {
2904 2903 struct devmap_softlock *slock;
2905 2904 struct devmap_softlock *tmp;
2906 2905
2907 2906 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT,
2908 2907 "devmap_softlock_init:start");
2909 2908
2910 2909 tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP);
2911 2910 mutex_enter(&devmap_slock);
2912 2911
2913 2912 for (slock = devmap_slist; slock != NULL; slock = slock->next)
2914 2913 if ((slock->dev == dev) && (slock->id == id))
2915 2914 break;
2916 2915
2917 2916 if (slock == NULL) {
2918 2917 slock = tmp;
2919 2918 slock->dev = dev;
2920 2919 slock->id = id;
2921 2920 mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL);
2922 2921 cv_init(&slock->cv, NULL, CV_DEFAULT, NULL);
2923 2922 slock->next = devmap_slist;
2924 2923 devmap_slist = slock;
2925 2924 } else
2926 2925 kmem_free(tmp, sizeof (struct devmap_softlock));
2927 2926
2928 2927 mutex_enter(&slock->lock);
2929 2928 slock->refcnt++;
2930 2929 mutex_exit(&slock->lock);
2931 2930 mutex_exit(&devmap_slock);
2932 2931
2933 2932 return (slock);
2934 2933 }
2935 2934
2936 2935 /*
2937 2936 * Wake up processes that sleep on softlocked.
2938 2937 * Free dh_softlock if refcnt is 0.
2939 2938 */
2940 2939 static void
2941 2940 devmap_softlock_rele(devmap_handle_t *dhp)
2942 2941 {
2943 2942 struct devmap_softlock *slock = dhp->dh_softlock;
2944 2943 struct devmap_softlock *tmp;
2945 2944 struct devmap_softlock *parent;
2946 2945
2947 2946 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE,
2948 2947 "devmap_softlock_rele:start");
2949 2948
2950 2949 mutex_enter(&devmap_slock);
2951 2950 mutex_enter(&slock->lock);
2952 2951
2953 2952 ASSERT(slock->refcnt > 0);
2954 2953
2955 2954 slock->refcnt--;
2956 2955
2957 2956 /*
2958 2957 * If no one is using the device, free up the slock data.
2959 2958 */
2960 2959 if (slock->refcnt == 0) {
2961 2960 slock->softlocked = 0;
2962 2961 cv_signal(&slock->cv);
2963 2962
2964 2963 if (devmap_slist == slock)
2965 2964 devmap_slist = slock->next;
2966 2965 else {
2967 2966 parent = devmap_slist;
2968 2967 for (tmp = devmap_slist->next; tmp != NULL;
2969 2968 tmp = tmp->next) {
2970 2969 if (tmp == slock) {
2971 2970 parent->next = tmp->next;
2972 2971 break;
2973 2972 }
2974 2973 parent = tmp;
2975 2974 }
2976 2975 }
2977 2976 mutex_exit(&slock->lock);
2978 2977 mutex_destroy(&slock->lock);
2979 2978 cv_destroy(&slock->cv);
2980 2979 kmem_free(slock, sizeof (struct devmap_softlock));
2981 2980 } else
2982 2981 mutex_exit(&slock->lock);
2983 2982
2984 2983 mutex_exit(&devmap_slock);
2985 2984 }
2986 2985
2987 2986 /*
2988 2987 * Wake up processes that sleep on dh_ctx->locked.
2989 2988 * Free dh_ctx if refcnt is 0.
2990 2989 */
2991 2990 static void
2992 2991 devmap_ctx_rele(devmap_handle_t *dhp)
2993 2992 {
2994 2993 struct devmap_ctx *devctx = dhp->dh_ctx;
2995 2994 struct devmap_ctx *tmp;
2996 2995 struct devmap_ctx *parent;
2997 2996 timeout_id_t tid;
2998 2997
2999 2998 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE,
3000 2999 "devmap_ctx_rele:start");
3001 3000
3002 3001 mutex_enter(&devmapctx_lock);
3003 3002 mutex_enter(&devctx->lock);
3004 3003
3005 3004 ASSERT(devctx->refcnt > 0);
3006 3005
3007 3006 devctx->refcnt--;
3008 3007
3009 3008 /*
3010 3009 * If no one is using the device, free up the devctx data.
3011 3010 */
3012 3011 if (devctx->refcnt == 0) {
3013 3012 /*
3014 3013 * Untimeout any threads using this mapping as they are about
3015 3014 * to go away.
3016 3015 */
3017 3016 if (devctx->timeout != 0) {
3018 3017 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1,
3019 3018 "devmap_ctx_rele:untimeout ctx->timeout");
3020 3019
3021 3020 tid = devctx->timeout;
3022 3021 mutex_exit(&devctx->lock);
3023 3022 (void) untimeout(tid);
3024 3023 mutex_enter(&devctx->lock);
3025 3024 }
3026 3025
3027 3026 devctx->oncpu = 0;
3028 3027 cv_signal(&devctx->cv);
3029 3028
3030 3029 if (devmapctx_list == devctx)
3031 3030 devmapctx_list = devctx->next;
3032 3031 else {
3033 3032 parent = devmapctx_list;
3034 3033 for (tmp = devmapctx_list->next; tmp != NULL;
3035 3034 tmp = tmp->next) {
3036 3035 if (tmp == devctx) {
3037 3036 parent->next = tmp->next;
3038 3037 break;
3039 3038 }
3040 3039 parent = tmp;
3041 3040 }
3042 3041 }
3043 3042 mutex_exit(&devctx->lock);
3044 3043 mutex_destroy(&devctx->lock);
3045 3044 cv_destroy(&devctx->cv);
3046 3045 kmem_free(devctx, sizeof (struct devmap_ctx));
3047 3046 } else
3048 3047 mutex_exit(&devctx->lock);
3049 3048
3050 3049 mutex_exit(&devmapctx_lock);
3051 3050 }
3052 3051
3053 3052 /*
3054 3053 * devmap_load:
3055 3054 * Marks a segdev segment, or its pages if offset->offset+len
3056 3055 * is not the entire segment, as nointercept, and faults in
3057 3056 * the pages in the range offset -> offset+len.
3058 3057 */
3059 3058 int
3060 3059 devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type,
3061 3060 uint_t rw)
3062 3061 {
3063 3062 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3064 3063 struct as *asp = dhp->dh_seg->s_as;
3065 3064 caddr_t addr;
3066 3065 ulong_t size;
3067 3066 ssize_t soff; /* offset from the beginning of the segment */
3068 3067 int rc;
3069 3068
3070 3069 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
3071 3070 "devmap_load:start dhp=%p offset=%llx len=%lx",
3072 3071 (void *)dhp, offset, len);
3073 3072
3074 3073 DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3075 3074 (void *)dhp, offset, len));
3076 3075
3077 3076 /*
3078 3077 * The hat layer only supports devload to the context of a process
3079 3078 * whose as lock is held. Verify that here and return an error if a
3080 3079 * driver inadvertently calls devmap_load on the wrong devmap handle.
3081 3080 */
3082 3081 if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock))
3083 3082 return (FC_MAKE_ERR(EINVAL));
3084 3083
3085 3084 soff = (ssize_t)(offset - dhp->dh_uoff);
3086 3085 soff = round_down_p2(soff, PAGESIZE);
3087 3086 if (soff < 0 || soff >= dhp->dh_len)
3088 3087 return (FC_MAKE_ERR(EINVAL));
3089 3088
3090 3089 /*
3091 3090 * Address and size must be page aligned. Len is set to the
3092 3091 * number of bytes in the pages that are required to
3093 3092 * cover len. Offset is set to the byte offset of the first byte
3094 3093 * of the page that contains offset.
3095 3094 */
3096 3095 len = round_up_p2(len, PAGESIZE);
3097 3096
3098 3097 /*
3099 3098 * If len == 0, then calculate the size by getting
3100 3099 * the number of bytes from offset to the end of the segment.
3101 3100 */
3102 3101 if (len == 0)
3103 3102 size = dhp->dh_len - soff;
3104 3103 else {
3105 3104 size = len;
3106 3105 if ((soff + size) > dhp->dh_len)
3107 3106 return (FC_MAKE_ERR(EINVAL));
3108 3107 }
3109 3108
3110 3109 /*
3111 3110 * The address is offset bytes from the base address of
3112 3111 * the segment.
3113 3112 */
3114 3113 addr = (caddr_t)(soff + dhp->dh_uvaddr);
3115 3114
3116 3115 HOLD_DHP_LOCK(dhp);
3117 3116 rc = segdev_faultpages(asp->a_hat,
3118 3117 dhp->dh_seg, addr, size, type, rw, dhp);
3119 3118 RELE_DHP_LOCK(dhp);
3120 3119 return (rc);
3121 3120 }
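
/*
 * Illustrative sketch (review annotation, not part of this file, kept
 * under #if 0): a context-management routine of the kind passed to
 * devmap_do_ctxmgt() earlier, for a device that supports only one
 * resident mapping context at a time. It unloads the previously
 * resident context with devmap_unload() and loads the faulting one
 * with devmap_load() above; returning -1 signals a hardware error to
 * devmap_do_ctxmgt(). The xx_ names and the single-cookie bookkeeping
 * are hypothetical.
 */
#if 0
static devmap_cookie_t xx_cur_dhp;	/* context owning the device */

static int
xx_ctxmgt(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
    uint_t type, uint_t rw)
{
	/* evict the translations of the previously resident context */
	if (xx_cur_dhp != NULL && xx_cur_dhp != dhp) {
		if (devmap_unload(xx_cur_dhp, off, len) != 0)
			return (-1);
	}
	xx_cur_dhp = dhp;

	/* fault in the current context; returns 0 or a fault code */
	return (devmap_load(dhp, off, len, type, rw));
}
#endif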
3122 3121
3123 3122 int
3124 3123 devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp,
3125 3124 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3126 3125 {
3127 3126 register devmap_handle_t *dhp;
3128 3127 int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t,
3129 3128 size_t *, uint_t);
3130 3129 int (*mmap)(dev_t, off_t, int);
3131 3130 struct devmap_callback_ctl *callbackops;
3132 3131 devmap_handle_t *dhp_head = NULL;
3133 3132 devmap_handle_t *dhp_prev = NULL;
3134 3133 devmap_handle_t *dhp_curr;
3135 3134 caddr_t addr;
3136 3135 int map_flag;
3137 3136 int ret;
3138 3137 ulong_t total_len;
3139 3138 size_t map_len;
3140 3139 size_t resid_len = len;
3141 3140 offset_t map_off = off;
3142 3141 struct devmap_softlock *slock = NULL;
3143 3142
3144 3143 #ifdef lint
3145 3144 cred = cred;
3146 3145 #endif
3147 3146
3148 3147 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP,
3149 3148 "devmap_setup:start off=%llx len=%lx", off, len);
3150 3149 DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n",
3151 3150 off, len));
3152 3151
3153 3152 devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap;
3154 3153 mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap;
3155 3154
3156 3155 /*
3157 3156 * The driver must provide a devmap(9E) entry point in cb_ops to
3158 3157 * use the devmap framework.
3159 3158 */
3160 3159 if (devmap == NULL || devmap == nulldev || devmap == nodev)
3161 3160 return (EINVAL);
3162 3161
3163 3162 /*
3164 3163 * To protect from an inadvertent entry because the devmap entry point
3165 3164 * is not NULL, return error if D_DEVMAP bit is not set in cb_flag and
3166 3165 * mmap is NULL.
3167 3166 */
3168 3167 map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag;
3169 3168 if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev))
3170 3169 return (EINVAL);
3171 3170
3172 3171 /*
3173 3172 * devmap allows mmap(2) to map multiple registers.
3174 3173 * One devmap_handle is created for each register mapped.
3175 3174 */
3176 3175 for (total_len = 0; total_len < len; total_len += map_len) {
3177 3176 dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP);
3178 3177
3179 3178 if (dhp_prev != NULL)
3180 3179 dhp_prev->dh_next = dhp;
3181 3180 else
3182 3181 dhp_head = dhp;
3183 3182 dhp_prev = dhp;
3184 3183
3185 3184 dhp->dh_prot = prot;
3186 3185 dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot;
3187 3186 dhp->dh_dev = dev;
3188 3187 dhp->dh_timeout_length = CTX_TIMEOUT_VALUE;
3189 3188 dhp->dh_uoff = map_off;
3190 3189
3191 3190 /*
3192 3191 * Get mapping specific info from
3193 3192 * the driver, such as rnumber, roff, len, callbackops,
3194 3193 * accattrp and, if the mapping is for kernel memory,
3195 3194 * ddi_umem_cookie.
3196 3195 */
3197 3196 if ((ret = cdev_devmap(dev, dhp, map_off,
3198 3197 resid_len, &map_len, get_udatamodel())) != 0) {
3199 3198 free_devmap_handle(dhp_head);
3200 3199 return (ENXIO);
3201 3200 }
3202 3201
3203 3202 if (map_len & PAGEOFFSET) {
3204 3203 free_devmap_handle(dhp_head);
3205 3204 return (EINVAL);
3206 3205 }
3207 3206
3208 3207 callbackops = &dhp->dh_callbackops;
3209 3208
3210 3209 if ((callbackops->devmap_access == NULL) ||
3211 3210 (callbackops->devmap_access == nulldev) ||
3212 3211 (callbackops->devmap_access == nodev)) {
3213 3212 /*
3214 3213 * Normally devmap does not support MAP_PRIVATE unless
3215 3214 * the drivers provide a valid devmap_access routine.
3216 3215 */
3217 3216 if ((flags & MAP_PRIVATE) != 0) {
3218 3217 free_devmap_handle(dhp_head);
3219 3218 return (EINVAL);
3220 3219 }
3221 3220 } else {
3222 3221 /*
3223 3222 * Initialize dhp_softlock and dh_ctx if the drivers
3224 3223 * provide devmap_access.
3225 3224 */
3226 3225 dhp->dh_softlock = devmap_softlock_init(dev,
3227 3226 (ulong_t)callbackops->devmap_access);
3228 3227 dhp->dh_ctx = devmap_ctxinit(dev,
3229 3228 (ulong_t)callbackops->devmap_access);
3230 3229
3231 3230 /*
3232 3231 * segdev_fault can only work when all
3233 3232 * dh_softlocks in a multi-dhp mapping
3234 3233 * are the same; see comments in segdev_fault.
3235 3234 * This code keeps track of the first
3236 3235 * dh_softlock allocated in slock and
3237 3236 * compares all later allocations; if they
3238 3237 * differ, it returns an error.
3239 3238 */
3240 3239 if (slock == NULL)
3241 3240 slock = dhp->dh_softlock;
3242 3241 if (slock != dhp->dh_softlock) {
3243 3242 free_devmap_handle(dhp_head);
3244 3243 return (ENOTSUP);
3245 3244 }
3246 3245 }
3247 3246
3248 3247 map_off += map_len;
3249 3248 resid_len -= map_len;
3250 3249 }
3251 3250
3252 3251 	/*
3253 3252 	 * Get the user virtual address and establish the mapping between
3254 3253 	 * uvaddr and the device physical address.
3255 3254 	 */
3256 3255 if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags))
3257 3256 != 0) {
3258 3257 		/*
3259 3258 		 * Free the devmap handles if an error occurred during mapping.
3260 3259 		 */
3261 3260 free_devmap_handle(dhp_head);
3262 3261
3263 3262 return (ret);
3264 3263 }
3265 3264
3266 3265 	/*
3267 3266 	 * Call the driver's devmap_map callback to do further work after the
3268 3267 	 * mapping, such as allocating driver private data for context management.
3269 3268 	 */
3270 3269 dhp = dhp_head;
3271 3270 map_off = off;
3272 3271 addr = *addrp;
3273 3272 while (dhp != NULL) {
3274 3273 callbackops = &dhp->dh_callbackops;
3275 3274 dhp->dh_uvaddr = addr;
3276 3275 dhp_curr = dhp;
3277 3276 if (callbackops->devmap_map != NULL) {
3278 3277 ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp,
3279 3278 dev, flags, map_off,
3280 3279 dhp->dh_len, &dhp->dh_pvtp);
3281 3280 if (ret != 0) {
3282 3281 struct segdev_data *sdp;
3283 3282
3284 3283 /*
3285 3284 * call driver's devmap_unmap entry point
3286 3285 * to free driver resources.
3287 3286 */
3288 3287 dhp = dhp_head;
3289 3288 map_off = off;
3290 3289 while (dhp != dhp_curr) {
3291 3290 callbackops = &dhp->dh_callbackops;
3292 3291 if (callbackops->devmap_unmap != NULL) {
3293 3292 (*callbackops->devmap_unmap)(
3294 3293 dhp, dhp->dh_pvtp,
3295 3294 map_off, dhp->dh_len,
3296 3295 NULL, NULL, NULL, NULL);
3297 3296 }
3298 3297 map_off += dhp->dh_len;
3299 3298 dhp = dhp->dh_next;
3300 3299 }
3301 3300 sdp = dhp_head->dh_seg->s_data;
3302 3301 sdp->devmap_data = NULL;
3303 3302 free_devmap_handle(dhp_head);
3304 3303 return (ENXIO);
3305 3304 }
3306 3305 }
3307 3306 map_off += dhp->dh_len;
3308 3307 addr += dhp->dh_len;
3309 3308 dhp = dhp->dh_next;
3310 3309 }
3311 3310
3312 3311 return (0);
3313 3312 }
3314 3313
3315 3314 int
3316 3315 ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp,
3317 3316 off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3318 3317 {
3319 3318 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP,
3320 3319 "devmap_segmap:start");
3321 3320 return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp,
3322 3321 (size_t)len, prot, maxprot, flags, cred));
3323 3322 }
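
/*
 * Illustrative sketch (not part of this file): a driver that uses the
 * devmap framework typically points cb_segmap at ddi_devmap_segmap()
 * and supplies a devmap(9E) routine; cb_flag must include D_DEVMAP
 * (both are checked by devmap_setup() above).  The xx_ names are
 * hypothetical; xx_devmap is sketched after devmap_devmem_setup()
 * below, and the remaining entries use the standard DDI stubs.
 */
static int xx_devmap(dev_t, devmap_cookie_t, offset_t, size_t,
    size_t *, uint_t);

static struct cb_ops xx_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	xx_devmap,		/* cb_devmap: required by devmap_setup() */
	nodev,			/* cb_mmap */
	ddi_devmap_segmap,	/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_str */
	D_NEW | D_MP | D_DEVMAP, /* cb_flag: D_DEVMAP advertises devmap */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};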
3324 3323
3325 3324 /*
3326 3325  * Called from devmap_devmem_setup/remap to see if we can use large pages
3327 3326  * for this device mapping.
3328 3327  * Also calculates the maximum page size for this mapping.
3329 3328  * This page size will be used in the fault routine for
3330 3329  * optimal page size calculations.
3331 3330  */
3332 3331 static void
3333 3332 devmap_devmem_large_page_setup(devmap_handle_t *dhp)
3334 3333 {
3335 3334 ASSERT(dhp_is_devmem(dhp));
3336 3335 dhp->dh_mmulevel = 0;
3337 3336
3338 3337 	/*
3339 3338 	 * Use a large page size only if:
3340 3339 	 * 1. this is device memory,
3341 3340 	 * 2. the mmu supports multiple page sizes,
3342 3341 	 * 3. the driver did not disallow it,
3343 3342 	 * 4. the dhp length is at least as big as the large pagesize, and
3344 3343 	 * 5. the uvaddr and pfn are large-pagesize aligned.
3345 3344 	 */
3346 3345 if (page_num_pagesizes() > 1 &&
3347 3346 !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) {
3348 3347 ulong_t base;
3349 3348 int level;
3350 3349
3351 3350 base = (ulong_t)ptob(dhp->dh_pfn);
3352 3351 for (level = 1; level < page_num_pagesizes(); level++) {
3353 3352 size_t pgsize = page_get_pagesize(level);
3354 3353 if ((dhp->dh_len < pgsize) ||
3355 3354 (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr,
3356 3355 base, pgsize))) {
3357 3356 break;
3358 3357 }
3359 3358 }
3360 3359 dhp->dh_mmulevel = level - 1;
3361 3360 }
3362 3361 if (dhp->dh_mmulevel > 0) {
3363 3362 dhp->dh_flags |= DEVMAP_FLAG_LARGE;
3364 3363 } else {
3365 3364 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3366 3365 }
3367 3366 }
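
/*
 * Worked example (hypothetical page sizes): with 8K, 64K, 512K and 4M
 * pages available, a mapping whose dh_uvaddr/pfn pair is 64K-aligned
 * but not 512K-aligned makes the loop above break at level 2, so
 * dh_mmulevel becomes 1 (64K) and DEVMAP_FLAG_LARGE is set; if every
 * check passes, dh_mmulevel ends up as 3 (4M).
 */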
3368 3367
3369 3368 /*
3370 3369  * Called by the driver's devmap routine to pass device-specific info to
3371 3370  * the framework. Used for device memory mappings only.
3372 3371  */
3373 3372 int
3374 3373 devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3375 3374 struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff,
3376 3375 size_t len, uint_t maxprot, uint_t flags, ddi_device_acc_attr_t *accattrp)
3377 3376 {
3378 3377 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3379 3378 ddi_acc_handle_t handle;
3380 3379 ddi_map_req_t mr;
3381 3380 ddi_acc_hdl_t *hp;
3382 3381 int err;
3383 3382
3384 3383 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP,
3385 3384 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3386 3385 (void *)dhp, roff, rnumber, (uint_t)len);
3387 3386 DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx "
3388 3387 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3389 3388
3390 3389 /*
3391 3390 	 * Check whether this function has already been called for this dhp.
3392 3391 */
3393 3392 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3394 3393 return (DDI_FAILURE);
3395 3394
3396 3395 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3397 3396 return (DDI_FAILURE);
3398 3397
3399 3398 if (flags & DEVMAP_MAPPING_INVALID) {
3400 3399 /*
3401 3400 * Don't go up the tree to get pfn if the driver specifies
3402 3401 * DEVMAP_MAPPING_INVALID in flags.
3403 3402 *
3404 3403 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3405 3404 * remap permission.
3406 3405 */
3407 3406 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3408 3407 return (DDI_FAILURE);
3409 3408 }
3410 3409 dhp->dh_pfn = PFN_INVALID;
3411 3410 } else {
3412 3411 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3413 3412 if (handle == NULL)
3414 3413 return (DDI_FAILURE);
3415 3414
3416 3415 hp = impl_acc_hdl_get(handle);
3417 3416 hp->ah_vers = VERS_ACCHDL;
3418 3417 hp->ah_dip = dip;
3419 3418 hp->ah_rnumber = rnumber;
3420 3419 hp->ah_offset = roff;
3421 3420 hp->ah_len = len;
3422 3421 if (accattrp != NULL)
3423 3422 hp->ah_acc = *accattrp;
3424 3423
3425 3424 mr.map_op = DDI_MO_MAP_LOCKED;
3426 3425 mr.map_type = DDI_MT_RNUMBER;
3427 3426 mr.map_obj.rnumber = rnumber;
3428 3427 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3429 3428 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3430 3429 mr.map_handlep = hp;
3431 3430 mr.map_vers = DDI_MAP_VERSION;
3432 3431
3433 3432 /*
3434 3433 		 * Go up the device tree to get the pfn.
3435 3434 * The rootnex_map_regspec() routine in nexus drivers has been
3436 3435 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3437 3436 */
3438 3437 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn);
3439 3438 dhp->dh_hat_attr = hp->ah_hat_flags;
3440 3439 impl_acc_hdl_free(handle);
3441 3440
3442 3441 if (err)
3443 3442 return (DDI_FAILURE);
3444 3443 }
3445 3444 /* Should not be using devmem setup for memory pages */
3446 3445 ASSERT(!pf_is_memory(dhp->dh_pfn));
3447 3446
3448 3447 /* Only some of the flags bits are settable by the driver */
3449 3448 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3450 3449 dhp->dh_len = ptob(btopr(len));
3451 3450
3452 3451 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3453 3452 dhp->dh_roff = ptob(btop(roff));
3454 3453
3455 3454 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3456 3455 devmap_devmem_large_page_setup(dhp);
3457 3456 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3458 3457 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3459 3458
3460 3459
3461 3460 if (callbackops != NULL) {
3462 3461 bcopy(callbackops, &dhp->dh_callbackops,
3463 3462 sizeof (struct devmap_callback_ctl));
3464 3463 }
3465 3464
3466 3465 /*
3467 3466 * Initialize dh_lock if we want to do remap.
3468 3467 */
3469 3468 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3470 3469 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3471 3470 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3472 3471 }
3473 3472
3474 3473 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3475 3474
3476 3475 return (DDI_SUCCESS);
3477 3476 }
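
/*
 * Illustrative sketch (not part of this file): a minimal devmap(9E)
 * entry point exporting register set 1 through devmap_devmem_setup().
 * xx_dip and xx_acc_attr are hypothetical driver state, and error
 * handling is reduced to the essentials.
 */
static dev_info_t *xx_dip;			/* set at attach(9E) time */
static ddi_device_acc_attr_t xx_acc_attr;	/* device access attributes */

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	size_t length = ptob(btopr(len));	/* round up to a page multiple */
	int err;

	/* no callbacks, default flags, full protections */
	err = devmap_devmem_setup(dhp, xx_dip, NULL, 1, off, length,
	    PROT_ALL, DEVMAP_DEFAULTS, &xx_acc_attr);
	if (err != 0)
		return (err);

	*maplen = length;
	return (0);
}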
3478 3477
3479 3478 int
3480 3479 devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3481 3480 uint_t rnumber, offset_t roff, size_t len, uint_t maxprot,
3482 3481 uint_t flags, ddi_device_acc_attr_t *accattrp)
3483 3482 {
3484 3483 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3485 3484 ddi_acc_handle_t handle;
3486 3485 ddi_map_req_t mr;
3487 3486 ddi_acc_hdl_t *hp;
3488 3487 pfn_t pfn;
3489 3488 uint_t hat_flags;
3490 3489 int err;
3491 3490
3492 3491 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP,
3493 3492 	    "devmap_devmem_remap:start dhp=%p offset=%llx rnum=%d len=%lx",
3494 3493 (void *)dhp, roff, rnumber, (uint_t)len);
3495 3494 DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx "
3496 3495 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3497 3496
3498 3497 /*
3499 3498 * Return failure if setup has not been done or no remap permission
3500 3499 * has been granted during the setup.
3501 3500 */
3502 3501 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3503 3502 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3504 3503 return (DDI_FAILURE);
3505 3504
3506 3505 /* Only DEVMAP_MAPPING_INVALID flag supported for remap */
3507 3506 if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID))
3508 3507 return (DDI_FAILURE);
3509 3508
3510 3509 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3511 3510 return (DDI_FAILURE);
3512 3511
3513 3512 if (!(flags & DEVMAP_MAPPING_INVALID)) {
3514 3513 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3515 3514 if (handle == NULL)
3516 3515 return (DDI_FAILURE);
3517 3516 }
3518 3517
3519 3518 HOLD_DHP_LOCK(dhp);
3520 3519
3521 3520 	/*
3522 3521 	 * Unload the old mapping, so the next fault will set up the new
3523 3522 	 * mappings. Do this while holding the dhp lock so other faults
3524 3523 	 * don't reestablish the mappings.
3525 3524 	 */
3526 3525 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3527 3526 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3528 3527
3529 3528 if (flags & DEVMAP_MAPPING_INVALID) {
3530 3529 dhp->dh_flags |= DEVMAP_MAPPING_INVALID;
3531 3530 dhp->dh_pfn = PFN_INVALID;
3532 3531 } else {
3533 3532 /* clear any prior DEVMAP_MAPPING_INVALID flag */
3534 3533 dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID;
3535 3534 hp = impl_acc_hdl_get(handle);
3536 3535 hp->ah_vers = VERS_ACCHDL;
3537 3536 hp->ah_dip = dip;
3538 3537 hp->ah_rnumber = rnumber;
3539 3538 hp->ah_offset = roff;
3540 3539 hp->ah_len = len;
3541 3540 if (accattrp != NULL)
3542 3541 hp->ah_acc = *accattrp;
3543 3542
3544 3543 mr.map_op = DDI_MO_MAP_LOCKED;
3545 3544 mr.map_type = DDI_MT_RNUMBER;
3546 3545 mr.map_obj.rnumber = rnumber;
3547 3546 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3548 3547 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3549 3548 mr.map_handlep = hp;
3550 3549 mr.map_vers = DDI_MAP_VERSION;
3551 3550
3552 3551 /*
3553 3552 		 * Go up the device tree to get the pfn.
3554 3553 * The rootnex_map_regspec() routine in nexus drivers has been
3555 3554 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3556 3555 */
3557 3556 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn);
3558 3557 hat_flags = hp->ah_hat_flags;
3559 3558 impl_acc_hdl_free(handle);
3560 3559 if (err) {
3561 3560 RELE_DHP_LOCK(dhp);
3562 3561 return (DDI_FAILURE);
3563 3562 }
3564 3563 		/*
3565 3564 		 * The result of ddi_map was stored in local variables first so
3566 3565 		 * that a failure would not overwrite the existing dhp with bad data.
3567 3566 		 */
3568 3567 dhp->dh_pfn = pfn;
3569 3568 dhp->dh_hat_attr = hat_flags;
3570 3569 }
3571 3570
3572 3571 /* clear the large page size flag */
3573 3572 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3574 3573
3575 3574 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3576 3575 dhp->dh_roff = ptob(btop(roff));
3577 3576
3578 3577 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3579 3578 devmap_devmem_large_page_setup(dhp);
3580 3579 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3581 3580 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3582 3581
3583 3582 RELE_DHP_LOCK(dhp);
3584 3583 return (DDI_SUCCESS);
3585 3584 }
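
/*
 * Illustrative sketch (not part of this file): a mapping set up with
 * DEVMAP_ALLOW_REMAP can be retargeted later, e.g. from the driver's
 * context management code, after which devmap_load() validates the new
 * translations.  This reuses the hypothetical xx_dip/xx_acc_attr from
 * the sketch above; type and rw would come from devmap_access(9E).
 */
static int
xx_retarget(devmap_cookie_t dhp, offset_t off, size_t len,
    uint_t type, uint_t rw)
{
	if (devmap_devmem_remap(dhp, xx_dip, 1, off, len,
	    PROT_ALL, 0, &xx_acc_attr) != DDI_SUCCESS)
		return (EINVAL);

	/* establish translations for the remapped range */
	return (devmap_load(dhp, off, len, type, rw));
}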
3586 3585
3587 3586 /*
3588 3587  * Called by the driver's devmap routine to pass kernel virtual address
3589 3588  * mapping info to the framework. Used only for kernel memory
3590 3589  * allocated from ddi_umem_alloc().
3591 3590  */
3592 3591 int
3593 3592 devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3594 3593 struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
3595 3594 offset_t off, size_t len, uint_t maxprot, uint_t flags,
3596 3595 ddi_device_acc_attr_t *accattrp)
3597 3596 {
3598 3597 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3599 3598 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3600 3599
3601 3600 #ifdef lint
3602 3601 dip = dip;
3603 3602 #endif
3604 3603
3605 3604 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP,
3606 3605 "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx",
3607 3606 (void *)dhp, off, cookie, len);
3608 3607 DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx "
3609 3608 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3610 3609
3611 3610 if (cookie == NULL)
3612 3611 return (DDI_FAILURE);
3613 3612
3614 3613 /* For UMEM_TRASH, this restriction is not needed */
3615 3614 if ((off + len) > cp->size)
3616 3615 return (DDI_FAILURE);
3617 3616
3618 3617 /* check if the cache attributes are supported */
3619 3618 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3620 3619 return (DDI_FAILURE);
3621 3620
3622 3621 /*
3623 3622 	 * Check whether this function has already been called for this dhp.
3624 3623 */
3625 3624 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3626 3625 return (DDI_FAILURE);
3627 3626
3628 3627 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3629 3628 return (DDI_FAILURE);
3630 3629
3631 3630 if (flags & DEVMAP_MAPPING_INVALID) {
3632 3631 /*
3633 3632 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3634 3633 * remap permission.
3635 3634 */
3636 3635 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3637 3636 return (DDI_FAILURE);
3638 3637 }
3639 3638 } else {
3640 3639 dhp->dh_cookie = cookie;
3641 3640 dhp->dh_roff = ptob(btop(off));
3642 3641 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3643 3642 /* set HAT cache attributes */
3644 3643 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3645 3644 		/* set HAT endianness attributes */
3646 3645 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3647 3646 }
3648 3647
3649 3648 /*
3650 3649 * The default is _not_ to pass HAT_LOAD_NOCONSIST to hat_devload();
3651 3650 * we pass HAT_LOAD_NOCONSIST _only_ in cases where hat tries to
3652 3651 * create consistent mappings but our intention was to create
3653 3652 * non-consistent mappings.
3654 3653 *
3655 3654 	 * DEVMEM: hat figures out that it's DEVMEM and creates
3656 3655 	 * non-consistent mappings.
3657 3656 	 *
3658 3657 	 * kernel exported memory: hat figures out that it's memory and
3659 3658 	 * always creates consistent mappings.
3660 3659 *
3661 3660 * /dev/mem: non-consistent mappings. See comments in common/io/mem.c
3662 3661 *
3663 3662 * /dev/kmem: consistent mappings are created unless they are
3664 3663 * MAP_FIXED. We _explicitly_ tell hat to create non-consistent
3665 3664 * mappings by passing HAT_LOAD_NOCONSIST in case of MAP_FIXED
3666 3665 * mappings of /dev/kmem. See common/io/mem.c
3667 3666 */
3668 3667
3669 3668 /* Only some of the flags bits are settable by the driver */
3670 3669 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3671 3670
3672 3671 dhp->dh_len = ptob(btopr(len));
3673 3672 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3674 3673 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3675 3674
3676 3675 if (callbackops != NULL) {
3677 3676 bcopy(callbackops, &dhp->dh_callbackops,
3678 3677 sizeof (struct devmap_callback_ctl));
3679 3678 }
3680 3679 /*
3681 3680 * Initialize dh_lock if we want to do remap.
3682 3681 */
3683 3682 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3684 3683 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3685 3684 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3686 3685 }
3687 3686
3688 3687 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3689 3688
3690 3689 return (DDI_SUCCESS);
3691 3690 }
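
/*
 * Illustrative sketch (not part of this file): exporting kernel memory
 * allocated with ddi_umem_alloc() at attach time.  xx_umem_cookie and
 * xx_umem_size are hypothetical driver state; see also the
 * ddi_umem_alloc()/ddi_umem_free() pairing sketched later in this file.
 */
static ddi_umem_cookie_t xx_umem_cookie;	/* from ddi_umem_alloc() */
static size_t xx_umem_size;			/* size that was allocated */

static int
xx_umem_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	size_t length = ptob(btopr(len));
	int err;

	if (off + length > xx_umem_size)
		return (ENXIO);

	/* no callbacks, default flags and access attributes */
	err = devmap_umem_setup(dhp, xx_dip, NULL, xx_umem_cookie, off,
	    length, PROT_ALL, DEVMAP_DEFAULTS, NULL);
	if (err != 0)
		return (err);

	*maplen = length;
	return (0);
}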
3692 3691
3693 3692 int
3694 3693 devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3695 3694 ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
3696 3695 uint_t flags, ddi_device_acc_attr_t *accattrp)
3697 3696 {
3698 3697 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3699 3698 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3700 3699
3701 3700 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
3702 3701 "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
3703 3702 (void *)dhp, off, cookie, len);
3704 3703 DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
3705 3704 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3706 3705
3707 3706 #ifdef lint
3708 3707 dip = dip;
3709 3708 accattrp = accattrp;
3710 3709 #endif
3711 3710 /*
3712 3711 	 * Return failure if setup has not been done or no remap permission
3713 3712 * has been granted during the setup.
3714 3713 */
3715 3714 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3716 3715 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3717 3716 return (DDI_FAILURE);
3718 3717
3719 3718 /* No flags supported for remap yet */
3720 3719 if (flags != 0)
3721 3720 return (DDI_FAILURE);
3722 3721
3723 3722 /* check if the cache attributes are supported */
3724 3723 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3725 3724 return (DDI_FAILURE);
3726 3725
3727 3726 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3728 3727 return (DDI_FAILURE);
3729 3728
3730 3729 /* For UMEM_TRASH, this restriction is not needed */
3731 3730 if ((off + len) > cp->size)
3732 3731 return (DDI_FAILURE);
3733 3732
3734 3733 HOLD_DHP_LOCK(dhp);
3735 3734 	/*
3736 3735 	 * Unload the old mapping, so the next fault will set up the new
3737 3736 	 * mappings. Do this while holding the dhp lock so other faults
3738 3737 	 * don't reestablish the mappings.
3739 3738 	 */
3740 3739 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3741 3740 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3742 3741
3743 3742 dhp->dh_cookie = cookie;
3744 3743 dhp->dh_roff = ptob(btop(off));
3745 3744 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3746 3745 /* set HAT cache attributes */
3747 3746 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3748 3747 	/* set HAT endianness attributes */
3749 3748 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3750 3749
3751 3750 /* clear the large page size flag */
3752 3751 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3753 3752
3754 3753 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3755 3754 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3756 3755 RELE_DHP_LOCK(dhp);
3757 3756 return (DDI_SUCCESS);
3758 3757 }
3759 3758
3760 3759 /*
3761 3760  * Set the timeout value for the driver's context management callback,
3762 3761  * e.g. devmap_access().
3763 3762  */
3764 3763 void
3765 3764 devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
3766 3765 {
3767 3766 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3768 3767
3769 3768 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
3770 3769 "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
3771 3770 (void *)dhp, ticks);
3772 3771 dhp->dh_timeout_length = ticks;
3773 3772 }
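
/*
 * For example, a driver would typically call this once per handle from
 * its devmap_map callback (the 40ms value is hypothetical):
 *
 *	devmap_set_ctx_timeout(dhp, drv_usectohz(40000));
 */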
3774 3773
3775 3774 int
3776 3775 devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
3777 3776 size_t len, uint_t type, uint_t rw)
3778 3777 {
3779 3778 #ifdef lint
3780 3779 pvtp = pvtp;
3781 3780 #endif
3782 3781
3783 3782 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
3784 3783 "devmap_default_access:start");
3785 3784 return (devmap_load(dhp, off, len, type, rw));
3786 3785 }
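
/*
 * Illustrative sketch (not part of this file): a driver that needs a
 * valid devmap_access routine (e.g. to allow MAP_PRIVATE mappings, see
 * devmap_setup() above) but wants no custom policy can plug
 * devmap_default_access() straight into its callback vector:
 */
static struct devmap_callback_ctl xx_callbackops = {
	DEVMAP_OPS_REV,		/* devmap_rev */
	NULL,			/* devmap_map */
	devmap_default_access,	/* devmap_access */
	NULL,			/* devmap_dup */
	NULL			/* devmap_unmap */
};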
3787 3786
3788 3787 /*
3789 3788 * segkmem_alloc() wrapper to allocate memory which is both
3790 3789 * non-relocatable (for DR) and sharelocked, since the rest
3791 3790 * of this segment driver requires it.
3792 3791 */
3793 3792 static void *
3794 3793 devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
3795 3794 {
3796 3795 ASSERT(vmp != NULL);
3797 3796 ASSERT(kvseg.s_base != NULL);
3798 3797 vmflag |= (VM_NORELOC | SEGKMEM_SHARELOCKED);
3799 3798 return (segkmem_alloc(vmp, size, vmflag));
3800 3799 }
3801 3800
3802 3801 /*
3803 3802 * This is where things are a bit incestuous with seg_kmem: unlike
3804 3803 * seg_kp, seg_kmem does not keep its pages long-term sharelocked, so
3805 3804 * we need to do a bit of a dance around that to prevent duplication of
3806 3805 * code until we decide to bite the bullet and implement a new kernel
3807 3806 * segment for driver-allocated memory that is exported to user space.
3808 3807 */
3809 3808 static void
3810 3809 devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
3811 3810 {
3812 3811 page_t *pp;
3813 3812 caddr_t addr = inaddr;
3814 3813 caddr_t eaddr;
3815 3814 pgcnt_t npages = btopr(size);
3816 3815
3817 3816 ASSERT(vmp != NULL);
3818 3817 ASSERT(kvseg.s_base != NULL);
3819 3818 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
3820 3819
3821 3820 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
3822 3821
3823 3822 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
3824 3823 /*
3825 3824 * Use page_find() instead of page_lookup() to find the page
3826 3825 * since we know that it is hashed and has a shared lock.
3827 3826 */
3828 3827 pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
3829 3828
3830 3829 if (pp == NULL)
3831 3830 panic("devmap_free_pages: page not found");
3832 3831 if (!page_tryupgrade(pp)) {
3833 3832 page_unlock(pp);
3834 3833 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
3835 3834 SE_EXCL);
3836 3835 if (pp == NULL)
3837 3836 panic("devmap_free_pages: page already freed");
3838 3837 }
3839 3838 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
3840 3839 pp->p_lckcnt = 0;
3841 3840 page_destroy(pp, 0);
3842 3841 }
3843 3842 page_unresv(npages);
3844 3843
3845 3844 if (vmp != NULL)
3846 3845 vmem_free(vmp, inaddr, size);
3847 3846 }
3848 3847
3849 3848 /*
3850 3849 * devmap_umem_alloc_np() replaces kmem_zalloc() as the method for
3851 3850 * allocating non-pageable kmem in response to a ddi_umem_alloc()
3852 3851 * default request. For now we allocate our own pages and we keep
3853 3852 * them long-term sharelocked, since: A) the fault routines expect the
3854 3853 * memory to already be locked; B) pageable umem is already long-term
3855 3854 * locked; C) it's a lot of work to make it otherwise, particularly
3856 3855 * since the nexus layer expects the pages to never fault. An RFE is to
3857 3856  * not keep the pages long-term locked, but instead to be able to
3858 3857  * take faults on them and simply look them up in kvp when we do.
3859 3858  * Even then, we must take care not to let pageout
3860 3859 * steal them from us since the data must remain resident; if we
3861 3860 * do this we must come up with some way to pin the pages to prevent
3862 3861 * faults while a driver is doing DMA to/from them.
3863 3862 */
3864 3863 static void *
3865 3864 devmap_umem_alloc_np(size_t size, size_t flags)
3866 3865 {
3867 3866 void *buf;
3868 3867 int vmflags = (flags & DDI_UMEM_NOSLEEP)? VM_NOSLEEP : VM_SLEEP;
3869 3868
3870 3869 buf = vmem_alloc(umem_np_arena, size, vmflags);
3871 3870 if (buf != NULL)
3872 3871 bzero(buf, size);
3873 3872 return (buf);
3874 3873 }
3875 3874
3876 3875 static void
3877 3876 devmap_umem_free_np(void *addr, size_t size)
3878 3877 {
3879 3878 vmem_free(umem_np_arena, addr, size);
3880 3879 }
3881 3880
3882 3881 /*
3883 3882  * Allocate page-aligned kernel memory for export to user land.
3884 3883  * The devmap framework will use the cookie allocated by ddi_umem_alloc()
3885 3884  * to find a user virtual address that is in the same color as the address
3886 3885  * allocated here.
3887 3886  */
3887 3886 */
3888 3887 void *
3889 3888 ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
3890 3889 {
3891 3890 register size_t len = ptob(btopr(size));
3892 3891 void *buf = NULL;
3893 3892 struct ddi_umem_cookie *cp;
3894 3893 int iflags = 0;
3895 3894
3896 3895 *cookie = NULL;
3897 3896
3898 3897 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
3899 3898 "devmap_umem_alloc:start");
3900 3899 if (len == 0)
3901 3900 return ((void *)NULL);
3902 3901
3903 3902 /*
3904 3903 * allocate cookie
3905 3904 */
3906 3905 if ((cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
3907 3906 flags & DDI_UMEM_NOSLEEP ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
3908 3907 ASSERT(flags & DDI_UMEM_NOSLEEP);
3909 3908 return ((void *)NULL);
3910 3909 }
3911 3910
3912 3911 if (flags & DDI_UMEM_PAGEABLE) {
3913 3912 /* Only one of the flags is allowed */
3914 3913 ASSERT(!(flags & DDI_UMEM_TRASH));
3915 3914 /* initialize resource with 0 */
3916 3915 iflags = KPD_ZERO;
3917 3916
3918 3917 		/*
3919 3918 		 * To allocate unlocked pageable memory, use segkp_get() to
3920 3919 		 * create a segkp segment. Since segkp can only service kas,
3921 3920 		 * other segment drivers such as segdev have to do
3922 3921 		 * as_fault(segkp, SOFTLOCK) in their fault routines.
3923 3922 		 */
3924 3923 if (flags & DDI_UMEM_NOSLEEP)
3925 3924 iflags |= KPD_NOWAIT;
3926 3925
3927 3926 if ((buf = segkp_get(segkp, len, iflags)) == NULL) {
3928 3927 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3929 3928 return ((void *)NULL);
3930 3929 }
3931 3930 cp->type = KMEM_PAGEABLE;
3932 3931 mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
3933 3932 cp->locked = 0;
3934 3933 } else if (flags & DDI_UMEM_TRASH) {
3935 3934 /* Only one of the flags is allowed */
3936 3935 ASSERT(!(flags & DDI_UMEM_PAGEABLE));
3937 3936 cp->type = UMEM_TRASH;
3938 3937 buf = NULL;
3939 3938 } else {
3940 3939 if ((buf = devmap_umem_alloc_np(len, flags)) == NULL) {
3941 3940 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3942 3941 return ((void *)NULL);
3943 3942 }
3944 3943
3945 3944 cp->type = KMEM_NON_PAGEABLE;
3946 3945 }
3947 3946
3948 3947 	/*
3949 3948 	 * We need to save the size here; it will be used when
3950 3949 	 * we do the kmem_free.
3951 3950 	 */
3952 3951 cp->size = len;
3953 3952 cp->cvaddr = (caddr_t)buf;
3954 3953
3955 3954 *cookie = (void *)cp;
3956 3955 return (buf);
3957 3956 }
3958 3957
3959 3958 void
3960 3959 ddi_umem_free(ddi_umem_cookie_t cookie)
3961 3960 {
3962 3961 struct ddi_umem_cookie *cp;
3963 3962
3964 3963 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
3965 3964 "devmap_umem_free:start");
3966 3965
3967 3966 	/*
3968 3967 	 * If the cookie is NULL, this has no effect on the system.
3969 3968 	 */
3970 3969 if (cookie == NULL)
3971 3970 return;
3972 3971
3973 3972 cp = (struct ddi_umem_cookie *)cookie;
3974 3973
3975 3974 switch (cp->type) {
3976 3975 case KMEM_PAGEABLE :
3977 3976 ASSERT(cp->cvaddr != NULL && cp->size != 0);
3978 3977 		/*
3979 3978 		 * Check if there are still any pending faults on the cookie
3980 3979 		 * while the driver is deleting it.
3981 3980 		 * XXX - could change to an ASSERT, but that won't catch errant drivers.
3982 3981 		 */
3983 3982 mutex_enter(&cp->lock);
3984 3983 if (cp->locked) {
3985 3984 mutex_exit(&cp->lock);
3986 3985 panic("ddi_umem_free for cookie with pending faults %p",
3987 3986 (void *)cp);
3988 3987 return;
3989 3988 }
3990 3989
3991 3990 segkp_release(segkp, cp->cvaddr);
3992 3991
3993 3992 /*
3994 3993 * release mutex associated with this cookie.
3995 3994 */
3996 3995 mutex_destroy(&cp->lock);
3997 3996 break;
3998 3997 case KMEM_NON_PAGEABLE :
3999 3998 ASSERT(cp->cvaddr != NULL && cp->size != 0);
4000 3999 devmap_umem_free_np(cp->cvaddr, cp->size);
4001 4000 break;
4002 4001 case UMEM_TRASH :
4003 4002 break;
4004 4003 case UMEM_LOCKED :
4005 4004 /* Callers should use ddi_umem_unlock for this type */
4006 4005 ddi_umem_unlock(cookie);
4007 4006 /* Frees the cookie too */
4008 4007 return;
4009 4008 default:
4010 4009 /* panic so we can diagnose the underlying cause */
4011 4010 panic("ddi_umem_free: illegal cookie type 0x%x\n",
4012 4011 cp->type);
4013 4012 }
4014 4013
4015 4014 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4016 4015 }
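
/*
 * Illustrative sketch (not part of this file): the usual attach/detach
 * pairing of ddi_umem_alloc() and ddi_umem_free().  The one-page size
 * is hypothetical; the cookie is what devmap_umem_setup() consumes.
 */
static int
xx_alloc_export_buf(ddi_umem_cookie_t *cookiep, caddr_t *kvap)
{
	/* one page of zeroed, non-pageable kernel memory */
	*kvap = ddi_umem_alloc(PAGESIZE, DDI_UMEM_NOSLEEP, cookiep);
	return (*kvap == NULL ? ENOMEM : 0);
}

static void
xx_free_export_buf(ddi_umem_cookie_t cookie)
{
	/* frees both the memory and the cookie */
	ddi_umem_free(cookie);
}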
4017 4016
4018 4017
4019 4018 static int
4020 4019 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4021 4020 {
4022 4021 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4023 4022
4024 4023 /*
4025 4024 * It looks as if it is always mapped shared
4026 4025 */
4027 4026 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4028 4027 "segdev_getmemid:start");
4029 4028 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4030 4029 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4031 4030 return (0);
4032 4031 }
4033 4032
4034 4033 /*ARGSUSED*/
4035 4034 static lgrp_mem_policy_info_t *
4036 4035 segdev_getpolicy(struct seg *seg, caddr_t addr)
4037 4036 {
4038 4037 return (NULL);
4039 4038 }
4040 4039
4041 4040 /*ARGSUSED*/
4042 4041 static int
4043 4042 segdev_capable(struct seg *seg, segcapability_t capability)
4044 4043 {
4045 4044 return (0);
4046 4045 }
4047 4046
4048 4047 /*
4049 4048 * ddi_umem_alloc() non-pageable quantum cache max size.
4050 4049 * This is just a SWAG.
4051 4050 */
4052 4051 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4053 4052
4054 4053 /*
4055 4054 * Initialize seg_dev from boot. This routine sets up the trash page
4056 4055 * and creates the umem_np_arena used to back non-pageable memory
4057 4056 * requests.
4058 4057 */
4059 4058 void
4060 4059 segdev_init(void)
4061 4060 {
4062 4061 struct seg kseg;
4063 4062
4064 4063 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
4065 4064 devmap_alloc_pages, devmap_free_pages, heap_arena,
4066 4065 DEVMAP_UMEM_QUANTUM, VM_SLEEP);
4067 4066
4068 4067 kseg.s_as = &kas;
4069 4068 trashpp = page_create_va(&trashvp, 0, PAGESIZE,
4070 4069 PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
4071 4070 if (trashpp == NULL)
4072 4071 panic("segdev_init: failed to create trash page");
4073 4072 pagezero(trashpp, 0, PAGESIZE);
4074 4073 page_downgrade(trashpp);
4075 4074 }
4076 4075
4077 4076 /*
4078 4077 * Invoke platform-dependent support routines so that /proc can have
4079 4078 * the platform code deal with curious hardware.
4080 4079 */
4081 4080 int
4082 4081 segdev_copyfrom(struct seg *seg,
4083 4082 caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
4084 4083 {
4085 4084 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4086 4085 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4087 4086
4088 4087 return (e_ddi_copyfromdev(sp->s_dip,
4089 4088 (off_t)(uaddr - seg->s_base), devaddr, kaddr, len));
4090 4089 }
4091 4090
4092 4091 int
4093 4092 segdev_copyto(struct seg *seg,
4094 4093 caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
4095 4094 {
4096 4095 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4097 4096 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4098 4097
4099 4098 return (e_ddi_copytodev(sp->s_dip,
4100 4099 (off_t)(uaddr - seg->s_base), kaddr, devaddr, len));
4101 4100 }