use NULL dump segop as a shorthand for no-op
Instead of forcing every segment driver to implement a dummy function that
does nothing, handle a NULL dump segop function pointer as a no-op shorthand.
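For illustration, a minimal sketch of the dispatch-side pattern this
describes; the wrapper below is simplified and hypothetical, not the exact
illumos code:

/* Hypothetical simplified dispatcher: a NULL .dump op is a no-op. */
static void
segop_dump(struct seg *seg)
{
        if (seg->s_ops->dump == NULL)
                return;         /* no-op shorthand */

        seg->s_ops->dump(seg);
}
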
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL setpagesize segop as a shorthand for ENOTSUP
Instead of forcing every segment driver to implement a dummy function to
return (hopefully) ENOTSUP, handle a NULL setpagesize segop function pointer
as "return ENOTSUP" shorthand.
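The same dispatch-side pattern, sketched for setpagesize (again a simplified,
hypothetical wrapper, with the argument list taken from the driver entry
points in the diff below):

/* Hypothetical simplified dispatcher: a NULL .setpagesize op means ENOTSUP. */
static int
segop_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
{
        if (seg->s_ops->setpagesize == NULL)
                return (ENOTSUP);

        return (seg->s_ops->setpagesize(seg, addr, len, szc));
}
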
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as "no capabilities
supported" shorthand.
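And the same for capable (a simplified, hypothetical sketch of the pattern
that, per the two notes below, segop_getpolicy and segop_inherit already
implement):

/* Hypothetical simplified dispatcher: a NULL .capable op means none. */
static int
segop_capable(struct seg *seg, segcapability_t cap)
{
        if (seg->s_ops->capable == NULL)
                return (0);     /* no capabilities supported */

        return (seg->s_ops->capable(seg, cap));
}
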
segop_getpolicy already checks for a NULL op
seg_inherit_notsup is redundant since segop_inherit checks for NULL properly
patch lower-case-segops
use C99 initializers in segment ops structures
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory. The code is there and in theory it runs when we get *extremely* low
on memory. In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
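For a programmatic check, here is a minimal libkstat sketch; it assumes the
counters live in the per-CPU cpu:<n>:vm named kstats as 64-bit values (which
is what the kstat(1M) line above matches) and is illustrative, not part of
the patch. Compile with -lkstat:

#include <stdio.h>
#include <string.h>
#include <kstat.h>

int
main(void)
{
        kstat_ctl_t *kc;
        kstat_t *ksp;
        uint64_t swapin = 0, swapout = 0;

        if ((kc = kstat_open()) == NULL)
                return (1);

        /* Walk the kstat chain, summing the per-CPU cpu:<n>:vm counters. */
        for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
                kstat_named_t *kn;

                if (strcmp(ksp->ks_module, "cpu") != 0 ||
                    strcmp(ksp->ks_name, "vm") != 0)
                        continue;
                if (kstat_read(kc, ksp, NULL) == -1)
                        continue;
                if ((kn = kstat_data_lookup(ksp, "swapin")) != NULL)
                        swapin += kn->value.ui64;
                if ((kn = kstat_data_lookup(ksp, "swapout")) != NULL)
                        swapout += kn->value.ui64;
        }

        (void) printf("swapin=%llu swapout=%llu\n",
            (unsigned long long)swapin, (unsigned long long)swapout);
        (void) kstat_close(kc);
        return (0);
}
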
--- old/usr/src/uts/common/vm/seg_dev.c
+++ new/usr/src/uts/common/vm/seg_dev.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - segment of a mapped device.
42 42 *
43 43 * This segment driver is used when mapping character special devices.
44 44 */
45 45
46 46 #include <sys/types.h>
47 47 #include <sys/t_lock.h>
48 48 #include <sys/sysmacros.h>
49 49 #include <sys/vtrace.h>
50 50 #include <sys/systm.h>
51 51 #include <sys/vmsystm.h>
52 52 #include <sys/mman.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/vnode.h>
57 57 #include <sys/proc.h>
58 58 #include <sys/conf.h>
59 59 #include <sys/debug.h>
60 60 #include <sys/ddidevmap.h>
61 61 #include <sys/ddi_implfuncs.h>
62 62 #include <sys/lgrp.h>
63 63
64 64 #include <vm/page.h>
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_dev.h>
69 69 #include <vm/seg_kp.h>
70 70 #include <vm/seg_kmem.h>
71 71 #include <vm/vpage.h>
72 72
73 73 #include <sys/sunddi.h>
74 74 #include <sys/esunddi.h>
75 75 #include <sys/fs/snode.h>
76 76
77 77
78 78 #if DEBUG
79 79 int segdev_debug;
80 80 #define DEBUGF(level, args) { if (segdev_debug >= (level)) cmn_err args; }
81 81 #else
82 82 #define DEBUGF(level, args)
83 83 #endif
84 84
85 85 /* Default timeout for devmap context management */
86 86 #define CTX_TIMEOUT_VALUE 0
87 87
88 88 #define HOLD_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
89 89 { mutex_enter(&dhp->dh_lock); }
90 90
91 91 #define RELE_DHP_LOCK(dhp) if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) \
92 92 { mutex_exit(&dhp->dh_lock); }
93 93
94 94 #define round_down_p2(a, s) ((a) & ~((s) - 1))
95 95 #define round_up_p2(a, s) (((a) + (s) - 1) & ~((s) - 1))
96 96
97 97 /*
98 98 * VA_PA_ALIGNED checks to see if both VA and PA are on pgsize boundary
99 99 * VA_PA_PGSIZE_ALIGNED checks to see if VA is aligned with PA w.r.t. pgsize
100 100 */
101 101 #define VA_PA_ALIGNED(uvaddr, paddr, pgsize) \
102 102 (((uvaddr | paddr) & (pgsize - 1)) == 0)
103 103 #define VA_PA_PGSIZE_ALIGNED(uvaddr, paddr, pgsize) \
104 104 (((uvaddr ^ paddr) & (pgsize - 1)) == 0)
105 105
106 106 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
107 107
108 108 #define VTOCVP(vp) (VTOS(vp)->s_commonvp) /* we "know" it's an snode */
109 109
110 110 static struct devmap_ctx *devmapctx_list = NULL;
111 111 static struct devmap_softlock *devmap_slist = NULL;
112 112
113 113 /*
114 114 * mutex, vnode and page for the page of zeros we use for the trash mappings.
115 115 * One trash page is allocated on the first ddi_umem_setup call that uses it
116 116 * XXX Eventually, we may want to combine this with what segnf does when all
117 117 * hat layers implement HAT_NOFAULT.
118 118 *
119 119 * The trash page is used when the backing store for a userland mapping is
120 120 * removed but the application semantics do not take kindly to a SIGBUS.
121 121 * In that scenario, the application's pages are mapped to some dummy page
122 122 * which returns garbage on read, and writes go into a common place.
123 123 * (Perfect for NO_FAULT semantics)
124 124 * The device driver is responsible for communicating to the app with some
125 125 * other mechanism that such remapping has happened and the app should take
126 126 * corrective action.
127 127 * We can also use an anonymous memory page as there is no requirement to
128 128 * keep the page locked, however this complicates the fault code. RFE.
129 129 */
130 130 static struct vnode trashvp;
131 131 static struct page *trashpp;
132 132
133 133 /* Non-pageable kernel memory is allocated from the umem_np_arena. */
134 134 static vmem_t *umem_np_arena;
135 135
136 136 /* Set the cookie to a value we know will never be a valid umem_cookie */
137 137 #define DEVMAP_DEVMEM_COOKIE ((ddi_umem_cookie_t)0x1)
138 138
139 139 /*
140 140 * Macros to check the type of a devmap handle
141 141 */
142 142 #define cookie_is_devmem(c) \
143 143 ((c) == (struct ddi_umem_cookie *)DEVMAP_DEVMEM_COOKIE)
144 144
145 145 #define cookie_is_pmem(c) \
146 146 ((c) == (struct ddi_umem_cookie *)DEVMAP_PMEM_COOKIE)
147 147
148 148 #define cookie_is_kpmem(c) (!cookie_is_devmem(c) && !cookie_is_pmem(c) &&\
149 149 ((c)->type == KMEM_PAGEABLE))
150 150
151 151 #define dhp_is_devmem(dhp) \
152 152 (cookie_is_devmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
153 153
154 154 #define dhp_is_pmem(dhp) \
155 155 (cookie_is_pmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
156 156
157 157 #define dhp_is_kpmem(dhp) \
158 158 (cookie_is_kpmem((struct ddi_umem_cookie *)((dhp)->dh_cookie)))
159 159
160 160 /*
161 161 * Private seg op routines.
162 162 */
163 163 static int segdev_dup(struct seg *, struct seg *);
164 164 static int segdev_unmap(struct seg *, caddr_t, size_t);
165 165 static void segdev_free(struct seg *);
166 166 static faultcode_t segdev_fault(struct hat *, struct seg *, caddr_t, size_t,
167 167 enum fault_type, enum seg_rw);
168 168 static faultcode_t segdev_faulta(struct seg *, caddr_t);
169 169 static int segdev_setprot(struct seg *, caddr_t, size_t, uint_t);
170 170 static int segdev_checkprot(struct seg *, caddr_t, size_t, uint_t);
... 170 lines elided ...
171 171 static void segdev_badop(void);
172 172 static int segdev_sync(struct seg *, caddr_t, size_t, int, uint_t);
173 173 static size_t segdev_incore(struct seg *, caddr_t, size_t, char *);
174 174 static int segdev_lockop(struct seg *, caddr_t, size_t, int, int,
175 175 ulong_t *, size_t);
176 176 static int segdev_getprot(struct seg *, caddr_t, size_t, uint_t *);
177 177 static u_offset_t segdev_getoffset(struct seg *, caddr_t);
178 178 static int segdev_gettype(struct seg *, caddr_t);
179 179 static int segdev_getvp(struct seg *, caddr_t, struct vnode **);
180 180 static int segdev_advise(struct seg *, caddr_t, size_t, uint_t);
181 -static void segdev_dump(struct seg *);
182 181 static int segdev_pagelock(struct seg *, caddr_t, size_t,
183 182 struct page ***, enum lock_type, enum seg_rw);
184 -static int segdev_setpagesize(struct seg *, caddr_t, size_t, uint_t);
185 183 static int segdev_getmemid(struct seg *, caddr_t, memid_t *);
186 -static lgrp_mem_policy_info_t *segdev_getpolicy(struct seg *, caddr_t);
187 -static int segdev_capable(struct seg *, segcapability_t);
188 184
189 185 /*
190 186 * XXX this struct is used by rootnex_map_fault to identify
191 187 * the segment it has been passed. So if you make it
192 188 * "static" you'll need to fix rootnex_map_fault.
193 189 */
194 -struct seg_ops segdev_ops = {
195 - segdev_dup,
196 - segdev_unmap,
197 - segdev_free,
198 - segdev_fault,
199 - segdev_faulta,
200 - segdev_setprot,
201 - segdev_checkprot,
202 - (int (*)())segdev_badop, /* kluster */
203 - (size_t (*)(struct seg *))NULL, /* swapout */
204 - segdev_sync, /* sync */
205 - segdev_incore,
206 - segdev_lockop, /* lockop */
207 - segdev_getprot,
208 - segdev_getoffset,
209 - segdev_gettype,
210 - segdev_getvp,
211 - segdev_advise,
212 - segdev_dump,
213 - segdev_pagelock,
214 - segdev_setpagesize,
215 - segdev_getmemid,
216 - segdev_getpolicy,
217 - segdev_capable,
218 - seg_inherit_notsup
190 +const struct seg_ops segdev_ops = {
191 + .dup = segdev_dup,
192 + .unmap = segdev_unmap,
193 + .free = segdev_free,
194 + .fault = segdev_fault,
195 + .faulta = segdev_faulta,
196 + .setprot = segdev_setprot,
197 + .checkprot = segdev_checkprot,
198 + .kluster = (int (*)())segdev_badop,
199 + .sync = segdev_sync,
200 + .incore = segdev_incore,
201 + .lockop = segdev_lockop,
202 + .getprot = segdev_getprot,
203 + .getoffset = segdev_getoffset,
204 + .gettype = segdev_gettype,
205 + .getvp = segdev_getvp,
206 + .advise = segdev_advise,
207 + .pagelock = segdev_pagelock,
208 + .getmemid = segdev_getmemid,
219 209 };
220 210
221 211 /*
222 212 * Private segdev support routines
223 213 */
224 214 static struct segdev_data *sdp_alloc(void);
225 215
226 216 static void segdev_softunlock(struct hat *, struct seg *, caddr_t,
227 217 size_t, enum seg_rw);
228 218
229 219 static faultcode_t segdev_faultpage(struct hat *, struct seg *, caddr_t,
230 220 struct vpage *, enum fault_type, enum seg_rw, devmap_handle_t *);
231 221
232 222 static faultcode_t segdev_faultpages(struct hat *, struct seg *, caddr_t,
233 223 size_t, enum fault_type, enum seg_rw, devmap_handle_t *);
234 224
235 225 static struct devmap_ctx *devmap_ctxinit(dev_t, ulong_t);
236 226 static struct devmap_softlock *devmap_softlock_init(dev_t, ulong_t);
237 227 static void devmap_softlock_rele(devmap_handle_t *);
238 228 static void devmap_ctx_rele(devmap_handle_t *);
239 229
240 230 static void devmap_ctxto(void *);
241 231
242 232 static devmap_handle_t *devmap_find_handle(devmap_handle_t *dhp_head,
243 233 caddr_t addr);
244 234
245 235 static ulong_t devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
246 236 ulong_t *opfn, ulong_t *pagesize);
247 237
248 238 static void free_devmap_handle(devmap_handle_t *dhp);
249 239
250 240 static int devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
251 241 struct seg *newseg);
252 242
253 243 static devmap_handle_t *devmap_handle_unmap(devmap_handle_t *dhp);
254 244
255 245 static void devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len);
256 246
257 247 static void devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr);
258 248
259 249 static int devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
260 250 offset_t off, size_t len, uint_t flags);
261 251
262 252 static void devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len,
263 253 caddr_t addr, size_t *llen, caddr_t *laddr);
264 254
265 255 static void devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len);
266 256
267 257 static void *devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag);
268 258 static void devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size);
269 259
270 260 static void *devmap_umem_alloc_np(size_t size, size_t flags);
271 261 static void devmap_umem_free_np(void *addr, size_t size);
272 262
273 263 /*
274 264 * routines to lock and unlock underlying segkp segment for
275 265 * KMEM_PAGEABLE type cookies.
276 266 */
277 267 static faultcode_t acquire_kpmem_lock(struct ddi_umem_cookie *, size_t);
278 268 static void release_kpmem_lock(struct ddi_umem_cookie *, size_t);
279 269
280 270 /*
281 271 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
282 272 * drivers with devmap_access callbacks
283 273 */
284 274 static int devmap_softlock_enter(struct devmap_softlock *, size_t,
285 275 enum fault_type);
286 276 static void devmap_softlock_exit(struct devmap_softlock *, size_t,
287 277 enum fault_type);
288 278
289 279 static kmutex_t devmapctx_lock;
290 280
291 281 static kmutex_t devmap_slock;
292 282
293 283 /*
294 284 * Initialize the thread callbacks and thread private data.
295 285 */
296 286 static struct devmap_ctx *
297 287 devmap_ctxinit(dev_t dev, ulong_t id)
298 288 {
299 289 struct devmap_ctx *devctx;
300 290 struct devmap_ctx *tmp;
301 291 dev_info_t *dip;
302 292
303 293 tmp = kmem_zalloc(sizeof (struct devmap_ctx), KM_SLEEP);
304 294
305 295 mutex_enter(&devmapctx_lock);
306 296
307 297 dip = e_ddi_hold_devi_by_dev(dev, 0);
308 298 ASSERT(dip != NULL);
309 299 ddi_release_devi(dip);
310 300
311 301 for (devctx = devmapctx_list; devctx != NULL; devctx = devctx->next)
312 302 if ((devctx->dip == dip) && (devctx->id == id))
313 303 break;
314 304
315 305 if (devctx == NULL) {
316 306 devctx = tmp;
317 307 devctx->dip = dip;
318 308 devctx->id = id;
319 309 mutex_init(&devctx->lock, NULL, MUTEX_DEFAULT, NULL);
320 310 cv_init(&devctx->cv, NULL, CV_DEFAULT, NULL);
321 311 devctx->next = devmapctx_list;
322 312 devmapctx_list = devctx;
323 313 } else
324 314 kmem_free(tmp, sizeof (struct devmap_ctx));
325 315
326 316 mutex_enter(&devctx->lock);
327 317 devctx->refcnt++;
328 318 mutex_exit(&devctx->lock);
329 319 mutex_exit(&devmapctx_lock);
330 320
331 321 return (devctx);
332 322 }
333 323
334 324 /*
335 325 * Timeout callback called if a CPU has not given up the device context
336 326 * within dhp->dh_timeout_length ticks
337 327 */
338 328 static void
339 329 devmap_ctxto(void *data)
340 330 {
341 331 struct devmap_ctx *devctx = data;
342 332
343 333 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_CTXTO,
344 334 "devmap_ctxto:timeout expired, devctx=%p", (void *)devctx);
345 335 mutex_enter(&devctx->lock);
346 336 /*
347 337 * Set oncpu = 0 so the next mapping trying to get the device context
348 338 * can.
349 339 */
350 340 devctx->oncpu = 0;
351 341 devctx->timeout = 0;
352 342 cv_signal(&devctx->cv);
353 343 mutex_exit(&devctx->lock);
354 344 }
355 345
356 346 /*
357 347 * Create a device segment.
358 348 */
359 349 int
360 350 segdev_create(struct seg *seg, void *argsp)
361 351 {
362 352 struct segdev_data *sdp;
363 353 struct segdev_crargs *a = (struct segdev_crargs *)argsp;
364 354 devmap_handle_t *dhp = (devmap_handle_t *)a->devmap_data;
365 355 int error;
366 356
367 357 /*
368 358 * Since the address space is "write" locked, we
369 359 * don't need the segment lock to protect "segdev" data.
370 360 */
371 361 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
372 362
373 363 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
374 364
375 365 sdp = sdp_alloc();
376 366
377 367 sdp->mapfunc = a->mapfunc;
378 368 sdp->offset = a->offset;
379 369 sdp->prot = a->prot;
380 370 sdp->maxprot = a->maxprot;
381 371 sdp->type = a->type;
382 372 sdp->pageprot = 0;
383 373 sdp->softlockcnt = 0;
384 374 sdp->vpage = NULL;
385 375
386 376 if (sdp->mapfunc == NULL)
387 377 sdp->devmap_data = dhp;
388 378 else
389 379 sdp->devmap_data = dhp = NULL;
390 380
391 381 sdp->hat_flags = a->hat_flags;
392 382 sdp->hat_attr = a->hat_attr;
393 383
394 384 /*
395 385 * Currently, hat_flags supports only HAT_LOAD_NOCONSIST
396 386 */
397 387 ASSERT(!(sdp->hat_flags & ~HAT_LOAD_NOCONSIST));
398 388
399 389 /*
400 390 * Hold shadow vnode -- segdev only deals with
401 391 * character (VCHR) devices. We use the common
402 392 * vp to hang pages on.
403 393 */
404 394 sdp->vp = specfind(a->dev, VCHR);
405 395 ASSERT(sdp->vp != NULL);
406 396
407 397 seg->s_ops = &segdev_ops;
408 398 seg->s_data = sdp;
409 399
410 400 while (dhp != NULL) {
411 401 dhp->dh_seg = seg;
412 402 dhp = dhp->dh_next;
413 403 }
414 404
415 405 /*
416 406 * Inform the vnode of the new mapping.
417 407 */
418 408 /*
419 409 * It is ok to pass sdp->maxprot to ADDMAP rather than to use
420 410 * dhp specific maxprot because spec_addmap does not use maxprot.
421 411 */
422 412 error = VOP_ADDMAP(VTOCVP(sdp->vp), sdp->offset,
423 413 seg->s_as, seg->s_base, seg->s_size,
424 414 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
... 196 lines elided ...
425 415
426 416 if (error != 0) {
427 417 sdp->devmap_data = NULL;
428 418 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
429 419 HAT_UNLOAD_UNMAP);
430 420 } else {
431 421 /*
432 422 * Mappings of /dev/null don't count towards the VSZ of a
433 423 * process. Mappings of /dev/null have no mapping type.
434 424 */
435 - if ((SEGOP_GETTYPE(seg, (seg)->s_base) & (MAP_SHARED |
425 + if ((segop_gettype(seg, seg->s_base) & (MAP_SHARED |
436 426 MAP_PRIVATE)) == 0) {
437 427 seg->s_as->a_resvsize -= seg->s_size;
438 428 }
439 429 }
440 430
441 431 return (error);
442 432 }
443 433
444 434 static struct segdev_data *
445 435 sdp_alloc(void)
446 436 {
447 437 struct segdev_data *sdp;
448 438
449 439 sdp = kmem_zalloc(sizeof (struct segdev_data), KM_SLEEP);
450 440 rw_init(&sdp->lock, NULL, RW_DEFAULT, NULL);
451 441
452 442 return (sdp);
453 443 }
454 444
455 445 /*
456 446 * Duplicate seg and return new segment in newseg.
457 447 */
458 448 static int
459 449 segdev_dup(struct seg *seg, struct seg *newseg)
460 450 {
461 451 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
462 452 struct segdev_data *newsdp;
463 453 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
464 454 size_t npages;
465 455 int ret;
466 456
467 457 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DUP,
468 458 "segdev_dup:start dhp=%p, seg=%p", (void *)dhp, (void *)seg);
469 459
470 460 DEBUGF(3, (CE_CONT, "segdev_dup: dhp %p seg %p\n",
471 461 (void *)dhp, (void *)seg));
472 462
473 463 /*
474 464 * Since the address space is "write" locked, we
475 465 * don't need the segment lock to protect "segdev" data.
476 466 */
477 467 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
478 468
479 469 newsdp = sdp_alloc();
480 470
481 471 newseg->s_ops = seg->s_ops;
482 472 newseg->s_data = (void *)newsdp;
483 473
484 474 VN_HOLD(sdp->vp);
485 475 newsdp->vp = sdp->vp;
486 476 newsdp->mapfunc = sdp->mapfunc;
487 477 newsdp->offset = sdp->offset;
488 478 newsdp->pageprot = sdp->pageprot;
489 479 newsdp->prot = sdp->prot;
490 480 newsdp->maxprot = sdp->maxprot;
491 481 newsdp->type = sdp->type;
492 482 newsdp->hat_attr = sdp->hat_attr;
493 483 newsdp->hat_flags = sdp->hat_flags;
494 484 newsdp->softlockcnt = 0;
495 485
496 486 /*
497 487 * Initialize per page data if the segment we are
498 488 * dup'ing has per page information.
499 489 */
500 490 npages = seg_pages(newseg);
501 491
502 492 if (sdp->vpage != NULL) {
503 493 size_t nbytes = vpgtob(npages);
504 494
505 495 newsdp->vpage = kmem_zalloc(nbytes, KM_SLEEP);
506 496 bcopy(sdp->vpage, newsdp->vpage, nbytes);
507 497 } else
508 498 newsdp->vpage = NULL;
509 499
510 500 /*
511 501 * duplicate devmap handles
512 502 */
513 503 if (dhp != NULL) {
514 504 ret = devmap_handle_dup(dhp,
515 505 (devmap_handle_t **)&newsdp->devmap_data, newseg);
516 506 if (ret != 0) {
517 507 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DUP_CK1,
518 508 "segdev_dup:ret1 ret=%x, dhp=%p seg=%p",
519 509 ret, (void *)dhp, (void *)seg);
520 510 DEBUGF(1, (CE_CONT,
521 511 "segdev_dup: ret %x dhp %p seg %p\n",
522 512 ret, (void *)dhp, (void *)seg));
523 513 return (ret);
524 514 }
525 515 }
526 516
527 517 /*
528 518 * Inform the common vnode of the new mapping.
529 519 */
530 520 return (VOP_ADDMAP(VTOCVP(newsdp->vp),
531 521 newsdp->offset, newseg->s_as,
532 522 newseg->s_base, newseg->s_size, newsdp->prot,
533 523 newsdp->maxprot, sdp->type, CRED(), NULL));
534 524 }
535 525
536 526 /*
537 527 * duplicate devmap handles
538 528 */
539 529 static int
540 530 devmap_handle_dup(devmap_handle_t *dhp, devmap_handle_t **new_dhp,
541 531 struct seg *newseg)
542 532 {
543 533 devmap_handle_t *newdhp_save = NULL;
544 534 devmap_handle_t *newdhp = NULL;
545 535 struct devmap_callback_ctl *callbackops;
546 536
547 537 while (dhp != NULL) {
548 538 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
549 539
550 540 /* Need to lock the original dhp while copying if REMAP */
551 541 HOLD_DHP_LOCK(dhp);
552 542 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
553 543 RELE_DHP_LOCK(dhp);
554 544 newdhp->dh_seg = newseg;
555 545 newdhp->dh_next = NULL;
556 546 if (newdhp_save != NULL)
557 547 newdhp_save->dh_next = newdhp;
558 548 else
559 549 *new_dhp = newdhp;
560 550 newdhp_save = newdhp;
561 551
562 552 callbackops = &newdhp->dh_callbackops;
563 553
564 554 if (dhp->dh_softlock != NULL)
565 555 newdhp->dh_softlock = devmap_softlock_init(
566 556 newdhp->dh_dev,
567 557 (ulong_t)callbackops->devmap_access);
568 558 if (dhp->dh_ctx != NULL)
569 559 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
570 560 (ulong_t)callbackops->devmap_access);
571 561
572 562 /*
573 563 * Initialize dh_lock if we want to do remap.
574 564 */
575 565 if (newdhp->dh_flags & DEVMAP_ALLOW_REMAP) {
576 566 mutex_init(&newdhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
577 567 newdhp->dh_flags |= DEVMAP_LOCK_INITED;
578 568 }
579 569
580 570 if (callbackops->devmap_dup != NULL) {
581 571 int ret;
582 572
583 573 /*
584 574 * Call the dup callback so that the driver can
585 575 * duplicate its private data.
586 576 */
587 577 ret = (*callbackops->devmap_dup)(dhp, dhp->dh_pvtp,
588 578 (devmap_cookie_t *)newdhp, &newdhp->dh_pvtp);
589 579
590 580 if (ret != 0) {
591 581 /*
592 582 * We want to free up this segment as the driver
593 583 * has indicated that we can't dup it. But we
594 584 * don't want to call the drivers, devmap_unmap,
595 585 * callback function as the driver does not
596 586 * think this segment exists. The caller of
597 587 * devmap_dup will call seg_free on newseg
598 588 * as it was the caller that allocated the
599 589 * segment.
600 590 */
601 591 DEBUGF(1, (CE_CONT, "devmap_handle_dup ERROR: "
602 592 "newdhp %p dhp %p\n", (void *)newdhp,
603 593 (void *)dhp));
604 594 callbackops->devmap_unmap = NULL;
605 595 return (ret);
606 596 }
607 597 }
608 598
609 599 dhp = dhp->dh_next;
610 600 }
611 601
612 602 return (0);
613 603 }
614 604
615 605 /*
616 606 * Split a segment at addr for length len.
617 607 */
618 608 /*ARGSUSED*/
619 609 static int
620 610 segdev_unmap(struct seg *seg, caddr_t addr, size_t len)
621 611 {
622 612 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
623 613 register struct segdev_data *nsdp;
624 614 register struct seg *nseg;
625 615 register size_t opages; /* old segment size in pages */
626 616 register size_t npages; /* new segment size in pages */
627 617 register size_t dpages; /* pages being deleted (unmapped) */
628 618 register size_t nbytes;
629 619 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
630 620 devmap_handle_t *dhpp;
631 621 devmap_handle_t *newdhp;
632 622 struct devmap_callback_ctl *callbackops;
633 623 caddr_t nbase;
634 624 offset_t off;
635 625 ulong_t nsize;
636 626 size_t mlen, sz;
637 627
638 628 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP,
639 629 "segdev_unmap:start dhp=%p, seg=%p addr=%p len=%lx",
640 630 (void *)dhp, (void *)seg, (void *)addr, len);
641 631
642 632 DEBUGF(3, (CE_CONT, "segdev_unmap: dhp %p seg %p addr %p len %lx\n",
643 633 (void *)dhp, (void *)seg, (void *)addr, len));
644 634
645 635 /*
646 636 * Since the address space is "write" locked, we
647 637 * don't need the segment lock to protect "segdev" data.
648 638 */
649 639 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
650 640
651 641 if ((sz = sdp->softlockcnt) > 0) {
652 642 /*
653 643 * Fail the unmap if pages are SOFTLOCKed through this mapping.
654 644 * softlockcnt is protected from change by the as write lock.
655 645 */
656 646 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK1,
657 647 "segdev_unmap:error softlockcnt = %ld", sz);
658 648 DEBUGF(1, (CE_CONT, "segdev_unmap: softlockcnt %ld\n", sz));
659 649 return (EAGAIN);
660 650 }
661 651
662 652 /*
663 653 * Check for bad sizes
664 654 */
665 655 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
666 656 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
667 657 panic("segdev_unmap");
668 658
669 659 if (dhp != NULL) {
670 660 devmap_handle_t *tdhp;
671 661 /*
672 662 * If large page size was used in hat_devload(),
673 663 * the same page size must be used in hat_unload().
674 664 */
675 665 dhpp = tdhp = devmap_find_handle(dhp, addr);
676 666 while (tdhp != NULL) {
677 667 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
678 668 break;
679 669 }
680 670 tdhp = tdhp->dh_next;
681 671 }
682 672 if (tdhp != NULL) { /* found a dhp using large pages */
683 673 size_t slen = len;
684 674 size_t mlen;
685 675 size_t soff;
686 676
687 677 soff = (ulong_t)(addr - dhpp->dh_uvaddr);
688 678 while (slen != 0) {
689 679 mlen = MIN(slen, (dhpp->dh_len - soff));
690 680 hat_unload(seg->s_as->a_hat, dhpp->dh_uvaddr,
691 681 dhpp->dh_len, HAT_UNLOAD_UNMAP);
692 682 dhpp = dhpp->dh_next;
693 683 ASSERT(slen >= mlen);
694 684 slen -= mlen;
695 685 soff = 0;
696 686 }
697 687 } else
698 688 hat_unload(seg->s_as->a_hat, addr, len,
699 689 HAT_UNLOAD_UNMAP);
700 690 } else {
701 691 /*
702 692 * Unload any hardware translations in the range
703 693 * to be taken out.
704 694 */
705 695 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD_UNMAP);
706 696 }
707 697
708 698 /*
709 699 * get the user offset which will be used in the driver callbacks
710 700 */
711 701 off = sdp->offset + (offset_t)(addr - seg->s_base);
712 702
713 703 /*
714 704 * Inform the vnode of the unmapping.
715 705 */
716 706 ASSERT(sdp->vp != NULL);
717 707 (void) VOP_DELMAP(VTOCVP(sdp->vp), off, seg->s_as, addr, len,
718 708 sdp->prot, sdp->maxprot, sdp->type, CRED(), NULL);
719 709
720 710 /*
721 711 * Check for entire segment
722 712 */
723 713 if (addr == seg->s_base && len == seg->s_size) {
724 714 seg_free(seg);
725 715 return (0);
726 716 }
727 717
728 718 opages = seg_pages(seg);
729 719 dpages = btop(len);
730 720 npages = opages - dpages;
731 721
732 722 /*
733 723 * Check for beginning of segment
734 724 */
735 725 if (addr == seg->s_base) {
736 726 if (sdp->vpage != NULL) {
737 727 register struct vpage *ovpage;
738 728
739 729 ovpage = sdp->vpage; /* keep pointer to vpage */
740 730
741 731 nbytes = vpgtob(npages);
742 732 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
743 733 bcopy(&ovpage[dpages], sdp->vpage, nbytes);
744 734
745 735 /* free up old vpage */
746 736 kmem_free(ovpage, vpgtob(opages));
747 737 }
748 738
749 739 /*
750 740 * free devmap handles from the beginning of the mapping.
751 741 */
752 742 if (dhp != NULL)
753 743 devmap_handle_unmap_head(dhp, len);
754 744
755 745 sdp->offset += (offset_t)len;
756 746
757 747 seg->s_base += len;
758 748 seg->s_size -= len;
759 749
760 750 return (0);
761 751 }
762 752
763 753 /*
764 754 * Check for end of segment
765 755 */
766 756 if (addr + len == seg->s_base + seg->s_size) {
767 757 if (sdp->vpage != NULL) {
768 758 register struct vpage *ovpage;
769 759
770 760 ovpage = sdp->vpage; /* keep pointer to vpage */
771 761
772 762 nbytes = vpgtob(npages);
773 763 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
774 764 bcopy(ovpage, sdp->vpage, nbytes);
775 765
776 766 /* free up old vpage */
777 767 kmem_free(ovpage, vpgtob(opages));
778 768 }
779 769 seg->s_size -= len;
780 770
781 771 /*
782 772 * free devmap handles from addr to the end of the mapping.
783 773 */
784 774 if (dhp != NULL)
785 775 devmap_handle_unmap_tail(dhp, addr);
786 776
787 777 return (0);
788 778 }
789 779
790 780 /*
791 781 * The section to go is in the middle of the segment,
792 782 * have to make it into two segments. nseg is made for
793 783 * the high end while seg is cut down at the low end.
794 784 */
795 785 nbase = addr + len; /* new seg base */
796 786 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
797 787 seg->s_size = addr - seg->s_base; /* shrink old seg */
798 788 nseg = seg_alloc(seg->s_as, nbase, nsize);
799 789 if (nseg == NULL)
800 790 panic("segdev_unmap seg_alloc");
801 791
802 792 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK2,
803 793 "segdev_unmap: seg=%p nseg=%p", (void *)seg, (void *)nseg);
804 794 DEBUGF(3, (CE_CONT, "segdev_unmap: segdev_dup seg %p nseg %p\n",
805 795 (void *)seg, (void *)nseg));
806 796 nsdp = sdp_alloc();
807 797
808 798 nseg->s_ops = seg->s_ops;
809 799 nseg->s_data = (void *)nsdp;
810 800
811 801 VN_HOLD(sdp->vp);
812 802 nsdp->mapfunc = sdp->mapfunc;
813 803 nsdp->offset = sdp->offset + (offset_t)(nseg->s_base - seg->s_base);
814 804 nsdp->vp = sdp->vp;
815 805 nsdp->pageprot = sdp->pageprot;
816 806 nsdp->prot = sdp->prot;
817 807 nsdp->maxprot = sdp->maxprot;
818 808 nsdp->type = sdp->type;
819 809 nsdp->hat_attr = sdp->hat_attr;
820 810 nsdp->hat_flags = sdp->hat_flags;
821 811 nsdp->softlockcnt = 0;
822 812
823 813 /*
824 814 * Initialize per page data if the segment we are
825 815 * dup'ing has per page information.
826 816 */
827 817 if (sdp->vpage != NULL) {
828 818 /* need to split vpage into two arrays */
829 819 register size_t nnbytes;
830 820 register size_t nnpages;
831 821 register struct vpage *ovpage;
832 822
833 823 ovpage = sdp->vpage; /* keep pointer to vpage */
834 824
835 825 npages = seg_pages(seg); /* seg has shrunk */
836 826 nbytes = vpgtob(npages);
837 827 nnpages = seg_pages(nseg);
838 828 nnbytes = vpgtob(nnpages);
839 829
840 830 sdp->vpage = kmem_alloc(nbytes, KM_SLEEP);
841 831 bcopy(ovpage, sdp->vpage, nbytes);
842 832
843 833 nsdp->vpage = kmem_alloc(nnbytes, KM_SLEEP);
844 834 bcopy(&ovpage[npages + dpages], nsdp->vpage, nnbytes);
845 835
846 836 /* free up old vpage */
847 837 kmem_free(ovpage, vpgtob(opages));
848 838 } else
849 839 nsdp->vpage = NULL;
850 840
851 841 /*
852 842 * unmap dhps.
853 843 */
854 844 if (dhp == NULL) {
855 845 nsdp->devmap_data = NULL;
856 846 return (0);
857 847 }
858 848 while (dhp != NULL) {
859 849 callbackops = &dhp->dh_callbackops;
860 850 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_UNMAP_CK3,
861 851 "segdev_unmap: dhp=%p addr=%p", dhp, addr);
862 852 DEBUGF(3, (CE_CONT, "unmap: dhp %p addr %p uvaddr %p len %lx\n",
863 853 (void *)dhp, (void *)addr,
864 854 (void *)dhp->dh_uvaddr, dhp->dh_len));
865 855
866 856 if (addr == (dhp->dh_uvaddr + dhp->dh_len)) {
867 857 dhpp = dhp->dh_next;
868 858 dhp->dh_next = NULL;
869 859 dhp = dhpp;
870 860 } else if (addr > (dhp->dh_uvaddr + dhp->dh_len)) {
871 861 dhp = dhp->dh_next;
872 862 } else if (addr > dhp->dh_uvaddr &&
873 863 (addr + len) < (dhp->dh_uvaddr + dhp->dh_len)) {
874 864 /*
875 865 * <addr, addr+len> is enclosed by dhp.
876 866 * create a newdhp that begins at addr+len and
877 867 * ends at dhp->dh_uvaddr+dhp->dh_len.
878 868 */
879 869 newdhp = kmem_alloc(sizeof (devmap_handle_t), KM_SLEEP);
880 870 HOLD_DHP_LOCK(dhp);
881 871 bcopy(dhp, newdhp, sizeof (devmap_handle_t));
882 872 RELE_DHP_LOCK(dhp);
883 873 newdhp->dh_seg = nseg;
884 874 newdhp->dh_next = dhp->dh_next;
885 875 if (dhp->dh_softlock != NULL)
886 876 newdhp->dh_softlock = devmap_softlock_init(
887 877 newdhp->dh_dev,
888 878 (ulong_t)callbackops->devmap_access);
889 879 if (dhp->dh_ctx != NULL)
890 880 newdhp->dh_ctx = devmap_ctxinit(newdhp->dh_dev,
891 881 (ulong_t)callbackops->devmap_access);
892 882 if (newdhp->dh_flags & DEVMAP_LOCK_INITED) {
893 883 mutex_init(&newdhp->dh_lock,
894 884 NULL, MUTEX_DEFAULT, NULL);
895 885 }
896 886 if (callbackops->devmap_unmap != NULL)
897 887 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
898 888 off, len, dhp, &dhp->dh_pvtp,
899 889 newdhp, &newdhp->dh_pvtp);
900 890 mlen = len + (addr - dhp->dh_uvaddr);
901 891 devmap_handle_reduce_len(newdhp, mlen);
902 892 nsdp->devmap_data = newdhp;
903 893 /* XX Changing len should recalculate LARGE flag */
904 894 dhp->dh_len = addr - dhp->dh_uvaddr;
905 895 dhpp = dhp->dh_next;
906 896 dhp->dh_next = NULL;
907 897 dhp = dhpp;
908 898 } else if ((addr > dhp->dh_uvaddr) &&
909 899 ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len))) {
910 900 mlen = dhp->dh_len + dhp->dh_uvaddr - addr;
911 901 /*
912 902 * <addr, addr+len> spans over dhps.
913 903 */
914 904 if (callbackops->devmap_unmap != NULL)
915 905 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
916 906 off, mlen, (devmap_cookie_t *)dhp,
917 907 &dhp->dh_pvtp, NULL, NULL);
918 908 /* XX Changing len should recalculate LARGE flag */
919 909 dhp->dh_len = addr - dhp->dh_uvaddr;
920 910 dhpp = dhp->dh_next;
921 911 dhp->dh_next = NULL;
922 912 dhp = dhpp;
923 913 nsdp->devmap_data = dhp;
924 914 } else if ((addr + len) >= (dhp->dh_uvaddr + dhp->dh_len)) {
925 915 /*
926 916 * dhp is enclosed by <addr, addr+len>.
927 917 */
928 918 dhp->dh_seg = nseg;
929 919 nsdp->devmap_data = dhp;
930 920 dhp = devmap_handle_unmap(dhp);
931 921 nsdp->devmap_data = dhp; /* XX redundant? */
932 922 } else if (((addr + len) > dhp->dh_uvaddr) &&
933 923 ((addr + len) < (dhp->dh_uvaddr + dhp->dh_len))) {
934 924 mlen = addr + len - dhp->dh_uvaddr;
935 925 if (callbackops->devmap_unmap != NULL)
936 926 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
937 927 dhp->dh_uoff, mlen, NULL,
938 928 NULL, dhp, &dhp->dh_pvtp);
939 929 devmap_handle_reduce_len(dhp, mlen);
940 930 nsdp->devmap_data = dhp;
941 931 dhp->dh_seg = nseg;
942 932 dhp = dhp->dh_next;
943 933 } else {
944 934 dhp->dh_seg = nseg;
945 935 dhp = dhp->dh_next;
946 936 }
947 937 }
948 938 return (0);
949 939 }
950 940
951 941 /*
952 942 * Utility function handles reducing the length of a devmap handle during unmap
953 943 * Note that this is only used for unmapping the front portion of the handle,
954 944 * i.e., we are bumping up the offset/pfn etc. by len
955 945 * Do not use if reducing length at the tail.
956 946 */
957 947 static void
958 948 devmap_handle_reduce_len(devmap_handle_t *dhp, size_t len)
959 949 {
960 950 struct ddi_umem_cookie *cp;
961 951 struct devmap_pmem_cookie *pcp;
962 952 /*
963 953 * adjust devmap handle fields
964 954 */
965 955 ASSERT(len < dhp->dh_len);
966 956
967 957 /* Make sure only page-aligned changes are done */
968 958 ASSERT((len & PAGEOFFSET) == 0);
969 959
970 960 dhp->dh_len -= len;
971 961 dhp->dh_uoff += (offset_t)len;
972 962 dhp->dh_roff += (offset_t)len;
973 963 dhp->dh_uvaddr += len;
974 964 /* Need to grab dhp lock if REMAP */
975 965 HOLD_DHP_LOCK(dhp);
976 966 cp = dhp->dh_cookie;
977 967 if (!(dhp->dh_flags & DEVMAP_MAPPING_INVALID)) {
978 968 if (cookie_is_devmem(cp)) {
979 969 dhp->dh_pfn += btop(len);
980 970 } else if (cookie_is_pmem(cp)) {
981 971 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
982 972 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
983 973 dhp->dh_roff < ptob(pcp->dp_npages));
984 974 } else {
985 975 ASSERT(dhp->dh_roff < cp->size);
986 976 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
987 977 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
988 978 ASSERT((dhp->dh_cvaddr + len) <=
989 979 (cp->cvaddr + cp->size));
990 980
991 981 dhp->dh_cvaddr += len;
992 982 }
993 983 }
994 984 /* XXX - Should recalculate the DEVMAP_FLAG_LARGE after changes */
995 985 RELE_DHP_LOCK(dhp);
996 986 }
997 987
998 988 /*
999 989 * Free devmap handle, dhp.
1000 990 * Return the next devmap handle on the linked list.
1001 991 */
1002 992 static devmap_handle_t *
1003 993 devmap_handle_unmap(devmap_handle_t *dhp)
1004 994 {
1005 995 struct devmap_callback_ctl *callbackops = &dhp->dh_callbackops;
1006 996 struct segdev_data *sdp = (struct segdev_data *)dhp->dh_seg->s_data;
1007 997 devmap_handle_t *dhpp = (devmap_handle_t *)sdp->devmap_data;
1008 998
1009 999 ASSERT(dhp != NULL);
1010 1000
1011 1001 /*
1012 1002 * before we free up dhp, call the driver's devmap_unmap entry point
1013 1003 * to free resources allocated for this dhp.
1014 1004 */
1015 1005 if (callbackops->devmap_unmap != NULL) {
1016 1006 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp, dhp->dh_uoff,
1017 1007 dhp->dh_len, NULL, NULL, NULL, NULL);
1018 1008 }
1019 1009
1020 1010 if (dhpp == dhp) { /* releasing first dhp, change sdp data */
1021 1011 sdp->devmap_data = dhp->dh_next;
1022 1012 } else {
1023 1013 while (dhpp->dh_next != dhp) {
1024 1014 dhpp = dhpp->dh_next;
1025 1015 }
1026 1016 dhpp->dh_next = dhp->dh_next;
1027 1017 }
1028 1018 dhpp = dhp->dh_next; /* return value is next dhp in chain */
1029 1019
1030 1020 if (dhp->dh_softlock != NULL)
1031 1021 devmap_softlock_rele(dhp);
1032 1022
1033 1023 if (dhp->dh_ctx != NULL)
1034 1024 devmap_ctx_rele(dhp);
1035 1025
1036 1026 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1037 1027 mutex_destroy(&dhp->dh_lock);
1038 1028 }
1039 1029 kmem_free(dhp, sizeof (devmap_handle_t));
1040 1030
1041 1031 return (dhpp);
1042 1032 }
1043 1033
1044 1034 /*
1045 1035 * Free complete devmap handles from dhp for len bytes
1046 1036 * dhp can be either the first handle or a subsequent handle
1047 1037 */
1048 1038 static void
1049 1039 devmap_handle_unmap_head(devmap_handle_t *dhp, size_t len)
1050 1040 {
1051 1041 struct devmap_callback_ctl *callbackops;
1052 1042
1053 1043 /*
1054 1044 * free the devmap handles covered by len.
1055 1045 */
1056 1046 while (len >= dhp->dh_len) {
1057 1047 len -= dhp->dh_len;
1058 1048 dhp = devmap_handle_unmap(dhp);
1059 1049 }
1060 1050 if (len != 0) { /* partial unmap at head of first remaining dhp */
1061 1051 callbackops = &dhp->dh_callbackops;
1062 1052
1063 1053 /*
1064 1054 * Call the unmap callback so the drivers can make
1065 1055 * adjustment on its private data.
1066 1056 */
1067 1057 if (callbackops->devmap_unmap != NULL)
1068 1058 (*callbackops->devmap_unmap)(dhp, dhp->dh_pvtp,
1069 1059 dhp->dh_uoff, len, NULL, NULL, dhp, &dhp->dh_pvtp);
1070 1060 devmap_handle_reduce_len(dhp, len);
1071 1061 }
1072 1062 }
1073 1063
1074 1064 /*
1075 1065 * Free devmap handles to truncate the mapping after addr
1076 1066 * RFE: Simpler to pass in dhp pointing at correct dhp (avoid find again)
1077 1067 * Also could then use the routine in middle unmap case too
1078 1068 */
1079 1069 static void
1080 1070 devmap_handle_unmap_tail(devmap_handle_t *dhp, caddr_t addr)
1081 1071 {
1082 1072 register struct seg *seg = dhp->dh_seg;
1083 1073 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1084 1074 register devmap_handle_t *dhph = (devmap_handle_t *)sdp->devmap_data;
1085 1075 struct devmap_callback_ctl *callbackops;
1086 1076 register devmap_handle_t *dhpp;
1087 1077 size_t maplen;
1088 1078 ulong_t off;
1089 1079 size_t len;
1090 1080
1091 1081 maplen = (size_t)(addr - dhp->dh_uvaddr);
1092 1082 dhph = devmap_find_handle(dhph, addr);
1093 1083
1094 1084 while (dhph != NULL) {
1095 1085 if (maplen == 0) {
1096 1086 dhph = devmap_handle_unmap(dhph);
1097 1087 } else {
1098 1088 callbackops = &dhph->dh_callbackops;
1099 1089 len = dhph->dh_len - maplen;
1100 1090 off = (ulong_t)sdp->offset + (addr - seg->s_base);
1101 1091 /*
1102 1092 * Call the unmap callback so the driver
1103 1093 * can make adjustments on its private data.
1104 1094 */
1105 1095 if (callbackops->devmap_unmap != NULL)
1106 1096 (*callbackops->devmap_unmap)(dhph,
1107 1097 dhph->dh_pvtp, off, len,
1108 1098 (devmap_cookie_t *)dhph,
1109 1099 &dhph->dh_pvtp, NULL, NULL);
1110 1100 /* XXX Reducing len needs to recalculate LARGE flag */
1111 1101 dhph->dh_len = maplen;
1112 1102 maplen = 0;
1113 1103 dhpp = dhph->dh_next;
1114 1104 dhph->dh_next = NULL;
1115 1105 dhph = dhpp;
1116 1106 }
1117 1107 } /* end while */
1118 1108 }
1119 1109
1120 1110 /*
1121 1111 * Free a segment.
1122 1112 */
1123 1113 static void
1124 1114 segdev_free(struct seg *seg)
1125 1115 {
1126 1116 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1127 1117 devmap_handle_t *dhp = (devmap_handle_t *)sdp->devmap_data;
1128 1118
1129 1119 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FREE,
1130 1120 "segdev_free: dhp=%p seg=%p", (void *)dhp, (void *)seg);
1131 1121 DEBUGF(3, (CE_CONT, "segdev_free: dhp %p seg %p\n",
1132 1122 (void *)dhp, (void *)seg));
1133 1123
1134 1124 /*
1135 1125 * Since the address space is "write" locked, we
1136 1126 * don't need the segment lock to protect "segdev" data.
1137 1127 */
1138 1128 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1139 1129
1140 1130 while (dhp != NULL)
1141 1131 dhp = devmap_handle_unmap(dhp);
1142 1132
1143 1133 VN_RELE(sdp->vp);
1144 1134 if (sdp->vpage != NULL)
1145 1135 kmem_free(sdp->vpage, vpgtob(seg_pages(seg)));
1146 1136
1147 1137 rw_destroy(&sdp->lock);
1148 1138 kmem_free(sdp, sizeof (*sdp));
1149 1139 }
1150 1140
1151 1141 static void
1152 1142 free_devmap_handle(devmap_handle_t *dhp)
1153 1143 {
1154 1144 register devmap_handle_t *dhpp;
1155 1145
1156 1146 /*
1157 1147 * free up devmap handle
1158 1148 */
1159 1149 while (dhp != NULL) {
1160 1150 dhpp = dhp->dh_next;
1161 1151 if (dhp->dh_flags & DEVMAP_LOCK_INITED) {
1162 1152 mutex_destroy(&dhp->dh_lock);
1163 1153 }
1164 1154
1165 1155 if (dhp->dh_softlock != NULL)
1166 1156 devmap_softlock_rele(dhp);
1167 1157
1168 1158 if (dhp->dh_ctx != NULL)
1169 1159 devmap_ctx_rele(dhp);
1170 1160
1171 1161 kmem_free(dhp, sizeof (devmap_handle_t));
1172 1162 dhp = dhpp;
1173 1163 }
1174 1164 }
1175 1165
1176 1166 /*
1177 1167 * routines to lock and unlock underlying segkp segment for
1178 1168 * KMEM_PAGEABLE type cookies.
1179 1169 * segkp only allows a single pending F_SOFTLOCK
1180 1170 * we keep track of number of locks in the cookie so we can
1181 1171 * have multiple pending faults and manage the calls to segkp.
1182 1172 * RFE: if segkp supports either pagelock or can support multiple
1183 1173 * calls to F_SOFTLOCK, then these routines can go away.
1184 1174 * If pagelock, segdev_faultpage can fault on a page by page basis
1185 1175 * and simplifies the code quite a bit.
1186 1176 * if multiple calls allowed but not partial ranges, then need for
1187 1177 * cookie->lock and locked count goes away, code can call as_fault directly
1188 1178 */
1189 1179 static faultcode_t
1190 1180 acquire_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1191 1181 {
1192 1182 int err = 0;
1193 1183 ASSERT(cookie_is_kpmem(cookie));
1194 1184 /*
1195 1185 * Fault in pages in segkp with F_SOFTLOCK.
1196 1186 * We want to hold the lock until all pages have been loaded.
1197 1187 * segkp only allows single caller to hold SOFTLOCK, so cookie
1198 1188 * holds a count so we don't call into segkp multiple times
1199 1189 */
1200 1190 mutex_enter(&cookie->lock);
1201 1191
1202 1192 /*
1203 1193 * Check for overflow in locked field
1204 1194 */
1205 1195 if ((UINT32_MAX - cookie->locked) < npages) {
1206 1196 err = FC_MAKE_ERR(ENOMEM);
1207 1197 } else if (cookie->locked == 0) {
1208 1198 /* First time locking */
1209 1199 err = as_fault(kas.a_hat, &kas, cookie->cvaddr,
1210 1200 cookie->size, F_SOFTLOCK, PROT_READ|PROT_WRITE);
1211 1201 }
1212 1202 if (!err) {
1213 1203 cookie->locked += npages;
1214 1204 }
1215 1205 mutex_exit(&cookie->lock);
1216 1206 return (err);
1217 1207 }
1218 1208
1219 1209 static void
1220 1210 release_kpmem_lock(struct ddi_umem_cookie *cookie, size_t npages)
1221 1211 {
1222 1212 mutex_enter(&cookie->lock);
1223 1213 ASSERT(cookie_is_kpmem(cookie));
1224 1214 ASSERT(cookie->locked >= npages);
1225 1215 cookie->locked -= (uint_t)npages;
1226 1216 if (cookie->locked == 0) {
1227 1217 /* Last unlock */
1228 1218 if (as_fault(kas.a_hat, &kas, cookie->cvaddr,
1229 1219 cookie->size, F_SOFTUNLOCK, PROT_READ|PROT_WRITE))
1230 1220 panic("segdev releasing kpmem lock %p", (void *)cookie);
1231 1221 }
1232 1222 mutex_exit(&cookie->lock);
1233 1223 }
1234 1224
1235 1225 /*
1236 1226 * Routines to synchronize F_SOFTLOCK and F_INVAL faults for
1237 1227 * drivers with devmap_access callbacks
1238 1228 * slock->softlocked basically works like a rw lock
1239 1229 * -ve counts => F_SOFTLOCK in progress
1240 1230 * +ve counts => F_INVAL/F_PROT in progress
1241 1231 * We allow only one F_SOFTLOCK at a time
1242 1232 * but can have multiple pending F_INVAL/F_PROT calls
1243 1233 *
1244 1234 * This routine waits using cv_wait_sig so killing processes is more graceful
1245 1235 * Returns EINTR if coming out of this routine due to a signal, 0 otherwise
1246 1236 */
1247 1237 static int devmap_softlock_enter(
1248 1238 struct devmap_softlock *slock,
1249 1239 size_t npages,
1250 1240 enum fault_type type)
1251 1241 {
1252 1242 if (npages == 0)
1253 1243 return (0);
1254 1244 mutex_enter(&(slock->lock));
1255 1245 switch (type) {
1256 1246 case F_SOFTLOCK :
1257 1247 while (slock->softlocked) {
1258 1248 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1259 1249 /* signalled */
1260 1250 mutex_exit(&(slock->lock));
1261 1251 return (EINTR);
1262 1252 }
1263 1253 }
1264 1254 slock->softlocked -= npages; /* -ve count => locked */
1265 1255 break;
1266 1256 case F_INVAL :
1267 1257 case F_PROT :
1268 1258 while (slock->softlocked < 0)
1269 1259 if (cv_wait_sig(&(slock)->cv, &(slock)->lock) == 0) {
1270 1260 /* signalled */
1271 1261 mutex_exit(&(slock->lock));
1272 1262 return (EINTR);
1273 1263 }
1274 1264 slock->softlocked += npages; /* +ve count => f_invals */
1275 1265 break;
1276 1266 default:
1277 1267 ASSERT(0);
1278 1268 }
1279 1269 mutex_exit(&(slock->lock));
1280 1270 return (0);
1281 1271 }
1282 1272
1283 1273 static void devmap_softlock_exit(
1284 1274 struct devmap_softlock *slock,
1285 1275 size_t npages,
1286 1276 enum fault_type type)
1287 1277 {
1288 1278 if (slock == NULL)
1289 1279 return;
1290 1280 mutex_enter(&(slock->lock));
1291 1281 switch (type) {
1292 1282 case F_SOFTLOCK :
1293 1283 ASSERT(-slock->softlocked >= npages);
1294 1284 slock->softlocked += npages; /* -ve count is softlocked */
1295 1285 if (slock->softlocked == 0)
1296 1286 cv_signal(&slock->cv);
1297 1287 break;
1298 1288 case F_INVAL :
1299 1289 case F_PROT:
1300 1290 ASSERT(slock->softlocked >= npages);
1301 1291 slock->softlocked -= npages;
1302 1292 if (slock->softlocked == 0)
1303 1293 cv_signal(&slock->cv);
1304 1294 break;
1305 1295 default:
1306 1296 ASSERT(0);
1307 1297 }
1308 1298 mutex_exit(&(slock->lock));
1309 1299 }
1310 1300
1311 1301 /*
1312 1302 * Do a F_SOFTUNLOCK call over the range requested.
1313 1303 * The range must have already been F_SOFTLOCK'ed.
1314 1304 * The segment lock should be held, (but not the segment private lock?)
1315 1305 * The softunlock code below does not adjust for large page sizes; it
1316 1306 * assumes the caller already did any addr/len adjustments for
1317 1307 * pagesize mappings before calling.
1318 1308 */
1319 1309 /*ARGSUSED*/
1320 1310 static void
1321 1311 segdev_softunlock(
1322 1312 struct hat *hat, /* the hat */
1323 1313 struct seg *seg, /* seg_dev of interest */
1324 1314 caddr_t addr, /* base address of range */
1325 1315 size_t len, /* number of bytes */
1326 1316 enum seg_rw rw) /* type of access at fault */
1327 1317 {
1328 1318 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1329 1319 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1330 1320
1331 1321 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SOFTUNLOCK,
1332 1322 "segdev_softunlock:dhp_head=%p sdp=%p addr=%p len=%lx",
1333 1323 dhp_head, sdp, addr, len);
1334 1324 DEBUGF(3, (CE_CONT, "segdev_softunlock: dhp %p lockcnt %lx "
1335 1325 "addr %p len %lx\n",
1336 1326 (void *)dhp_head, sdp->softlockcnt, (void *)addr, len));
1337 1327
1338 1328 hat_unlock(hat, addr, len);
1339 1329
1340 1330 if (dhp_head != NULL) {
1341 1331 devmap_handle_t *dhp;
1342 1332 size_t mlen;
1343 1333 size_t tlen = len;
1344 1334 ulong_t off;
1345 1335
1346 1336 dhp = devmap_find_handle(dhp_head, addr);
1347 1337 ASSERT(dhp != NULL);
1348 1338
1349 1339 off = (ulong_t)(addr - dhp->dh_uvaddr);
1350 1340 while (tlen != 0) {
1351 1341 mlen = MIN(tlen, (dhp->dh_len - off));
1352 1342
1353 1343 /*
1354 1344 * unlock segkp memory, locked during F_SOFTLOCK
1355 1345 */
1356 1346 if (dhp_is_kpmem(dhp)) {
1357 1347 release_kpmem_lock(
1358 1348 (struct ddi_umem_cookie *)dhp->dh_cookie,
1359 1349 btopr(mlen));
1360 1350 }
1361 1351
1362 1352 /*
1363 1353 * Do the softlock accounting for devmap_access
1364 1354 */
1365 1355 if (dhp->dh_callbackops.devmap_access != NULL) {
1366 1356 devmap_softlock_exit(dhp->dh_softlock,
1367 1357 btopr(mlen), F_SOFTLOCK);
1368 1358 }
1369 1359
1370 1360 tlen -= mlen;
1371 1361 dhp = dhp->dh_next;
1372 1362 off = 0;
1373 1363 }
1374 1364 }
1375 1365
1376 1366 mutex_enter(&freemem_lock);
1377 1367 ASSERT(sdp->softlockcnt >= btopr(len));
1378 1368 sdp->softlockcnt -= btopr(len);
1379 1369 mutex_exit(&freemem_lock);
1380 1370 if (sdp->softlockcnt == 0) {
1381 1371 /*
1382 1372 * All SOFTLOCKS are gone. Wakeup any waiting
1383 1373 * unmappers so they can try again to unmap.
1384 1374 * Check for waiters first without the mutex
1385 1375 * held so we don't always grab the mutex on
1386 1376 * softunlocks.
1387 1377 */
1388 1378 if (AS_ISUNMAPWAIT(seg->s_as)) {
1389 1379 mutex_enter(&seg->s_as->a_contents);
1390 1380 if (AS_ISUNMAPWAIT(seg->s_as)) {
1391 1381 AS_CLRUNMAPWAIT(seg->s_as);
1392 1382 cv_broadcast(&seg->s_as->a_cv);
1393 1383 }
1394 1384 mutex_exit(&seg->s_as->a_contents);
1395 1385 }
1396 1386 }
1397 1387
1398 1388 }
1399 1389
1400 1390 /*
1401 1391 * Handle fault for a single page.
1402 1392 * Done in a separate routine so we can handle errors more easily.
1403 1393 * This routine is called only from segdev_faultpages()
1404 1394 * when looping over the range of addresses requested. The segment lock is held.
1405 1395 */
1406 1396 static faultcode_t
1407 1397 segdev_faultpage(
1408 1398 struct hat *hat, /* the hat */
1409 1399 struct seg *seg, /* seg_dev of interest */
1410 1400 caddr_t addr, /* address in as */
1411 1401 struct vpage *vpage, /* pointer to vpage for seg, addr */
1412 1402 enum fault_type type, /* type of fault */
1413 1403 enum seg_rw rw, /* type of access at fault */
1414 1404 devmap_handle_t *dhp) /* devmap handle if any for this page */
1415 1405 {
1416 1406 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1417 1407 uint_t prot;
1418 1408 pfn_t pfnum = PFN_INVALID;
1419 1409 u_offset_t offset;
1420 1410 uint_t hat_flags;
1421 1411 dev_info_t *dip;
1422 1412
1423 1413 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE,
1424 1414 "segdev_faultpage: dhp=%p seg=%p addr=%p", dhp, seg, addr);
1425 1415 DEBUGF(8, (CE_CONT, "segdev_faultpage: dhp %p seg %p addr %p \n",
1426 1416 (void *)dhp, (void *)seg, (void *)addr));
1427 1417
1428 1418 /*
1429 1419 * Initialize protection value for this page.
1430 1420 * If we have per page protection values check it now.
1431 1421 */
1432 1422 if (sdp->pageprot) {
1433 1423 uint_t protchk;
1434 1424
1435 1425 switch (rw) {
1436 1426 case S_READ:
1437 1427 protchk = PROT_READ;
1438 1428 break;
1439 1429 case S_WRITE:
1440 1430 protchk = PROT_WRITE;
1441 1431 break;
1442 1432 case S_EXEC:
1443 1433 protchk = PROT_EXEC;
1444 1434 break;
1445 1435 case S_OTHER:
1446 1436 default:
1447 1437 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1448 1438 break;
1449 1439 }
1450 1440
1451 1441 prot = VPP_PROT(vpage);
1452 1442 if ((prot & protchk) == 0)
1453 1443 return (FC_PROT); /* illegal access type */
1454 1444 } else {
1455 1445 prot = sdp->prot;
1456 1446 /* caller has already done segment level protection check */
1457 1447 }
1458 1448
1459 1449 if (type == F_SOFTLOCK) {
1460 1450 mutex_enter(&freemem_lock);
1461 1451 sdp->softlockcnt++;
1462 1452 mutex_exit(&freemem_lock);
1463 1453 }
1464 1454
1465 1455 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
1466 1456 offset = sdp->offset + (u_offset_t)(addr - seg->s_base);
1467 1457 /*
1468 1458 * In the devmap framework, sdp->mapfunc is set to NULL. We can get
1469 1459 * pfnum from dhp->dh_pfn (at beginning of segment) and offset from
1470 1460 * seg->s_base.
1471 1461 */
1472 1462 if (dhp == NULL) {
1473 1463 /* If segment has devmap_data, then dhp should be non-NULL */
1474 1464 ASSERT(sdp->devmap_data == NULL);
1475 1465 pfnum = (pfn_t)cdev_mmap(sdp->mapfunc, sdp->vp->v_rdev,
1476 1466 (off_t)offset, prot);
1477 1467 prot |= sdp->hat_attr;
1478 1468 } else {
1479 1469 ulong_t off;
1480 1470 struct ddi_umem_cookie *cp;
1481 1471 struct devmap_pmem_cookie *pcp;
1482 1472
1483 1473 /* ensure the dhp passed in contains addr. */
1484 1474 ASSERT(dhp == devmap_find_handle(
1485 1475 (devmap_handle_t *)sdp->devmap_data, addr));
1486 1476
1487 1477 off = addr - dhp->dh_uvaddr;
1488 1478
1489 1479 /*
1490 1480 * This routine assumes that the caller makes sure that the
1491 1481 * fields in dhp used below are unchanged due to remap during
1492 1482 * this call. Caller does HOLD_DHP_LOCK if needed
1493 1483 */
1494 1484 cp = dhp->dh_cookie;
1495 1485 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1496 1486 pfnum = PFN_INVALID;
1497 1487 } else if (cookie_is_devmem(cp)) {
1498 1488 pfnum = dhp->dh_pfn + btop(off);
1499 1489 } else if (cookie_is_pmem(cp)) {
1500 1490 pcp = (struct devmap_pmem_cookie *)dhp->dh_pcookie;
1501 1491 ASSERT((dhp->dh_roff & PAGEOFFSET) == 0 &&
1502 1492 dhp->dh_roff < ptob(pcp->dp_npages));
1503 1493 pfnum = page_pptonum(
1504 1494 pcp->dp_pparray[btop(off + dhp->dh_roff)]);
1505 1495 } else {
1506 1496 ASSERT(dhp->dh_roff < cp->size);
1507 1497 ASSERT(dhp->dh_cvaddr >= cp->cvaddr &&
1508 1498 dhp->dh_cvaddr < (cp->cvaddr + cp->size));
1509 1499 ASSERT((dhp->dh_cvaddr + off) <=
1510 1500 (cp->cvaddr + cp->size));
1511 1501 ASSERT((dhp->dh_cvaddr + off + PAGESIZE) <=
1512 1502 (cp->cvaddr + cp->size));
1513 1503
1514 1504 switch (cp->type) {
1515 1505 case UMEM_LOCKED :
1516 1506 if (cp->pparray != NULL) {
1517 1507 ASSERT((dhp->dh_roff &
1518 1508 PAGEOFFSET) == 0);
1519 1509 pfnum = page_pptonum(
1520 1510 cp->pparray[btop(off +
1521 1511 dhp->dh_roff)]);
1522 1512 } else {
1523 1513 pfnum = hat_getpfnum(
1524 1514 ((proc_t *)cp->procp)->p_as->a_hat,
1525 1515 cp->cvaddr + off);
1526 1516 }
1527 1517 break;
1528 1518 case UMEM_TRASH :
1529 1519 pfnum = page_pptonum(trashpp);
1530 1520 /*
1531 1521 * We should set hat_flags to HAT_NOFAULT also
1532 1522 * However, not all hat layers implement this
1533 1523 */
1534 1524 break;
1535 1525 case KMEM_PAGEABLE:
1536 1526 case KMEM_NON_PAGEABLE:
1537 1527 pfnum = hat_getpfnum(kas.a_hat,
1538 1528 dhp->dh_cvaddr + off);
1539 1529 break;
1540 1530 default :
1541 1531 pfnum = PFN_INVALID;
1542 1532 break;
1543 1533 }
1544 1534 }
1545 1535 prot |= dhp->dh_hat_attr;
1546 1536 }
1547 1537 if (pfnum == PFN_INVALID) {
1548 1538 return (FC_MAKE_ERR(EFAULT));
1549 1539 }
1550 1540 /* prot should already be OR'ed in with hat_attributes if needed */
1551 1541
1552 1542 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGE_CK1,
1553 1543 "segdev_faultpage: pfnum=%lx memory=%x prot=%x flags=%x",
1554 1544 pfnum, pf_is_memory(pfnum), prot, hat_flags);
1555 1545 DEBUGF(9, (CE_CONT, "segdev_faultpage: pfnum %lx memory %x "
1556 1546 "prot %x flags %x\n", pfnum, pf_is_memory(pfnum), prot, hat_flags));
1557 1547
1558 1548 if (pf_is_memory(pfnum) || (dhp != NULL)) {
1559 1549 /*
1560 1550 * It's not _really_ required here to pass sdp->hat_flags
1561 1551 * to hat_devload even though we do it.
1562 1552 * This is because hat figures it out DEVMEM mappings
1563 1553 * are non-consistent, anyway.
1564 1554 */
1565 1555 hat_devload(hat, addr, PAGESIZE, pfnum,
1566 1556 prot, hat_flags | sdp->hat_flags);
1567 1557 return (0);
1568 1558 }
1569 1559
1570 1560 /*
1571 1561 * Fall through to the case where devmap is not used and need to call
1572 1562 * up the device tree to set up the mapping
1573 1563 */
1574 1564
1575 1565 dip = VTOS(VTOCVP(sdp->vp))->s_dip;
1576 1566 ASSERT(dip);
1577 1567
1578 1568 /*
1579 1569 * When calling ddi_map_fault, we do not OR in sdp->hat_attr
1580 1570 * This is because this calls drivers which may not expect
1581 1571 * prot to have any other values than PROT_ALL
1582 1572 * The root nexus driver has a hack to peek into the segment
1583 1573 * structure and then OR in sdp->hat_attr.
1584 1574 * XX In case the bus_ops interfaces are ever revisited
1585 1575 * we need to fix this. prot should include other hat attributes
1586 1576 */
1587 1577 if (ddi_map_fault(dip, hat, seg, addr, NULL, pfnum, prot & PROT_ALL,
1588 1578 (uint_t)(type == F_SOFTLOCK)) != DDI_SUCCESS) {
1589 1579 return (FC_MAKE_ERR(EFAULT));
1590 1580 }
1591 1581 return (0);
1592 1582 }
1593 1583
1594 1584 static faultcode_t
1595 1585 segdev_fault(
1596 1586 struct hat *hat, /* the hat */
1597 1587 struct seg *seg, /* the seg_dev of interest */
1598 1588 caddr_t addr, /* the address of the fault */
1599 1589 size_t len, /* the length of the range */
1600 1590 enum fault_type type, /* type of fault */
1601 1591 enum seg_rw rw) /* type of access at fault */
1602 1592 {
1603 1593 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1604 1594 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
1605 1595 devmap_handle_t *dhp;
1606 1596 struct devmap_softlock *slock = NULL;
1607 1597 ulong_t slpage = 0;
1608 1598 ulong_t off;
1609 1599 caddr_t maddr = addr;
1610 1600 int err;
1611 1601 int err_is_faultcode = 0;
1612 1602
1613 1603 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_FAULT,
1614 1604 "segdev_fault: dhp_head=%p seg=%p addr=%p len=%lx type=%x",
1615 1605 (void *)dhp_head, (void *)seg, (void *)addr, len, type);
1616 1606 DEBUGF(7, (CE_CONT, "segdev_fault: dhp_head %p seg %p "
1617 1607 "addr %p len %lx type %x\n",
1618 1608 (void *)dhp_head, (void *)seg, (void *)addr, len, type));
1619 1609
1620 1610 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
1621 1611
1622 1612 /* Handle non-devmap case */
1623 1613 if (dhp_head == NULL)
1624 1614 return (segdev_faultpages(hat, seg, addr, len, type, rw, NULL));
1625 1615
1626 1616 /* Find devmap handle */
1627 1617 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
1628 1618 return (FC_NOMAP);
1629 1619
1630 1620 /*
1631 1621 * The seg_dev driver does not implement copy-on-write,
1632 1622 * and always loads translations with maximal allowed permissions
1633 1623 	 * but we got a fault trying to access the device.
1634 1624 	 * Servicing the fault is not going to produce any better result.
1635 1625 	 * RFE: If we want devmap_access callbacks to be involved in F_PROT
1636 1626 	 * faults, then the code below is written with that in mind,
1637 1627 	 * pending resolution of the following:
1638 1628 * - determine if the F_INVAL/F_SOFTLOCK syncing
1639 1629 * is needed for F_PROT also or not. The code below assumes it does
1640 1630 * - If driver sees F_PROT and calls devmap_load with same type,
1641 1631 * then segdev_faultpages will fail with FC_PROT anyway, need to
1642 1632 * change that so calls from devmap_load to segdev_faultpages for
1643 1633 * F_PROT type are retagged to F_INVAL.
1644 1634 	 * RFE: Today we don't have drivers that use devmap and want to handle
1645 1635 * F_PROT calls. The code in segdev_fault* is written to allow
1646 1636 * this case but is not tested. A driver that needs this capability
1647 1637 * should be able to remove the short-circuit case; resolve the
1648 1638 * above issues and "should" work.
1649 1639 */
1650 1640 if (type == F_PROT) {
1651 1641 return (FC_PROT);
1652 1642 }
1653 1643
1654 1644 /*
1655 1645 * Loop through dhp list calling devmap_access or segdev_faultpages for
1656 1646 * each devmap handle.
1657 1647 * drivers which implement devmap_access can interpose on faults and do
1658 1648 * device-appropriate special actions before calling devmap_load.
1659 1649 */
1660 1650
1661 1651 /*
1662 1652 * Unfortunately, this simple loop has turned out to expose a variety
1663 1653 	 * of complex problems which result in the following convoluted code.
1664 1654 *
1665 1655 * First, a desire to handle a serialization of F_SOFTLOCK calls
1666 1656 * to the driver within the framework.
1667 1657 * This results in a dh_softlock structure that is on a per device
1668 1658 * (or device instance) basis and serializes devmap_access calls.
1669 1659 * Ideally we would need to do this for underlying
1670 1660 * memory/device regions that are being faulted on
1671 1661 	 * but that is hard to identify and, with REMAP, harder still.
1672 1662 	 * Second, a desire to serialize F_INVAL (and F_PROT) calls w.r.t.
1673 1663 	 * F_SOFTLOCK calls to the driver.
1674 1664 * These serializations are to simplify the driver programmer model.
1675 1665 * To support these two features, the code first goes through the
1676 1666 * devmap handles and counts the pages (slpage) that are covered
1677 1667 * by devmap_access callbacks.
1678 1668 * This part ends with a devmap_softlock_enter call
1679 1669 * which allows only one F_SOFTLOCK active on a device instance,
1680 1670 * but multiple F_INVAL/F_PROTs can be active except when a
1681 1671 * F_SOFTLOCK is active
1682 1672 *
1683 1673 	 * Next, we don't short-circuit the fault code upfront to call
1684 1674 * segdev_softunlock for F_SOFTUNLOCK, because we must use
1685 1675 * the same length when we softlock and softunlock.
1686 1676 *
1687 1677 * -Hat layers may not support softunlocking lengths less than the
1688 1678 * original length when there is large page support.
1689 1679 	 *  -kpmem locking is dependent on keeping the lengths the same.
1690 1680 * -if drivers handled F_SOFTLOCK, they probably also expect to
1691 1681 * see an F_SOFTUNLOCK of the same length
1692 1682 * Hence, if extending lengths during softlock,
1693 1683 * softunlock has to make the same adjustments and goes through
1694 1684 * the same loop calling segdev_faultpages/segdev_softunlock
1695 1685 * But some of the synchronization and error handling is different
1696 1686 */
1697 1687
1698 1688 if (type != F_SOFTUNLOCK) {
1699 1689 devmap_handle_t *dhpp = dhp;
1700 1690 size_t slen = len;
1701 1691
1702 1692 /*
1703 1693 * Calculate count of pages that are :
1704 1694 * a) within the (potentially extended) fault region
1705 1695 * b) AND covered by devmap handle with devmap_access
1706 1696 */
1707 1697 off = (ulong_t)(addr - dhpp->dh_uvaddr);
1708 1698 while (slen != 0) {
1709 1699 size_t mlen;
1710 1700
1711 1701 /*
1712 1702 * Softlocking on a region that allows remap is
1713 1703 * unsupported due to unresolved locking issues
1714 1704 * XXX: unclear what these are?
1715 1705 * One potential is that if there is a pending
1716 1706 * softlock, then a remap should not be allowed
1717 1707 * until the unlock is done. This is easily
1718 1708 * fixed by returning error in devmap*remap on
1719 1709 * checking the dh->dh_softlock->softlocked value
1720 1710 */
1721 1711 if ((type == F_SOFTLOCK) &&
1722 1712 (dhpp->dh_flags & DEVMAP_ALLOW_REMAP)) {
1723 1713 return (FC_NOSUPPORT);
1724 1714 }
1725 1715
1726 1716 mlen = MIN(slen, (dhpp->dh_len - off));
1727 1717 if (dhpp->dh_callbackops.devmap_access) {
1728 1718 size_t llen;
1729 1719 caddr_t laddr;
1730 1720 /*
1731 1721 * use extended length for large page mappings
1732 1722 */
1733 1723 HOLD_DHP_LOCK(dhpp);
1734 1724 if ((sdp->pageprot == 0) &&
1735 1725 (dhpp->dh_flags & DEVMAP_FLAG_LARGE)) {
1736 1726 devmap_get_large_pgsize(dhpp,
1737 1727 mlen, maddr, &llen, &laddr);
1738 1728 } else {
1739 1729 llen = mlen;
1740 1730 }
1741 1731 RELE_DHP_LOCK(dhpp);
1742 1732
1743 1733 slpage += btopr(llen);
1744 1734 slock = dhpp->dh_softlock;
1745 1735 }
1746 1736 maddr += mlen;
1747 1737 ASSERT(slen >= mlen);
1748 1738 slen -= mlen;
1749 1739 dhpp = dhpp->dh_next;
1750 1740 off = 0;
1751 1741 }
1752 1742 /*
1753 1743 		 * synchronize with other faulting threads and wait until safe;
1754 1744 		 * devmap_softlock_enter might return early due to a signal in cv_wait.
1755 1745 *
1756 1746 * devmap_softlock_enter has to be called outside of while loop
1757 1747 * to prevent a deadlock if len spans over multiple dhps.
1758 1748 * dh_softlock is based on device instance and if multiple dhps
1759 1749 * use the same device instance, the second dhp's LOCK call
1760 1750 * will hang waiting on the first to complete.
1761 1751 * devmap_setup verifies that slocks in a dhp_chain are same.
1762 1752 		 * RFE: this deadlock only holds true for F_SOFTLOCK. For
1763 1753 * F_INVAL/F_PROT, since we now allow multiple in parallel,
1764 1754 * we could have done the softlock_enter inside the loop
1765 1755 * and supported multi-dhp mappings with dissimilar devices
1766 1756 */
1767 1757 if (err = devmap_softlock_enter(slock, slpage, type))
1768 1758 return (FC_MAKE_ERR(err));
1769 1759 }
1770 1760
1771 1761 /* reset 'maddr' to the start addr of the range of fault. */
1772 1762 maddr = addr;
1773 1763
1774 1764 	/* calculate the offset corresponding to 'addr' in the first dhp. */
1775 1765 off = (ulong_t)(addr - dhp->dh_uvaddr);
1776 1766
1777 1767 /*
1778 1768 * The fault length may span over multiple dhps.
1779 1769 * Loop until the total length is satisfied.
1780 1770 */
1781 1771 while (len != 0) {
1782 1772 size_t llen;
1783 1773 size_t mlen;
1784 1774 caddr_t laddr;
1785 1775
1786 1776 /*
1787 1777 * mlen is the smaller of 'len' and the length
1788 1778 * from addr to the end of mapping defined by dhp.
1789 1779 */
1790 1780 mlen = MIN(len, (dhp->dh_len - off));
1791 1781
1792 1782 HOLD_DHP_LOCK(dhp);
1793 1783 /*
1794 1784 * Pass the extended length and address to devmap_access
1795 1785 * if large pagesize is used for loading address translations.
1796 1786 */
1797 1787 if ((sdp->pageprot == 0) &&
1798 1788 (dhp->dh_flags & DEVMAP_FLAG_LARGE)) {
1799 1789 devmap_get_large_pgsize(dhp, mlen, maddr,
1800 1790 &llen, &laddr);
1801 1791 ASSERT(maddr == addr || laddr == maddr);
1802 1792 } else {
1803 1793 llen = mlen;
1804 1794 laddr = maddr;
1805 1795 }
1806 1796
1807 1797 if (dhp->dh_callbackops.devmap_access != NULL) {
1808 1798 offset_t aoff;
1809 1799
1810 1800 aoff = sdp->offset + (offset_t)(laddr - seg->s_base);
1811 1801
1812 1802 /*
1813 1803 * call driver's devmap_access entry point which will
1814 1804 * call devmap_load/contextmgmt to load the translations
1815 1805 *
1816 1806 * We drop the dhp_lock before calling access so
1817 1807 * drivers can call devmap_*_remap within access
1818 1808 */
1819 1809 RELE_DHP_LOCK(dhp);
1820 1810
1821 1811 err = (*dhp->dh_callbackops.devmap_access)(
1822 1812 dhp, (void *)dhp->dh_pvtp, aoff, llen, type, rw);
1823 1813 } else {
1824 1814 /*
1825 1815 * If no devmap_access entry point, then load mappings
1826 1816 * hold dhp_lock across faultpages if REMAP
1827 1817 */
1828 1818 err = segdev_faultpages(hat, seg, laddr, llen,
1829 1819 type, rw, dhp);
1830 1820 err_is_faultcode = 1;
1831 1821 RELE_DHP_LOCK(dhp);
1832 1822 }
1833 1823
1834 1824 if (err) {
1835 1825 if ((type == F_SOFTLOCK) && (maddr > addr)) {
1836 1826 /*
1837 1827 * If not first dhp, use
1838 1828 * segdev_fault(F_SOFTUNLOCK) for prior dhps
1839 1829 * While this is recursion, it is incorrect to
1840 1830 * call just segdev_softunlock
1841 1831 * if we are using either large pages
1842 1832 				 * or devmap_access. It is more correct
1843 1833 				 * to go through the same loop as above
1844 1834 				 * rather than call segdev_softunlock directly.
1845 1835 				 * It will use the right lengths as well as
1846 1836 				 * call into the driver devmap_access routines.
1847 1837 */
1848 1838 size_t done = (size_t)(maddr - addr);
1849 1839 (void) segdev_fault(hat, seg, addr, done,
1850 1840 F_SOFTUNLOCK, S_OTHER);
1851 1841 /*
1852 1842 * reduce slpage by number of pages
1853 1843 * released by segdev_softunlock
1854 1844 */
1855 1845 ASSERT(slpage >= btopr(done));
1856 1846 devmap_softlock_exit(slock,
1857 1847 slpage - btopr(done), type);
1858 1848 } else {
1859 1849 devmap_softlock_exit(slock, slpage, type);
1860 1850 }
1861 1851
1862 1852
1863 1853 /*
1864 1854 * Segdev_faultpages() already returns a faultcode,
1865 1855 			 * hence the result from segdev_faultpages() should be
1866 1856 * returned directly.
1867 1857 */
1868 1858 if (err_is_faultcode)
1869 1859 return (err);
1870 1860 return (FC_MAKE_ERR(err));
1871 1861 }
1872 1862
1873 1863 maddr += mlen;
1874 1864 ASSERT(len >= mlen);
1875 1865 len -= mlen;
1876 1866 dhp = dhp->dh_next;
1877 1867 off = 0;
1878 1868
1879 1869 ASSERT(!dhp || len == 0 || maddr == dhp->dh_uvaddr);
1880 1870 }
1881 1871 /*
1882 1872 * release the softlock count at end of fault
1883 1873 	 * For F_SOFTLOCK this is done in the later F_SOFTUNLOCK.
1884 1874 */
1885 1875 if ((type == F_INVAL) || (type == F_PROT))
1886 1876 devmap_softlock_exit(slock, slpage, type);
1887 1877 return (0);
1888 1878 }
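
To make the serialization scheme described in the block comment above concrete, here is a minimal user-level sketch of the softlock discipline: one F_SOFTLOCK at a time per device instance, while multiple F_INVAL/F_PROT faults may proceed in parallel whenever no softlock is active. The structure and function names are illustrative; this is not the kernel's devmap_softlock implementation.

	#include <pthread.h>

	struct softlock_sketch {
		pthread_mutex_t	lock;
		pthread_cond_t	cv;
		long		pages;		/* pages covered by in-flight faults */
		int		softlock_active;
	};

	static int
	sketch_enter(struct softlock_sketch *s, long npages, int is_softlock)
	{
		pthread_mutex_lock(&s->lock);
		if (is_softlock) {
			/* F_SOFTLOCK: wait until no fault of any kind is in flight */
			while (s->softlock_active || s->pages != 0)
				pthread_cond_wait(&s->cv, &s->lock);
			s->softlock_active = 1;
		} else {
			/* F_INVAL/F_PROT: wait only while a softlock is active */
			while (s->softlock_active)
				pthread_cond_wait(&s->cv, &s->lock);
		}
		s->pages += npages;
		pthread_mutex_unlock(&s->lock);
		return (0);	/* the kernel version can also fail on a signal */
	}

	static void
	sketch_exit(struct softlock_sketch *s, long npages, int is_softlock)
	{
		pthread_mutex_lock(&s->lock);
		s->pages -= npages;
		if (is_softlock)
			s->softlock_active = 0;
		if (s->pages == 0 || is_softlock)
			pthread_cond_broadcast(&s->cv);
		pthread_mutex_unlock(&s->lock);
	}
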
1889 1879
1890 1880 /*
1891 1881 * segdev_faultpages
1892 1882 *
1893 1883  * Used to fault in seg_dev segment pages. Called by segdev_fault or devmap_load.
1894 1884  * This routine assumes that the caller makes sure that the fields
1895 1885  * in dhp used below are not changed due to remap during this call.
1896 1886  * The caller does HOLD_DHP_LOCK if needed.
1897 1887 * This routine returns a faultcode_t as a return value for segdev_fault.
1898 1888 */
1899 1889 static faultcode_t
1900 1890 segdev_faultpages(
1901 1891 struct hat *hat, /* the hat */
1902 1892 struct seg *seg, /* the seg_dev of interest */
1903 1893 caddr_t addr, /* the address of the fault */
1904 1894 size_t len, /* the length of the range */
1905 1895 enum fault_type type, /* type of fault */
1906 1896 enum seg_rw rw, /* type of access at fault */
1907 1897 devmap_handle_t *dhp) /* devmap handle */
1908 1898 {
1909 1899 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
1910 1900 register caddr_t a;
1911 1901 struct vpage *vpage;
1912 1902 struct ddi_umem_cookie *kpmem_cookie = NULL;
1913 1903 int err;
1914 1904
1915 1905 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_FAULTPAGES,
1916 1906 "segdev_faultpages: dhp=%p seg=%p addr=%p len=%lx",
1917 1907 (void *)dhp, (void *)seg, (void *)addr, len);
1918 1908 DEBUGF(5, (CE_CONT, "segdev_faultpages: "
1919 1909 "dhp %p seg %p addr %p len %lx\n",
1920 1910 (void *)dhp, (void *)seg, (void *)addr, len));
1921 1911
1922 1912 /*
1923 1913 * The seg_dev driver does not implement copy-on-write,
1924 1914 * and always loads translations with maximal allowed permissions
1925 1915 	 * but we got a fault trying to access the device.
1926 1916 	 * Servicing the fault is not going to produce any better result.
1927 1917 	 * XXX: If we want to allow devmap_access to handle F_PROT calls,
1928 1918 	 * this code should be removed to let the normal fault handling
1929 1919 	 * take care of finding the error.
1930 1920 */
1931 1921 if (type == F_PROT) {
1932 1922 return (FC_PROT);
1933 1923 }
1934 1924
1935 1925 if (type == F_SOFTUNLOCK) {
1936 1926 segdev_softunlock(hat, seg, addr, len, rw);
1937 1927 return (0);
1938 1928 }
1939 1929
1940 1930 /*
1941 1931 * For kernel pageable memory, fault/lock segkp pages
1942 1932 * We hold this until the completion of this
1943 1933 * fault (INVAL/PROT) or till unlock (SOFTLOCK).
1944 1934 */
1945 1935 if ((dhp != NULL) && dhp_is_kpmem(dhp)) {
1946 1936 kpmem_cookie = (struct ddi_umem_cookie *)dhp->dh_cookie;
1947 1937 if (err = acquire_kpmem_lock(kpmem_cookie, btopr(len)))
1948 1938 return (err);
1949 1939 }
1950 1940
1951 1941 /*
1952 1942 * If we have the same protections for the entire segment,
1953 1943 	 * ensure that the access being attempted is legitimate.
1954 1944 */
1955 1945 rw_enter(&sdp->lock, RW_READER);
1956 1946 if (sdp->pageprot == 0) {
1957 1947 uint_t protchk;
1958 1948
1959 1949 switch (rw) {
1960 1950 case S_READ:
1961 1951 protchk = PROT_READ;
1962 1952 break;
1963 1953 case S_WRITE:
1964 1954 protchk = PROT_WRITE;
1965 1955 break;
1966 1956 case S_EXEC:
1967 1957 protchk = PROT_EXEC;
1968 1958 break;
1969 1959 case S_OTHER:
1970 1960 default:
1971 1961 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
1972 1962 break;
1973 1963 }
1974 1964
1975 1965 if ((sdp->prot & protchk) == 0) {
1976 1966 rw_exit(&sdp->lock);
1977 1967 /* undo kpmem locking */
1978 1968 if (kpmem_cookie != NULL) {
1979 1969 release_kpmem_lock(kpmem_cookie, btopr(len));
1980 1970 }
1981 1971 return (FC_PROT); /* illegal access type */
1982 1972 }
1983 1973 }
1984 1974
1985 1975 /*
1986 1976 * we do a single hat_devload for the range if
1987 1977 	 *   - the devmap framework is used (dhp is not NULL),
1988 1978 	 *   - pageprot == 0, i.e., no per-page protection is set, and
1989 1979 	 *   - the pages are device pages, irrespective of whether we are using large pages
1990 1980 */
1991 1981 if ((sdp->pageprot == 0) && (dhp != NULL) && dhp_is_devmem(dhp)) {
1992 1982 pfn_t pfnum;
1993 1983 uint_t hat_flags;
1994 1984
1995 1985 if (dhp->dh_flags & DEVMAP_MAPPING_INVALID) {
1996 1986 rw_exit(&sdp->lock);
1997 1987 return (FC_NOMAP);
1998 1988 }
1999 1989
2000 1990 if (type == F_SOFTLOCK) {
2001 1991 mutex_enter(&freemem_lock);
2002 1992 sdp->softlockcnt += btopr(len);
2003 1993 mutex_exit(&freemem_lock);
2004 1994 }
2005 1995
2006 1996 hat_flags = ((type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD);
2007 1997 pfnum = dhp->dh_pfn + btop((uintptr_t)(addr - dhp->dh_uvaddr));
2008 1998 ASSERT(!pf_is_memory(pfnum));
2009 1999
2010 2000 hat_devload(hat, addr, len, pfnum, sdp->prot | dhp->dh_hat_attr,
2011 2001 hat_flags | sdp->hat_flags);
2012 2002 rw_exit(&sdp->lock);
2013 2003 return (0);
2014 2004 }
2015 2005
2016 2006 /* Handle cases where we have to loop through fault handling per-page */
2017 2007
2018 2008 if (sdp->vpage == NULL)
2019 2009 vpage = NULL;
2020 2010 else
2021 2011 vpage = &sdp->vpage[seg_page(seg, addr)];
2022 2012
2023 2013 /* loop over the address range handling each fault */
2024 2014 for (a = addr; a < addr + len; a += PAGESIZE) {
2025 2015 if (err = segdev_faultpage(hat, seg, a, vpage, type, rw, dhp)) {
2026 2016 break;
2027 2017 }
2028 2018 if (vpage != NULL)
2029 2019 vpage++;
2030 2020 }
2031 2021 rw_exit(&sdp->lock);
2032 2022 if (err && (type == F_SOFTLOCK)) { /* error handling for F_SOFTLOCK */
2033 2023 		size_t done = (size_t)(a - addr); /* pages faulted successfully */
2034 2024 if (done > 0) {
2035 2025 /* use softunlock for those pages */
2036 2026 segdev_softunlock(hat, seg, addr, done, S_OTHER);
2037 2027 }
2038 2028 if (kpmem_cookie != NULL) {
2039 2029 /* release kpmem lock for rest of pages */
2040 2030 ASSERT(len >= done);
2041 2031 release_kpmem_lock(kpmem_cookie, btopr(len - done));
2042 2032 }
2043 2033 } else if ((kpmem_cookie != NULL) && (type != F_SOFTLOCK)) {
2044 2034 /* for non-SOFTLOCK cases, release kpmem */
2045 2035 release_kpmem_lock(kpmem_cookie, btopr(len));
2046 2036 }
2047 2037 return (err);
2048 2038 }
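
The F_SOFTLOCK error path above illustrates a common partial-failure pattern: if locking fails partway through a range, exactly the pages that were already locked get released. A self-contained sketch of the idiom, with hypothetical lock_one/unlock_one stand-ins for the real per-page work:

	#include <stddef.h>

	struct page_sketch {
		int locked;
	};

	static int
	lock_one(struct page_sketch *p)
	{
		p->locked = 1;
		return (0);		/* 0 on success, nonzero on failure */
	}

	static void
	unlock_one(struct page_sketch *p)
	{
		p->locked = 0;
	}

	static int
	lock_pages(struct page_sketch *pp, size_t npages)
	{
		size_t i;

		for (i = 0; i < npages; i++) {
			if (lock_one(&pp[i]) != 0) {
				/* roll back only the pages that succeeded */
				while (i-- > 0)
					unlock_one(&pp[i]);
				return (-1);
			}
		}
		return (0);
	}
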
2049 2039
2050 2040 /*
2051 2041 * Asynchronous page fault. We simply do nothing since this
2052 2042 * entry point is not supposed to load up the translation.
2053 2043 */
2054 2044 /*ARGSUSED*/
2055 2045 static faultcode_t
2056 2046 segdev_faulta(struct seg *seg, caddr_t addr)
2057 2047 {
2058 2048 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_FAULTA,
2059 2049 "segdev_faulta: seg=%p addr=%p", (void *)seg, (void *)addr);
2060 2050 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2061 2051
2062 2052 return (0);
2063 2053 }
2064 2054
2065 2055 static int
2066 2056 segdev_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2067 2057 {
2068 2058 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2069 2059 register devmap_handle_t *dhp;
2070 2060 register struct vpage *vp, *evp;
2071 2061 devmap_handle_t *dhp_head = (devmap_handle_t *)sdp->devmap_data;
2072 2062 ulong_t off;
2073 2063 size_t mlen, sz;
2074 2064
2075 2065 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT,
2076 2066 "segdev_setprot:start seg=%p addr=%p len=%lx prot=%x",
2077 2067 (void *)seg, (void *)addr, len, prot);
2078 2068 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2079 2069
2080 2070 if ((sz = sdp->softlockcnt) > 0 && dhp_head != NULL) {
2081 2071 /*
2082 2072 * Fail the setprot if pages are SOFTLOCKed through this
2083 2073 * mapping.
2084 2074 * Softlockcnt is protected from change by the as read lock.
2085 2075 */
2086 2076 TRACE_1(TR_FAC_DEVMAP, TR_DEVMAP_SETPROT_CK1,
2087 2077 "segdev_setprot:error softlockcnt=%lx", sz);
2088 2078 DEBUGF(1, (CE_CONT, "segdev_setprot: softlockcnt %ld\n", sz));
2089 2079 return (EAGAIN);
2090 2080 }
2091 2081
2092 2082 if (dhp_head != NULL) {
2093 2083 if ((dhp = devmap_find_handle(dhp_head, addr)) == NULL)
2094 2084 return (EINVAL);
2095 2085
2096 2086 /*
2097 2087 		 * check whether prot violates maxprot.
2098 2088 */
2099 2089 off = (ulong_t)(addr - dhp->dh_uvaddr);
2100 2090 mlen = len;
2101 2091 while (dhp) {
2102 2092 if ((dhp->dh_maxprot & prot) != prot)
2103 2093 return (EACCES); /* violated maxprot */
2104 2094
2105 2095 if (mlen > (dhp->dh_len - off)) {
2106 2096 mlen -= dhp->dh_len - off;
2107 2097 dhp = dhp->dh_next;
2108 2098 off = 0;
2109 2099 } else
2110 2100 break;
2111 2101 }
2112 2102 } else {
2113 2103 if ((sdp->maxprot & prot) != prot)
2114 2104 return (EACCES);
2115 2105 }
2116 2106
2117 2107 rw_enter(&sdp->lock, RW_WRITER);
2118 2108 if (addr == seg->s_base && len == seg->s_size && sdp->pageprot == 0) {
2119 2109 if (sdp->prot == prot) {
2120 2110 rw_exit(&sdp->lock);
2121 2111 return (0); /* all done */
2122 2112 }
2123 2113 sdp->prot = (uchar_t)prot;
2124 2114 } else {
2125 2115 sdp->pageprot = 1;
2126 2116 if (sdp->vpage == NULL) {
2127 2117 /*
2128 2118 * First time through setting per page permissions,
2129 2119 * initialize all the vpage structures to prot
2130 2120 */
2131 2121 sdp->vpage = kmem_zalloc(vpgtob(seg_pages(seg)),
2132 2122 KM_SLEEP);
2133 2123 evp = &sdp->vpage[seg_pages(seg)];
2134 2124 for (vp = sdp->vpage; vp < evp; vp++)
2135 2125 VPP_SETPROT(vp, sdp->prot);
2136 2126 }
2137 2127 /*
2138 2128 		 * Now go change the needed vpage protections.
2139 2129 */
2140 2130 evp = &sdp->vpage[seg_page(seg, addr + len)];
2141 2131 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++)
2142 2132 VPP_SETPROT(vp, prot);
2143 2133 }
2144 2134 rw_exit(&sdp->lock);
2145 2135
2146 2136 if (dhp_head != NULL) {
2147 2137 devmap_handle_t *tdhp;
2148 2138 /*
2149 2139 * If large page size was used in hat_devload(),
2150 2140 * the same page size must be used in hat_unload().
2151 2141 */
2152 2142 dhp = tdhp = devmap_find_handle(dhp_head, addr);
2153 2143 while (tdhp != NULL) {
2154 2144 if (tdhp->dh_flags & DEVMAP_FLAG_LARGE) {
2155 2145 break;
2156 2146 }
2157 2147 tdhp = tdhp->dh_next;
2158 2148 }
2159 2149 if (tdhp) {
2160 2150 size_t slen = len;
2161 2151 size_t mlen;
2162 2152 size_t soff;
2163 2153
2164 2154 soff = (ulong_t)(addr - dhp->dh_uvaddr);
2165 2155 while (slen != 0) {
2166 2156 mlen = MIN(slen, (dhp->dh_len - soff));
2167 2157 hat_unload(seg->s_as->a_hat, dhp->dh_uvaddr,
2168 2158 dhp->dh_len, HAT_UNLOAD);
2169 2159 dhp = dhp->dh_next;
2170 2160 ASSERT(slen >= mlen);
2171 2161 slen -= mlen;
2172 2162 soff = 0;
2173 2163 }
2174 2164 return (0);
2175 2165 }
2176 2166 }
2177 2167
2178 2168 if ((prot & ~PROT_USER) == PROT_NONE) {
2179 2169 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
2180 2170 } else {
2181 2171 /*
2182 2172 * RFE: the segment should keep track of all attributes
2183 2173 * allowing us to remove the deprecated hat_chgprot
2184 2174 * and use hat_chgattr.
2185 2175 */
2186 2176 hat_chgprot(seg->s_as->a_hat, addr, len, prot);
2187 2177 }
2188 2178
2189 2179 return (0);
2190 2180 }
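
segdev_setprot above allocates the per-page vpage array lazily: as long as the whole segment shares one protection, no array exists, and the first per-page change seeds every slot with the segment default before the sub-range is modified. A user-level sketch of that pattern (names hypothetical):

	#include <stdlib.h>
	#include <string.h>

	struct seg_sketch {
		unsigned char	def_prot;	/* segment-wide protections */
		unsigned char	*pageprot;	/* per-page array, NULL until needed */
		size_t		npages;
	};

	static int
	set_prot_range(struct seg_sketch *s, size_t pg, size_t npg,
	    unsigned char prot)
	{
		if (s->pageprot == NULL) {
			/* first per-page change: seed every page with the default */
			if ((s->pageprot = malloc(s->npages)) == NULL)
				return (-1);
			memset(s->pageprot, s->def_prot, s->npages);
		}
		/* now change only the requested sub-range */
		memset(s->pageprot + pg, prot, npg);
		return (0);
	}
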
2191 2181
2192 2182 static int
2193 2183 segdev_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
2194 2184 {
2195 2185 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2196 2186 struct vpage *vp, *evp;
2197 2187
2198 2188 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_CHECKPROT,
2199 2189 "segdev_checkprot:start seg=%p addr=%p len=%lx prot=%x",
2200 2190 (void *)seg, (void *)addr, len, prot);
2201 2191 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2202 2192
2203 2193 /*
2204 2194 	 * If segment protections can be used, simply check against them.
2205 2195 */
2206 2196 rw_enter(&sdp->lock, RW_READER);
2207 2197 if (sdp->pageprot == 0) {
2208 2198 register int err;
2209 2199
2210 2200 err = ((sdp->prot & prot) != prot) ? EACCES : 0;
2211 2201 rw_exit(&sdp->lock);
2212 2202 return (err);
2213 2203 }
2214 2204
2215 2205 /*
2216 2206 * Have to check down to the vpage level
2217 2207 */
2218 2208 evp = &sdp->vpage[seg_page(seg, addr + len)];
2219 2209 for (vp = &sdp->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
2220 2210 if ((VPP_PROT(vp) & prot) != prot) {
2221 2211 rw_exit(&sdp->lock);
2222 2212 return (EACCES);
2223 2213 }
2224 2214 }
2225 2215 rw_exit(&sdp->lock);
2226 2216 return (0);
2227 2217 }
2228 2218
2229 2219 static int
2230 2220 segdev_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
2231 2221 {
2232 2222 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2233 2223 size_t pgno;
2234 2224
2235 2225 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_GETPROT,
2236 2226 "segdev_getprot:start seg=%p addr=%p len=%lx protv=%p",
2237 2227 (void *)seg, (void *)addr, len, (void *)protv);
2238 2228 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2239 2229
2240 2230 pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
2241 2231 if (pgno != 0) {
2242 2232 rw_enter(&sdp->lock, RW_READER);
2243 2233 if (sdp->pageprot == 0) {
2244 2234 do {
2245 2235 protv[--pgno] = sdp->prot;
2246 2236 } while (pgno != 0);
2247 2237 } else {
2248 2238 size_t pgoff = seg_page(seg, addr);
2249 2239
2250 2240 do {
2251 2241 pgno--;
2252 2242 protv[pgno] =
2253 2243 VPP_PROT(&sdp->vpage[pgno + pgoff]);
2254 2244 } while (pgno != 0);
2255 2245 }
2256 2246 rw_exit(&sdp->lock);
2257 2247 }
2258 2248 return (0);
2259 2249 }
2260 2250
2261 2251 static u_offset_t
2262 2252 segdev_getoffset(register struct seg *seg, caddr_t addr)
2263 2253 {
2264 2254 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2265 2255
2266 2256 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETOFFSET,
2267 2257 "segdev_getoffset:start seg=%p addr=%p", (void *)seg, (void *)addr);
2268 2258
2269 2259 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2270 2260
2271 2261 return ((u_offset_t)sdp->offset + (addr - seg->s_base));
2272 2262 }
2273 2263
2274 2264 /*ARGSUSED*/
2275 2265 static int
2276 2266 segdev_gettype(register struct seg *seg, caddr_t addr)
2277 2267 {
2278 2268 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2279 2269
2280 2270 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETTYPE,
2281 2271 "segdev_gettype:start seg=%p addr=%p", (void *)seg, (void *)addr);
2282 2272
2283 2273 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2284 2274
2285 2275 return (sdp->type);
2286 2276 }
2287 2277
2288 2278
2289 2279 /*ARGSUSED*/
2290 2280 static int
2291 2281 segdev_getvp(register struct seg *seg, caddr_t addr, struct vnode **vpp)
2292 2282 {
2293 2283 register struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
2294 2284
2295 2285 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_GETVP,
2296 2286 "segdev_getvp:start seg=%p addr=%p", (void *)seg, (void *)addr);
2297 2287
2298 2288 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2299 2289
2300 2290 /*
2301 2291 * Note that this vp is the common_vp of the device, where the
2302 2292 	 * pages are hung.
2303 2293 */
2304 2294 *vpp = VTOCVP(sdp->vp);
2305 2295
2306 2296 return (0);
2307 2297 }
2308 2298
2309 2299 static void
2310 2300 segdev_badop(void)
2311 2301 {
2312 2302 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGDEV_BADOP,
2313 2303 "segdev_badop:start");
2314 2304 panic("segdev_badop");
2315 2305 /*NOTREACHED*/
2316 2306 }
2317 2307
2318 2308 /*
2319 2309 * segdev pages are not in the cache, and thus can't really be controlled.
2320 2310 * Hence, syncs are simply always successful.
2321 2311 */
2322 2312 /*ARGSUSED*/
2323 2313 static int
2324 2314 segdev_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
2325 2315 {
2326 2316 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SYNC, "segdev_sync:start");
2327 2317
2328 2318 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2329 2319
2330 2320 return (0);
2331 2321 }
2332 2322
2333 2323 /*
2334 2324 * segdev pages are always "in core".
2335 2325 */
2336 2326 /*ARGSUSED*/
2337 2327 static size_t
2338 2328 segdev_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
2339 2329 {
2340 2330 size_t v = 0;
2341 2331
2342 2332 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_INCORE, "segdev_incore:start");
2343 2333
2344 2334 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2345 2335
2346 2336 for (len = (len + PAGEOFFSET) & PAGEMASK; len; len -= PAGESIZE,
2347 2337 v += PAGESIZE)
2348 2338 *vec++ = 1;
2349 2339 return (v);
2350 2340 }
2351 2341
2352 2342 /*
2353 2343 * segdev pages are not in the cache, and thus can't really be controlled.
2354 2344 * Hence, locks are simply always successful.
2355 2345 */
2356 2346 /*ARGSUSED*/
2357 2347 static int
2358 2348 segdev_lockop(struct seg *seg, caddr_t addr,
2359 2349 size_t len, int attr, int op, ulong_t *lockmap, size_t pos)
2360 2350 {
2361 2351 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_LOCKOP, "segdev_lockop:start");
2362 2352
2363 2353 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2364 2354
2365 2355 return (0);
2366 2356 }
2367 2357
2368 2358 /*
2369 2359 * segdev pages are not in the cache, and thus can't really be controlled.
2370 2360 * Hence, advise is simply always successful.
2371 2361 */
2372 2362 /*ARGSUSED*/
2373 2363 static int
2374 2364 segdev_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
2375 2365 {
2376 2366 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_ADVISE, "segdev_advise:start");
2377 2367
2378 2368 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2379 2369
2380 2370 return (0);
2381 2371 }
2382 2372
2383 2373 /*
2384 - * segdev pages are not dumped, so we just return
2385 - */
2386 -/*ARGSUSED*/
2387 -static void
2388 -segdev_dump(struct seg *seg)
2389 -{}
2390 -
2391 -/*
2392 2374  * ddi_segmap_setup: Used by drivers who wish to specify mapping attributes
2393 2375  *     for a segment. Called from a driver's segmap(9E)
2394 2376 * routine.
2395 2377 */
2396 2378 /*ARGSUSED*/
2397 2379 int
2398 2380 ddi_segmap_setup(dev_t dev, off_t offset, struct as *as, caddr_t *addrp,
2399 2381 off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cred,
2400 2382 ddi_device_acc_attr_t *accattrp, uint_t rnumber)
2401 2383 {
2402 2384 struct segdev_crargs dev_a;
2403 2385 int (*mapfunc)(dev_t dev, off_t off, int prot);
2404 2386 uint_t hat_attr;
2405 2387 pfn_t pfn;
2406 2388 int error, i;
2407 2389
2408 2390 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP_SETUP,
2409 2391 "ddi_segmap_setup:start");
2410 2392
2411 2393 if ((mapfunc = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap) == nodev)
2412 2394 return (ENODEV);
2413 2395
2414 2396 /*
2415 2397 * Character devices that support the d_mmap
2416 2398 * interface can only be mmap'ed shared.
2417 2399 */
2418 2400 if ((flags & MAP_TYPE) != MAP_SHARED)
2419 2401 return (EINVAL);
2420 2402
2421 2403 /*
2422 2404 * Check that this region is indeed mappable on this platform.
2423 2405 * Use the mapping function.
2424 2406 */
2425 2407 if (ddi_device_mapping_check(dev, accattrp, rnumber, &hat_attr) == -1)
2426 2408 return (ENXIO);
2427 2409
2428 2410 /*
2429 2411 * Check to ensure that the entire range is
2430 2412 * legal and we are not trying to map in
2431 2413 * more than the device will let us.
2432 2414 */
2433 2415 for (i = 0; i < len; i += PAGESIZE) {
2434 2416 if (i == 0) {
2435 2417 /*
2436 2418 * Save the pfn at offset here. This pfn will be
2437 2419 * used later to get user address.
2438 2420 */
2439 2421 if ((pfn = (pfn_t)cdev_mmap(mapfunc, dev, offset,
2440 2422 maxprot)) == PFN_INVALID)
2441 2423 return (ENXIO);
2442 2424 } else {
2443 2425 if (cdev_mmap(mapfunc, dev, offset + i, maxprot) ==
2444 2426 PFN_INVALID)
2445 2427 return (ENXIO);
2446 2428 }
2447 2429 }
2448 2430
2449 2431 as_rangelock(as);
2450 2432 /* Pick an address w/o worrying about any vac alignment constraints. */
2451 2433 error = choose_addr(as, addrp, len, ptob(pfn), ADDR_NOVACALIGN, flags);
2452 2434 if (error != 0) {
2453 2435 as_rangeunlock(as);
2454 2436 return (error);
2455 2437 }
2456 2438
2457 2439 dev_a.mapfunc = mapfunc;
2458 2440 dev_a.dev = dev;
2459 2441 dev_a.offset = (offset_t)offset;
2460 2442 dev_a.type = flags & MAP_TYPE;
2461 2443 dev_a.prot = (uchar_t)prot;
2462 2444 dev_a.maxprot = (uchar_t)maxprot;
2463 2445 dev_a.hat_attr = hat_attr;
2464 2446 dev_a.hat_flags = 0;
2465 2447 dev_a.devmap_data = NULL;
2466 2448
2467 2449 error = as_map(as, *addrp, len, segdev_create, &dev_a);
2468 2450 as_rangeunlock(as);
2469 2451 return (error);
2470 2452
2471 2453 }
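
For reference, a hypothetical character driver's segmap(9E) entry point might use ddi_segmap_setup() roughly as follows; the xx_ prefix, the access attributes, and register number 0 are illustrative assumptions, not taken from any real driver.

	#include <sys/ddi.h>
	#include <sys/sunddi.h>

	static int
	xx_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
	    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
	{
		ddi_device_acc_attr_t attr;

		attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
		attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

		/* map register set 0 with the caller's protections */
		return (ddi_segmap_setup(dev, off, as, addrp, len, prot,
		    maxprot, flags, credp, &attr, 0));
	}
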
2472 2454
2473 2455 /*ARGSUSED*/
2474 2456 static int
2475 2457 segdev_pagelock(struct seg *seg, caddr_t addr, size_t len,
2476 2458 struct page ***ppp, enum lock_type type, enum seg_rw rw)
2477 2459 {
2478 2460 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_PAGELOCK,
2479 2461 "segdev_pagelock:start");
2480 2462 return (ENOTSUP);
2481 2463 }
2482 2464
2483 -/*ARGSUSED*/
2484 -static int
2485 -segdev_setpagesize(struct seg *seg, caddr_t addr, size_t len,
2486 - uint_t szc)
2487 -{
2488 - return (ENOTSUP);
2489 -}
2490 -
2491 2465 /*
2492 2466  * devmap_device: Used by the devmap framework to establish a mapping,
2493 2467  *                 called by devmap_setup(9F) during map setup time.
2494 2468 */
2495 2469 /*ARGSUSED*/
2496 2470 static int
2497 2471 devmap_device(devmap_handle_t *dhp, struct as *as, caddr_t *addr,
2498 2472 offset_t off, size_t len, uint_t flags)
2499 2473 {
2500 2474 devmap_handle_t *rdhp, *maxdhp;
2501 2475 struct segdev_crargs dev_a;
2502 2476 int err;
2503 2477 uint_t maxprot = PROT_ALL;
2504 2478 offset_t offset = 0;
2505 2479 pfn_t pfn;
2506 2480 struct devmap_pmem_cookie *pcp;
2507 2481
2508 2482 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVICE,
2509 2483 "devmap_device:start dhp=%p addr=%p off=%llx, len=%lx",
2510 2484 (void *)dhp, (void *)addr, off, len);
2511 2485
2512 2486 DEBUGF(2, (CE_CONT, "devmap_device: dhp %p addr %p off %llx len %lx\n",
2513 2487 (void *)dhp, (void *)addr, off, len));
2514 2488
2515 2489 as_rangelock(as);
2516 2490 if ((flags & MAP_FIXED) == 0) {
2517 2491 offset_t aligned_off;
2518 2492
2519 2493 rdhp = maxdhp = dhp;
2520 2494 while (rdhp != NULL) {
2521 2495 maxdhp = (maxdhp->dh_len > rdhp->dh_len) ?
2522 2496 maxdhp : rdhp;
2523 2497 rdhp = rdhp->dh_next;
2524 2498 maxprot |= dhp->dh_maxprot;
2525 2499 }
2526 2500 offset = maxdhp->dh_uoff - dhp->dh_uoff;
2527 2501
2528 2502 /*
2529 2503 * Use the dhp that has the
2530 2504 * largest len to get user address.
2531 2505 */
2532 2506 /*
2533 2507 * If MAPPING_INVALID, cannot use dh_pfn/dh_cvaddr,
2534 2508 * use 0 which is as good as any other.
2535 2509 */
2536 2510 if (maxdhp->dh_flags & DEVMAP_MAPPING_INVALID) {
2537 2511 aligned_off = (offset_t)0;
2538 2512 } else if (dhp_is_devmem(maxdhp)) {
2539 2513 aligned_off = (offset_t)ptob(maxdhp->dh_pfn) - offset;
2540 2514 } else if (dhp_is_pmem(maxdhp)) {
2541 2515 pcp = (struct devmap_pmem_cookie *)maxdhp->dh_pcookie;
2542 2516 pfn = page_pptonum(
2543 2517 pcp->dp_pparray[btop(maxdhp->dh_roff)]);
2544 2518 aligned_off = (offset_t)ptob(pfn) - offset;
2545 2519 } else {
2546 2520 aligned_off = (offset_t)(uintptr_t)maxdhp->dh_cvaddr -
2547 2521 offset;
2548 2522 }
2549 2523
2550 2524 /*
2551 2525 * Pick an address aligned to dh_cookie.
2552 2526 	 * For kernel memory/user memory, the cookie is cvaddr.
2553 2527 	 * For device memory, the cookie is the physical address.
2554 2528 */
2555 2529 map_addr(addr, len, aligned_off, 1, flags);
2556 2530 if (*addr == NULL) {
2557 2531 as_rangeunlock(as);
2558 2532 return (ENOMEM);
2559 2533 }
2560 2534 } else {
2561 2535 /*
2562 2536 * User-specified address; blow away any previous mappings.
2563 2537 */
2564 2538 (void) as_unmap(as, *addr, len);
2565 2539 }
2566 2540
2567 2541 dev_a.mapfunc = NULL;
2568 2542 dev_a.dev = dhp->dh_dev;
2569 2543 dev_a.type = flags & MAP_TYPE;
2570 2544 dev_a.offset = off;
2571 2545 /*
2572 2546 	 * sdp->maxprot has the least restrictive protection of all dhps.
2573 2547 */
2574 2548 dev_a.maxprot = maxprot;
2575 2549 dev_a.prot = dhp->dh_prot;
2576 2550 /*
2577 2551 * devmap uses dhp->dh_hat_attr for hat.
2578 2552 */
2579 2553 dev_a.hat_flags = 0;
2580 2554 dev_a.hat_attr = 0;
2581 2555 dev_a.devmap_data = (void *)dhp;
2582 2556
2583 2557 err = as_map(as, *addr, len, segdev_create, &dev_a);
2584 2558 as_rangeunlock(as);
2585 2559 return (err);
2586 2560 }
2587 2561
2588 2562 int
2589 2563 devmap_do_ctxmgt(devmap_cookie_t dhc, void *pvtp, offset_t off, size_t len,
2590 2564 uint_t type, uint_t rw, int (*ctxmgt)(devmap_cookie_t, void *, offset_t,
2591 2565 size_t, uint_t, uint_t))
2592 2566 {
2593 2567 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2594 2568 struct devmap_ctx *devctx;
2595 2569 int do_timeout = 0;
2596 2570 int ret;
2597 2571
2598 2572 #ifdef lint
2599 2573 pvtp = pvtp;
2600 2574 #endif
2601 2575
2602 2576 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT,
2603 2577 "devmap_do_ctxmgt:start dhp=%p off=%llx, len=%lx",
2604 2578 (void *)dhp, off, len);
2605 2579 DEBUGF(7, (CE_CONT, "devmap_do_ctxmgt: dhp %p off %llx len %lx\n",
2606 2580 (void *)dhp, off, len));
2607 2581
2608 2582 if (ctxmgt == NULL)
2609 2583 return (FC_HWERR);
2610 2584
2611 2585 devctx = dhp->dh_ctx;
2612 2586
2613 2587 /*
2614 2588 * If we are on an MP system with more than one cpu running
2615 2589 * and if a thread on some CPU already has the context, wait
2616 2590 * for it to finish if there is a hysteresis timeout.
2617 2591 *
2618 2592 * We call cv_wait() instead of cv_wait_sig() because
2619 2593 * it does not matter much if it returned due to a signal
2620 2594 * or due to a cv_signal() or cv_broadcast(). In either event
2621 2595 * we need to complete the mapping otherwise the processes
2622 2596 * will die with a SEGV.
2623 2597 */
2624 2598 if ((dhp->dh_timeout_length > 0) && (ncpus > 1)) {
2625 2599 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK1,
2626 2600 "devmap_do_ctxmgt:doing hysteresis, devctl %p dhp %p",
2627 2601 devctx, dhp);
2628 2602 do_timeout = 1;
2629 2603 mutex_enter(&devctx->lock);
2630 2604 while (devctx->oncpu)
2631 2605 cv_wait(&devctx->cv, &devctx->lock);
2632 2606 devctx->oncpu = 1;
2633 2607 mutex_exit(&devctx->lock);
2634 2608 }
2635 2609
2636 2610 /*
2637 2611 * Call the contextmgt callback so that the driver can handle
2638 2612 * the fault.
2639 2613 */
2640 2614 ret = (*ctxmgt)(dhp, dhp->dh_pvtp, off, len, type, rw);
2641 2615
2642 2616 /*
2643 2617 * If devmap_access() returned -1, then there was a hardware
2644 2618 * error so we need to convert the return value to something
2645 2619 * that trap() will understand. Otherwise, the return value
2646 2620 * is already a fault code generated by devmap_unload()
2647 2621 * or devmap_load().
2648 2622 */
2649 2623 if (ret) {
2650 2624 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK2,
2651 2625 "devmap_do_ctxmgt: ret=%x dhp=%p devctx=%p",
2652 2626 ret, dhp, devctx);
2653 2627 DEBUGF(1, (CE_CONT, "devmap_do_ctxmgt: ret %x dhp %p\n",
2654 2628 ret, (void *)dhp));
2655 2629 if (devctx->oncpu) {
2656 2630 mutex_enter(&devctx->lock);
2657 2631 devctx->oncpu = 0;
2658 2632 cv_signal(&devctx->cv);
2659 2633 mutex_exit(&devctx->lock);
2660 2634 }
2661 2635 return (FC_HWERR);
2662 2636 }
2663 2637
2664 2638 /*
2665 2639 	 * Set up the timeout if we need to.
2666 2640 */
2667 2641 if (do_timeout) {
2668 2642 mutex_enter(&devctx->lock);
2669 2643 if (dhp->dh_timeout_length > 0) {
2670 2644 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK3,
2671 2645 "devmap_do_ctxmgt:timeout set");
2672 2646 devctx->timeout = timeout(devmap_ctxto,
2673 2647 devctx, dhp->dh_timeout_length);
2674 2648 } else {
2675 2649 /*
2676 2650 * We don't want to wait so set oncpu to
2677 2651 * 0 and wake up anyone waiting.
2678 2652 */
2679 2653 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DO_CTXMGT_CK4,
2680 2654 "devmap_do_ctxmgt:timeout not set");
2681 2655 devctx->oncpu = 0;
2682 2656 cv_signal(&devctx->cv);
2683 2657 }
2684 2658 mutex_exit(&devctx->lock);
2685 2659 }
2686 2660
2687 2661 return (DDI_SUCCESS);
2688 2662 }
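
A driver that wants the framework's context serialization typically routes its devmap_access(9E) callback through devmap_do_ctxmgt(), passing its own contextmgt routine. A sketch with hypothetical xx_ names, using the simplest possible contextmgt that just loads the faulting range:

	static int
	xx_ctxmgt(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
	    uint_t type, uint_t rw)
	{
		/* a real driver would restore its device context here first */
		return (devmap_load(dhp, off, len, type, rw));
	}

	static int
	xx_devmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
	    size_t len, uint_t type, uint_t rw)
	{
		return (devmap_do_ctxmgt(dhp, pvtp, off, len, type, rw,
		    xx_ctxmgt));
	}
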
2689 2663
2690 2664 /*
2691 2665 * end of mapping
2692 2666 * poff fault_offset |
2693 2667 * base | | |
2694 2668 * | | | |
2695 2669 * V V V V
2696 2670 * +-----------+---------------+-------+---------+-------+
2697 2671 * ^ ^ ^ ^
2698 2672 * |<--- offset--->|<-len->| |
2699 2673 * |<--- dh_len(size of mapping) --->|
2700 2674 * |<-- pg -->|
2701 2675 * -->|rlen|<--
2702 2676 */
2703 2677 static ulong_t
2704 2678 devmap_roundup(devmap_handle_t *dhp, ulong_t offset, size_t len,
2705 2679 ulong_t *opfn, ulong_t *pagesize)
2706 2680 {
2707 2681 register int level;
2708 2682 ulong_t pg;
2709 2683 ulong_t poff;
2710 2684 ulong_t base;
2711 2685 caddr_t uvaddr;
2712 2686 long rlen;
2713 2687
2714 2688 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP,
2715 2689 "devmap_roundup:start dhp=%p off=%lx len=%lx",
2716 2690 (void *)dhp, offset, len);
2717 2691 DEBUGF(2, (CE_CONT, "devmap_roundup: dhp %p off %lx len %lx\n",
2718 2692 (void *)dhp, offset, len));
2719 2693
2720 2694 /*
2721 2695 * get the max. pagesize that is aligned within the range
2722 2696 * <dh_pfn, dh_pfn+offset>.
2723 2697 *
2724 2698 	 * The calculations below use the physical address to determine
2725 2699 * the page size to use. The same calculations can use the
2726 2700 * virtual address to determine the page size.
2727 2701 */
2728 2702 base = (ulong_t)ptob(dhp->dh_pfn);
2729 2703 for (level = dhp->dh_mmulevel; level >= 0; level--) {
2730 2704 pg = page_get_pagesize(level);
2731 2705 poff = ((base + offset) & ~(pg - 1));
2732 2706 uvaddr = dhp->dh_uvaddr + (poff - base);
2733 2707 if ((poff >= base) &&
2734 2708 ((poff + pg) <= (base + dhp->dh_len)) &&
2735 2709 VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg))
2736 2710 break;
2737 2711 }
2738 2712
2739 2713 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK1,
2740 2714 "devmap_roundup: base=%lx poff=%lx dhp=%p",
2741 2715 base, poff, dhp);
2742 2716 DEBUGF(2, (CE_CONT, "devmap_roundup: base %lx poff %lx pfn %lx\n",
2743 2717 base, poff, dhp->dh_pfn));
2744 2718
2745 2719 ASSERT(VA_PA_ALIGNED((uintptr_t)uvaddr, poff, pg));
2746 2720 ASSERT(level >= 0);
2747 2721
2748 2722 *pagesize = pg;
2749 2723 *opfn = dhp->dh_pfn + btop(poff - base);
2750 2724
2751 2725 rlen = len + offset - (poff - base + pg);
2752 2726
2753 2727 ASSERT(rlen < (long)len);
2754 2728
2755 2729 TRACE_5(TR_FAC_DEVMAP, TR_DEVMAP_ROUNDUP_CK2,
2756 2730 "devmap_roundup:ret dhp=%p level=%x rlen=%lx psiz=%p opfn=%p",
2757 2731 (void *)dhp, level, rlen, pagesize, opfn);
2758 2732 DEBUGF(1, (CE_CONT, "devmap_roundup: dhp %p "
2759 2733 "level %x rlen %lx psize %lx opfn %lx\n",
2760 2734 (void *)dhp, level, rlen, *pagesize, *opfn));
2761 2735
2762 2736 return ((ulong_t)((rlen > 0) ? rlen : 0));
2763 2737 }
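
To make the arithmetic concrete, here is a small user-level example of the rounding done above, with made-up values: for a 4 KB candidate page, poff rounds (base + offset) down to the page boundary, and rlen is whatever part of the fault range lies past that one page.

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long base = 0x40000000UL;	/* ptob(dh_pfn) */
		unsigned long offset = 0x1200UL;	/* fault offset into mapping */
		unsigned long len = 0x3000UL;		/* fault length */
		unsigned long pg = 0x1000UL;		/* candidate pagesize */
		unsigned long poff = (base + offset) & ~(pg - 1);
		long rlen = (long)(len + offset - (poff - base + pg));

		/* prints: poff 0x40001000 rlen 0x2200 */
		printf("poff %#lx rlen %#lx\n", poff,
		    (unsigned long)((rlen > 0) ? rlen : 0));
		return (0);
	}
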
2764 2738
2765 2739 /*
2766 2740 * find the dhp that contains addr.
2767 2741 */
2768 2742 static devmap_handle_t *
2769 2743 devmap_find_handle(devmap_handle_t *dhp_head, caddr_t addr)
2770 2744 {
2771 2745 devmap_handle_t *dhp;
2772 2746
2773 2747 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_FIND_HANDLE,
2774 2748 "devmap_find_handle:start");
2775 2749
2776 2750 dhp = dhp_head;
2777 2751 while (dhp) {
2778 2752 if (addr >= dhp->dh_uvaddr &&
2779 2753 addr < (dhp->dh_uvaddr + dhp->dh_len))
2780 2754 return (dhp);
2781 2755 dhp = dhp->dh_next;
2782 2756 }
2783 2757
2784 2758 return ((devmap_handle_t *)NULL);
2785 2759 }
2786 2760
2787 2761 /*
2788 2762 * devmap_unload:
2789 2763  *			Marks a segdev segment, or its pages if offset->offset+len
2790 2764  *			is not the entire segment, as intercept and unloads the
2791 2765  *			pages in the range offset -> offset+len.
2792 2766 */
2793 2767 int
2794 2768 devmap_unload(devmap_cookie_t dhc, offset_t offset, size_t len)
2795 2769 {
2796 2770 register devmap_handle_t *dhp = (devmap_handle_t *)dhc;
2797 2771 caddr_t addr;
2798 2772 ulong_t size;
2799 2773 ssize_t soff;
2800 2774
2801 2775 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_UNLOAD,
2802 2776 "devmap_unload:start dhp=%p offset=%llx len=%lx",
2803 2777 (void *)dhp, offset, len);
2804 2778 DEBUGF(7, (CE_CONT, "devmap_unload: dhp %p offset %llx len %lx\n",
2805 2779 (void *)dhp, offset, len));
2806 2780
2807 2781 soff = (ssize_t)(offset - dhp->dh_uoff);
2808 2782 soff = round_down_p2(soff, PAGESIZE);
2809 2783 if (soff < 0 || soff >= dhp->dh_len)
2810 2784 return (FC_MAKE_ERR(EINVAL));
2811 2785
2812 2786 /*
2813 2787 	 * Address and size must be page aligned.  Len is rounded up to
2814 2788 	 * cover a whole number of pages, and offset is rounded down to
2815 2789 	 * the byte offset of the first byte of the page that
2816 2790 	 * contains offset.
2817 2791 */
2818 2792 len = round_up_p2(len, PAGESIZE);
2819 2793
2820 2794 /*
2821 2795 	 * If len == 0, then calculate the size by getting
2822 2796 * the number of bytes from offset to the end of the segment.
2823 2797 */
2824 2798 if (len == 0)
2825 2799 size = dhp->dh_len - soff;
2826 2800 else {
2827 2801 size = len;
2828 2802 if ((soff + size) > dhp->dh_len)
2829 2803 return (FC_MAKE_ERR(EINVAL));
2830 2804 }
2831 2805
2832 2806 /*
2833 2807 * The address is offset bytes from the base address of
2834 2808 * the dhp.
2835 2809 */
2836 2810 addr = (caddr_t)(soff + dhp->dh_uvaddr);
2837 2811
2838 2812 /*
2839 2813 * If large page size was used in hat_devload(),
2840 2814 * the same page size must be used in hat_unload().
2841 2815 */
2842 2816 if (dhp->dh_flags & DEVMAP_FLAG_LARGE) {
2843 2817 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
2844 2818 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
2845 2819 } else {
2846 2820 hat_unload(dhp->dh_seg->s_as->a_hat, addr, size,
2847 2821 HAT_UNLOAD|HAT_UNLOAD_OTHER);
2848 2822 }
2849 2823
2850 2824 return (0);
2851 2825 }
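
The round_down_p2()/round_up_p2() helpers used above are standard power-of-two arithmetic; assuming the alignment is a power of two (as PAGESIZE is), they can be sketched as:

	#define	round_down_p2(x, align)	((x) & ~((align) - 1))
	#define	round_up_p2(x, align)	(((x) + (align) - 1) & ~((align) - 1))

For example, with a 4 KB page, round_down_p2(0x1234, 0x1000) yields 0x1000 and round_up_p2(0x1234, 0x1000) yields 0x2000.
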
2852 2826
2853 2827 /*
2854 2828 * calculates the optimal page size that will be used for hat_devload().
2855 2829 */
2856 2830 static void
2857 2831 devmap_get_large_pgsize(devmap_handle_t *dhp, size_t len, caddr_t addr,
2858 2832 size_t *llen, caddr_t *laddr)
2859 2833 {
2860 2834 ulong_t off;
2861 2835 ulong_t pfn;
2862 2836 ulong_t pgsize;
2863 2837 uint_t first = 1;
2864 2838
2865 2839 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GET_LARGE_PGSIZE,
2866 2840 "devmap_get_large_pgsize:start");
2867 2841
2868 2842 /*
2869 2843 * RFE - Code only supports large page mappings for devmem
2870 2844 * This code could be changed in future if we want to support
2871 2845 * large page mappings for kernel exported memory.
2872 2846 */
2873 2847 ASSERT(dhp_is_devmem(dhp));
2874 2848 ASSERT(!(dhp->dh_flags & DEVMAP_MAPPING_INVALID));
2875 2849
2876 2850 *llen = 0;
2877 2851 off = (ulong_t)(addr - dhp->dh_uvaddr);
2878 2852 while ((long)len > 0) {
2879 2853 /*
2880 2854 * get the optimal pfn to minimize address translations.
2881 2855 		 * devmap_roundup() returns the residue bytes for the next
2882 2856 		 * round of calculations.
2883 2857 */
2884 2858 len = devmap_roundup(dhp, off, len, &pfn, &pgsize);
2885 2859
2886 2860 if (first) {
2887 2861 *laddr = dhp->dh_uvaddr + ptob(pfn - dhp->dh_pfn);
2888 2862 first = 0;
2889 2863 }
2890 2864
2891 2865 *llen += pgsize;
2892 2866 off = ptob(pfn - dhp->dh_pfn) + pgsize;
2893 2867 }
2894 2868 /* Large page mapping len/addr cover more range than original fault */
2895 2869 ASSERT(*llen >= len && *laddr <= addr);
2896 2870 ASSERT((*laddr + *llen) >= (addr + len));
2897 2871 }
2898 2872
2899 2873 /*
2900 2874 * Initialize the devmap_softlock structure.
2901 2875 */
2902 2876 static struct devmap_softlock *
2903 2877 devmap_softlock_init(dev_t dev, ulong_t id)
2904 2878 {
2905 2879 struct devmap_softlock *slock;
2906 2880 struct devmap_softlock *tmp;
2907 2881
2908 2882 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_INIT,
2909 2883 "devmap_softlock_init:start");
2910 2884
2911 2885 tmp = kmem_zalloc(sizeof (struct devmap_softlock), KM_SLEEP);
2912 2886 mutex_enter(&devmap_slock);
2913 2887
2914 2888 for (slock = devmap_slist; slock != NULL; slock = slock->next)
2915 2889 if ((slock->dev == dev) && (slock->id == id))
2916 2890 break;
2917 2891
2918 2892 if (slock == NULL) {
2919 2893 slock = tmp;
2920 2894 slock->dev = dev;
2921 2895 slock->id = id;
2922 2896 mutex_init(&slock->lock, NULL, MUTEX_DEFAULT, NULL);
2923 2897 cv_init(&slock->cv, NULL, CV_DEFAULT, NULL);
2924 2898 slock->next = devmap_slist;
2925 2899 devmap_slist = slock;
2926 2900 } else
2927 2901 kmem_free(tmp, sizeof (struct devmap_softlock));
2928 2902
2929 2903 mutex_enter(&slock->lock);
2930 2904 slock->refcnt++;
2931 2905 mutex_exit(&slock->lock);
2932 2906 mutex_exit(&devmap_slock);
2933 2907
2934 2908 return (slock);
2935 2909 }
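
devmap_softlock_init() above preallocates tmp with KM_SLEEP before taking devmap_slock, so it never sleeps for memory while holding the list lock; if the entry already exists, the preallocation is simply freed. A generic user-level sketch of the same lookup-or-insert idiom:

	#include <stdlib.h>
	#include <pthread.h>

	struct node {
		int		key;
		struct node	*next;
	};

	static struct node *list;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct node *
	lookup_or_insert(int key)
	{
		struct node *n;
		struct node *tmp = calloc(1, sizeof (*tmp)); /* may block; no lock held */

		if (tmp == NULL)
			return (NULL);
		pthread_mutex_lock(&list_lock);
		for (n = list; n != NULL; n = n->next)
			if (n->key == key)
				break;
		if (n == NULL) {
			/* not found: link in the preallocated node */
			tmp->key = key;
			tmp->next = list;
			list = n = tmp;
		} else {
			free(tmp);	/* found: discard the preallocation */
		}
		pthread_mutex_unlock(&list_lock);
		return (n);
	}
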
2936 2910
2937 2911 /*
2938 2912 * Wake up processes that sleep on softlocked.
2939 2913 * Free dh_softlock if refcnt is 0.
2940 2914 */
2941 2915 static void
2942 2916 devmap_softlock_rele(devmap_handle_t *dhp)
2943 2917 {
2944 2918 struct devmap_softlock *slock = dhp->dh_softlock;
2945 2919 struct devmap_softlock *tmp;
2946 2920 struct devmap_softlock *parent;
2947 2921
2948 2922 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SOFTLOCK_RELE,
2949 2923 "devmap_softlock_rele:start");
2950 2924
2951 2925 mutex_enter(&devmap_slock);
2952 2926 mutex_enter(&slock->lock);
2953 2927
2954 2928 ASSERT(slock->refcnt > 0);
2955 2929
2956 2930 slock->refcnt--;
2957 2931
2958 2932 /*
2959 2933 * If no one is using the device, free up the slock data.
2960 2934 */
2961 2935 if (slock->refcnt == 0) {
2962 2936 slock->softlocked = 0;
2963 2937 cv_signal(&slock->cv);
2964 2938
2965 2939 if (devmap_slist == slock)
2966 2940 devmap_slist = slock->next;
2967 2941 else {
2968 2942 parent = devmap_slist;
2969 2943 for (tmp = devmap_slist->next; tmp != NULL;
2970 2944 tmp = tmp->next) {
2971 2945 if (tmp == slock) {
2972 2946 parent->next = tmp->next;
2973 2947 break;
2974 2948 }
2975 2949 parent = tmp;
2976 2950 }
2977 2951 }
2978 2952 mutex_exit(&slock->lock);
2979 2953 mutex_destroy(&slock->lock);
2980 2954 cv_destroy(&slock->cv);
2981 2955 kmem_free(slock, sizeof (struct devmap_softlock));
2982 2956 } else
2983 2957 mutex_exit(&slock->lock);
2984 2958
2985 2959 mutex_exit(&devmap_slock);
2986 2960 }
2987 2961
2988 2962 /*
2989 2963 * Wake up processes that sleep on dh_ctx->locked.
2990 2964 * Free dh_ctx if refcnt is 0.
2991 2965 */
2992 2966 static void
2993 2967 devmap_ctx_rele(devmap_handle_t *dhp)
2994 2968 {
2995 2969 struct devmap_ctx *devctx = dhp->dh_ctx;
2996 2970 struct devmap_ctx *tmp;
2997 2971 struct devmap_ctx *parent;
2998 2972 timeout_id_t tid;
2999 2973
3000 2974 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE,
3001 2975 "devmap_ctx_rele:start");
3002 2976
3003 2977 mutex_enter(&devmapctx_lock);
3004 2978 mutex_enter(&devctx->lock);
3005 2979
3006 2980 ASSERT(devctx->refcnt > 0);
3007 2981
3008 2982 devctx->refcnt--;
3009 2983
3010 2984 /*
3011 2985 * If no one is using the device, free up the devctx data.
3012 2986 */
3013 2987 if (devctx->refcnt == 0) {
3014 2988 /*
3015 2989 * Untimeout any threads using this mapping as they are about
3016 2990 * to go away.
3017 2991 */
3018 2992 if (devctx->timeout != 0) {
3019 2993 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_CTX_RELE_CK1,
3020 2994 "devmap_ctx_rele:untimeout ctx->timeout");
3021 2995
3022 2996 tid = devctx->timeout;
3023 2997 mutex_exit(&devctx->lock);
3024 2998 (void) untimeout(tid);
3025 2999 mutex_enter(&devctx->lock);
3026 3000 }
3027 3001
3028 3002 devctx->oncpu = 0;
3029 3003 cv_signal(&devctx->cv);
3030 3004
3031 3005 if (devmapctx_list == devctx)
3032 3006 devmapctx_list = devctx->next;
3033 3007 else {
3034 3008 parent = devmapctx_list;
3035 3009 for (tmp = devmapctx_list->next; tmp != NULL;
3036 3010 tmp = tmp->next) {
3037 3011 if (tmp == devctx) {
3038 3012 parent->next = tmp->next;
3039 3013 break;
3040 3014 }
3041 3015 parent = tmp;
3042 3016 }
3043 3017 }
3044 3018 mutex_exit(&devctx->lock);
3045 3019 mutex_destroy(&devctx->lock);
3046 3020 cv_destroy(&devctx->cv);
3047 3021 kmem_free(devctx, sizeof (struct devmap_ctx));
3048 3022 } else
3049 3023 mutex_exit(&devctx->lock);
3050 3024
3051 3025 mutex_exit(&devmapctx_lock);
3052 3026 }
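
devmap_ctx_rele() above drops devctx->lock around untimeout(); the usual reason for this idiom is that untimeout() waits for a running handler to finish, so calling it while holding a lock the handler may also need risks deadlock. A sketch of the idiom with a hypothetical object layout (kmutex_t, timeout_id_t and untimeout() are the real kernel primitives):

	struct obj_sketch {
		kmutex_t	lock;
		timeout_id_t	tid;
	};

	static void
	obj_cancel_timeout(struct obj_sketch *op)
	{
		timeout_id_t tid;

		mutex_enter(&op->lock);
		tid = op->tid;
		op->tid = 0;
		mutex_exit(&op->lock);

		if (tid != 0)
			(void) untimeout(tid);	/* may wait for a running handler */
	}
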
3053 3027
3054 3028 /*
3055 3029 * devmap_load:
3056 3030  *			Marks a segdev segment, or its pages if offset->offset+len
3057 3031  *			is not the entire segment, as nointercept and faults in
3058 3032  *			the pages in the range offset -> offset+len.
3059 3033 */
3060 3034 int
3061 3035 devmap_load(devmap_cookie_t dhc, offset_t offset, size_t len, uint_t type,
3062 3036 uint_t rw)
3063 3037 {
3064 3038 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3065 3039 struct as *asp = dhp->dh_seg->s_as;
3066 3040 caddr_t addr;
3067 3041 ulong_t size;
3068 3042 ssize_t soff; /* offset from the beginning of the segment */
3069 3043 int rc;
3070 3044
3071 3045 TRACE_3(TR_FAC_DEVMAP, TR_DEVMAP_LOAD,
3072 3046 "devmap_load:start dhp=%p offset=%llx len=%lx",
3073 3047 (void *)dhp, offset, len);
3074 3048
3075 3049 DEBUGF(7, (CE_CONT, "devmap_load: dhp %p offset %llx len %lx\n",
3076 3050 (void *)dhp, offset, len));
3077 3051
3078 3052 /*
3079 3053 	 * The hat layer only supports devload into a process' context for
3080 3054 	 * which the as lock is held. Verify here and return an error if a
3081 3055 	 * driver inadvertently calls devmap_load on the wrong devmap handle.
3082 3056 */
3083 3057 if ((asp != &kas) && !AS_LOCK_HELD(asp, &asp->a_lock))
3084 3058 return (FC_MAKE_ERR(EINVAL));
3085 3059
3086 3060 soff = (ssize_t)(offset - dhp->dh_uoff);
3087 3061 soff = round_down_p2(soff, PAGESIZE);
3088 3062 if (soff < 0 || soff >= dhp->dh_len)
3089 3063 return (FC_MAKE_ERR(EINVAL));
3090 3064
3091 3065 /*
3092 3066 	 * Address and size must be page aligned.  Len is rounded up to
3093 3067 	 * cover a whole number of pages, and offset is rounded down to
3094 3068 	 * the byte offset of the first byte of the page that
3095 3069 	 * contains offset.
3096 3070 */
3097 3071 len = round_up_p2(len, PAGESIZE);
3098 3072
3099 3073 /*
3100 3074 * If len == 0, then calculate the size by getting
3101 3075 * the number of bytes from offset to the end of the segment.
3102 3076 */
3103 3077 if (len == 0)
3104 3078 size = dhp->dh_len - soff;
3105 3079 else {
3106 3080 size = len;
3107 3081 if ((soff + size) > dhp->dh_len)
3108 3082 return (FC_MAKE_ERR(EINVAL));
3109 3083 }
3110 3084
3111 3085 /*
3112 3086 * The address is offset bytes from the base address of
3113 3087 * the segment.
3114 3088 */
3115 3089 addr = (caddr_t)(soff + dhp->dh_uvaddr);
3116 3090
3117 3091 HOLD_DHP_LOCK(dhp);
3118 3092 rc = segdev_faultpages(asp->a_hat,
3119 3093 dhp->dh_seg, addr, size, type, rw, dhp);
3120 3094 RELE_DHP_LOCK(dhp);
3121 3095 return (rc);
3122 3096 }
3123 3097
3124 3098 int
3125 3099 devmap_setup(dev_t dev, offset_t off, struct as *as, caddr_t *addrp,
3126 3100 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3127 3101 {
3128 3102 register devmap_handle_t *dhp;
3129 3103 int (*devmap)(dev_t, devmap_cookie_t, offset_t, size_t,
3130 3104 size_t *, uint_t);
3131 3105 int (*mmap)(dev_t, off_t, int);
3132 3106 struct devmap_callback_ctl *callbackops;
3133 3107 devmap_handle_t *dhp_head = NULL;
3134 3108 devmap_handle_t *dhp_prev = NULL;
3135 3109 devmap_handle_t *dhp_curr;
3136 3110 caddr_t addr;
3137 3111 int map_flag;
3138 3112 int ret;
3139 3113 ulong_t total_len;
3140 3114 size_t map_len;
3141 3115 size_t resid_len = len;
3142 3116 offset_t map_off = off;
3143 3117 struct devmap_softlock *slock = NULL;
3144 3118
3145 3119 #ifdef lint
3146 3120 cred = cred;
3147 3121 #endif
3148 3122
3149 3123 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SETUP,
3150 3124 "devmap_setup:start off=%llx len=%lx", off, len);
3151 3125 DEBUGF(3, (CE_CONT, "devmap_setup: off %llx len %lx\n",
3152 3126 off, len));
3153 3127
3154 3128 devmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_devmap;
3155 3129 mmap = devopsp[getmajor(dev)]->devo_cb_ops->cb_mmap;
3156 3130
3157 3131 /*
3158 3132 	 * The driver must provide a devmap(9E) entry point in cb_ops to use
3159 3133 	 * the devmap framework.
3160 3134 */
3161 3135 if (devmap == NULL || devmap == nulldev || devmap == nodev)
3162 3136 return (EINVAL);
3163 3137
3164 3138 /*
3165 3139 	 * To protect against inadvertent entry when the devmap entry point
3166 3140 	 * is not NULL, return an error if the D_DEVMAP bit is not set in
3167 3141 	 * cb_flag and mmap is NULL.
3168 3142 */
3169 3143 map_flag = devopsp[getmajor(dev)]->devo_cb_ops->cb_flag;
3170 3144 if ((map_flag & D_DEVMAP) == 0 && (mmap == NULL || mmap == nulldev))
3171 3145 return (EINVAL);
3172 3146
3173 3147 /*
3174 3148 * devmap allows mmap(2) to map multiple registers.
3175 3149 	 * One devmap_handle is created for each register mapped.
3176 3150 */
3177 3151 for (total_len = 0; total_len < len; total_len += map_len) {
3178 3152 dhp = kmem_zalloc(sizeof (devmap_handle_t), KM_SLEEP);
3179 3153
3180 3154 if (dhp_prev != NULL)
3181 3155 dhp_prev->dh_next = dhp;
3182 3156 else
3183 3157 dhp_head = dhp;
3184 3158 dhp_prev = dhp;
3185 3159
3186 3160 dhp->dh_prot = prot;
3187 3161 dhp->dh_orig_maxprot = dhp->dh_maxprot = maxprot;
3188 3162 dhp->dh_dev = dev;
3189 3163 dhp->dh_timeout_length = CTX_TIMEOUT_VALUE;
3190 3164 dhp->dh_uoff = map_off;
3191 3165
3192 3166 /*
3193 3167 * Get mapping specific info from
3194 3168 * the driver, such as rnumber, roff, len, callbackops,
3195 3169 * accattrp and, if the mapping is for kernel memory,
3196 3170 * ddi_umem_cookie.
3197 3171 */
3198 3172 if ((ret = cdev_devmap(dev, dhp, map_off,
3199 3173 resid_len, &map_len, get_udatamodel())) != 0) {
3200 3174 free_devmap_handle(dhp_head);
3201 3175 return (ENXIO);
3202 3176 }
3203 3177
3204 3178 if (map_len & PAGEOFFSET) {
3205 3179 free_devmap_handle(dhp_head);
3206 3180 return (EINVAL);
3207 3181 }
3208 3182
3209 3183 callbackops = &dhp->dh_callbackops;
3210 3184
3211 3185 if ((callbackops->devmap_access == NULL) ||
3212 3186 (callbackops->devmap_access == nulldev) ||
3213 3187 (callbackops->devmap_access == nodev)) {
3214 3188 /*
3215 3189 * Normally devmap does not support MAP_PRIVATE unless
3216 3190 * the drivers provide a valid devmap_access routine.
3217 3191 */
3218 3192 if ((flags & MAP_PRIVATE) != 0) {
3219 3193 free_devmap_handle(dhp_head);
3220 3194 return (EINVAL);
3221 3195 }
3222 3196 } else {
3223 3197 /*
3224 3198 * Initialize dhp_softlock and dh_ctx if the drivers
3225 3199 * provide devmap_access.
3226 3200 */
3227 3201 dhp->dh_softlock = devmap_softlock_init(dev,
3228 3202 (ulong_t)callbackops->devmap_access);
3229 3203 dhp->dh_ctx = devmap_ctxinit(dev,
3230 3204 (ulong_t)callbackops->devmap_access);
3231 3205
3232 3206 /*
3233 3207 * segdev_fault can only work when all
3234 3208 * dh_softlock in a multi-dhp mapping are
3235 3209 * the same; see the comments in segdev_fault.
3236 3210 * This code keeps track of the first
3237 3211 * dh_softlock allocated in slock, compares
3238 3212 * all later allocations against it, and
3239 3213 * returns an error if they differ.
3240 3214 */
3241 3215 if (slock == NULL)
3242 3216 slock = dhp->dh_softlock;
3243 3217 if (slock != dhp->dh_softlock) {
3244 3218 free_devmap_handle(dhp_head);
3245 3219 return (ENOTSUP);
3246 3220 }
3247 3221 }
3248 3222
3249 3223 map_off += map_len;
3250 3224 resid_len -= map_len;
3251 3225 }
3252 3226
3253 3227 /*
3254 3228 * get the user virtual address and establish the mapping between
3255 3229 * uvaddr and device physical address.
3256 3230 */
3257 3231 if ((ret = devmap_device(dhp_head, as, addrp, off, len, flags))
3258 3232 != 0) {
3259 3233 /*
3260 3234 * Free the devmap handles if an error occurred during the mapping.
3261 3235 */
3262 3236 free_devmap_handle(dhp_head);
3263 3237
3264 3238 return (ret);
3265 3239 }
3266 3240
3267 3241 /*
3268 3242 * Call the driver's devmap_map callback to do post-mapping work,
3269 3243 * such as allocating driver private data for context management.
3270 3244 */
3271 3245 dhp = dhp_head;
3272 3246 map_off = off;
3273 3247 addr = *addrp;
3274 3248 while (dhp != NULL) {
3275 3249 callbackops = &dhp->dh_callbackops;
3276 3250 dhp->dh_uvaddr = addr;
3277 3251 dhp_curr = dhp;
3278 3252 if (callbackops->devmap_map != NULL) {
3279 3253 ret = (*callbackops->devmap_map)((devmap_cookie_t)dhp,
3280 3254 dev, flags, map_off,
3281 3255 dhp->dh_len, &dhp->dh_pvtp);
3282 3256 if (ret != 0) {
3283 3257 struct segdev_data *sdp;
3284 3258
3285 3259 /*
3286 3260 * call driver's devmap_unmap entry point
3287 3261 * to free driver resources.
3288 3262 */
3289 3263 dhp = dhp_head;
3290 3264 map_off = off;
3291 3265 while (dhp != dhp_curr) {
3292 3266 callbackops = &dhp->dh_callbackops;
3293 3267 if (callbackops->devmap_unmap != NULL) {
3294 3268 (*callbackops->devmap_unmap)(
3295 3269 dhp, dhp->dh_pvtp,
3296 3270 map_off, dhp->dh_len,
3297 3271 NULL, NULL, NULL, NULL);
3298 3272 }
3299 3273 map_off += dhp->dh_len;
3300 3274 dhp = dhp->dh_next;
3301 3275 }
3302 3276 sdp = dhp_head->dh_seg->s_data;
3303 3277 sdp->devmap_data = NULL;
3304 3278 free_devmap_handle(dhp_head);
3305 3279 return (ENXIO);
3306 3280 }
3307 3281 }
3308 3282 map_off += dhp->dh_len;
3309 3283 addr += dhp->dh_len;
3310 3284 dhp = dhp->dh_next;
3311 3285 }
3312 3286
3313 3287 return (0);
3314 3288 }
3315 3289
3316 3290 int
3317 3291 ddi_devmap_segmap(dev_t dev, off_t off, ddi_as_handle_t as, caddr_t *addrp,
3318 3292 off_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cred)
3319 3293 {
3320 3294 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_SEGMAP,
3321 3295 "devmap_segmap:start");
3322 3296 return (devmap_setup(dev, (offset_t)off, (struct as *)as, addrp,
3323 3297 (size_t)len, prot, maxprot, flags, cred));
3324 3298 }
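/*
 * To use the framework through this entry point, a driver typically
 * forwards its own segmap(9E) routine here, provides a devmap(9E) entry,
 * and sets D_DEVMAP in cb_flag (both are checked by devmap_setup() above).
 * A minimal sketch follows; the xx_ names are hypothetical driver code,
 * not part of this file.
 */
static int
xx_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp, off_t len,
    uint_t prot, uint_t maxprot, uint_t flags, cred_t *credp)
{
	/* hand the mapping off to the devmap framework */
	return (ddi_devmap_segmap(dev, off, (ddi_as_handle_t)as, addrp,
	    len, prot, maxprot, flags, credp));
}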
3325 3299
3326 3300 /*
3327 3301 * Called from devmap_devmem_setup/remap to see if large pages can be
3328 3302 * used for this device mapping.
3329 3303 * Also calculate the maximum page size for this mapping; this
3330 3304 * page size will be used by the fault routine for
3331 3305 * optimal page size calculations.
3332 3306 */
3333 3307 static void
3334 3308 devmap_devmem_large_page_setup(devmap_handle_t *dhp)
3335 3309 {
3336 3310 ASSERT(dhp_is_devmem(dhp));
3337 3311 dhp->dh_mmulevel = 0;
3338 3312
3339 3313 /*
3340 3314 * Use a large page size only if:
3341 3315 * 1. this is device memory,
3342 3316 * 2. the mmu supports multiple page sizes,
3343 3317 * 3. the driver did not disallow it,
3344 3318 * 4. the dhp length is at least as big as the large pagesize, and
3345 3319 * 5. the uvaddr and pfn are large-pagesize aligned.
3346 3320 */
3347 3321 if (page_num_pagesizes() > 1 &&
3348 3322 !(dhp->dh_flags & (DEVMAP_USE_PAGESIZE | DEVMAP_MAPPING_INVALID))) {
3349 3323 ulong_t base;
3350 3324 int level;
3351 3325
3352 3326 base = (ulong_t)ptob(dhp->dh_pfn);
3353 3327 for (level = 1; level < page_num_pagesizes(); level++) {
3354 3328 size_t pgsize = page_get_pagesize(level);
3355 3329 if ((dhp->dh_len < pgsize) ||
3356 3330 (!VA_PA_PGSIZE_ALIGNED((uintptr_t)dhp->dh_uvaddr,
3357 3331 base, pgsize))) {
3358 3332 break;
3359 3333 }
3360 3334 }
3361 3335 dhp->dh_mmulevel = level - 1;
3362 3336 }
3363 3337 if (dhp->dh_mmulevel > 0) {
3364 3338 dhp->dh_flags |= DEVMAP_FLAG_LARGE;
3365 3339 } else {
3366 3340 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3367 3341 }
3368 3342 }
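/*
 * The alignment requirement in item 5 above means both the user virtual
 * address and the physical address must start on a boundary of the
 * candidate page size. Below is a minimal sketch of such a predicate;
 * it is an assumption about what VA_PA_PGSIZE_ALIGNED (defined earlier
 * in this file) checks, not its actual text.
 */
/* hypothetical: va and pa must both sit on a pgsize boundary */
#define	XX_VA_PA_PGSIZE_ALIGNED(va, pa, pgsize) \
	((((va) | (pa)) & ((pgsize) - 1)) == 0)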
3369 3343
3370 3344 /*
3371 3345 * Called by the driver's devmap routine to pass device-specific info
3372 3346 * to the framework. Used for device memory mappings only.
3373 3347 */
3374 3348 int
3375 3349 devmap_devmem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3376 3350 struct devmap_callback_ctl *callbackops, uint_t rnumber, offset_t roff,
3377 3351 size_t len, uint_t maxprot, uint_t flags, ddi_device_acc_attr_t *accattrp)
3378 3352 {
3379 3353 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3380 3354 ddi_acc_handle_t handle;
3381 3355 ddi_map_req_t mr;
3382 3356 ddi_acc_hdl_t *hp;
3383 3357 int err;
3384 3358
3385 3359 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_SETUP,
3386 3360 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3387 3361 (void *)dhp, roff, rnumber, (uint_t)len);
3388 3362 DEBUGF(2, (CE_CONT, "devmap_devmem_setup: dhp %p offset %llx "
3389 3363 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3390 3364
3391 3365 /*
3392 3366 * First check whether this function has already been called for this dhp.
3393 3367 */
3394 3368 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3395 3369 return (DDI_FAILURE);
3396 3370
3397 3371 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3398 3372 return (DDI_FAILURE);
3399 3373
3400 3374 if (flags & DEVMAP_MAPPING_INVALID) {
3401 3375 /*
3402 3376 * Don't go up the tree to get pfn if the driver specifies
3403 3377 * DEVMAP_MAPPING_INVALID in flags.
3404 3378 *
3405 3379 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3406 3380 * remap permission.
3407 3381 */
3408 3382 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3409 3383 return (DDI_FAILURE);
3410 3384 }
3411 3385 dhp->dh_pfn = PFN_INVALID;
3412 3386 } else {
3413 3387 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3414 3388 if (handle == NULL)
3415 3389 return (DDI_FAILURE);
3416 3390
3417 3391 hp = impl_acc_hdl_get(handle);
3418 3392 hp->ah_vers = VERS_ACCHDL;
3419 3393 hp->ah_dip = dip;
3420 3394 hp->ah_rnumber = rnumber;
3421 3395 hp->ah_offset = roff;
3422 3396 hp->ah_len = len;
3423 3397 if (accattrp != NULL)
3424 3398 hp->ah_acc = *accattrp;
3425 3399
3426 3400 mr.map_op = DDI_MO_MAP_LOCKED;
3427 3401 mr.map_type = DDI_MT_RNUMBER;
3428 3402 mr.map_obj.rnumber = rnumber;
3429 3403 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3430 3404 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3431 3405 mr.map_handlep = hp;
3432 3406 mr.map_vers = DDI_MAP_VERSION;
3433 3407
3434 3408 /*
3435 3409 * Go up the device tree to get the pfn.
3436 3410 * The rootnex_map_regspec() routine in nexus drivers has been
3437 3411 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3438 3412 */
3439 3413 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&dhp->dh_pfn);
3440 3414 dhp->dh_hat_attr = hp->ah_hat_flags;
3441 3415 impl_acc_hdl_free(handle);
3442 3416
3443 3417 if (err)
3444 3418 return (DDI_FAILURE);
3445 3419 }
3446 3420 /* Should not be using devmem setup for memory pages */
3447 3421 ASSERT(!pf_is_memory(dhp->dh_pfn));
3448 3422
3449 3423 /* Only some of the flags bits are settable by the driver */
3450 3424 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3451 3425 dhp->dh_len = ptob(btopr(len));
3452 3426
3453 3427 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3454 3428 dhp->dh_roff = ptob(btop(roff));
3455 3429
3456 3430 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3457 3431 devmap_devmem_large_page_setup(dhp);
3458 3432 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3459 3433 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3460 3434
3461 3435
3462 3436 if (callbackops != NULL) {
3463 3437 bcopy(callbackops, &dhp->dh_callbackops,
3464 3438 sizeof (struct devmap_callback_ctl));
3465 3439 }
3466 3440
3467 3441 /*
3468 3442 * Initialize dh_lock if we want to do remap.
3469 3443 */
3470 3444 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3471 3445 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3472 3446 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3473 3447 }
3474 3448
3475 3449 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3476 3450
3477 3451 return (DDI_SUCCESS);
3478 3452 }
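/*
 * For context, a hedged sketch of how a driver's devmap(9E) entry point
 * might call devmap_devmem_setup() to export a register set. The xx_
 * names, soft-state layout, and access attributes are illustrative
 * assumptions, not part of this file.
 */
static ddi_device_acc_attr_t xx_acc_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	xx_state_t *sp = ddi_get_soft_state(xx_statep, getminor(dev));
	size_t length = ptob(btopr(len));	/* whole pages, as above */

	if (sp == NULL)
		return (ENXIO);

	/* export register set 1; no callbacks, no remap permission */
	if (devmap_devmem_setup(dhp, sp->xx_dip, NULL, 1, off, length,
	    PROT_ALL, 0, &xx_acc_attr) != DDI_SUCCESS)
		return (EINVAL);

	*maplen = length;
	return (0);
}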
3479 3453
3480 3454 int
3481 3455 devmap_devmem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3482 3456 uint_t rnumber, offset_t roff, size_t len, uint_t maxprot,
3483 3457 uint_t flags, ddi_device_acc_attr_t *accattrp)
3484 3458 {
3485 3459 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3486 3460 ddi_acc_handle_t handle;
3487 3461 ddi_map_req_t mr;
3488 3462 ddi_acc_hdl_t *hp;
3489 3463 pfn_t pfn;
3490 3464 uint_t hat_flags;
3491 3465 int err;
3492 3466
3493 3467 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_DEVMEM_REMAP,
3494 3468 "devmap_devmem_setup:start dhp=%p offset=%llx rnum=%d len=%lx",
3495 3469 (void *)dhp, roff, rnumber, (uint_t)len);
3496 3470 DEBUGF(2, (CE_CONT, "devmap_devmem_remap: dhp %p offset %llx "
3497 3471 "rnum %d len %lx\n", (void *)dhp, roff, rnumber, len));
3498 3472
3499 3473 /*
3500 3474 * Return failure if setup has not been done or no remap permission
3501 3475 * has been granted during the setup.
3502 3476 */
3503 3477 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3504 3478 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3505 3479 return (DDI_FAILURE);
3506 3480
3507 3481 /* Only DEVMAP_MAPPING_INVALID flag supported for remap */
3508 3482 if ((flags != 0) && (flags != DEVMAP_MAPPING_INVALID))
3509 3483 return (DDI_FAILURE);
3510 3484
3511 3485 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3512 3486 return (DDI_FAILURE);
3513 3487
3514 3488 if (!(flags & DEVMAP_MAPPING_INVALID)) {
3515 3489 handle = impl_acc_hdl_alloc(KM_SLEEP, NULL);
3516 3490 if (handle == NULL)
3517 3491 return (DDI_FAILURE);
3518 3492 }
3519 3493
3520 3494 HOLD_DHP_LOCK(dhp);
3521 3495
3522 3496 /*
3523 3497 * Unload the old mapping, so the next fault will set up the new
3524 3498 * mappings. Do this while holding the dhp lock so other faults
3525 3499 * don't reestablish the mappings.
3526 3500 */
3527 3501 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3528 3502 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3529 3503
3530 3504 if (flags & DEVMAP_MAPPING_INVALID) {
3531 3505 dhp->dh_flags |= DEVMAP_MAPPING_INVALID;
3532 3506 dhp->dh_pfn = PFN_INVALID;
3533 3507 } else {
3534 3508 /* clear any prior DEVMAP_MAPPING_INVALID flag */
3535 3509 dhp->dh_flags &= ~DEVMAP_MAPPING_INVALID;
3536 3510 hp = impl_acc_hdl_get(handle);
3537 3511 hp->ah_vers = VERS_ACCHDL;
3538 3512 hp->ah_dip = dip;
3539 3513 hp->ah_rnumber = rnumber;
3540 3514 hp->ah_offset = roff;
3541 3515 hp->ah_len = len;
3542 3516 if (accattrp != NULL)
3543 3517 hp->ah_acc = *accattrp;
3544 3518
3545 3519 mr.map_op = DDI_MO_MAP_LOCKED;
3546 3520 mr.map_type = DDI_MT_RNUMBER;
3547 3521 mr.map_obj.rnumber = rnumber;
3548 3522 mr.map_prot = maxprot & dhp->dh_orig_maxprot;
3549 3523 mr.map_flags = DDI_MF_DEVICE_MAPPING;
3550 3524 mr.map_handlep = hp;
3551 3525 mr.map_vers = DDI_MAP_VERSION;
3552 3526
3553 3527 /*
3554 3528 * Go up the device tree to get the pfn.
3555 3529 * The rootnex_map_regspec() routine in nexus drivers has been
3556 3530 * modified to return pfn if map_flags is DDI_MF_DEVICE_MAPPING.
3557 3531 */
3558 3532 err = ddi_map(dip, &mr, roff, len, (caddr_t *)&pfn);
3559 3533 hat_flags = hp->ah_hat_flags;
3560 3534 impl_acc_hdl_free(handle);
3561 3535 if (err) {
3562 3536 RELE_DHP_LOCK(dhp);
3563 3537 return (DDI_FAILURE);
3564 3538 }
3565 3539 /*
3566 3540 * Store result of ddi_map first in local variables, as we do
3567 3541 * not want to overwrite the existing dhp with wrong data.
3568 3542 */
3569 3543 dhp->dh_pfn = pfn;
3570 3544 dhp->dh_hat_attr = hat_flags;
3571 3545 }
3572 3546
3573 3547 /* clear the large page size flag */
3574 3548 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3575 3549
3576 3550 dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
3577 3551 dhp->dh_roff = ptob(btop(roff));
3578 3552
3579 3553 /* setup the dh_mmulevel and DEVMAP_FLAG_LARGE */
3580 3554 devmap_devmem_large_page_setup(dhp);
3581 3555 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3582 3556 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3583 3557
3584 3558 RELE_DHP_LOCK(dhp);
3585 3559 return (DDI_SUCCESS);
3586 3560 }
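/*
 * A typical use of remap is revoking access: a driver that passed
 * DEVMAP_ALLOW_REMAP at setup time can later switch the mapping to
 * DEVMAP_MAPPING_INVALID, after which the old translations are unloaded
 * and later faults cannot be satisfied until the driver remaps a valid
 * range. A hedged sketch, with hypothetical xx_ names:
 */
/* invalidate the user mapping, e.g. around a device reset */
static int
xx_revoke(xx_state_t *sp, devmap_cookie_t dhp, size_t len)
{
	return (devmap_devmem_remap(dhp, sp->xx_dip, 0, 0, len,
	    PROT_ALL, DEVMAP_MAPPING_INVALID, NULL));
}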
3587 3561
3588 3562 /*
3589 3563 * Called by the driver's devmap routine to pass kernel virtual address
3590 3564 * mapping info to the framework. Used only for kernel memory
3591 3565 * allocated from ddi_umem_alloc().
3592 3566 */
3593 3567 int
3594 3568 devmap_umem_setup(devmap_cookie_t dhc, dev_info_t *dip,
3595 3569 struct devmap_callback_ctl *callbackops, ddi_umem_cookie_t cookie,
3596 3570 offset_t off, size_t len, uint_t maxprot, uint_t flags,
3597 3571 ddi_device_acc_attr_t *accattrp)
3598 3572 {
3599 3573 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3600 3574 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3601 3575
3602 3576 #ifdef lint
3603 3577 dip = dip;
3604 3578 #endif
3605 3579
3606 3580 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_SETUP,
3607 3581 "devmap_umem_setup:start dhp=%p offset=%llx cookie=%p len=%lx",
3608 3582 (void *)dhp, off, cookie, len);
3609 3583 DEBUGF(2, (CE_CONT, "devmap_umem_setup: dhp %p offset %llx "
3610 3584 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3611 3585
3612 3586 if (cookie == NULL)
3613 3587 return (DDI_FAILURE);
3614 3588
3615 3589 /* For UMEM_TRASH, this restriction is not needed */
3616 3590 if ((off + len) > cp->size)
3617 3591 return (DDI_FAILURE);
3618 3592
3619 3593 /* check if the cache attributes are supported */
3620 3594 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3621 3595 return (DDI_FAILURE);
3622 3596
3623 3597 /*
3624 3598 * First check whether this function has already been called for this dhp.
3625 3599 */
3626 3600 if (dhp->dh_flags & DEVMAP_SETUP_DONE)
3627 3601 return (DDI_FAILURE);
3628 3602
3629 3603 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3630 3604 return (DDI_FAILURE);
3631 3605
3632 3606 if (flags & DEVMAP_MAPPING_INVALID) {
3633 3607 /*
3634 3608 * If DEVMAP_MAPPING_INVALID is specified, we have to grant
3635 3609 * remap permission.
3636 3610 */
3637 3611 if (!(flags & DEVMAP_ALLOW_REMAP)) {
3638 3612 return (DDI_FAILURE);
3639 3613 }
3640 3614 } else {
3641 3615 dhp->dh_cookie = cookie;
3642 3616 dhp->dh_roff = ptob(btop(off));
3643 3617 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3644 3618 /* set HAT cache attributes */
3645 3619 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3646 3620 /* set HAT endianness attributes */
3647 3621 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3648 3622 }
3649 3623
3650 3624 /*
3651 3625 * The default is _not_ to pass HAT_LOAD_NOCONSIST to hat_devload();
3652 3626 * we pass HAT_LOAD_NOCONSIST _only_ in cases where hat tries to
3653 3627 * create consistent mappings but our intention was to create
3654 3628 * non-consistent mappings.
3655 3629 *
3656 3630 * DEVMEM: hat figures out that it's DEVMEM and creates
3657 3631 * non-consistent mappings.
3658 3632 *
3659 3633 * kernel exported memory: hat figures out that it's memory and
3660 3634 * always creates consistent mappings.
3661 3635 *
3662 3636 * /dev/mem: non-consistent mappings. See comments in common/io/mem.c
3663 3637 *
3664 3638 * /dev/kmem: consistent mappings are created unless they are
3665 3639 * MAP_FIXED. We _explicitly_ tell hat to create non-consistent
3666 3640 * mappings by passing HAT_LOAD_NOCONSIST in case of MAP_FIXED
3667 3641 * mappings of /dev/kmem. See common/io/mem.c
3668 3642 */
3669 3643
3670 3644 /* Only some of the flags bits are settable by the driver */
3671 3645 dhp->dh_flags |= (flags & DEVMAP_SETUP_FLAGS);
3672 3646
3673 3647 dhp->dh_len = ptob(btopr(len));
3674 3648 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3675 3649 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3676 3650
3677 3651 if (callbackops != NULL) {
3678 3652 bcopy(callbackops, &dhp->dh_callbackops,
3679 3653 sizeof (struct devmap_callback_ctl));
3680 3654 }
3681 3655 /*
3682 3656 * Initialize dh_lock if we want to do remap.
3683 3657 */
3684 3658 if (dhp->dh_flags & DEVMAP_ALLOW_REMAP) {
3685 3659 mutex_init(&dhp->dh_lock, NULL, MUTEX_DEFAULT, NULL);
3686 3660 dhp->dh_flags |= DEVMAP_LOCK_INITED;
3687 3661 }
3688 3662
3689 3663 dhp->dh_flags |= DEVMAP_SETUP_DONE;
3690 3664
3691 3665 return (DDI_SUCCESS);
3692 3666 }
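/*
 * Analogously to the devmem case, a hedged sketch of a devmap(9E) entry
 * that backs the mapping with ddi_umem_alloc()ed kernel memory; all xx_
 * names are illustrative assumptions, not part of this file.
 */
static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	xx_state_t *sp = ddi_get_soft_state(xx_statep, getminor(dev));
	size_t length = ptob(btopr(len));

	if (sp == NULL)
		return (ENXIO);

	/* one-time: back the segment with non-pageable kernel memory */
	if (sp->xx_umem_cookie == NULL)
		sp->xx_kva = ddi_umem_alloc(length, DDI_UMEM_SLEEP,
		    &sp->xx_umem_cookie);

	if (devmap_umem_setup(dhp, sp->xx_dip, NULL, sp->xx_umem_cookie,
	    off, length, PROT_ALL, 0, NULL) != DDI_SUCCESS)
		return (EINVAL);

	*maplen = length;
	return (0);
}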
3693 3667
3694 3668 int
3695 3669 devmap_umem_remap(devmap_cookie_t dhc, dev_info_t *dip,
3696 3670 ddi_umem_cookie_t cookie, offset_t off, size_t len, uint_t maxprot,
3697 3671 uint_t flags, ddi_device_acc_attr_t *accattrp)
3698 3672 {
3699 3673 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3700 3674 struct ddi_umem_cookie *cp = (struct ddi_umem_cookie *)cookie;
3701 3675
3702 3676 TRACE_4(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_REMAP,
3703 3677 "devmap_umem_remap:start dhp=%p offset=%llx cookie=%p len=%lx",
3704 3678 (void *)dhp, off, cookie, len);
3705 3679 DEBUGF(2, (CE_CONT, "devmap_umem_remap: dhp %p offset %llx "
3706 3680 "cookie %p len %lx\n", (void *)dhp, off, (void *)cookie, len));
3707 3681
3708 3682 #ifdef lint
3709 3683 dip = dip;
3710 3684 accattrp = accattrp;
3711 3685 #endif
3712 3686 /*
3713 3687 * Return failure if setup has not been done or no remap permission
3714 3688 * has been granted during the setup.
3715 3689 */
3716 3690 if ((dhp->dh_flags & DEVMAP_SETUP_DONE) == 0 ||
3717 3691 (dhp->dh_flags & DEVMAP_ALLOW_REMAP) == 0)
3718 3692 return (DDI_FAILURE);
3719 3693
3720 3694 /* No flags supported for remap yet */
3721 3695 if (flags != 0)
3722 3696 return (DDI_FAILURE);
3723 3697
3724 3698 /* check if the cache attributes are supported */
3725 3699 if (i_ddi_check_cache_attr(flags) == B_FALSE)
3726 3700 return (DDI_FAILURE);
3727 3701
3728 3702 if ((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) != dhp->dh_prot)
3729 3703 return (DDI_FAILURE);
3730 3704
3731 3705 /* For UMEM_TRASH, this restriction is not needed */
3732 3706 if ((off + len) > cp->size)
3733 3707 return (DDI_FAILURE);
3734 3708
3735 3709 HOLD_DHP_LOCK(dhp);
3736 3710 /*
3737 3711 * Unload the old mapping, so the next fault will set up the new
3738 3712 * mappings. Do this while holding the dhp lock so other faults
3739 3713 * don't reestablish the mappings.
3740 3714 */
3741 3715 hat_unload(dhp->dh_seg->s_as->a_hat, dhp->dh_uvaddr,
3742 3716 dhp->dh_len, HAT_UNLOAD|HAT_UNLOAD_OTHER);
3743 3717
3744 3718 dhp->dh_cookie = cookie;
3745 3719 dhp->dh_roff = ptob(btop(off));
3746 3720 dhp->dh_cvaddr = cp->cvaddr + dhp->dh_roff;
3747 3721 /* set HAT cache attributes */
3748 3722 i_ddi_cacheattr_to_hatacc(flags, &dhp->dh_hat_attr);
3749 3723 /* set HAT endianness attributes */
3750 3724 i_ddi_devacc_to_hatacc(accattrp, &dhp->dh_hat_attr);
3751 3725
3752 3726 /* clear the large page size flag */
3753 3727 dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;
3754 3728
3755 3729 dhp->dh_maxprot = maxprot & dhp->dh_orig_maxprot;
3756 3730 ASSERT((dhp->dh_prot & dhp->dh_orig_maxprot & maxprot) == dhp->dh_prot);
3757 3731 RELE_DHP_LOCK(dhp);
3758 3732 return (DDI_SUCCESS);
3759 3733 }
3760 3734
3761 3735 /*
3762 3736 * Set the timeout value for the driver's context management
3763 3737 * callback, e.g. devmap_access().
3764 3738 */
3765 3739 void
3766 3740 devmap_set_ctx_timeout(devmap_cookie_t dhc, clock_t ticks)
3767 3741 {
3768 3742 devmap_handle_t *dhp = (devmap_handle_t *)dhc;
3769 3743
3770 3744 TRACE_2(TR_FAC_DEVMAP, TR_DEVMAP_SET_CTX_TIMEOUT,
3771 3745 "devmap_set_ctx_timeout:start dhp=%p ticks=%x",
3772 3746 (void *)dhp, ticks);
3773 3747 dhp->dh_timeout_length = ticks;
3774 3748 }
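/*
 * For example, a driver whose devmap_access callback manages a shared
 * hardware context might lengthen the window from the CTX_TIMEOUT_VALUE
 * default set in devmap_setup(). The value and function name below are
 * purely illustrative:
 */
static void
xx_set_access_window(devmap_cookie_t dhp)
{
	/* let a mapping hold the device context for roughly 10 ms */
	devmap_set_ctx_timeout(dhp, drv_usectohz(10000));
}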
3775 3749
3776 3750 int
3777 3751 devmap_default_access(devmap_cookie_t dhp, void *pvtp, offset_t off,
3778 3752 size_t len, uint_t type, uint_t rw)
3779 3753 {
3780 3754 #ifdef lint
3781 3755 pvtp = pvtp;
3782 3756 #endif
3783 3757
3784 3758 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_DEFAULT_ACCESS,
3785 3759 "devmap_default_access:start");
3786 3760 return (devmap_load(dhp, off, len, type, rw));
3787 3761 }
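/*
 * Drivers with no per-mapping context to manage can point their
 * devmap_access callback straight at this routine, or wrap it as below;
 * drivers that do manage context call it once access is safe. The xx_
 * names are a hypothetical registration, with field names taken from
 * struct devmap_callback_ctl.
 */
static int
xx_devmap_access(devmap_cookie_t dhp, void *pvtp, offset_t off, size_t len,
    uint_t type, uint_t rw)
{
	/* nothing driver-specific to serialize; just load the mapping */
	return (devmap_default_access(dhp, pvtp, off, len, type, rw));
}

static struct devmap_callback_ctl xx_callbackops = {
	.devmap_rev	= DEVMAP_OPS_REV,
	.devmap_access	= xx_devmap_access,
};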
3788 3762
3789 3763 /*
3790 3764 * segkmem_alloc() wrapper to allocate memory which is both
3791 3765 * non-relocatable (for DR) and sharelocked, since the rest
3792 3766 * of this segment driver requires it.
3793 3767 */
3794 3768 static void *
3795 3769 devmap_alloc_pages(vmem_t *vmp, size_t size, int vmflag)
3796 3770 {
3797 3771 ASSERT(vmp != NULL);
3798 3772 ASSERT(kvseg.s_base != NULL);
3799 3773 vmflag |= (VM_NORELOC | SEGKMEM_SHARELOCKED);
3800 3774 return (segkmem_alloc(vmp, size, vmflag));
3801 3775 }
3802 3776
3803 3777 /*
3804 3778 * This is where things are a bit incestuous with seg_kmem: unlike
3805 3779 * seg_kp, seg_kmem does not keep its pages long-term sharelocked, so
3806 3780 * we need to do a bit of a dance around that to prevent duplication of
3807 3781 * code until we decide to bite the bullet and implement a new kernel
3808 3782 * segment for driver-allocated memory that is exported to user space.
3809 3783 */
3810 3784 static void
3811 3785 devmap_free_pages(vmem_t *vmp, void *inaddr, size_t size)
3812 3786 {
3813 3787 page_t *pp;
3814 3788 caddr_t addr = inaddr;
3815 3789 caddr_t eaddr;
3816 3790 pgcnt_t npages = btopr(size);
3817 3791
3818 3792 ASSERT(vmp != NULL);
3819 3793 ASSERT(kvseg.s_base != NULL);
3820 3794 ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
3821 3795
3822 3796 hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
3823 3797
3824 3798 for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
3825 3799 /*
3826 3800 * Use page_find() instead of page_lookup() to find the page
3827 3801 * since we know that it is hashed and has a shared lock.
3828 3802 */
3829 3803 pp = page_find(&kvp, (u_offset_t)(uintptr_t)addr);
3830 3804
3831 3805 if (pp == NULL)
3832 3806 panic("devmap_free_pages: page not found");
3833 3807 if (!page_tryupgrade(pp)) {
3834 3808 page_unlock(pp);
3835 3809 pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr,
3836 3810 SE_EXCL);
3837 3811 if (pp == NULL)
3838 3812 panic("devmap_free_pages: page already freed");
3839 3813 }
3840 3814 /* Clear p_lckcnt so page_destroy() doesn't update availrmem */
3841 3815 pp->p_lckcnt = 0;
3842 3816 page_destroy(pp, 0);
3843 3817 }
3844 3818 page_unresv(npages);
3845 3819
3846 3820 if (vmp != NULL)
3847 3821 vmem_free(vmp, inaddr, size);
3848 3822 }
3849 3823
3850 3824 /*
3851 3825 * devmap_umem_alloc_np() replaces kmem_zalloc() as the method for
3852 3826 * allocating non-pageable kmem in response to a ddi_umem_alloc()
3853 3827 * default request. For now we allocate our own pages and we keep
3854 3828 * them long-term sharelocked, since: A) the fault routines expect the
3855 3829 * memory to already be locked; B) pageable umem is already long-term
3856 3830 * locked; C) it's a lot of work to make it otherwise, particularly
3857 3831 * since the nexus layer expects the pages to never fault. An RFE is to
3858 3832 * not keep the pages long-term locked, but instead to be able to
3859 3833 * take faults on them and simply look them up in kvp in case we
3860 3834 * fault on them. Even then, we must take care not to let pageout
3861 3835 * steal them from us since the data must remain resident; if we
3862 3836 * do this we must come up with some way to pin the pages to prevent
3863 3837 * faults while a driver is doing DMA to/from them.
3864 3838 */
3865 3839 static void *
3866 3840 devmap_umem_alloc_np(size_t size, size_t flags)
3867 3841 {
3868 3842 void *buf;
3869 3843 int vmflags = (flags & DDI_UMEM_NOSLEEP)? VM_NOSLEEP : VM_SLEEP;
3870 3844
3871 3845 buf = vmem_alloc(umem_np_arena, size, vmflags);
3872 3846 if (buf != NULL)
3873 3847 bzero(buf, size);
3874 3848 return (buf);
3875 3849 }
3876 3850
3877 3851 static void
3878 3852 devmap_umem_free_np(void *addr, size_t size)
3879 3853 {
3880 3854 vmem_free(umem_np_arena, addr, size);
3881 3855 }
3882 3856
3883 3857 /*
3884 3858 * Allocate page-aligned kernel memory for exporting to user land.
3885 3859 * The devmap framework will use the cookie allocated by ddi_umem_alloc()
3886 3860 * to find a user virtual address that is in the same color as the
3887 3861 * address allocated here.
3888 3862 */
3889 3863 void *
3890 3864 ddi_umem_alloc(size_t size, int flags, ddi_umem_cookie_t *cookie)
3891 3865 {
3892 3866 register size_t len = ptob(btopr(size));
3893 3867 void *buf = NULL;
3894 3868 struct ddi_umem_cookie *cp;
3895 3869 int iflags = 0;
3896 3870
3897 3871 *cookie = NULL;
3898 3872
3899 3873 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_ALLOC,
3900 3874 "devmap_umem_alloc:start");
3901 3875 if (len == 0)
3902 3876 return ((void *)NULL);
3903 3877
3904 3878 /*
3905 3879 * allocate cookie
3906 3880 */
3907 3881 if ((cp = kmem_zalloc(sizeof (struct ddi_umem_cookie),
3908 3882 flags & DDI_UMEM_NOSLEEP ? KM_NOSLEEP : KM_SLEEP)) == NULL) {
3909 3883 ASSERT(flags & DDI_UMEM_NOSLEEP);
3910 3884 return ((void *)NULL);
3911 3885 }
3912 3886
3913 3887 if (flags & DDI_UMEM_PAGEABLE) {
3914 3888 /* Only one of the flags is allowed */
3915 3889 ASSERT(!(flags & DDI_UMEM_TRASH));
3916 3890 /* initialize resource with 0 */
3917 3891 iflags = KPD_ZERO;
3918 3892
3919 3893 /*
3920 3894 * To allocate unlocked pageable memory, use segkp_get() to
3921 3895 * create a segkp segment. Since segkp can only service kas,
3922 3896 * other segment drivers such as segdev have to do
3923 3897 * as_fault(segkp, SOFTLOCK) in their fault routines.
3924 3898 */
3925 3899 if (flags & DDI_UMEM_NOSLEEP)
3926 3900 iflags |= KPD_NOWAIT;
3927 3901
3928 3902 if ((buf = segkp_get(segkp, len, iflags)) == NULL) {
3929 3903 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3930 3904 return ((void *)NULL);
3931 3905 }
3932 3906 cp->type = KMEM_PAGEABLE;
3933 3907 mutex_init(&cp->lock, NULL, MUTEX_DEFAULT, NULL);
3934 3908 cp->locked = 0;
3935 3909 } else if (flags & DDI_UMEM_TRASH) {
3936 3910 /* Only one of the flags is allowed */
3937 3911 ASSERT(!(flags & DDI_UMEM_PAGEABLE));
3938 3912 cp->type = UMEM_TRASH;
3939 3913 buf = NULL;
3940 3914 } else {
3941 3915 if ((buf = devmap_umem_alloc_np(len, flags)) == NULL) {
3942 3916 kmem_free(cp, sizeof (struct ddi_umem_cookie));
3943 3917 return ((void *)NULL);
3944 3918 }
3945 3919
3946 3920 cp->type = KMEM_NON_PAGEABLE;
3947 3921 }
3948 3922
3949 3923 /*
3950 3924 * Need to save the size here; it will be used when
3951 3925 * we free the memory.
3952 3926 */
3953 3927 cp->size = len;
3954 3928 cp->cvaddr = (caddr_t)buf;
3955 3929
3956 3930 *cookie = (void *)cp;
3957 3931 return (buf);
3958 3932 }
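/*
 * A hedged usage sketch pairing this with ddi_umem_free() below; the
 * size, flags, and function name are illustrative only.
 */
static void
xx_umem_example(void)
{
	ddi_umem_cookie_t cookie;
	void *kva;

	/* 16 pages of zeroed, non-pageable kernel memory; may sleep */
	kva = ddi_umem_alloc(ptob(16), DDI_UMEM_SLEEP, &cookie);

	/* ... export kva via devmap_umem_setup(), use, then tear down ... */

	ddi_umem_free(cookie);
}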
3959 3933
3960 3934 void
3961 3935 ddi_umem_free(ddi_umem_cookie_t cookie)
3962 3936 {
3963 3937 struct ddi_umem_cookie *cp;
3964 3938
3965 3939 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_UMEM_FREE,
3966 3940 "devmap_umem_free:start");
3967 3941
3968 3942 /*
3969 3943 * If the cookie is NULL, this call has no effect on the system.
3970 3944 */
3971 3945 if (cookie == NULL)
3972 3946 return;
3973 3947
3974 3948 cp = (struct ddi_umem_cookie *)cookie;
3975 3949
3976 3950 switch (cp->type) {
3977 3951 case KMEM_PAGEABLE :
3978 3952 ASSERT(cp->cvaddr != NULL && cp->size != 0);
3979 3953 /*
3980 3954 * Check if there are still any pending faults on the cookie
3981 3955 * while the driver is deleting it.
3982 3956 * XXX - could change to an ASSERT but won't catch errant drivers
3983 3957 */
3984 3958 mutex_enter(&cp->lock);
3985 3959 if (cp->locked) {
3986 3960 mutex_exit(&cp->lock);
3987 3961 panic("ddi_umem_free for cookie with pending faults %p",
3988 3962 (void *)cp);
3989 3963 return;
3990 3964 }
3991 3965
3992 3966 segkp_release(segkp, cp->cvaddr);
3993 3967
3994 3968 /*
3995 3969 * release mutex associated with this cookie.
3996 3970 */
3997 3971 mutex_destroy(&cp->lock);
3998 3972 break;
3999 3973 case KMEM_NON_PAGEABLE :
4000 3974 ASSERT(cp->cvaddr != NULL && cp->size != 0);
4001 3975 devmap_umem_free_np(cp->cvaddr, cp->size);
4002 3976 break;
4003 3977 case UMEM_TRASH :
4004 3978 break;
4005 3979 case UMEM_LOCKED :
4006 3980 /* Callers should use ddi_umem_unlock for this type */
4007 3981 ddi_umem_unlock(cookie);
4008 3982 /* Frees the cookie too */
4009 3983 return;
4010 3984 default:
4011 3985 /* panic so we can diagnose the underlying cause */
4012 3986 panic("ddi_umem_free: illegal cookie type 0x%x\n",
4013 3987 cp->type);
4014 3988 }
4015 3989
4016 3990 kmem_free(cookie, sizeof (struct ddi_umem_cookie));
4017 3991 }
4018 3992
4019 3993
4020 3994 static int
4021 3995 segdev_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
4022 3996 {
4023 3997 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4024 3998
4025 3999 /*
4026 4000 * It looks as if it is always mapped shared
4027 4001 */
4028 4002 TRACE_0(TR_FAC_DEVMAP, TR_DEVMAP_GETMEMID,
4029 4003 "segdev_getmemid:start");
4030 4004 memidp->val[0] = (uintptr_t)VTOCVP(sdp->vp);
4031 4005 memidp->val[1] = sdp->offset + (uintptr_t)(addr - seg->s_base);
4032 - return (0);
4033 -}
4034 -
4035 -/*ARGSUSED*/
4036 -static lgrp_mem_policy_info_t *
4037 -segdev_getpolicy(struct seg *seg, caddr_t addr)
4038 -{
4039 - return (NULL);
4040 -}
4041 -
4042 -/*ARGSUSED*/
4043 -static int
4044 -segdev_capable(struct seg *seg, segcapability_t capability)
4045 -{
4046 4006 return (0);
4047 4007 }
4048 4008
4049 4009 /*
4050 4010 * ddi_umem_alloc() non-pageable quantum cache max size.
4051 4011 * This is just a SWAG.
4052 4012 */
4053 4013 #define DEVMAP_UMEM_QUANTUM (8*PAGESIZE)
4054 4014
4055 4015 /*
4056 4016 * Initialize seg_dev from boot. This routine sets up the trash page
4057 4017 * and creates the umem_np_arena used to back non-pageable memory
4058 4018 * requests.
4059 4019 */
4060 4020 void
4061 4021 segdev_init(void)
4062 4022 {
4063 4023 struct seg kseg;
4064 4024
4065 4025 umem_np_arena = vmem_create("umem_np", NULL, 0, PAGESIZE,
4066 4026 devmap_alloc_pages, devmap_free_pages, heap_arena,
4067 4027 DEVMAP_UMEM_QUANTUM, VM_SLEEP);
4068 4028
4069 4029 kseg.s_as = &kas;
4070 4030 trashpp = page_create_va(&trashvp, 0, PAGESIZE,
4071 4031 PG_NORELOC | PG_EXCL | PG_WAIT, &kseg, NULL);
4072 4032 if (trashpp == NULL)
4073 4033 panic("segdev_init: failed to create trash page");
4074 4034 pagezero(trashpp, 0, PAGESIZE);
4075 4035 page_downgrade(trashpp);
4076 4036 }
4077 4037
4078 4038 /*
4079 4039 * Invoke platform-dependent support routines so that /proc can have
4080 4040 * the platform code deal with curious hardware.
4081 4041 */
4082 4042 int
4083 4043 segdev_copyfrom(struct seg *seg,
4084 4044 caddr_t uaddr, const void *devaddr, void *kaddr, size_t len)
4085 4045 {
4086 4046 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4087 4047 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4088 4048
4089 4049 return (e_ddi_copyfromdev(sp->s_dip,
4090 4050 (off_t)(uaddr - seg->s_base), devaddr, kaddr, len));
4091 4051 }
4092 4052
4093 4053 int
4094 4054 segdev_copyto(struct seg *seg,
4095 4055 caddr_t uaddr, const void *kaddr, void *devaddr, size_t len)
4096 4056 {
4097 4057 struct segdev_data *sdp = (struct segdev_data *)seg->s_data;
4098 4058 struct snode *sp = VTOS(VTOCVP(sdp->vp));
4099 4059
4100 4060 return (e_ddi_copytodev(sp->s_dip,
4101 4061 (off_t)(uaddr - seg->s_base), kaddr, devaddr, len));
4102 4062 }