Print this page
5255 uts shouldn't open-code ISP2
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/drm/drmP.h
+++ new/usr/src/uts/common/io/drm/drmP.h
1 1 /*
2 2 * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
3 3 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
4 4 */
5 5 /*
6 6 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
7 7 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 8 * Copyright (c) 2009, Intel Corporation.
9 9 * All rights reserved.
10 10 *
11 11 * Permission is hereby granted, free of charge, to any person obtaining a
12 12 * copy of this software and associated documentation files (the "Software"),
13 13 * to deal in the Software without restriction, including without limitation
14 14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 15 * and/or sell copies of the Software, and to permit persons to whom the
16 16 * Software is furnished to do so, subject to the following conditions:
17 17 *
18 18 * The above copyright notice and this permission notice (including the next
19 19 * paragraph) shall be included in all copies or substantial portions of the
20 20 * Software.
21 21 *
22 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 28 * OTHER DEALINGS IN THE SOFTWARE.
29 29 *
30 30 * Authors:
31 31 * Rickard E. (Rik) Faith <faith@valinux.com>
32 32 * Gareth Hughes <gareth@valinux.com>
33 33 *
↓ open down ↓ |
33 lines elided |
↑ open up ↑ |
34 34 */
35 35
36 36 /*
37 37 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
38 38 * Use is subject to license terms.
39 39 */
40 40
41 41 #ifndef _DRMP_H
42 42 #define _DRMP_H
43 43
44 +#include <sys/sysmacros.h>
44 45 #include <sys/types.h>
45 46 #include <sys/conf.h>
46 47 #include <sys/modctl.h>
47 48 #include <sys/stat.h>
48 49 #include <sys/file.h>
49 50 #include <sys/cmn_err.h>
50 51 #include <sys/varargs.h>
51 52 #include <sys/pci.h>
52 53 #include <sys/ddi.h>
53 54 #include <sys/sunddi.h>
54 55 #include <sys/sunldi.h>
55 56 #include <sys/pmem.h>
56 57 #include <sys/agpgart.h>
57 58 #include <sys/time.h>
58 59 #include "drm_atomic.h"
59 60 #include "drm.h"
60 61 #include "queue.h"
61 62 #include "drm_linux_list.h"
62 63
63 64 #ifndef __inline__
64 65 #define __inline__ inline
65 66 #endif
66 67
67 68 #if !defined(__FUNCTION__)
68 69 #if defined(C99)
69 70 #define __FUNCTION__ __func__
70 71 #else
71 72 #define __FUNCTION__ " "
72 73 #endif
73 74 #endif
74 75
75 76 /* DRM space units */
76 77 #define DRM_PAGE_SHIFT PAGESHIFT
77 78 #define DRM_PAGE_SIZE (1 << DRM_PAGE_SHIFT)
78 79 #define DRM_PAGE_OFFSET (DRM_PAGE_SIZE - 1)
79 80 #define DRM_PAGE_MASK ~(DRM_PAGE_SIZE - 1)
80 81 #define DRM_MB2PAGES(x) ((x) << 8)
81 82 #define DRM_PAGES2BYTES(x) ((x) << DRM_PAGE_SHIFT)
82 83 #define DRM_BYTES2PAGES(x) ((x) >> DRM_PAGE_SHIFT)
83 84 #define DRM_PAGES2KB(x) ((x) << 2)
84 85 #define DRM_ALIGNED(offset) (((offset) & DRM_PAGE_OFFSET) == 0)
85 86
86 87 #define PAGE_SHIFT DRM_PAGE_SHIFT
87 88 #define PAGE_SIZE DRM_PAGE_SIZE
88 89
89 90 #define DRM_MAX_INSTANCES 8
90 91 #define DRM_DEVNODE "drm"
91 92 #define DRM_UNOPENED 0
92 93 #define DRM_OPENED 1
93 94
94 95 #define DRM_HASH_SIZE 16 /* Size of key hash table */
95 96 #define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */
96 97 #define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */
97 98
98 99 #define DRM_MEM_DMA 0
99 100 #define DRM_MEM_SAREA 1
100 101 #define DRM_MEM_DRIVER 2
101 102 #define DRM_MEM_MAGIC 3
102 103 #define DRM_MEM_IOCTLS 4
103 104 #define DRM_MEM_MAPS 5
104 105 #define DRM_MEM_BUFS 6
105 106 #define DRM_MEM_SEGS 7
106 107 #define DRM_MEM_PAGES 8
107 108 #define DRM_MEM_FILES 9
108 109 #define DRM_MEM_QUEUES 10
109 110 #define DRM_MEM_CMDS 11
110 111 #define DRM_MEM_MAPPINGS 12
111 112 #define DRM_MEM_BUFLISTS 13
112 113 #define DRM_MEM_DRMLISTS 14
113 114 #define DRM_MEM_TOTALDRM 15
114 115 #define DRM_MEM_BOUNDDRM 16
115 116 #define DRM_MEM_CTXBITMAP 17
116 117 #define DRM_MEM_STUB 18
117 118 #define DRM_MEM_SGLISTS 19
118 119 #define DRM_MEM_AGPLISTS 20
119 120 #define DRM_MEM_CTXLIST 21
120 121 #define DRM_MEM_MM 22
121 122 #define DRM_MEM_HASHTAB 23
122 123 #define DRM_MEM_OBJECTS 24
123 124
124 125 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
125 126 #define DRM_MAP_HASH_OFFSET 0x10000000
126 127 #define DRM_MAP_HASH_ORDER 12
127 128 #define DRM_OBJECT_HASH_ORDER 12
128 129 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
129 130 #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
130 131 #define DRM_MM_INIT_MAX_PAGES 256
131 132
132 133
133 134 /* Internal types and structures */
134 135 #define DRM_ARRAY_SIZE(x) (sizeof (x) / sizeof (x[0]))
135 136 #define DRM_MIN(a, b) ((a) < (b) ? (a) : (b))
136 137 #define DRM_MAX(a, b) ((a) > (b) ? (a) : (b))
137 138
138 139 #define DRM_IF_VERSION(maj, min) (maj << 16 | min)
139 140
140 141 #define __OS_HAS_AGP 1
141 142
142 143 #define DRM_DEV_MOD (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
143 144 #define DRM_DEV_UID 0
144 145 #define DRM_DEV_GID 0
145 146
146 147 #define DRM_CURRENTPID ddi_get_pid()
147 148 #define DRM_SPINLOCK(l) mutex_enter(l)
148 149 #define DRM_SPINUNLOCK(u) mutex_exit(u)
149 150 #define DRM_SPINLOCK_ASSERT(l)
150 151 #define DRM_LOCK() mutex_enter(&dev->dev_lock)
151 152 #define DRM_UNLOCK() mutex_exit(&dev->dev_lock)
152 153 #define DRM_LOCK_OWNED() ASSERT(mutex_owned(&dev->dev_lock))
153 154 #define spin_lock_irqsave(l, flag) mutex_enter(l)
154 155 #define spin_unlock_irqrestore(u, flag) mutex_exit(u)
155 156 #define spin_lock(l) mutex_enter(l)
156 157 #define spin_unlock(u) mutex_exit(u)
157 158
158 159
159 160 #define DRM_UDELAY(sec) delay(drv_usectohz(sec *1000))
160 161 #define DRM_MEMORYBARRIER()
161 162
162 163 typedef struct drm_file drm_file_t;
163 164 typedef struct drm_device drm_device_t;
164 165 typedef struct drm_driver_info drm_driver_t;
165 166
166 167 #define DRM_DEVICE drm_device_t *dev = dev1
167 168 #define DRM_IOCTL_ARGS \
168 169 drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
169 170
170 171 #define DRM_COPYFROM_WITH_RETURN(dest, src, size) \
171 172 if (ddi_copyin((src), (dest), (size), 0)) { \
172 173 DRM_ERROR("%s: copy from user failed", __func__); \
173 174 return (EFAULT); \
174 175 }
175 176
176 177 #define DRM_COPYTO_WITH_RETURN(dest, src, size) \
177 178 if (ddi_copyout((src), (dest), (size), 0)) { \
178 179 DRM_ERROR("%s: copy to user failed", __func__); \
179 180 return (EFAULT); \
180 181 }
181 182
182 183 #define DRM_COPY_FROM_USER(dest, src, size) \
183 184 ddi_copyin((src), (dest), (size), 0) /* flag for src */
184 185
185 186 #define DRM_COPY_TO_USER(dest, src, size) \
186 187 ddi_copyout((src), (dest), (size), 0) /* flags for dest */
187 188
188 189 #define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
189 190 ddi_copyin((arg2), (arg1), (arg3), 0)
190 191
191 192 #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
192 193 ddi_copyout((arg2), arg1, arg3, 0)
193 194
194 195 #define DRM_READ8(map, offset) \
195 196 *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
196 197 #define DRM_READ16(map, offset) \
197 198 *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
198 199 #define DRM_READ32(map, offset) \
199 200 *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
200 201 #define DRM_WRITE8(map, offset, val) \
201 202 *(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
202 203 #define DRM_WRITE16(map, offset, val) \
203 204 *(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
204 205 #define DRM_WRITE32(map, offset, val) \
205 206 *(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
206 207
207 208 typedef struct drm_wait_queue {
208 209 kcondvar_t cv;
209 210 kmutex_t lock;
210 211 }wait_queue_head_t;
211 212
212 213 #define DRM_INIT_WAITQUEUE(q, pri) \
213 214 { \
214 215 mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, pri); \
215 216 cv_init(&(q)->cv, NULL, CV_DRIVER, NULL); \
216 217 }
217 218
218 219 #define DRM_FINI_WAITQUEUE(q) \
219 220 { \
220 221 mutex_destroy(&(q)->lock); \
221 222 cv_destroy(&(q)->cv); \
222 223 }
223 224
224 225 #define DRM_WAKEUP(q) \
225 226 { \
226 227 mutex_enter(&(q)->lock); \
227 228 cv_broadcast(&(q)->cv); \
228 229 mutex_exit(&(q)->lock); \
229 230 }
230 231
231 232 #define jiffies ddi_get_lbolt()
232 233
233 234 #define DRM_WAIT_ON(ret, q, timeout, condition) \
234 235 mutex_enter(&(q)->lock); \
235 236 while (!(condition)) { \
236 237 ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
237 238 TR_CLOCK_TICK); \
238 239 if (ret == -1) { \
239 240 ret = EBUSY; \
240 241 break; \
241 242 } else if (ret == 0) { \
242 243 ret = EINTR; \
243 244 break; \
244 245 } else { \
245 246 ret = 0; \
246 247 } \
247 248 } \
248 249 mutex_exit(&(q)->lock);
249 250
250 251 #define DRM_WAIT(ret, q, condition) \
251 252 mutex_enter(&(q)->lock); \
252 253 if (!(condition)) { \
253 254 ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
254 255 if (ret == -1) { \
255 256 /* gfx maybe hang */ \
256 257 if (!(condition)) \
257 258 ret = -2; \
258 259 } else { \
259 260 ret = 0; \
260 261 } \
261 262 } \
262 263 mutex_exit(&(q)->lock);
263 264
264 265
265 266 #define DRM_GETSAREA() \
266 267 { \
267 268 drm_local_map_t *map; \
268 269 DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
269 270 TAILQ_FOREACH(map, &dev->maplist, link) { \
270 271 if (map->type == _DRM_SHM && \
271 272 map->flags & _DRM_CONTAINS_LOCK) { \
272 273 dev_priv->sarea = map; \
273 274 break; \
274 275 } \
275 276 } \
276 277 }
277 278
278 279 #define LOCK_TEST_WITH_RETURN(dev, fpriv) \
279 280 if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
280 281 dev->lock.filp != fpriv) { \
281 282 DRM_DEBUG("%s called without lock held", __func__); \
282 283 return (EINVAL); \
283 284 }
284 285
285 286 #define DRM_IRQ_ARGS caddr_t arg
286 287 #define IRQ_HANDLED DDI_INTR_CLAIMED
287 288 #define IRQ_NONE DDI_INTR_UNCLAIMED
288 289
289 290 enum {
290 291 DRM_IS_NOT_AGP,
291 292 DRM_IS_AGP,
292 293 DRM_MIGHT_BE_AGP
293 294 };
294 295
295 296 /* Capabilities taken from src/sys/dev/pci/pcireg.h. */
296 297 #ifndef PCIY_AGP
297 298 #define PCIY_AGP 0x02
298 299 #endif
299 300
300 301 #ifndef PCIY_EXPRESS
301 302 #define PCIY_EXPRESS 0x10
302 303 #endif
303 304
304 305 #define PAGE_ALIGN(addr) (((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
305 306 #define DRM_SUSER(p) (crgetsgid(p) == 0 || crgetsuid(p) == 0)
306 307
307 308 #define DRM_GEM_OBJIDR_HASHNODE 1024
308 309 #define idr_list_for_each(entry, head) \
309 310 for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
310 311 list_for_each(entry, &(head)->next[key])
311 312
312 313 /*
313 314 * wait for 400 milliseconds
314 315 */
315 316 #define DRM_HZ drv_usectohz(400000)
316 317
317 318 typedef unsigned long dma_addr_t;
↓ open down ↓ |
264 lines elided |
↑ open up ↑ |
318 319 typedef uint64_t u64;
319 320 typedef uint32_t u32;
320 321 typedef uint16_t u16;
321 322 typedef uint8_t u8;
322 323 typedef uint_t irqreturn_t;
323 324
324 325 #define DRM_SUPPORT 1
325 326 #define DRM_UNSUPPORT 0
326 327
327 328 #define __OS_HAS_AGP 1
328 -
329 -#define __offsetof(type, field) ((size_t)(&((type *)0)->field))
330 -#define offsetof(type, field) __offsetof(type, field)
331 329
332 330 typedef struct drm_pci_id_list
333 331 {
334 332 int vendor;
335 333 int device;
336 334 long driver_private;
337 335 char *name;
338 336 } drm_pci_id_list_t;
339 337
340 338 #define DRM_AUTH 0x1
341 339 #define DRM_MASTER 0x2
342 340 #define DRM_ROOT_ONLY 0x4
343 341 typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
344 342 typedef struct drm_ioctl_desc {
345 343 int (*func)(DRM_IOCTL_ARGS);
346 344 int flags;
347 345 } drm_ioctl_desc_t;
348 346
349 347 typedef struct drm_magic_entry {
350 348 drm_magic_t magic;
351 349 struct drm_file *priv;
352 350 struct drm_magic_entry *next;
353 351 } drm_magic_entry_t;
354 352
355 353 typedef struct drm_magic_head {
356 354 struct drm_magic_entry *head;
357 355 struct drm_magic_entry *tail;
358 356 } drm_magic_head_t;
359 357
360 358 typedef struct drm_buf {
361 359 int idx; /* Index into master buflist */
362 360 int total; /* Buffer size */
363 361 int order; /* log-base-2(total) */
364 362 int used; /* Amount of buffer in use (for DMA) */
365 363 unsigned long offset; /* Byte offset (used internally) */
366 364 void *address; /* Address of buffer */
367 365 unsigned long bus_address; /* Bus address of buffer */
368 366 struct drm_buf *next; /* Kernel-only: used for free list */
369 367 volatile int pending; /* On hardware DMA queue */
370 368 drm_file_t *filp;
371 369 /* Uniq. identifier of holding process */
372 370 int context; /* Kernel queue for this buffer */
373 371 enum {
374 372 DRM_LIST_NONE = 0,
375 373 DRM_LIST_FREE = 1,
376 374 DRM_LIST_WAIT = 2,
377 375 DRM_LIST_PEND = 3,
378 376 DRM_LIST_PRIO = 4,
379 377 DRM_LIST_RECLAIM = 5
380 378 } list; /* Which list we're on */
381 379
 382  380   	int		dev_priv_size;	/* Size of buffer private storage */
383 381 void *dev_private; /* Per-buffer private storage */
384 382 } drm_buf_t;
385 383
386 384 typedef struct drm_freelist {
387 385 int initialized; /* Freelist in use */
388 386 uint32_t count; /* Number of free buffers */
389 387 drm_buf_t *next; /* End pointer */
390 388
391 389 int low_mark; /* Low water mark */
392 390 int high_mark; /* High water mark */
393 391 } drm_freelist_t;
394 392
395 393 typedef struct drm_buf_entry {
396 394 int buf_size;
397 395 int buf_count;
398 396 drm_buf_t *buflist;
399 397 int seg_count;
400 398 int page_order;
401 399
402 400 uint32_t *seglist;
403 401 unsigned long *seglist_bus;
404 402
405 403 drm_freelist_t freelist;
406 404 } drm_buf_entry_t;
407 405
408 406 typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;
409 407
410 408 /* BEGIN CSTYLED */
411 409 typedef struct drm_local_map {
412 410 unsigned long offset; /* Physical address (0 for SAREA) */
413 411 unsigned long size; /* Physical size (bytes) */
414 412 drm_map_type_t type; /* Type of memory mapped */
415 413 drm_map_flags_t flags; /* Flags */
416 414 void *handle; /* User-space: "Handle" to pass to mmap */
417 415 /* Kernel-space: kernel-virtual address */
418 416 int mtrr; /* Boolean: MTRR used */
419 417 /* Private data */
420 418 int rid; /* PCI resource ID for bus_space */
421 419 int kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
422 420 caddr_t dev_addr; /* base device address */
423 421 ddi_acc_handle_t dev_handle; /* The data access handle */
424 422 ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
425 423 TAILQ_ENTRY(drm_local_map) link;
426 424 } drm_local_map_t;
427 425 /* END CSTYLED */
428 426
429 427 /*
430 428 * This structure defines the drm_mm memory object, which will be used by the
431 429 * DRM for its buffer objects.
432 430 */
433 431 struct drm_gem_object {
434 432 /* Reference count of this object */
435 433 atomic_t refcount;
436 434
437 435 /* Handle count of this object. Each handle also holds a reference */
438 436 atomic_t handlecount;
439 437
440 438 /* Related drm device */
441 439 struct drm_device *dev;
442 440
443 441 int flink;
444 442 /*
445 443 * Size of the object, in bytes. Immutable over the object's
446 444 * lifetime.
447 445 */
448 446 size_t size;
449 447
450 448 /*
451 449 * Global name for this object, starts at 1. 0 means unnamed.
452 450 * Access is covered by the object_name_lock in the related drm_device
453 451 */
454 452 int name;
455 453
456 454 /*
457 455 * Memory domains. These monitor which caches contain read/write data
458 456 * related to the object. When transitioning from one set of domains
459 457 * to another, the driver is called to ensure that caches are suitably
460 458 * flushed and invalidated
461 459 */
462 460 uint32_t read_domains;
463 461 uint32_t write_domain;
464 462
465 463 /*
466 464 * While validating an exec operation, the
467 465 * new read/write domain values are computed here.
468 466 * They will be transferred to the above values
469 467 * at the point that any cache flushing occurs
470 468 */
471 469 uint32_t pending_read_domains;
472 470 uint32_t pending_write_domain;
473 471
474 472 void *driver_private;
475 473
476 474 drm_local_map_t *map;
477 475 ddi_dma_handle_t dma_hdl;
478 476 ddi_acc_handle_t acc_hdl;
479 477 caddr_t kaddr;
480 478 size_t real_size; /* real size of memory */
481 479 pfn_t *pfnarray;
482 480 };
483 481
484 482 struct idr_list {
485 483 struct idr_list *next, *prev;
486 484 struct drm_gem_object *obj;
487 485 uint32_t handle;
488 486 caddr_t contain_ptr;
489 487 };
490 488
491 489 struct drm_file {
492 490 TAILQ_ENTRY(drm_file) link;
493 491 int authenticated;
494 492 int master;
495 493 int minor;
496 494 pid_t pid;
497 495 uid_t uid;
498 496 int refs;
499 497 drm_magic_t magic;
500 498 unsigned long ioctl_count;
501 499 void *driver_priv;
502 500 /* Mapping of mm object handles to object pointers. */
503 501 struct idr_list object_idr;
504 502 /* Lock for synchronization of access to object_idr. */
505 503 kmutex_t table_lock;
506 504
507 505 dev_t dev;
508 506 cred_t *credp;
509 507 };
510 508
511 509 typedef struct drm_lock_data {
512 510 drm_hw_lock_t *hw_lock; /* Hardware lock */
513 511 drm_file_t *filp;
514 512 /* Uniq. identifier of holding process */
515 513 kcondvar_t lock_cv; /* lock queue - SOLARIS Specific */
516 514 kmutex_t lock_mutex; /* lock - SOLARIS Specific */
517 515 unsigned long lock_time; /* Time of last lock in clock ticks */
518 516 } drm_lock_data_t;
519 517
520 518 /*
521 519 * This structure, in drm_device_t, is always initialized while the device
522 520 * is open. dev->dma_lock protects the incrementing of dev->buf_use, which
523 521 * when set marks that no further bufs may be allocated until device teardown
524 522 * occurs (when the last open of the device has closed). The high/low
525 523 * watermarks of bufs are only touched by the X Server, and thus not
526 524 * concurrently accessed, so no locking is needed.
527 525 */
528 526 typedef struct drm_device_dma {
529 527 drm_buf_entry_t bufs[DRM_MAX_ORDER+1];
530 528 int buf_count;
 531  529   	drm_buf_t	**buflist;	/* Vector of pointers into bufs */
532 530 int seg_count;
533 531 int page_count;
534 532 unsigned long *pagelist;
535 533 unsigned long byte_count;
536 534 enum {
537 535 _DRM_DMA_USE_AGP = 0x01,
538 536 _DRM_DMA_USE_SG = 0x02
539 537 } flags;
540 538 } drm_device_dma_t;
541 539
542 540 typedef struct drm_agp_mem {
543 541 void *handle;
544 542 unsigned long bound; /* address */
545 543 int pages;
546 544 caddr_t phys_addr;
547 545 struct drm_agp_mem *prev;
548 546 struct drm_agp_mem *next;
549 547 } drm_agp_mem_t;
550 548
551 549 typedef struct drm_agp_head {
552 550 agp_info_t agp_info;
553 551 const char *chipset;
554 552 drm_agp_mem_t *memory;
555 553 unsigned long mode;
556 554 int enabled;
557 555 int acquired;
558 556 unsigned long base;
559 557 int mtrr;
560 558 int cant_use_aperture;
561 559 unsigned long page_mask;
562 560 ldi_ident_t agpgart_li;
563 561 ldi_handle_t agpgart_lh;
564 562 } drm_agp_head_t;
565 563
566 564
567 565 typedef struct drm_dma_handle {
568 566 ddi_dma_handle_t dma_hdl;
569 567 ddi_acc_handle_t acc_hdl;
570 568 ddi_dma_cookie_t cookie;
571 569 uint_t cookie_num;
572 570 uintptr_t vaddr; /* virtual addr */
573 571 uintptr_t paddr; /* physical addr */
574 572 size_t real_sz; /* real size of memory */
575 573 } drm_dma_handle_t;
576 574
577 575 typedef struct drm_sg_mem {
578 576 unsigned long handle;
579 577 void *virtual;
580 578 int pages;
581 579 dma_addr_t *busaddr;
582 580 ddi_umem_cookie_t *umem_cookie;
583 581 drm_dma_handle_t *dmah_sg;
584 582 drm_dma_handle_t *dmah_gart; /* Handle to PCI memory */
585 583 } drm_sg_mem_t;
586 584
587 585 /*
588 586 * Generic memory manager structs
589 587 */
590 588
591 589 struct drm_mm_node {
592 590 struct list_head fl_entry;
593 591 struct list_head ml_entry;
594 592 int free;
595 593 unsigned long start;
596 594 unsigned long size;
597 595 struct drm_mm *mm;
598 596 void *private;
599 597 };
600 598
601 599 struct drm_mm {
602 600 struct list_head fl_entry;
603 601 struct list_head ml_entry;
604 602 };
605 603
606 604 typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;
607 605
608 606 typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
609 607 typedef struct drm_vbl_sig {
610 608 TAILQ_ENTRY(drm_vbl_sig) link;
611 609 unsigned int sequence;
612 610 int signo;
613 611 int pid;
614 612 } drm_vbl_sig_t;
615 613
616 614
617 615 /* used for clone device */
618 616 typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
619 617 typedef struct drm_cminor {
620 618 TAILQ_ENTRY(drm_cminor) link;
621 619 drm_file_t *fpriv;
622 620 int minor;
623 621 } drm_cminor_t;
624 622
625 623 /* location of GART table */
626 624 #define DRM_ATI_GART_MAIN 1
627 625 #define DRM_ATI_GART_FB 2
628 626
629 627 typedef struct ati_pcigart_info {
630 628 int gart_table_location;
631 629 int is_pcie;
632 630 void *addr;
633 631 dma_addr_t bus_addr;
634 632 drm_local_map_t mapping;
635 633 } drm_ati_pcigart_info;
636 634
637 635 /* DRM device structure */
638 636 struct drm_device;
639 637 struct drm_driver_info {
640 638 int (*load)(struct drm_device *, unsigned long);
641 639 int (*firstopen)(struct drm_device *);
642 640 int (*open)(struct drm_device *, drm_file_t *);
643 641 void (*preclose)(struct drm_device *, drm_file_t *);
644 642 void (*postclose)(struct drm_device *, drm_file_t *);
645 643 void (*lastclose)(struct drm_device *);
646 644 int (*unload)(struct drm_device *);
647 645 void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
648 646 int (*presetup)(struct drm_device *);
649 647 int (*postsetup)(struct drm_device *);
650 648 int (*open_helper)(struct drm_device *, drm_file_t *);
651 649 void (*free_filp_priv)(struct drm_device *, drm_file_t *);
652 650 void (*release)(struct drm_device *, void *);
653 651 int (*dma_ioctl)(DRM_IOCTL_ARGS);
654 652 void (*dma_ready)(struct drm_device *);
655 653 int (*dma_quiescent)(struct drm_device *);
656 654 int (*dma_flush_block_and_flush)(struct drm_device *,
657 655 int, drm_lock_flags_t);
658 656 int (*dma_flush_unblock)(struct drm_device *, int,
659 657 drm_lock_flags_t);
660 658 int (*context_ctor)(struct drm_device *, int);
661 659 int (*context_dtor)(struct drm_device *, int);
662 660 int (*kernel_context_switch)(struct drm_device *, int, int);
663 661 int (*kernel_context_switch_unlock)(struct drm_device *);
664 662 int (*device_is_agp) (struct drm_device *);
665 663 int (*irq_preinstall)(struct drm_device *);
666 664 void (*irq_postinstall)(struct drm_device *);
667 665 void (*irq_uninstall)(struct drm_device *dev);
668 666 uint_t (*irq_handler)(DRM_IRQ_ARGS);
669 667 int (*vblank_wait)(struct drm_device *, unsigned int *);
670 668 int (*vblank_wait2)(struct drm_device *, unsigned int *);
671 669 /* added for intel minimized vblank */
672 670 u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
673 671 int (*enable_vblank)(struct drm_device *dev, int crtc);
674 672 void (*disable_vblank)(struct drm_device *dev, int crtc);
675 673
676 674 /*
677 675 * Driver-specific constructor for drm_gem_objects, to set up
678 676 * obj->driver_private.
679 677 *
680 678 * Returns 0 on success.
681 679 */
682 680 int (*gem_init_object) (struct drm_gem_object *obj);
683 681 void (*gem_free_object) (struct drm_gem_object *obj);
684 682
685 683
686 684 drm_ioctl_desc_t *driver_ioctls;
687 685 int max_driver_ioctl;
688 686
689 687 int buf_priv_size;
690 688 int driver_major;
691 689 int driver_minor;
692 690 int driver_patchlevel;
693 691 const char *driver_name; /* Simple driver name */
694 692 const char *driver_desc; /* Longer driver name */
695 693 const char *driver_date; /* Date of last major changes. */
696 694
697 695 unsigned use_agp :1;
698 696 unsigned require_agp :1;
699 697 unsigned use_sg :1;
700 698 unsigned use_dma :1;
701 699 unsigned use_pci_dma :1;
702 700 unsigned use_dma_queue :1;
703 701 unsigned use_irq :1;
704 702 unsigned use_vbl_irq :1;
705 703 unsigned use_vbl_irq2 :1;
706 704 unsigned use_mtrr :1;
707 705 unsigned use_gem;
708 706 };
709 707
710 708 /*
711 709 * hardware-specific code needs to initialize mutexes which
712 710 * can be used in interrupt context, so they need to know
713 711 * the interrupt priority. Interrupt cookie in drm_device
714 712 * structure is the intr_block field.
715 713 */
716 714 #define DRM_INTR_PRI(dev) \
717 715 DDI_INTR_PRI((dev)->intr_block)
718 716
719 717 struct drm_device {
720 718 drm_driver_t *driver;
721 719 drm_cminor_list_t minordevs;
722 720 dev_info_t *dip;
723 721 void *drm_handle;
724 722 int drm_supported;
725 723 const char *desc; /* current driver description */
726 724 kmutex_t *irq_mutex;
727 725 kcondvar_t *irq_cv;
728 726
729 727 ddi_iblock_cookie_t intr_block;
730 728 uint32_t pci_device; /* PCI device id */
731 729 uint32_t pci_vendor;
732 730 char *unique; /* Unique identifier: e.g., busid */
733 731 int unique_len; /* Length of unique field */
734 732 int if_version; /* Highest interface version set */
735 733 int flags; /* Flags to open(2) */
736 734
737 735 /* Locks */
738 736 kmutex_t vbl_lock; /* protects vblank operations */
739 737 kmutex_t dma_lock; /* protects dev->dma */
740 738 kmutex_t irq_lock; /* protects irq condition checks */
741 739 kmutex_t dev_lock; /* protects everything else */
742 740 drm_lock_data_t lock; /* Information on hardware lock */
743 741 kmutex_t struct_mutex; /* < For others */
744 742
745 743 /* Usage Counters */
746 744 int open_count; /* Outstanding files open */
747 745 int buf_use; /* Buffers in use -- cannot alloc */
748 746
749 747 /* Performance counters */
750 748 unsigned long counters;
751 749 drm_stat_type_t types[15];
752 750 uint32_t counts[15];
753 751
754 752 /* Authentication */
755 753 drm_file_list_t files;
756 754 drm_magic_head_t magiclist[DRM_HASH_SIZE];
757 755
758 756 /* Linked list of mappable regions. Protected by dev_lock */
759 757 drm_map_list_t maplist;
760 758
761 759 drm_local_map_t **context_sareas;
762 760 int max_context;
763 761
764 762 /* DMA queues (contexts) */
765 763 drm_device_dma_t *dma; /* Optional pointer for DMA support */
766 764
767 765 /* Context support */
768 766 int irq; /* Interrupt used by board */
769 767 int irq_enabled; /* True if the irq handler is enabled */
770 768 int pci_domain;
771 769 int pci_bus;
772 770 int pci_slot;
773 771 int pci_func;
774 772 atomic_t context_flag; /* Context swapping flag */
775 773 int last_context; /* Last current context */
776 774
777 775 /* Only used for Radeon */
778 776 atomic_t vbl_received;
779 777 atomic_t vbl_received2;
780 778
781 779 drm_vbl_sig_list_t vbl_sig_list;
782 780 drm_vbl_sig_list_t vbl_sig_list2;
783 781 /*
784 782 * At load time, disabling the vblank interrupt won't be allowed since
785 783 * old clients may not call the modeset ioctl and therefore misbehave.
786 784 * Once the modeset ioctl *has* been called though, we can safely
787 785 * disable them when unused.
788 786 */
789 787 int vblank_disable_allowed;
790 788
791 789 wait_queue_head_t vbl_queue; /* vbl wait channel */
792 790 /* vbl wait channel array */
793 791 wait_queue_head_t *vbl_queues;
794 792
795 793 /* number of VBLANK interrupts */
796 794 /* (driver must alloc the right number of counters) */
797 795 atomic_t *_vblank_count;
798 796 /* signal list to send on VBLANK */
799 797 struct drm_vbl_sig_list *vbl_sigs;
800 798
801 799 /* number of signals pending on all crtcs */
802 800 atomic_t vbl_signal_pending;
803 801 /* number of users of vblank interrupts per crtc */
804 802 atomic_t *vblank_refcount;
805 803 /* protected by dev->vbl_lock, used for wraparound handling */
806 804 u32 *last_vblank;
 807  805   	/* so we don't call enable more than once per disable */
 808  806   	atomic_t *vblank_enabled;
809 807 /* Display driver is setting mode */
810 808 int *vblank_inmodeset;
811 809 /* Don't wait while crtc is likely disabled */
812 810 int *vblank_suspend;
813 811 /* size of vblank counter register */
814 812 u32 max_vblank_count;
815 813 int num_crtcs;
816 814 kmutex_t tasklet_lock;
817 815 void (*locked_tasklet_func)(struct drm_device *dev);
818 816
819 817 pid_t buf_pgid;
820 818 drm_agp_head_t *agp;
821 819 drm_sg_mem_t *sg; /* Scatter gather memory */
822 820 uint32_t *ctx_bitmap;
823 821 void *dev_private;
824 822 unsigned int agp_buffer_token;
825 823 drm_local_map_t *agp_buffer_map;
826 824
827 825 kstat_t *asoft_ksp; /* kstat support */
828 826
 829  827   	/* \name Drawable information */
830 828 kmutex_t drw_lock;
831 829 unsigned int drw_bitfield_length;
832 830 u32 *drw_bitfield;
833 831 unsigned int drw_info_length;
834 832 drm_drawable_info_t **drw_info;
835 833
836 834 /* \name GEM information */
837 835 /* @{ */
838 836 kmutex_t object_name_lock;
839 837 struct idr_list object_name_idr;
840 838 atomic_t object_count;
841 839 atomic_t object_memory;
842 840 atomic_t pin_count;
843 841 atomic_t pin_memory;
844 842 atomic_t gtt_count;
845 843 atomic_t gtt_memory;
846 844 uint32_t gtt_total;
847 845 uint32_t invalidate_domains; /* domains pending invalidation */
848 846 uint32_t flush_domains; /* domains pending flush */
849 847 /* @} */
850 848
851 849 /*
852 850 * Saving S3 context
853 851 */
854 852 void *s3_private;
855 853 };
856 854
857 855 /* Memory management support (drm_memory.c) */
858 856 void drm_mem_init(void);
859 857 void drm_mem_uninit(void);
860 858 void *drm_alloc(size_t, int);
861 859 void *drm_calloc(size_t, size_t, int);
862 860 void *drm_realloc(void *, size_t, size_t, int);
863 861 void drm_free(void *, size_t, int);
864 862 int drm_ioremap(drm_device_t *, drm_local_map_t *);
865 863 void drm_ioremapfree(drm_local_map_t *);
866 864
867 865 void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
868 866 void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
869 867
870 868 void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
871 869 void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
872 870
873 871 struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);
874 872
875 873 int drm_context_switch(drm_device_t *, int, int);
876 874 int drm_context_switch_complete(drm_device_t *, int);
877 875 int drm_ctxbitmap_init(drm_device_t *);
878 876 void drm_ctxbitmap_cleanup(drm_device_t *);
879 877 void drm_ctxbitmap_free(drm_device_t *, int);
880 878 int drm_ctxbitmap_next(drm_device_t *);
881 879
882 880 /* Locking IOCTL support (drm_lock.c) */
883 881 int drm_lock_take(drm_lock_data_t *, unsigned int);
884 882 int drm_lock_transfer(drm_device_t *,
885 883 drm_lock_data_t *, unsigned int);
886 884 int drm_lock_free(drm_device_t *,
887 885 volatile unsigned int *, unsigned int);
888 886
889 887 /* Buffer management support (drm_bufs.c) */
890 888 unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
891 889 unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
892 890 int drm_initmap(drm_device_t *, unsigned long, unsigned long,
893 891 unsigned int, int, int);
894 892 void drm_rmmap(drm_device_t *, drm_local_map_t *);
895 893 int drm_addmap(drm_device_t *, unsigned long, unsigned long,
896 894 drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
897 895 int drm_order(unsigned long);
898 896
899 897 /* DMA support (drm_dma.c) */
900 898 int drm_dma_setup(drm_device_t *);
901 899 void drm_dma_takedown(drm_device_t *);
902 900 void drm_free_buffer(drm_device_t *, drm_buf_t *);
903 901 void drm_reclaim_buffers(drm_device_t *, drm_file_t *);
904 902 #define drm_core_reclaim_buffers drm_reclaim_buffers
905 903
906 904 /* IRQ support (drm_irq.c) */
907 905 int drm_irq_install(drm_device_t *);
908 906 int drm_irq_uninstall(drm_device_t *);
909 907 uint_t drm_irq_handler(DRM_IRQ_ARGS);
910 908 void drm_driver_irq_preinstall(drm_device_t *);
911 909 void drm_driver_irq_postinstall(drm_device_t *);
912 910 void drm_driver_irq_uninstall(drm_device_t *);
913 911 int drm_vblank_wait(drm_device_t *, unsigned int *);
914 912 void drm_vbl_send_signals(drm_device_t *);
915 913 void drm_handle_vblank(struct drm_device *dev, int crtc);
916 914 u32 drm_vblank_count(struct drm_device *dev, int crtc);
917 915 int drm_vblank_get(struct drm_device *dev, int crtc);
918 916 void drm_vblank_put(struct drm_device *dev, int crtc);
919 917 int drm_vblank_init(struct drm_device *dev, int num_crtcs);
920 918 void drm_vblank_cleanup(struct drm_device *dev);
921 919 int drm_modeset_ctl(DRM_IOCTL_ARGS);
922 920
923 921 /* AGP/GART support (drm_agpsupport.c) */
924 922 int drm_device_is_agp(drm_device_t *);
925 923 int drm_device_is_pcie(drm_device_t *);
926 924 drm_agp_head_t *drm_agp_init(drm_device_t *);
927 925 void drm_agp_fini(drm_device_t *);
928 926 int drm_agp_do_release(drm_device_t *);
929 927 void *drm_agp_allocate_memory(size_t pages,
930 928 uint32_t type, drm_device_t *dev);
931 929 int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
932 930 int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
933 931 int drm_agp_unbind_memory(unsigned long, drm_device_t *);
934 932 int drm_agp_bind_pages(drm_device_t *dev,
935 933 pfn_t *pages,
936 934 unsigned long num_pages,
937 935 uint32_t gtt_offset);
938 936 int drm_agp_unbind_pages(drm_device_t *dev,
939 937 unsigned long num_pages,
940 938 uint32_t gtt_offset,
941 939 uint32_t type);
942 940 void drm_agp_chipset_flush(struct drm_device *dev);
943 941 void drm_agp_rebind(struct drm_device *dev);
944 942
945 943 /* kstat support (drm_kstats.c) */
946 944 int drm_init_kstats(drm_device_t *);
947 945 void drm_fini_kstats(drm_device_t *);
948 946
949 947 /* Scatter Gather Support (drm_scatter.c) */
950 948 void drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);
951 949
952 950 /* ATI PCIGART support (ati_pcigart.c) */
953 951 int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
954 952 int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
955 953
956 954 /* Locking IOCTL support (drm_drv.c) */
957 955 int drm_lock(DRM_IOCTL_ARGS);
958 956 int drm_unlock(DRM_IOCTL_ARGS);
959 957 int drm_version(DRM_IOCTL_ARGS);
960 958 int drm_setversion(DRM_IOCTL_ARGS);
961 959 /* Cache management (drm_cache.c) */
962 960 void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
963 961
964 962 /* Misc. IOCTL support (drm_ioctl.c) */
965 963 int drm_irq_by_busid(DRM_IOCTL_ARGS);
966 964 int drm_getunique(DRM_IOCTL_ARGS);
967 965 int drm_setunique(DRM_IOCTL_ARGS);
968 966 int drm_getmap(DRM_IOCTL_ARGS);
969 967 int drm_getclient(DRM_IOCTL_ARGS);
970 968 int drm_getstats(DRM_IOCTL_ARGS);
971 969 int drm_noop(DRM_IOCTL_ARGS);
972 970
973 971 /* Context IOCTL support (drm_context.c) */
974 972 int drm_resctx(DRM_IOCTL_ARGS);
975 973 int drm_addctx(DRM_IOCTL_ARGS);
976 974 int drm_modctx(DRM_IOCTL_ARGS);
977 975 int drm_getctx(DRM_IOCTL_ARGS);
978 976 int drm_switchctx(DRM_IOCTL_ARGS);
979 977 int drm_newctx(DRM_IOCTL_ARGS);
980 978 int drm_rmctx(DRM_IOCTL_ARGS);
981 979 int drm_setsareactx(DRM_IOCTL_ARGS);
982 980 int drm_getsareactx(DRM_IOCTL_ARGS);
983 981
984 982 /* Drawable IOCTL support (drm_drawable.c) */
985 983 int drm_adddraw(DRM_IOCTL_ARGS);
986 984 int drm_rmdraw(DRM_IOCTL_ARGS);
987 985 int drm_update_draw(DRM_IOCTL_ARGS);
988 986
989 987 /* Authentication IOCTL support (drm_auth.c) */
990 988 int drm_getmagic(DRM_IOCTL_ARGS);
991 989 int drm_authmagic(DRM_IOCTL_ARGS);
992 990 int drm_remove_magic(drm_device_t *, drm_magic_t);
993 991 drm_file_t *drm_find_file(drm_device_t *, drm_magic_t);
994 992 /* Buffer management support (drm_bufs.c) */
995 993 int drm_addmap_ioctl(DRM_IOCTL_ARGS);
996 994 int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
997 995 int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
998 996 int drm_infobufs(DRM_IOCTL_ARGS);
999 997 int drm_markbufs(DRM_IOCTL_ARGS);
1000 998 int drm_freebufs(DRM_IOCTL_ARGS);
1001 999 int drm_mapbufs(DRM_IOCTL_ARGS);
1002 1000
1003 1001 /* DMA support (drm_dma.c) */
1004 1002 int drm_dma(DRM_IOCTL_ARGS);
1005 1003
1006 1004 /* IRQ support (drm_irq.c) */
1007 1005 int drm_control(DRM_IOCTL_ARGS);
1008 1006 int drm_wait_vblank(DRM_IOCTL_ARGS);
1009 1007
1010 1008 /* AGP/GART support (drm_agpsupport.c) */
1011 1009 int drm_agp_acquire(DRM_IOCTL_ARGS);
1012 1010 int drm_agp_release(DRM_IOCTL_ARGS);
1013 1011 int drm_agp_enable(DRM_IOCTL_ARGS);
1014 1012 int drm_agp_info(DRM_IOCTL_ARGS);
1015 1013 int drm_agp_alloc(DRM_IOCTL_ARGS);
1016 1014 int drm_agp_free(DRM_IOCTL_ARGS);
1017 1015 int drm_agp_unbind(DRM_IOCTL_ARGS);
1018 1016 int drm_agp_bind(DRM_IOCTL_ARGS);
1019 1017
1020 1018 /* Scatter Gather Support (drm_scatter.c) */
1021 1019 int drm_sg_alloc(DRM_IOCTL_ARGS);
1022 1020 int drm_sg_free(DRM_IOCTL_ARGS);
1023 1021
1024 1022 /* drm_mm.c */
1025 1023 struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
1026 1024 unsigned long size, unsigned alignment);
1027 1025 struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
1028 1026 unsigned long size,
1029 1027 unsigned alignment, int best_match);
1030 1028
1031 1029 extern void drm_mm_clean_ml(const struct drm_mm *mm);
1032 1030 extern int drm_debug_flag;
1033 1031
1034 1032 /* We add function to support DRM_DEBUG,DRM_ERROR,DRM_INFO */
1035 1033 extern void drm_debug(const char *fmt, ...);
1036 1034 extern void drm_error(const char *fmt, ...);
1037 1035 extern void drm_info(const char *fmt, ...);
1038 1036
/*
 * Debug/info tracing macros.  In DEBUG builds these expand to a bare,
 * unbraced `if' that gates a call to drm_debug()/drm_info() on the
 * global drm_debug_flag (>= 2 enables DRM_DEBUG, >= 1 enables DRM_INFO);
 * the caller's argument list attaches to the trailing function name, so
 * DRM_DEBUG("x %d", v) becomes: if (drm_debug_flag >= 2) drm_debug("x %d", v).
 *
 * NOTE(review): because the expansion is an unbraced `if' with no `else',
 * writing `if (cond) DRM_DEBUG("..."); else ...' binds the `else' to the
 * macro's hidden `if' (dangling-else).  Call sites must avoid that
 * pattern.  In non-DEBUG builds both macros expand to nothing and all
 * trace calls compile away.
 */
1039 1037 #ifdef DEBUG
1040 1038 #define	DRM_DEBUG		if (drm_debug_flag >= 2) drm_debug
1041 1039 #define	DRM_INFO		if (drm_debug_flag >= 1) drm_info
1042 1040 #else
1043 1041 #define	DRM_DEBUG(...)
1044 1042 #define	DRM_INFO(...)
1045 1043 #endif
1046 1044
/*
 * Error reporting is unconditional: DRM_ERROR always calls drm_error(),
 * regardless of drm_debug_flag and in both DEBUG and non-DEBUG builds.
 */
1047 1045 #define	DRM_ERROR		drm_error
1048 1046
1049 1047
/*
 * MAX_INSTNUMS: upper bound used by the instance/minor bookkeeping
 * (see drm_dev_to_instance()/drm_dev_to_minor() below).  Presumably the
 * maximum number of DRM device instances supported — TODO confirm
 * against the drm_supp_register() implementation.
 */
1050 1048 #define	MAX_INSTNUMS 16
1051 1049
1052 1050 extern int drm_dev_to_instance(dev_t);
1053 1051 extern int drm_dev_to_minor(dev_t);
1054 1052 extern void *drm_supp_register(dev_info_t *, drm_device_t *);
1055 1053 extern int drm_supp_unregister(void *);
1056 1054
1057 1055 extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
1058 1056 extern int drm_close(drm_device_t *, int, int, int, cred_t *);
1059 1057 extern int drm_attach(drm_device_t *);
1060 1058 extern int drm_detach(drm_device_t *);
1061 1059 extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);
1062 1060
1063 1061 extern int drm_pci_init(drm_device_t *);
1064 1062 extern void drm_pci_end(drm_device_t *);
1065 1063 extern int pci_get_info(drm_device_t *, int *, int *, int *);
1066 1064 extern int pci_get_irq(drm_device_t *);
1067 1065 extern int pci_get_vendor(drm_device_t *);
1068 1066 extern int pci_get_device(drm_device_t *);
1069 1067
1070 1068 extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
1071 1069 drm_drawable_t);
1072 1070 /* File Operations helpers (drm_fops.c) */
1073 1071 extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
1074 1072 extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
1075 1073 extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
1076 1074 cred_t *);
1077 1075
1078 1076 /* Graphics Execution Manager library functions (drm_gem.c) */
1079 1077 int drm_gem_init(struct drm_device *dev);
1080 1078 void drm_gem_object_free(struct drm_gem_object *obj);
1081 1079 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1082 1080 size_t size);
1083 1081 void drm_gem_object_handle_free(struct drm_gem_object *obj);
1084 1082
1085 1083 void drm_gem_object_reference(struct drm_gem_object *obj);
1086 1084 void drm_gem_object_unreference(struct drm_gem_object *obj);
1087 1085
1088 1086 int drm_gem_handle_create(struct drm_file *file_priv,
1089 1087 struct drm_gem_object *obj,
1090 1088 int *handlep);
1091 1089 void drm_gem_object_handle_reference(struct drm_gem_object *obj);
1092 1090
1093 1091 void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
1094 1092
1095 1093 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
1096 1094 int handle);
1097 1095 int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
1098 1096 int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
1099 1097 int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
1100 1098 void drm_gem_open(struct drm_file *file_private);
1101 1099 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1102 1100
1103 1101
1104 1102 #endif /* _DRMP_H */
↓ open down ↓ |
764 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX