drm: Partial sync with FreeBSD
[dragonfly.git] / sys / dev / drm / drm_drv.c
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <machine/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif

static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

#define DRIVER_SOFTC(unit) \
        ((struct drm_device *)devclass_get_softc(drm_devclass, unit))

MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);

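/*
 * Core DRM ioctl table, indexed by ioctl number.  Driver-specific ioctls
 * begin at DRM_COMMAND_BASE and are looked up in dev->driver->ioctls by
 * drm_ioctl() below.
 */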
static drm_ioctl_desc_t           drm_ioctls[256] = {
        DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

        DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
        DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};

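/* Character device entry points for the dri/card%d device nodes. */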
static struct dev_ops drm_cdevsw = {
        { "drm", 0, D_TRACKCLOSE },
        .d_open =       drm_open,
        .d_close =      drm_close,
        .d_read =       drm_read,
        .d_ioctl =      drm_ioctl,
        .d_kqfilter =   drm_kqfilter,
        .d_mmap =       drm_mmap
};

static int drm_msi = 1; /* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);

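/* Devices for which MSI is never used, even when hw.drm.msi is set. */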
static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
        {0x8086, 0x2772}, /* Intel i945G        */
        {0x8086, 0x27A2}, /* Intel i945GM       */
        {0x8086, 0x27AE}, /* Intel i945GME      */
        {0, 0}
};

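/* Returns 1 if the given PCI vendor/device pair is on the MSI blacklist. */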
static int drm_msi_is_blacklisted(int vendor, int device)
{
        int i = 0;

        for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
                if ((drm_msi_blacklist[i].vendor == vendor) &&
                    (drm_msi_blacklist[i].device == device)) {
                        return 1;
                }
        }

        return 0;
}

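/*
 * Match a PCI VGA display device against the driver's PCI ID list and,
 * on success, set the device description from the matching entry.
 */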
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
        drm_pci_id_list_t *id_entry;
        int vendor, device;

        vendor = pci_get_vendor(kdev);
        device = pci_get_device(kdev);

        if (pci_get_class(kdev) != PCIC_DISPLAY
            || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
                return ENXIO;

        id_entry = drm_find_description(vendor, device, idlist);
        if (id_entry != NULL) {
                if (!device_get_desc(kdev)) {
                        DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
                        device_set_desc(kdev, id_entry->name);
                }
                return 0;
        }

        return ENXIO;
}

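/*
 * Attach the device: create the dri/card%d device node, record the PCI
 * location and IDs, set up the interrupt (MSI where available and not
 * blacklisted), initialize the per-device locks and call drm_load().
 */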
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
        struct drm_device *dev;
        drm_pci_id_list_t *id_entry;
        int unit, msicount;
        int rid = 0;

        unit = device_get_unit(kdev);
        dev = device_get_softc(kdev);

        if (!strcmp(device_get_name(kdev), "drmsub"))
                dev->device = device_get_parent(kdev);
        else
                dev->device = kdev;

        dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
                                DRM_DEV_MODE, "dri/card%d", unit);

        dev->pci_domain = 0;
        dev->pci_bus = pci_get_bus(dev->device);
        dev->pci_slot = pci_get_slot(dev->device);
        dev->pci_func = pci_get_function(dev->device);

        dev->pci_vendor = pci_get_vendor(dev->device);
        dev->pci_device = pci_get_device(dev->device);

        if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
                if (drm_msi &&
                    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
                        msicount = pci_msi_count(dev->device);
                        DRM_DEBUG("MSI count = %d\n", msicount);
                        if (msicount > 1)
                                msicount = 1;

                        if (pci_alloc_msi(dev->device, &rid, msicount, -1) == 0) {
                                DRM_INFO("MSI enabled %d message(s)\n",
                                    msicount);
                                dev->msi_enabled = 1;
                                dev->irqrid = rid;
                        }
                }

                dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
                    &dev->irqrid, RF_SHAREABLE);
                if (!dev->irqr) {
                        return ENOENT;
                }

                dev->irq = (int) rman_get_start(dev->irqr);
        }

        DRM_SPININIT(&dev->dev_lock, "drmdev");
        lwkt_serialize_init(&dev->irq_lock);
        DRM_SPININIT(&dev->vbl_lock, "drmvbl");
        DRM_SPININIT(&dev->drw_lock, "drmdrw");

        id_entry = drm_find_description(dev->pci_vendor,
            dev->pci_device, idlist);
        dev->id_entry = id_entry;

        return drm_load(dev);
}

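/* Detach the device: tear down the driver and release the IRQ and MSI resources. */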
int drm_detach(device_t kdev)
{
        struct drm_device *dev;

        dev = device_get_softc(kdev);

        drm_unload(dev);

        if (dev->irqr) {
                bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
                    dev->irqr);

                if (dev->msi_enabled) {
                        pci_release_msi(dev->device);
                        DRM_INFO("MSI released\n");
                }
        }

        return 0;
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

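/*
 * Look up a PCI vendor/device pair in idlist.  An entry with device == 0
 * matches any device from that vendor.  Returns NULL if nothing matches.
 */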
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
        int i = 0;

        for (i = 0; idlist[i].vendor != 0; i++) {
                if ((idlist[i].vendor == vendor) &&
                    ((idlist[i].device == device) ||
                    (idlist[i].device == 0))) {
                        return &idlist[i];
                }
        }
        return NULL;
}

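/*
 * Per-"first open" setup, called with dev_lock held when open_count goes
 * from 0 to 1: create the SAREA map, run the driver's firstopen hook, set
 * up DMA and reset the authentication magic lists and lock state.
 */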
static int drm_firstopen(struct drm_device *dev)
{
        drm_local_map_t *map;
        int i;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        /* prebuild the SAREA */
        i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
            _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;

        if (dev->driver->firstopen)
                dev->driver->firstopen(dev);

        dev->buf_use = 0;

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i != 0)
                        return i;
        }

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->lock.lock_queue = 0;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->last_context = 0;
        dev->if_version = 0;

        dev->buf_sigio = NULL;

        DRM_DEBUG("\n");

        return 0;
}

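/*
 * Tear down device state when the last file descriptor is closed (also used
 * from the drm_load() error path and from drm_unload()), with dev_lock held:
 * run the driver's lastclose hook, uninstall the IRQ handler and release
 * magics, drawables, AGP and SG memory, maps and DMA buffers.
 */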
static int drm_lastclose(struct drm_device *dev)
{
        drm_magic_entry_t *pt, *next;
        drm_local_map_t *map, *mapsave;
        int i;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);

        DRM_DEBUG("\n");

        if (dev->driver->lastclose != NULL)
                dev->driver->lastclose(dev);

        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        if (dev->unique) {
                free(dev->unique, DRM_MEM_DRIVER);
                dev->unique = NULL;
                dev->unique_len = 0;
        }
        /* Clear pid list */
        for (i = 0; i < DRM_HASH_SIZE; i++) {
                for (pt = dev->magiclist[i].head; pt; pt = next) {
                        next = pt->next;
                        free(pt, DRM_MEM_MAGIC);
                }
                dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
        }

        DRM_UNLOCK();
        drm_drawable_free_all(dev);
        DRM_LOCK();

        /* Clear AGP information */
        if (dev->agp) {
                drm_agp_mem_t *entry;
                drm_agp_mem_t *nexte;

                /* Remove AGP resources, but leave dev->agp intact until
                 * drm_unload is called.
                 */
                for (entry = dev->agp->memory; entry; entry = nexte) {
                        nexte = entry->next;
                        if (entry->bound)
                                drm_agp_unbind_memory(entry->handle);
                        drm_agp_free_memory(entry->handle);
                        free(entry, DRM_MEM_AGPLISTS);
                }
                dev->agp->memory = NULL;

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled  = 0;
        }
        if (dev->sg != NULL) {
                drm_sg_cleanup(dev->sg);
                dev->sg = NULL;
        }

        TAILQ_FOREACH_MUTABLE(map, &dev->maplist, link, mapsave) {
                if (!(map->flags & _DRM_DRIVER))
                        drm_rmmap(dev, map);
        }

        drm_dma_takedown(dev);
        if (dev->lock.hw_lock) {
                dev->lock.hw_lock = NULL; /* SHM removed */
                dev->lock.file_priv = NULL;
                DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
        }

        return 0;
}

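/*
 * One-time device initialization at attach time: set up the map list and
 * map handle allocator, sysctl nodes and statistics counters, run the
 * driver's load hook, initialize AGP if present and allocate the context
 * bitmap.
 */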
static int drm_load(struct drm_device *dev)
{
        int i, retcode;

        DRM_DEBUG("\n");

        TAILQ_INIT(&dev->maplist);
        dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
        if (dev->map_unrhdr == NULL) {
                DRM_ERROR("Couldn't allocate map number allocator\n");
                return EINVAL;
        }

        drm_mem_init();
        drm_sysctl_init(dev);
        TAILQ_INIT(&dev->files);

        dev->counters  = 6;
        dev->types[0]  = _DRM_STAT_LOCK;
        dev->types[1]  = _DRM_STAT_OPENS;
        dev->types[2]  = _DRM_STAT_CLOSES;
        dev->types[3]  = _DRM_STAT_IOCTLS;
        dev->types[4]  = _DRM_STAT_LOCKS;
        dev->types[5]  = _DRM_STAT_UNLOCKS;

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        if (dev->driver->load != NULL) {
                DRM_LOCK();
                /* Shared code returns -errno. */
                retcode = -dev->driver->load(dev,
                    dev->id_entry->driver_private);
                pci_enable_busmaster(dev->device);
                DRM_UNLOCK();
                if (retcode != 0)
                        goto error;
        }

        if (drm_core_has_AGP(dev)) {
                if (drm_device_is_agp(dev))
                        dev->agp = drm_agp_init();
                if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
                    dev->agp == NULL) {
                        DRM_ERROR("Card isn't AGP, or couldn't initialize "
                            "AGP.\n");
                        retcode = ENOMEM;
                        goto error;
                }
                if (dev->agp != NULL) {
                        if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
                            dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
                                dev->agp->mtrr = 1;
                }
        }

        retcode = drm_ctxbitmap_init(dev);
        if (retcode != 0) {
                DRM_ERROR("Cannot allocate memory for context bitmap.\n");
                goto error;
        }

        DRM_INFO("Initialized %s %d.%d.%d %s\n",
            dev->driver->name,
            dev->driver->major,
            dev->driver->minor,
            dev->driver->patchlevel,
            dev->driver->date);

        return 0;

error:
        drm_sysctl_cleanup(dev);
        DRM_LOCK();
        drm_lastclose(dev);
        DRM_UNLOCK();
        destroy_dev(dev->devnode);

        DRM_SPINUNINIT(&dev->drw_lock);
        DRM_SPINUNINIT(&dev->vbl_lock);
        DRM_SPINUNINIT(&dev->dev_lock);

        return retcode;
}

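/*
 * Undo drm_load() at detach time: remove the sysctl nodes and device node,
 * free the context bitmap, MTRR and vblank state, release PCI map resources,
 * free AGP state, run the driver's unload hook and tear down the per-device
 * locks.
 */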
static void drm_unload(struct drm_device *dev)
{
        int i;

        DRM_DEBUG("\n");

        drm_sysctl_cleanup(dev);
        destroy_dev(dev->devnode);

        drm_ctxbitmap_cleanup(dev);

        if (dev->agp && dev->agp->mtrr) {
                int retcode;

                retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
                    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
                DRM_DEBUG("mtrr_del = %d", retcode);
        }

        drm_vblank_cleanup(dev);

        DRM_LOCK();
        drm_lastclose(dev);
        DRM_UNLOCK();

        /* Clean up PCI resources allocated by drm_bufs.c.  We're not really
         * worried about resource consumption while the DRM is inactive (between
         * lastclose and firstopen or unload) because these aren't actually
         * taking up KVA, just keeping the PCI resource allocated.
         */
        for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
                if (dev->pcir[i] == NULL)
                        continue;
                bus_release_resource(dev->device, SYS_RES_MEMORY,
                    dev->pcirid[i], dev->pcir[i]);
                dev->pcir[i] = NULL;
        }

        if (dev->agp) {
                free(dev->agp, DRM_MEM_AGPLISTS);
                dev->agp = NULL;
        }

        if (dev->driver->unload != NULL) {
                DRM_LOCK();
                dev->driver->unload(dev);
                DRM_UNLOCK();
        }

        drm_mem_uninit();

        pci_disable_busmaster(dev->device);

        DRM_SPINUNINIT(&dev->drw_lock);
        DRM_SPINUNINIT(&dev->vbl_lock);
        DRM_SPINUNINIT(&dev->dev_lock);
}

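/* DRM_IOCTL_VERSION handler: report the driver's version numbers and its name, date and description strings. */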
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_version *version = data;
        int len;

#define DRM_COPY( name, value )                                         \
        len = strlen( value );                                          \
        if ( len > name##_len ) len = name##_len;                       \
        name##_len = strlen( value );                                   \
        if ( len && name ) {                                            \
                if ( DRM_COPY_TO_USER( name, value, len ) )             \
                        return EFAULT;                                  \
        }

        version->version_major          = dev->driver->major;
        version->version_minor          = dev->driver->minor;
        version->version_patchlevel     = dev->driver->patchlevel;

        DRM_COPY(version->name, dev->driver->name);
        DRM_COPY(version->date, dev->driver->date);
        DRM_COPY(version->desc, dev->driver->desc);

        return 0;
}

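/*
 * Open entry point for the DRM device node: set up per-open file state via
 * drm_open_helper() and run drm_firstopen() on the 0 -> 1 open_count
 * transition.
 */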
int drm_open(struct dev_open_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        int flags = ap->a_oflags;
        int fmt = 0;
        struct thread *p = curthread;
        struct drm_device *dev = NULL;
        int retcode = 0;

        dev = DRIVER_SOFTC(minor(kdev));

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        retcode = drm_open_helper(kdev, flags, fmt, p, dev);

        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                DRM_LOCK();
                device_busy(dev->device);
                if (!dev->open_count++)
                        retcode = drm_firstopen(dev);
                DRM_UNLOCK();
        }

        return retcode;
}

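/*
 * Close entry point (D_TRACKCLOSE, so called for each close of the device):
 * drop a reference on the drm_file, reclaim buffers and the hardware lock
 * held by the closing process if necessary, and run drm_lastclose() when
 * the device's open count reaches zero.
 */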
int drm_close(struct dev_close_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        struct drm_file *file_priv;
        struct drm_device *dev;
        int retcode = 0;

        dev = DRIVER_SOFTC(minor(kdev));
        file_priv = drm_find_file_by_proc(dev, curthread);

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        DRM_LOCK();

        if (--file_priv->refs != 0)
                goto done;

        if (dev->driver->preclose != NULL)
                dev->driver->preclose(dev, file_priv);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
            DRM_CURRENTPID, (long)dev->device, dev->open_count);

        if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
            && dev->lock.file_priv == file_priv) {
                DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
                          DRM_CURRENTPID,
                          _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
                if (dev->driver->reclaim_buffers_locked != NULL)
                        dev->driver->reclaim_buffers_locked(dev, file_priv);

                drm_lock_free(&dev->lock,
                    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of hardware at this
                 * point, possibly processed via a callback to the X server.
                 */
        } else if (dev->driver->reclaim_buffers_locked != NULL &&
            dev->lock.hw_lock != NULL) {
                /* The lock is required to reclaim buffers */
                for (;;) {
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
                                dev->lock.file_priv = file_priv;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;  /* Got lock */
                        }
                        /* Contention */
                        tsleep_interlock((void *)&dev->lock.lock_queue, PCATCH);
                        DRM_UNLOCK();
                        retcode = tsleep((void *)&dev->lock.lock_queue,
                                         PCATCH | PINTERLOCKED, "drmlk2", 0);
                        DRM_LOCK();
                        if (retcode)
                                break;
                }
                if (retcode == 0) {
                        dev->driver->reclaim_buffers_locked(dev, file_priv);
                        drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked)
                drm_reclaim_buffers(dev, file_priv);

        funsetown(&dev->buf_sigio);

        if (dev->driver->postclose != NULL)
                dev->driver->postclose(dev, file_priv);
        TAILQ_REMOVE(&dev->files, file_priv, link);
        free(file_priv, DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */
done:
        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        device_unbusy(dev->device);
        if (--dev->open_count == 0) {
                retcode = drm_lastclose(dev);
        }

        DRM_UNLOCK();

        return (0);
}

/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct dev_ioctl_args *ap)
{
        struct cdev *kdev = ap->a_head.a_dev;
        u_long cmd = ap->a_cmd;
        caddr_t data = ap->a_data;
        struct thread *p = curthread;
        struct drm_device *dev = drm_get_device_from_kdev(kdev);
        int retcode = 0;
        drm_ioctl_desc_t *ioctl;
        int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
        int nr = DRM_IOCTL_NR(cmd);
        int is_driver_ioctl = 0;
        struct drm_file *file_priv;

        file_priv = drm_find_file_by_proc(dev, p);
        if (!file_priv) {
                DRM_ERROR("can't find authenticator\n");
                return EINVAL;
        }

        atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
        ++file_priv->ioctl_count;

        DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
            DRM_CURRENTPID, cmd, nr, (long)dev->device,
            file_priv->authenticated);

        switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
                return 0;

        case FIOSETOWN:
                return fsetown(*(int *)data, &dev->buf_sigio);

        case FIOGETOWN:
                *(int *) data = fgetown(&dev->buf_sigio);
                return 0;
        }

        if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
                DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
                return EINVAL;
        }

        ioctl = &drm_ioctls[nr];
        /* It's not a core DRM ioctl, try driver-specific. */
        if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
                /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
                nr -= DRM_COMMAND_BASE;
                if (nr > dev->driver->max_ioctl) {
                        DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
                            nr, dev->driver->max_ioctl);
                        return EINVAL;
                }
                ioctl = &dev->driver->ioctls[nr];
                is_driver_ioctl = 1;
        }
        func = ioctl->func;

        if (func == NULL) {
                DRM_DEBUG("no function\n");
                return EINVAL;
        }

        if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
            ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
            ((ioctl->flags & DRM_MASTER) && !file_priv->master))
                return EACCES;

        if (is_driver_ioctl) {
                DRM_LOCK();
                /* shared code returns -errno */
                retcode = -func(dev, data, file_priv);
                DRM_UNLOCK();
        } else {
                retcode = func(dev, data, file_priv);
        }

        if (retcode != 0)
                DRM_DEBUG("    returning %d\n", retcode);

        return retcode;
}

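/* Find the SAREA map: the shared-memory map created with _DRM_CONTAINS_LOCK. */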
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
        drm_local_map_t *map;

        DRM_SPINLOCK_ASSERT(&dev->dev_lock);
        TAILQ_FOREACH(map, &dev->maplist, link) {
                if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
                        return map;
        }

        return NULL;
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN             0x6400
#define LINUX_IOCTL_DRM_MAX             0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN    IOC_OUT
#define LINUX_IOC_OUT   IOC_IN

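/* Translate the Linux in/out direction bits to the native encoding and forward the request to ioctl(). */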
static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
        int error;
        int cmd = args->cmd;

        args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
        if (cmd & LINUX_IOC_IN)
                args->cmd |= IOC_IN;
        if (cmd & LINUX_IOC_OUT)
                args->cmd |= IOC_OUT;

        error = ioctl(p, (struct ioctl_args *)args);

        return error;
}
#endif /* DRM_LINUX */