2 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
4 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
7 * Author Rickard E. (Rik) Faith <faith@valinux.com>
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
32 #include <drm/drm_core.h>
33 #include "drm_legacy.h"
34 #include "drm_internal.h"
/*
 * Module-wide tunables and global state.  drm_debug gates debug output;
 * the vblank/timestamp knobs tune vblank IRQ auto-disable and timestamping.
 * NOTE(review): this fragment has elided lines (baked-in line numbers jump);
 * code kept verbatim.
 */
36 unsigned int drm_debug = 0; /* 1 to enable debug output */
37 EXPORT_SYMBOL(drm_debug);
39 int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
41 unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
44 * Default to use monotonic timestamps for wait-for-vblank and page-flip
47 unsigned int drm_timestamp_monotonic = 1;
49 MODULE_AUTHOR(CORE_AUTHOR);
50 MODULE_DESCRIPTION(CORE_DESC);
/* modinfo descriptions; names match the module_param_named() entries below. */
51 MODULE_PARM_DESC(debug, "Enable debug output");
52 MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
53 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
54 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
56 module_param_named(debug, drm_debug, int, 0600);
57 module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
58 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
59 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
/* drm_minor_lock protects drm_minors_idr (minor-number -> drm_minor map). */
62 static DEFINE_SPINLOCK(drm_minor_lock);
63 static struct idr drm_minors_idr;
66 struct class *drm_class;
68 static struct dentry *drm_debugfs_root;
/*
 * drm_err - print a formatted *ERROR* message, tagging it with the
 * caller's return address (%pf).  Body partially elided in this fragment
 * (va_list declaration/va_end not visible); code kept verbatim.
 */
71 void drm_err(const char *format, ...)
78 	va_start(args, format);
83 	printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
84 	       __builtin_return_address(0), &vaf);
91 EXPORT_SYMBOL(drm_err);
/*
 * drm_ut_debug_printk - debug-level printk tagged with the calling
 * function's name (used by the DRM_DEBUG* macros).
 */
93 void drm_ut_debug_printk(const char *function_name, const char *format, ...)
99 	va_start(args, format);
103 	printk(KERN_DEBUG "[" DRM_NAME ":%s] %pV", function_name, &vaf);
108 EXPORT_SYMBOL(drm_ut_debug_printk);
/*
 * drm_master_create - allocate and initialise a drm_master for @minor.
 * Sets up refcount, lock, magic hash table and magicfree list.
 * Error/return paths elided in this fragment; code kept verbatim.
 */
111 struct drm_master *drm_master_create(struct drm_minor *minor)
113 	struct drm_master *master;
115 	master = kzalloc(sizeof(*master), GFP_KERNEL);
119 	kref_init(&master->refcount);
120 	spin_lock_init(&master->lock.spinlock);
121 	init_waitqueue_head(&master->lock.lock_queue);
122 	if (drm_ht_create(&master->magiclist, DRM_MAGIC_HASH_ORDER)) {
126 	INIT_LIST_HEAD(&master->magicfree);
127 	master->minor = minor;
/* drm_master_get - take an extra reference on @master. */
132 struct drm_master *drm_master_get(struct drm_master *master)
134 	kref_get(&master->refcount);
137 EXPORT_SYMBOL(drm_master_get);
/*
 * drm_master_destroy - kref release callback: tear down a master.
 * Drops driver state, removes maps owned by this master, frees the
 * unique name and the magic hash table, all under dev->struct_mutex.
 */
139 static void drm_master_destroy(struct kref *kref)
141 	struct drm_master *master = container_of(kref, struct drm_master, refcount);
142 	struct drm_device *dev = master->minor->dev;
143 	struct drm_map_list *r_list, *list_temp;
145 	mutex_lock(&dev->struct_mutex);
146 	if (dev->driver->master_destroy)
147 		dev->driver->master_destroy(dev, master);
/* _safe iteration: drm_legacy_rmmap_locked() removes entries as we walk. */
149 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
150 		if (r_list->master == master) {
151 			drm_legacy_rmmap_locked(dev, r_list->map);
156 	if (master->unique) {
157 		kfree(master->unique);
158 		master->unique = NULL;
159 		master->unique_len = 0;
162 	drm_ht_remove(&master->magiclist);
164 	mutex_unlock(&dev->struct_mutex);
/* drm_master_put - drop a reference; destroys the master on last put. */
168 void drm_master_put(struct drm_master **master)
170 	kref_put(&(*master)->refcount, drm_master_destroy);
173 EXPORT_SYMBOL(drm_master_put);
/* drm_setmaster_ioctl - DRM_IOCTL_SET_MASTER handler (body mostly elided). */
176 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
177 			struct drm_file *file_priv)
179 	DRM_DEBUG("setmaster\n");
181 	if (file_priv->master != 0)
/* drm_dropmaster_ioctl - DRM_IOCTL_DROP_MASTER handler (body mostly elided). */
187 int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
188 			 struct drm_file *file_priv)
190 	DRM_DEBUG("dropmaster\n");
191 	if (file_priv->master != 0)
199 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
200 * of them is represented by a drm_minor object. Depending on the capabilities
201 * of the device-driver, different interfaces are registered.
203 * Minors can be accessed via dev->$minor_name. This pointer is either
204 * NULL or a valid drm_minor pointer and stays valid as long as the device is
205 * valid. This means, DRM minors have the same life-time as the underlying
206 * device. However, this doesn't mean that the minor is active. Minors are
207 * registered and unregistered dynamically according to device-state.
/*
 * drm_minor_get_slot - map a minor @type to the dev->... slot that stores it.
 * DRM_MINOR_RENDER's return line is elided in this fragment.
 */
210 static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
214 	case DRM_MINOR_LEGACY:
215 		return &dev->primary;
216 	case DRM_MINOR_RENDER:
218 	case DRM_MINOR_CONTROL:
219 		return &dev->control;
/*
 * drm_minor_alloc - allocate a drm_minor of @type, reserve its minor number
 * in drm_minors_idr and create its sysfs device.  On sysfs failure the idr
 * entry is removed again (error labels elided in this fragment).
 */
225 static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
227 	struct drm_minor *minor;
231 	minor = kzalloc(sizeof(*minor), GFP_KERNEL);
/* idr_preload + irqsave spinlock: allocation must not sleep under the lock. */
238 	idr_preload(GFP_KERNEL);
239 	spin_lock_irqsave(&drm_minor_lock, flags);
240 	r = idr_alloc(&drm_minors_idr,
245 	spin_unlock_irqrestore(&drm_minor_lock, flags);
253 	minor->kdev = drm_sysfs_minor_alloc(minor);
254 	if (IS_ERR(minor->kdev)) {
255 		r = PTR_ERR(minor->kdev);
259 	*drm_minor_get_slot(dev, type) = minor;
/* error unwind: release the reserved minor number */
263 	spin_lock_irqsave(&drm_minor_lock, flags);
264 	idr_remove(&drm_minors_idr, minor->index);
265 	spin_unlock_irqrestore(&drm_minor_lock, flags);
/*
 * drm_minor_free - counterpart of drm_minor_alloc(): drop the sysfs device
 * reference, destroy the mode group and release the idr slot.
 */
271 static void drm_minor_free(struct drm_device *dev, unsigned int type)
273 	struct drm_minor **slot, *minor;
276 	slot = drm_minor_get_slot(dev, type);
281 	drm_mode_group_destroy(&minor->mode_group);
282 	put_device(minor->kdev);
284 	spin_lock_irqsave(&drm_minor_lock, flags);
285 	idr_remove(&drm_minors_idr, minor->index);
286 	spin_unlock_irqrestore(&drm_minor_lock, flags);
/*
 * drm_minor_register - activate a previously allocated minor: create its
 * debugfs directory, add the char-dev, then publish it in drm_minors_idr
 * so drm_minor_acquire() lookups succeed.
 */
292 static int drm_minor_register(struct drm_device *dev, unsigned int type)
294 	struct drm_minor *minor;
300 	minor = *drm_minor_get_slot(dev, type);
304 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
306 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
310 	ret = device_add(minor->kdev);
314 	/* replace NULL with @minor so lookups will succeed from now on */
315 	spin_lock_irqsave(&drm_minor_lock, flags);
316 	idr_replace(&drm_minors_idr, minor, minor->index);
317 	spin_unlock_irqrestore(&drm_minor_lock, flags);
319 	DRM_DEBUG("new minor registered %d\n", minor->index);
/* error path: undo the debugfs setup */
323 	drm_debugfs_cleanup(minor);
/*
 * drm_minor_unregister - reverse of drm_minor_register(): hide the minor
 * from idr lookups first, then remove the char-dev and debugfs entries.
 */
327 static void drm_minor_unregister(struct drm_device *dev, unsigned int type)
329 	struct drm_minor *minor;
332 	minor = *drm_minor_get_slot(dev, type);
333 	if (!minor || !device_is_registered(minor->kdev))
336 	/* replace @minor with NULL so lookups will fail from now on */
337 	spin_lock_irqsave(&drm_minor_lock, flags);
338 	idr_replace(&drm_minors_idr, NULL, minor->index);
339 	spin_unlock_irqrestore(&drm_minor_lock, flags);
341 	device_del(minor->kdev);
342 	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
343 	drm_debugfs_cleanup(minor);
347 * drm_minor_acquire - Acquire a DRM minor
348 * @minor_id: Minor ID of the DRM-minor
350 * Looks up the given minor-ID and returns the respective DRM-minor object. The
351 * reference-count of the underlying device is increased so you must release this
352 * object with drm_minor_release().
354 * As long as you hold this minor, it is guaranteed that the object and the
355 * minor->dev pointer will stay valid! However, the device may get unplugged and
356 * unregistered while you hold the minor.
359 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
362 struct drm_minor *drm_minor_acquire(unsigned int minor_id)
364 	struct drm_minor *minor;
/* take the device ref under drm_minor_lock so the minor can't vanish */
367 	spin_lock_irqsave(&drm_minor_lock, flags);
368 	minor = idr_find(&drm_minors_idr, minor_id);
370 		drm_dev_ref(minor->dev);
371 	spin_unlock_irqrestore(&drm_minor_lock, flags);
374 		return ERR_PTR(-ENODEV);
375 	} else if (drm_device_is_unplugged(minor->dev)) {
/* device is gone: drop the ref we just took and fail the lookup */
376 		drm_dev_unref(minor->dev);
377 		return ERR_PTR(-ENODEV);
384 * drm_minor_release - Release DRM minor
385 * @minor: Pointer to DRM minor object
387 * Release a minor that was previously acquired via drm_minor_acquire().
389 void drm_minor_release(struct drm_minor *minor)
391 	drm_dev_unref(minor->dev);
395 * drm_put_dev - Unregister and release a DRM device
398 * Called at module unload time or when a PCI device is unplugged.
400 * Use of this function is discouraged. It will eventually go away completely.
401 * Please use drm_dev_unregister() and drm_dev_unref() explicitly instead.
403 * Cleans up all DRM device, calling drm_lastclose().
405 void drm_put_dev(struct drm_device *dev)
/* NULL guard (surrounding if/return lines elided in this fragment) */
410 		DRM_ERROR("cleanup called no dev\n");
414 	drm_dev_unregister(dev);
417 EXPORT_SYMBOL(drm_put_dev);
/*
 * drm_unplug_dev - mark a hot-unpluggable device (e.g. USB) as gone:
 * unregister all minors, then flag the device unplugged under the global
 * mutex; final teardown happens when open_count drops to zero.
 */
419 void drm_unplug_dev(struct drm_device *dev)
421 	/* for a USB device */
422 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
423 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
424 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
426 	mutex_lock(&drm_global_mutex);
428 	drm_device_set_unplugged(dev);
430 	if (dev->open_count == 0) {
433 	mutex_unlock(&drm_global_mutex);
435 EXPORT_SYMBOL(drm_unplug_dev);
439 * We want to be able to allocate our own "struct address_space" to control
440 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
441 * stand-alone address_space objects, so we need an underlying inode. As there
442 * is no way to allocate an independent inode easily, we need a fake internal
445 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
446 * frees it again. You are allowed to use iget() and iput() to get references to
447 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
448 * drm_fs_inode_free() call (which does not have to be the last iput()).
449 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
450 * between multiple inode-users. You could, technically, call
451 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
452 * iput(), but this way you'd end up with a new vfsmount for each inode.
/* refcounted pseudo-fs mount shared by all drm_fs inodes */
455 static int drm_fs_cnt;
456 static struct vfsmount *drm_fs_mnt;
458 static const struct dentry_operations drm_fs_dops = {
459 	.d_dname = simple_dname,
462 static const struct super_operations drm_fs_sops = {
463 	.statfs = simple_statfs,
/* mount callback for the internal pseudo filesystem */
466 static struct dentry *drm_fs_mount(struct file_system_type *fs_type, int flags,
467 		   const char *dev_name, void *data)
469 	return mount_pseudo(fs_type,
476 static struct file_system_type drm_fs_type = {
478 	.owner = THIS_MODULE,
479 	.mount = drm_fs_mount,
480 	.kill_sb = kill_anon_super,
/*
 * drm_fs_inode_new - pin the shared pseudo-fs mount and allocate an
 * anonymous inode on it; on inode failure the mount pin is released.
 */
483 static struct inode *drm_fs_inode_new(void)
488 	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
490 		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
494 	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
496 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
/* drm_fs_inode_free - drop the inode (iput elided) and unpin the mount */
501 static void drm_fs_inode_free(struct inode *inode)
505 		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
510 * drm_dev_alloc - Allocate new DRM device
511 * @driver: DRM driver to allocate device for
512 * @parent: Parent device object
514 * Allocate and initialize a new DRM device. No device registration is done.
515 * Call drm_dev_register() to advertise the device to user space and register it
516 * with other core subsystems.
518 * The initial ref-count of the object is 1. Use drm_dev_ref() and
519 * drm_dev_unref() to take and drop further ref-counts.
521 * Note that for purely virtual devices @parent can be NULL.
524 * Pointer to new DRM device, or NULL if out of memory.
526 struct drm_device *drm_dev_alloc(struct drm_driver *driver,
527 				 struct device *parent)
529 	struct drm_device *dev;
532 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
536 	kref_init(&dev->ref);
538 	dev->driver = driver;
/* list heads and locks used throughout the device's lifetime */
540 	INIT_LIST_HEAD(&dev->filelist);
541 	INIT_LIST_HEAD(&dev->ctxlist);
542 	INIT_LIST_HEAD(&dev->vmalist);
543 	INIT_LIST_HEAD(&dev->maplist);
544 	INIT_LIST_HEAD(&dev->vblank_event_list);
546 	spin_lock_init(&dev->buf_lock);
547 	spin_lock_init(&dev->event_lock);
548 	mutex_init(&dev->struct_mutex);
549 	mutex_init(&dev->ctxlist_mutex);
550 	mutex_init(&dev->master_mutex);
552 	dev->anon_inode = drm_fs_inode_new();
553 	if (IS_ERR(dev->anon_inode)) {
554 		ret = PTR_ERR(dev->anon_inode);
555 		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
/* optional minors depend on driver features; LEGACY is always allocated */
559 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
560 		ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
565 	if (drm_core_check_feature(dev, DRIVER_RENDER)) {
566 		ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
571 	ret = drm_minor_alloc(dev, DRM_MINOR_LEGACY);
575 	if (drm_ht_create(&dev->map_hash, 12))
578 	ret = drm_legacy_ctxbitmap_init(dev);
580 		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
584 	if (drm_core_check_feature(dev, DRIVER_GEM)) {
585 		ret = drm_gem_init(dev);
587 			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
/* error unwind in reverse order of setup (labels elided in this fragment) */
595 	drm_legacy_ctxbitmap_cleanup(dev);
597 	drm_ht_remove(&dev->map_hash);
599 	drm_minor_free(dev, DRM_MINOR_LEGACY);
600 	drm_minor_free(dev, DRM_MINOR_RENDER);
601 	drm_minor_free(dev, DRM_MINOR_CONTROL);
602 	drm_fs_inode_free(dev->anon_inode);
604 	mutex_destroy(&dev->master_mutex);
608 EXPORT_SYMBOL(drm_dev_alloc);
/*
 * drm_dev_release - kref release callback: free everything set up by
 * drm_dev_alloc(), in reverse order (kfree of @dev itself is elided).
 */
610 static void drm_dev_release(struct kref *ref)
612 	struct drm_device *dev = container_of(ref, struct drm_device, ref);
614 	if (drm_core_check_feature(dev, DRIVER_GEM))
615 		drm_gem_destroy(dev);
617 	drm_legacy_ctxbitmap_cleanup(dev);
618 	drm_ht_remove(&dev->map_hash);
619 	drm_fs_inode_free(dev->anon_inode);
621 	drm_minor_free(dev, DRM_MINOR_LEGACY);
622 	drm_minor_free(dev, DRM_MINOR_RENDER);
623 	drm_minor_free(dev, DRM_MINOR_CONTROL);
625 	mutex_destroy(&dev->master_mutex);
631 * drm_dev_ref - Take reference of a DRM device
632 * @dev: device to take reference of or NULL
634 * This increases the ref-count of @dev by one. You *must* already own a
635 * reference when calling this. Use drm_dev_unref() to drop this reference
638 * This function never fails. However, this function does not provide *any*
639 * guarantee whether the device is alive or running. It only provides a
640 * reference to the object and the memory associated with it.
642 void drm_dev_ref(struct drm_device *dev)
647 EXPORT_SYMBOL(drm_dev_ref);
650 * drm_dev_unref - Drop reference of a DRM device
651 * @dev: device to drop reference of or NULL
653 * This decreases the ref-count of @dev by one. The device is destroyed if the
654 * ref-count drops to zero.
656 void drm_dev_unref(struct drm_device *dev)
/* last unref triggers drm_dev_release() */
659 		kref_put(&dev->ref, drm_dev_release);
661 EXPORT_SYMBOL(drm_dev_unref);
664 * drm_dev_register - Register DRM device
665 * @dev: Device to register
666 * @flags: Flags passed to the driver's .load() function
668 * Register the DRM device @dev with the system, advertise device to user-space
669 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
672 * Never call this twice on any device!
675 * 0 on success, negative error code on failure.
677 int drm_dev_register(struct drm_device *dev, unsigned long flags)
681 	mutex_lock(&drm_global_mutex);
/* register minors first, then let the driver load, then modeset groups */
683 	ret = drm_minor_register(dev, DRM_MINOR_CONTROL);
687 	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
691 	ret = drm_minor_register(dev, DRM_MINOR_LEGACY);
695 	if (dev->driver->load) {
696 		ret = dev->driver->load(dev, flags);
701 	/* setup grouping for legacy outputs */
702 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
703 		ret = drm_mode_group_init_legacy_group(dev,
704 				&dev->primary->mode_group);
/* error unwind: driver unload, then unregister minors (labels elided) */
713 	if (dev->driver->unload)
714 		dev->driver->unload(dev);
716 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
717 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
718 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
720 	mutex_unlock(&drm_global_mutex);
723 EXPORT_SYMBOL(drm_dev_register);
726 * drm_dev_unregister - Unregister DRM device
727 * @dev: Device to unregister
729 * Unregister the DRM device from the system. This does the reverse of
730 * drm_dev_register() but does not deallocate the device. The caller must call
731 * drm_dev_unref() to drop their final reference.
733 void drm_dev_unregister(struct drm_device *dev)
735 	struct drm_map_list *r_list, *list_temp;
739 	if (dev->driver->unload)
740 		dev->driver->unload(dev);
743 	drm_pci_agp_destroy(dev);
745 	drm_vblank_cleanup(dev);
/* tear down any remaining legacy maps before dropping the minors */
747 	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
748 		drm_legacy_rmmap(dev, r_list->map);
750 	drm_minor_unregister(dev, DRM_MINOR_LEGACY);
751 	drm_minor_unregister(dev, DRM_MINOR_RENDER);
752 	drm_minor_unregister(dev, DRM_MINOR_CONTROL);
754 EXPORT_SYMBOL(drm_dev_unregister);
757 * drm_dev_set_unique - Set the unique name of a DRM device
758 * @dev: device of which to set the unique name
759 * @fmt: format string for unique name
761 * Sets the unique name of a DRM device using the specified format string and
762 * a variable list of arguments. Drivers can use this at driver probe time if
763 * the unique name of the devices they drive is static.
765 * Return: 0 on success or a negative error code on failure.
767 int drm_dev_set_unique(struct drm_device *dev, const char *fmt, ...)
/* kvasprintf allocates the formatted string; -ENOMEM if it fails */
774 	dev->unique = kvasprintf(GFP_KERNEL, fmt, ap);
777 	return dev->unique ? 0 : -ENOMEM;
779 EXPORT_SYMBOL(drm_dev_set_unique);
784 * The DRM core module initializes all global DRM objects and makes them
785 * available to drivers. Once setup, drivers can probe their respective
787 * Currently, core management includes:
788 *  - The "DRM-Global" key/value database
789 *  - Global ID management for connectors
790 *  - DRM major number allocation
791 *  - DRM minor management
795 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
796 * interface registered on a DRM device, you can request minor numbers from DRM
797 * core. DRM core takes care of major-number management and char-dev
798 * registration. A stub ->open() callback forwards any open() requests to the
/*
 * drm_stub_open - ->open() for the DRM major: look up the minor, swap in
 * the owning driver's fops and forward the open() to it.
 */
803 static int drm_stub_open(struct inode *inode, struct file *filp)
805 	const struct file_operations *new_fops;
806 	struct drm_minor *minor;
811 	mutex_lock(&drm_global_mutex);
812 	minor = drm_minor_acquire(iminor(inode));
814 		err = PTR_ERR(minor);
818 	new_fops = fops_get(minor->dev->driver->fops);
/* hand this file over to the driver's fops, then call its open() */
824 	replace_fops(filp, new_fops);
825 	if (filp->f_op->open)
826 		err = filp->f_op->open(inode, filp);
831 	drm_minor_release(minor);
833 	mutex_unlock(&drm_global_mutex);
/* fops installed on the DRM major; real drivers take over via drm_stub_open */
837 static const struct file_operations drm_stub_fops = {
838 	.owner = THIS_MODULE,
839 	.open = drm_stub_open,
840 	.llseek = noop_llseek,
/*
 * drm_core_init - module init: set up connector IDA and minors idr,
 * grab the DRM major, create the sysfs class and the debugfs root.
 * Error labels are elided in this fragment; unwind order is visible below.
 */
843 static int __init drm_core_init(void)
848 	drm_connector_ida_init();
849 	idr_init(&drm_minors_idr);
851 	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
854 	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
855 	if (IS_ERR(drm_class)) {
856 		printk(KERN_ERR "DRM: Error creating drm class.\n");
857 		ret = PTR_ERR(drm_class);
861 	drm_debugfs_root = debugfs_create_dir("dri", NULL);
862 	if (!drm_debugfs_root) {
863 		DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
868 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
869 		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
/* error unwind */
874 	unregister_chrdev(DRM_MAJOR, "drm");
876 	idr_destroy(&drm_minors_idr);
/* drm_core_exit - module exit: undo drm_core_init() in reverse order. */
881 static void __exit drm_core_exit(void)
883 	debugfs_remove(drm_debugfs_root);
886 	unregister_chrdev(DRM_MAJOR, "drm");
888 	drm_connector_ida_destroy();
889 	idr_destroy(&drm_minors_idr);
892 module_init(drm_core_init);
893 module_exit(drm_core_exit);
896 #include <sys/devfs.h>
898 #include <linux/export.h>
899 #include <linux/dmi.h>
900 #include <drm/drmP.h>
901 #include <drm/drm_core.h>
/*
 * NOTE(review): from here on this is a different source — a DragonFly/FreeBSD
 * drm_drv.c fragment concatenated after the Linux one.  Debug-bit defaults,
 * tunables, module glue, the character-device switch and sysctl knobs.
 */
903 #if DRM_DEBUG_DEFAULT_ON == 1
904 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
905     DRM_DEBUGBITS_FAILED_IOCTL)
906 #elif DRM_DEBUG_DEFAULT_ON == 2
907 #define DRM_DEBUGBITS_ON (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS | \
908     DRM_DEBUGBITS_FAILED_IOCTL | DRM_DEBUGBITS_VERBOSE)
910 #define DRM_DEBUGBITS_ON (0x0)
913 int drm_notyet_flag = 0;
/* forward declarations for routines defined later in this file */
915 static int drm_load(struct drm_device *dev);
916 drm_pci_id_list_t *drm_find_description(int vendor, int device,
917     drm_pci_id_list_t *idlist);
919 #define DRIVER_SOFTC(unit) \
920 	((struct drm_device *)devclass_get_softc(drm_devclass, unit))
/* module event handler: fetches loader tunables on load (body elided) */
923 drm_modevent(module_t mod, int type, void *data)
928 		TUNABLE_INT_FETCH("drm.debug", &drm_debug);
929 		TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
935 static moduledata_t drm_mod = {
940 DECLARE_MODULE(drm, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
941 MODULE_VERSION(drm, 1);
942 MODULE_DEPEND(drm, agp, 1, 1, 1);
943 MODULE_DEPEND(drm, pci, 1, 1, 1);
944 MODULE_DEPEND(drm, iicbus, 1, 1, 1);
/* character-device entry points for /dev/dri/card%d */
946 static struct dev_ops drm_cdevsw = {
947 	{ "drm", 0, D_TRACKCLOSE | D_MPSAFE },
949 	.d_close = drm_close,
951 	.d_ioctl = drm_ioctl,
952 	.d_kqfilter = drm_kqfilter,
954 	.d_mmap_single = drm_mmap_single,
957 static int drm_msi = 0;	/* Disable by default. This is because there are issues with
958 				   freezes using MSI and i915
960 TUNABLE_INT("hw.drm.msi.enable", &drm_msi);
961 SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
962 SYSCTL_NODE(_hw_drm, OID_AUTO, msi, CTLFLAG_RW, NULL, "DRM device msi");
963 SYSCTL_INT(_hw_drm_msi, OID_AUTO, enable, CTLFLAG_RD, &drm_msi, 0,
964     "Enable MSI interrupts for drm devices");
965 SYSCTL_INT(_hw_drm, OID_AUTO, debug, CTLFLAG_RW, &drm_debug, 0,
/* PCI IDs known to misbehave with MSI; terminated by a zero-vendor entry. */
968 static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
969 	{0x8086, 0x2772}, /* Intel i945G	*/ \
970 	{0x8086, 0x27A2}, /* Intel i945GM	*/ \
971 	{0x8086, 0x27AE}, /* Intel i945GME	*/ \
/*
 * drm_msi_is_blacklisted - decide whether MSI must be avoided for @dev.
 * A driver-provided use_msi() callback takes precedence over the static
 * vendor/device blacklist above (return statements elided in this fragment).
 */
975 static int drm_msi_is_blacklisted(struct drm_device *dev, unsigned long flags)
979 	if (dev->driver->use_msi != NULL) {
982 		use_msi = dev->driver->use_msi(dev, flags);
987 	/* TODO: Maybe move this to a callback in i915? */
988 	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
989 		if ((drm_msi_blacklist[i].vendor == dev->pci_vendor) &&
990 		    (drm_msi_blacklist[i].device == dev->pci_device)) {
/*
 * drm_probe - newbus probe: match the PCI vendor/device against @idlist
 * and set the device description on a hit.  Non-display-class devices
 * are rejected early (return values elided in this fragment).
 */
998 int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
1000 	drm_pci_id_list_t *id_entry;
1003 	vendor = pci_get_vendor(kdev);
1004 	device = pci_get_device(kdev);
1006 	if (pci_get_class(kdev) != PCIC_DISPLAY)
1009 	id_entry = drm_find_description(vendor, device, idlist);
1010 	if (id_entry != NULL) {
1011 		if (!device_get_desc(kdev)) {
1012 			DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
1013 			device_set_desc(kdev, id_entry->name);
/*
 * drm_attach - newbus attach: cache PCI identity, decide on MSI, allocate
 * the IRQ resource, initialise locks, then run drm_load() and create the
 * /dev nodes.  Error labels are elided in this fragment; the visible tail
 * releases the IRQ resource and any MSI allocation.
 */
1021 int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
1023 	struct drm_device *dev;
1024 	drm_pci_id_list_t *id_entry;
1029 	unit = device_get_unit(kdev);
1030 	dev = device_get_softc(kdev);
/* "drmsub" means we attach below another driver; use the parent device */
1032 	if (!strcmp(device_get_name(kdev), "drmsub"))
1033 		dev->dev = device_get_parent(kdev);
1037 	dev->pci_domain = pci_get_domain(dev->dev);
1038 	dev->pci_bus = pci_get_bus(dev->dev);
1039 	dev->pci_slot = pci_get_slot(dev->dev);
1040 	dev->pci_func = pci_get_function(dev->dev);
1042 	dev->pci_vendor = pci_get_vendor(dev->dev);
1043 	dev->pci_device = pci_get_device(dev->dev);
1044 	dev->pci_subvendor = pci_get_subvendor(dev->dev);
1045 	dev->pci_subdevice = pci_get_subdevice(dev->dev);
1047 	id_entry = drm_find_description(dev->pci_vendor,
1048 	    dev->pci_device, idlist);
1049 	dev->id_entry = id_entry;
1051 	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
1052 		msi_enable = drm_msi;
/* blacklist overrides the global hw.drm.msi.enable tunable */
1054 		if (drm_msi_is_blacklisted(dev, dev->id_entry->driver_private)) {
1058 		dev->irq_type = pci_alloc_1intr(dev->dev, msi_enable,
1059 		    &dev->irqrid, &irq_flags);
1061 		dev->irqr = bus_alloc_resource_any(dev->dev, SYS_RES_IRQ,
1062 		    &dev->irqrid, irq_flags);
1068 		dev->irq = (int) rman_get_start(dev->irqr);
1071 	lockinit(&dev->dev_lock, "drmdev", 0, LK_CANRECURSE);
1072 	lwkt_serialize_init(&dev->irq_lock);
1073 	lockinit(&dev->event_lock, "drmev", 0, LK_CANRECURSE);
1074 	lockinit(&dev->struct_mutex, "drmslk", 0, LK_CANRECURSE);
1076 	error = drm_load(dev);
1080 		error = drm_create_cdevs(kdev);
/* error unwind: release IRQ resource and MSI if it was allocated */
1087 	bus_release_resource(dev->dev, SYS_RES_IRQ,
1088 	    dev->irqrid, dev->irqr);
1090 	if (dev->irq_type == PCI_INTR_TYPE_MSI) {
1091 		pci_release_msi(dev->dev);
/*
 * drm_create_cdevs - create the /dev/dri/card%d node for this unit and
 * stash the drm_device pointer in si_drv1 for later lookup.
 */
1097 drm_create_cdevs(device_t kdev)
1099 	struct drm_device *dev;
1102 	unit = device_get_unit(kdev);
1103 	dev = device_get_softc(kdev);
1105 	dev->devnode = make_dev(&drm_cdevsw, unit, DRM_DEV_UID, DRM_DEV_GID,
1106 				DRM_DEV_MODE, "dri/card%d", unit);
1109 		dev->devnode->si_drv1 = dev;
1113 #ifndef DRM_DEV_NAME
1114 #define DRM_DEV_NAME "drm"
1117 devclass_t drm_devclass;
/*
 * drm_find_description - linear scan of the zero-terminated @idlist for a
 * vendor/device match; a zero device entry in the list acts as a wildcard.
 */
1119 drm_pci_id_list_t *drm_find_description(int vendor, int device,
1120     drm_pci_id_list_t *idlist)
1124 	for (i = 0; idlist[i].vendor != 0; i++) {
1125 		if ((idlist[i].vendor == vendor) &&
1126 		    ((idlist[i].device == device) ||
1127 		    (idlist[i].device == 0))) {
/*
 * drm_load - one-time device setup after attach: sysctl tree, stat
 * counters, AGP detection/MTRR, optional GEM init and the driver's own
 * load() hook.  Error labels are elided; the visible tail tears down GEM,
 * sysctl, the devnode and the locks in reverse order.
 */
1134 static int drm_load(struct drm_device *dev)
1140 	INIT_LIST_HEAD(&dev->maplist);
1143 	drm_sysctl_init(dev);
1144 	INIT_LIST_HEAD(&dev->filelist);
/* statistics slots exported via dev->counts[] below */
1147 	dev->types[0] = _DRM_STAT_LOCK;
1148 	dev->types[1] = _DRM_STAT_OPENS;
1149 	dev->types[2] = _DRM_STAT_CLOSES;
1150 	dev->types[3] = _DRM_STAT_IOCTLS;
1151 	dev->types[4] = _DRM_STAT_LOCKS;
1152 	dev->types[5] = _DRM_STAT_UNLOCKS;
1154 	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
1155 		atomic_set(&dev->counts[i], 0);
1157 	INIT_LIST_HEAD(&dev->vblank_event_list);
1159 	if (drm_core_has_AGP(dev)) {
1160 		if (drm_device_is_agp(dev))
1161 			dev->agp = drm_agp_init();
1162 		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
1164 			DRM_ERROR("Card isn't AGP, or couldn't initialize "
/* mark the AGP aperture write-combining if an aperture exists */
1169 		if (dev->agp != NULL && dev->agp->agp_info.ai_aperture_base != 0) {
1170 			if (drm_mtrr_add(dev->agp->agp_info.ai_aperture_base,
1171 			    dev->agp->agp_info.ai_aperture_size, DRM_MTRR_WC) == 0)
1172 				dev->agp->agp_mtrr = 1;
1176 	if (dev->driver->driver_features & DRIVER_GEM) {
1177 		retcode = drm_gem_init(dev);
1179 			DRM_ERROR("Cannot initialize graphics execution "
1185 	if (dev->driver->load != NULL) {
1187 		/* Shared code returns -errno. */
1188 		retcode = -dev->driver->load(dev,
1189 		    dev->id_entry->driver_private);
1190 		if (pci_enable_busmaster(dev->dev))
1191 			DRM_ERROR("Request to enable bus-master failed.\n");
1197 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1201 	    dev->driver->patchlevel,
/* error unwind (labels elided): GEM, sysctl, devnode, locks */
1207 	drm_gem_destroy(dev);
1209 	drm_sysctl_cleanup(dev);
1213 	if (dev->devnode != NULL)
1214 		destroy_dev(dev->devnode);
1216 	lockuninit(&dev->vbl_lock);
1217 	lockuninit(&dev->dev_lock);
1218 	lockuninit(&dev->event_lock);
1219 	lockuninit(&dev->struct_mutex);
1225 * Stub is needed for devfs
1227 int drm_close(struct dev_close_args *ap)
/*
 * drm_cdevpriv_dtor - per-open-file destructor (inlined drm_release):
 * runs driver pre/postclose hooks, releases GEM state, frees a hardware
 * lock held by the dying process, reclaims DMA buffers and calls
 * drm_lastclose() when the final reference goes away.
 */
1232 void drm_cdevpriv_dtor(void *cd)
1234 	struct drm_file *file_priv = cd;
1235 	struct drm_device *dev = file_priv->dev;
1238 	DRM_DEBUG("open_count = %d\n", dev->open_count);
1242 	if (dev->driver->preclose != NULL)
1243 		dev->driver->preclose(dev, file_priv);
1245 	/* ========================================================
1246 	 * Begin inline drm_release
1249 	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
1250 	    DRM_CURRENTPID, (long)dev->dev, dev->open_count);
1252 	if (dev->driver->driver_features & DRIVER_GEM)
1253 		drm_gem_release(dev, file_priv);
/* this file held the HW lock: reclaim buffers and free the lock */
1255 	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
1256 	    && dev->lock.file_priv == file_priv) {
1257 		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
1259 			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
1260 		if (dev->driver->reclaim_buffers_locked != NULL)
1261 			dev->driver->reclaim_buffers_locked(dev, file_priv);
1263 		drm_lock_free(&dev->lock,
1264 		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
1266 		/* FIXME: may require heavy-handed reset of
1267                    hardware at this point, possibly
1268                    processed via a callback to the X
1270 	} else if (dev->driver->reclaim_buffers_locked != NULL &&
1271 	    dev->lock.hw_lock != NULL) {
1272 		/* The lock is required to reclaim buffers */
1274 			if (!dev->lock.hw_lock) {
1275 				/* Device has been unregistered */
1280 			retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
1281 			    PCATCH, "drmlk2", 0);
1286 			dev->driver->reclaim_buffers_locked(dev, file_priv);
1290 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
1291 	    !dev->driver->reclaim_buffers_locked)
1292 		drm_legacy_reclaim_buffers(dev, file_priv);
1294 	funsetown(&dev->buf_sigio);
1296 	if (dev->driver->postclose != NULL)
1297 		dev->driver->postclose(dev, file_priv);
1298 	list_del(&file_priv->lhead);
1301 	/* ========================================================
1302 	 * End inline drm_release
1305 	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
1306 	device_unbusy(dev->dev);
/* last close of the device triggers full teardown */
1307 	if (--dev->open_count == 0) {
1308 		retcode = drm_lastclose(dev);
/*
 * drm_add_busid_modesetting - publish the PCI bus ID string and the
 * modesetting capability flag as read-only sysctl leaves under @top.
 */
1315 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
1316     struct sysctl_oid *top)
1318 	struct sysctl_oid *oid;
1320 	ksnprintf(dev->busid_str, sizeof(dev->busid_str),
1321 	     "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
1322 	     dev->pci_slot, dev->pci_func);
1323 	oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
1324 	    CTLFLAG_RD, dev->busid_str, 0, NULL);
1327 	dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
1328 	oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
1329 	    "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
/*
 * drm_mmap_single - d_mmap_single entry point: dispatch the mapping
 * request to TTM when a TTM device exists, otherwise to GEM when the
 * driver supports it (fallthrough/error return elided in this fragment).
 */
1337 drm_mmap_single(struct dev_mmap_single_args *ap)
1339 	struct drm_device *dev;
1340 	struct cdev *kdev = ap->a_head.a_dev;
1341 	vm_ooffset_t *offset = ap->a_offset;
1342 	vm_size_t size = ap->a_size;
1343 	struct vm_object **obj_res = ap->a_object;
1344 	int nprot = ap->a_nprot;
1346 	dev = drm_get_device_from_kdev(kdev);
1347 	if (dev->drm_ttm_bdev != NULL) {
1348 		return (ttm_bo_mmap_single(dev->drm_ttm_bdev, offset, size,
1350 	} else if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
1351 		return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
1359 #include <sys/sysproto.h>
/* Linux-binary compatibility: route Linux ioctl numbers to the native handler. */
1361 MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
1363 #define LINUX_IOCTL_DRM_MIN	0x6400
1364 #define LINUX_IOCTL_DRM_MAX	0x64ff
1366 static linux_ioctl_function_t drm_linux_ioctl;
1367 static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
1368     LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
1370 /* The bits for in/out are switched on Linux */
1371 #define LINUX_IOC_IN	IOC_OUT
1372 #define LINUX_IOC_OUT	IOC_IN
/*
 * drm_linux_ioctl - translate the Linux ioctl direction bits (which are
 * swapped relative to BSD) and forward to the native ioctl() path.
 */
1375 drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1378 	int cmd = args->cmd;
1380 	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1381 	if (cmd & LINUX_IOC_IN)
1382 		args->cmd |= IOC_IN;
1383 	if (cmd & LINUX_IOC_OUT)
1384 		args->cmd |= IOC_OUT;
1386 	error = ioctl(p, (struct ioctl_args *)args);
1390 #endif /* DRM_LINUX */
/*
 * drm_core_init (BSD) - SYSINIT hook: register the Linux-ioctl shim when
 * compiled in and announce the core version.
 */
1393 drm_core_init(void *arg)
1399 	linux_ioctl_register_handler(&drm_handler);
1400 #endif /* DRM_LINUX */
1402 	DRM_INFO("Initialized %s %d.%d.%d %s\n",
1403 	    CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
/* drm_core_exit (BSD) - SYSUNINIT hook: unregister the shim, drop globals. */
1408 drm_core_exit(void *arg)
1412 	linux_ioctl_unregister_handler(&drm_handler);
1413 #endif /* DRM_LINUX */
1415 	drm_global_release();
1418 SYSINIT(drm_register, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1419     drm_core_init, NULL);
1420 SYSUNINIT(drm_unregister, SI_SUB_DRIVERS, SI_ORDER_MIDDLE,
1421     drm_core_exit, NULL);
1424 #include <linux/dmi.h>
1427 * Check if dmi_system_id structure matches system DMI data
/*
 * dmi_found - compare one dmi_system_id entry against the smbios kenv
 * strings; every non-empty match slot must hit.  Intermediate
 * match/mismatch bookkeeping lines are elided in this fragment; the tail
 * frees all fetched kenv strings.
 */
1430 dmi_found(const struct dmi_system_id *dsi)
1434 	char *sys_vendor, *board_vendor, *product_name, *board_name;
1436 	sys_vendor = kgetenv("smbios.system.maker");
1437 	board_vendor = kgetenv("smbios.planar.maker");
1438 	product_name = kgetenv("smbios.system.product");
1439 	board_name = kgetenv("smbios.planar.product");
1441 	for (i = 0; i < NELEM(dsi->matches); i++) {
1442 		slot = dsi->matches[i].slot;
1446 		case DMI_SYS_VENDOR:
1447 			if (sys_vendor != NULL &&
1448 			    !strcmp(sys_vendor, dsi->matches[i].substr))
1452 		case DMI_BOARD_VENDOR:
1453 			if (board_vendor != NULL &&
1454 			    !strcmp(board_vendor, dsi->matches[i].substr))
1458 		case DMI_PRODUCT_NAME:
1459 			if (product_name != NULL &&
1460 			    !strcmp(product_name, dsi->matches[i].substr))
1464 		case DMI_BOARD_NAME:
1465 			if (board_name != NULL &&
1466 			    !strcmp(board_name, dsi->matches[i].substr))
/* release kenv strings fetched above */
1477 	if (sys_vendor != NULL)
1478 		kfreeenv(sys_vendor);
1479 	if (board_vendor != NULL)
1480 		kfreeenv(board_vendor);
1481 	if (product_name != NULL)
1482 		kfreeenv(product_name);
1483 	if (board_name != NULL)
1484 		kfreeenv(board_name);
1489 int dmi_check_system(const struct dmi_system_id *sysid)
1491 const struct dmi_system_id *dsi;
1494 for (dsi = sysid; dsi->matches[0].slot != 0 ; dsi++) {
1495 if (dmi_found(dsi)) {
1497 if (dsi->callback && dsi->callback(dsi))