drm/i915: Update to Linux 3.18
author    François Tigeot <ftigeot@wolfpond.org>
Sat, 24 Oct 2015 09:48:26 +0000 (11:48 +0200)
committer François Tigeot <ftigeot@wolfpond.org>
Sat, 24 Oct 2015 09:55:23 +0000 (11:55 +0200)
* Revamped, more robust vblank handling

* More paranoid pageflips, with added error detection and recovery logic

* Lots of Cherryview improvements, including runtime power management
  and better eDP panel support

* Lots of Broadwell improvements. GPU commands can now be submitted via
  a new execlist mechanism (a rough sketch of the idea follows this list)

* Preparatory work for Skylake support
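
The execlist change is the most invasive one: instead of feeding a single
global ring buffer per engine, each GEM context carries its own ring, and
the driver hands the hardware up to two contexts at a time through a submit
port (the ELSP register on Gen8). The following userspace model is only a
rough sketch of that queueing idea; every name in it is hypothetical and
none of it is actual i915 code.

/*
 * Rough userspace model of execlist-style submission -- illustration only.
 * Each context owns its own ring of commands; the "driver" fills a
 * two-entry submit port, as an ELSP write would on real Gen8 hardware.
 */
#include <stdio.h>

#define SUBMIT_SLOTS 2          /* Gen8 ELSP takes two context descriptors */

struct model_ctx {
        int id;                 /* context identifier */
        int pending;            /* commands waiting in this context's ring */
};

/* Pick the next context with pending work, round-robin from *cursor. */
static struct model_ctx *next_runnable(struct model_ctx *ctxs, int n,
                                       int *cursor)
{
        for (int i = 0; i < n; i++) {
                struct model_ctx *c = &ctxs[(*cursor + i) % n];

                if (c->pending) {
                        *cursor = (*cursor + i + 1) % n;
                        return c;
                }
        }
        return NULL;
}

int main(void)
{
        struct model_ctx ctxs[] = { { 1, 4 }, { 2, 0 }, { 3, 2 } };
        int cursor = 0;

        /* One "ELSP write": fill both slots with runnable contexts. */
        for (int slot = 0; slot < SUBMIT_SLOTS; slot++) {
                struct model_ctx *c = next_runnable(ctxs, 3, &cursor);

                if (c)
                        printf("slot %d <- context %d (%d cmds)\n",
                               slot, c->id, c->pending);
        }
        return 0;
}

A real driver refills a drained slot from its run queue whenever the
hardware signals a context switch.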

96 files changed:
sys/dev/drm/drm/Makefile
sys/dev/drm/drm_agpsupport.c
sys/dev/drm/drm_auth.c
sys/dev/drm/drm_bufs.c
sys/dev/drm/drm_crtc.c
sys/dev/drm/drm_dma.c
sys/dev/drm/drm_dp_helper.c
sys/dev/drm/drm_dragonfly.c
sys/dev/drm/drm_drv.c
sys/dev/drm/drm_edid.c
sys/dev/drm/drm_fb_helper.c
sys/dev/drm/drm_fops.c
sys/dev/drm/drm_gem.c
sys/dev/drm/drm_internal.h [new file with mode: 0644]
sys/dev/drm/drm_ioctl.c
sys/dev/drm/drm_irq.c
sys/dev/drm/drm_legacy.h
sys/dev/drm/drm_memory.c
sys/dev/drm/drm_modes.c
sys/dev/drm/drm_modeset_lock.c
sys/dev/drm/drm_pci.c
sys/dev/drm/drm_probe_helper.c
sys/dev/drm/drm_rect.c
sys/dev/drm/drm_scatter.c
sys/dev/drm/drm_vm.c
sys/dev/drm/i915/Makefile
sys/dev/drm/i915/dvo_ns2501.c
sys/dev/drm/i915/i915_cmd_parser.c
sys/dev/drm/i915/i915_dma.c
sys/dev/drm/i915/i915_drv.c
sys/dev/drm/i915/i915_drv.h
sys/dev/drm/i915/i915_gem.c
sys/dev/drm/i915/i915_gem_context.c
sys/dev/drm/i915/i915_gem_evict.c
sys/dev/drm/i915/i915_gem_execbuffer.c
sys/dev/drm/i915/i915_gem_gtt.c
sys/dev/drm/i915/i915_gem_gtt.h
sys/dev/drm/i915/i915_gem_render_state.c
sys/dev/drm/i915/i915_gem_render_state.h [copied from sys/dev/drm/i915/intel_renderstate.h with 70% similarity]
sys/dev/drm/i915/i915_gem_stolen.c
sys/dev/drm/i915/i915_gem_tiling.c
sys/dev/drm/i915/i915_gem_userptr.c
sys/dev/drm/i915/i915_irq.c
sys/dev/drm/i915/i915_params.c
sys/dev/drm/i915/i915_reg.h
sys/dev/drm/i915/intel_bios.c
sys/dev/drm/i915/intel_bios.h
sys/dev/drm/i915/intel_ddi.c
sys/dev/drm/i915/intel_display.c
sys/dev/drm/i915/intel_dp.c
sys/dev/drm/i915/intel_dp_mst.c
sys/dev/drm/i915/intel_drv.h
sys/dev/drm/i915/intel_dsi.c
sys/dev/drm/i915/intel_dsi.h
sys/dev/drm/i915/intel_dsi_cmd.c
sys/dev/drm/i915/intel_dsi_panel_vbt.c
sys/dev/drm/i915/intel_dsi_pll.c
sys/dev/drm/i915/intel_dvo.c
sys/dev/drm/i915/intel_fbdev.c
sys/dev/drm/i915/intel_hdmi.c
sys/dev/drm/i915/intel_lrc.c [new file with mode: 0644]
sys/dev/drm/i915/intel_lrc.h [new file with mode: 0644]
sys/dev/drm/i915/intel_lvds.c
sys/dev/drm/i915/intel_panel.c
sys/dev/drm/i915/intel_pm.c
sys/dev/drm/i915/intel_renderstate.h
sys/dev/drm/i915/intel_ringbuffer.c
sys/dev/drm/i915/intel_ringbuffer.h
sys/dev/drm/i915/intel_sprite.c
sys/dev/drm/i915/intel_uncore.c
sys/dev/drm/include/asm/cacheflush.h
sys/dev/drm/include/drm/drmP.h
sys/dev/drm/include/drm/drm_agpsupport.h [new file with mode: 0644]
sys/dev/drm/include/drm/drm_crtc.h
sys/dev/drm/include/drm/drm_dp_helper.h
sys/dev/drm/include/drm/drm_fb_helper.h
sys/dev/drm/include/drm/drm_gem.h [new file with mode: 0644]
sys/dev/drm/include/drm/drm_legacy.h [new file with mode: 0644]
sys/dev/drm/include/drm/drm_memory.h [deleted file]
sys/dev/drm/include/drm/drm_modeset_lock.h
sys/dev/drm/include/linux/backlight.h
sys/dev/drm/include/linux/fb.h
sys/dev/drm/include/uapi_drm/radeon_drm.h
sys/dev/drm/include/uapi_linux/fb.h
sys/dev/drm/radeon/Makefile
sys/dev/drm/radeon/atombios_dp.c
sys/dev/drm/radeon/drm_buffer.c [moved from sys/dev/drm/drm_buffer.c with 99% similarity]
sys/dev/drm/radeon/drm_buffer.h [moved from sys/dev/drm/include/drm/drm_buffer.h with 100% similarity]
sys/dev/drm/radeon/r300_cmdbuf.c
sys/dev/drm/radeon/radeon.h
sys/dev/drm/radeon/radeon_cp.c
sys/dev/drm/radeon/radeon_display.c
sys/dev/drm/radeon/radeon_drv.c
sys/dev/drm/radeon/radeon_drv.h
sys/dev/drm/radeon/radeon_pm.c
sys/dev/drm/radeon/radeon_state.c

sys/dev/drm/drm/Makefile
index fac5190..9df50f3 100644
@@ -5,7 +5,6 @@ SRCS    = \
        drm_agpsupport.c \
        drm_auth.c \
        drm_bufs.c \
-       drm_buffer.c \
        drm_cache.c \
        drm_context.c \
        drm_crtc.c \
sys/dev/drm/drm_agpsupport.c
index be2f6d4..6ab2331 100644
@@ -35,6 +35,8 @@
  */
 
 #include <drm/drmP.h>
+#include <linux/module.h>
+#include "drm_legacy.h"
 
 #include <dev/agp/agpreg.h>
 #include <bus/pci/pcireg.h>
@@ -341,10 +343,10 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
        return retcode;
 }
 
-drm_agp_head_t *drm_agp_init(void)
+struct drm_agp_head *drm_agp_init(void)
 {
        device_t agpdev;
-       drm_agp_head_t *head   = NULL;
+       struct drm_agp_head *head = NULL;
        int      agp_available = 1;
    
        agpdev = DRM_AGP_FIND_DEVICE();
sys/dev/drm/drm_auth.c
index 78a62ac..d81f20f 100644
 static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
 {
        struct drm_file *retval = NULL;
-       drm_magic_entry_t *pt;
+       struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
 
        mutex_lock(&dev->struct_mutex);
        if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
-               pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+               pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
                retval = pt->priv;
        }
        mutex_unlock(&dev->struct_mutex);
@@ -68,7 +68,7 @@ static struct drm_file *drm_find_file(struct drm_device *dev, drm_magic_t magic)
 static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
                         drm_magic_t magic)
 {
-       drm_magic_entry_t *entry;
+       struct drm_magic_entry *entry;
 
        DRM_DEBUG("%d\n", magic);
 
@@ -91,7 +91,7 @@ static int drm_add_magic(struct drm_device *dev, struct drm_file *priv,
  */
 static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
 {
-       drm_magic_entry_t *pt;
+       struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
 
        DRM_DEBUG("%d\n", magic);
@@ -101,7 +101,7 @@ static int drm_remove_magic(struct drm_device *dev, drm_magic_t magic)
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }
-       pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+       pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
        drm_ht_remove_item(&dev->magiclist, hash);
        list_del(&pt->head);
        mutex_unlock(&dev->struct_mutex);
sys/dev/drm/drm_bufs.c
index e7cc0a2..eefb358 100644
@@ -1,18 +1,13 @@
-/**
- * \file drm_bufs.c
- * Generic buffer template
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
 /*
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ * Legacy: Generic DRM Buffer Management
  *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
@@ -31,8 +26,6 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * $FreeBSD: src/sys/dev/drm2/drm_bufs.c,v 1.1 2012/05/22 11:07:44 kib Exp $
  */
 
 #include <sys/conf.h>
 #include <linux/types.h>
 #include <linux/export.h>
 #include <drm/drmP.h>
+#include "drm_legacy.h"
 
-/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
- * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
- * address for accessing them.  Cleaned up at unload.
- */
-static int drm_alloc_resource(struct drm_device *dev, int resource)
-{
-       struct resource *res;
-       int rid;
-
-       DRM_LOCK_ASSERT(dev);
-
-       if (resource >= DRM_MAX_PCI_RESOURCE) {
-               DRM_ERROR("Resource %d too large\n", resource);
-               return 1;
-       }
-
-       if (dev->pcir[resource] != NULL) {
-               return 0;
-       }
-
-       DRM_UNLOCK(dev);
-       rid = PCIR_BAR(resource);
-       res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
-           RF_SHAREABLE);
-       DRM_LOCK(dev);
-       if (res == NULL) {
-               DRM_ERROR("Couldn't find resource 0x%x\n", resource);
-               return 1;
-       }
-
-       if (dev->pcir[resource] == NULL) {
-               dev->pcirid[resource] = rid;
-               dev->pcir[resource] = res;
-       }
-
-       return 0;
-}
-
-unsigned long drm_get_resource_start(struct drm_device *dev,
-                                    unsigned int resource)
-{
-       if (drm_alloc_resource(dev, resource) != 0)
-               return 0;
-
-       return rman_get_start(dev->pcir[resource]);
-}
-
-unsigned long drm_get_resource_len(struct drm_device *dev,
-                                  unsigned int resource)
-{
-       if (drm_alloc_resource(dev, resource) != 0)
-               return 0;
-
-       return rman_get_size(dev->pcir[resource]);
-}
-
-int drm_addmap(struct drm_device * dev, resource_size_t offset,
-              unsigned int size, enum drm_map_type type,
-              enum drm_map_flags flags, struct drm_local_map ** map_ptr)
+int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
+                     unsigned int size, enum drm_map_type type,
+                     enum drm_map_flags flags, struct drm_local_map **map_ptr)
 {
        struct drm_local_map *map;
        struct drm_map_list *entry = NULL;
@@ -269,8 +207,8 @@ done:
  * \return zero on success or a negative value on error.
  *
  */
-int drm_addmap_ioctl(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
+int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_priv)
 {
        struct drm_map *request = data;
        drm_local_map_t *map;
@@ -283,7 +221,7 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
                return -EACCES;
 
        DRM_LOCK(dev);
-       err = drm_addmap(dev, request->offset, request->size, request->type,
+       err = drm_legacy_addmap(dev, request->offset, request->size, request->type,
            request->flags, &map);
        DRM_UNLOCK(dev);
        if (err != 0)
@@ -299,29 +237,34 @@ int drm_addmap_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
+/**
+ * Remove a map private from the list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list, sees
+ * if it is being used, and frees any associated resources (such as MTRRs) if
+ * it is not in use.
+ *
+ * \sa drm_legacy_addmap
+ */
+int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 {
        struct drm_map_list *r_list = NULL, *list_t;
        drm_dma_handle_t dmah;
        int found = 0;
 
-       DRM_LOCK_ASSERT(dev);
-
-       if (map == NULL)
-               return;
-
        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
                        list_del(&r_list->head);
-                       drm_free(r_list, M_DRM);
+                       kfree(r_list);
                        found = 1;
                        break;
                }
        }
 
        if (!found)
-               return;
+               return -EINVAL;
 
        switch (map->type) {
        case _DRM_REGISTERS:
@@ -345,15 +288,26 @@ void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
-               drm_pci_free(dev, &dmah);
-               break;
-       default:
-               DRM_ERROR("Bad map type %d\n", map->type);
+               dmah.size = map->size;
+               __drm_legacy_pci_free(dev, &dmah);
                break;
        }
+       kfree(map);
+
+       return 0;
+}
+
+int drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
+{
+       int ret;
 
-       drm_free(map, M_DRM);
+       mutex_lock(&dev->struct_mutex);
+       ret = drm_legacy_rmmap_locked(dev, map);
+       mutex_unlock(&dev->struct_mutex);
+
+       return ret;
 }
+EXPORT_SYMBOL(drm_legacy_rmmap);
 
 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
  * the last close of the device, and this is necessary for cleanup when things
@@ -370,8 +324,8 @@ void drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
  * \param arg pointer to a struct drm_map structure.
  * \return zero on success or a negative value on error.
  */
-int drm_rmmap_ioctl(struct drm_device *dev, void *data,
-                   struct drm_file *file_priv)
+int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_priv)
 {
        struct drm_map *request = data;
        struct drm_local_map *map = NULL;
@@ -401,7 +355,7 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
                return 0;
        }
 
-       drm_rmmap(dev, map);
+       drm_legacy_rmmap(dev, map);
 
        DRM_UNLOCK(dev);
 
@@ -847,7 +801,7 @@ static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *reques
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+int drm_legacy_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 {
        int order, ret;
 
@@ -878,7 +832,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
        return ret;
 }
 
-static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+static int drm_legacy_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
 {
        int order, ret;
 
@@ -912,7 +866,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
        return ret;
 }
 
-int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+int drm_legacy_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 {
        int order, ret;
 
@@ -960,18 +914,18 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
  * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
  * PCI memory respectively.
  */
-int drm_addbufs(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+int drm_legacy_addbufs(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
 {
        struct drm_buf_desc *request = data;
        int err;
 
        if (request->flags & _DRM_AGP_BUFFER)
-               err = drm_addbufs_agp(dev, request);
+               err = drm_legacy_addbufs_agp(dev, request);
        else if (request->flags & _DRM_SG_BUFFER)
-               err = drm_addbufs_sg(dev, request);
+               err = drm_legacy_addbufs_sg(dev, request);
        else
-               err = drm_addbufs_pci(dev, request);
+               err = drm_legacy_addbufs_pci(dev, request);
 
        return err;
 }
@@ -993,8 +947,8 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
-int drm_infobufs(struct drm_device *dev, void *data,
-                struct drm_file *file_priv)
+int drm_legacy_infobufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_info *request = data;
@@ -1074,8 +1028,8 @@ int drm_infobufs(struct drm_device *dev, void *data,
  *
  * \note This ioctl is deprecated and mostly never used.
  */
-int drm_markbufs(struct drm_device *dev, void *data,
-                struct drm_file *file_priv)
+int drm_legacy_markbufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
        struct drm_device_dma *dma = dev->dma;
        struct drm_buf_desc *request = data;
@@ -1121,8 +1075,8 @@ int drm_markbufs(struct drm_device *dev, void *data,
  * Calls free_buffer() for each used buffer.
  * This function is primarily used for debugging.
  */
-int drm_freebufs(struct drm_device *dev, void *data,
-                struct drm_file *file_priv)
+int drm_legacy_freebufs(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
        drm_device_dma_t *dma = dev->dma;
        struct drm_buf_free *request = data;
@@ -1152,7 +1106,7 @@ int drm_freebufs(struct drm_device *dev, void *data,
                        retcode = -EINVAL;
                        break;
                }
-               drm_free_buffer(dev, buf);
+               drm_legacy_free_buffer(dev, buf);
        }
        spin_unlock(&dev->dma_lock);
 
@@ -1173,8 +1127,8 @@ int drm_freebufs(struct drm_device *dev, void *data,
  * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
  * drm_mmap_dma().
  */
-int drm_mapbufs(struct drm_device *dev, void *data,
-               struct drm_file *file_priv)
+int drm_legacy_mapbufs(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
 {
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
@@ -1250,3 +1204,29 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 
        return retcode;
 }
+
+int drm_legacy_dma_ioctl(struct drm_device *dev, void *data,
+                 struct drm_file *file_priv)
+{
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               return -EINVAL;
+
+       if (dev->driver->dma_ioctl)
+               return dev->driver->dma_ioctl(dev, data, file_priv);
+       else
+               return -EINVAL;
+}
+
+struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev)
+{
+       struct drm_map_list *entry;
+
+       list_for_each_entry(entry, &dev->maplist, head) {
+               if (entry->map && entry->map->type == _DRM_SHM &&
+                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+                       return entry->map;
+               }
+       }
+       return NULL;
+}
+EXPORT_SYMBOL(drm_legacy_getsarea);
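
The drm_legacy_rmmap()/drm_legacy_rmmap_locked() split above is the usual
locked/unlocked wrapper pattern: the _locked variant assumes the caller
already holds struct_mutex, while the plain variant takes and drops it. This
is what lets drm_master_destroy(), which already holds the mutex, call the
_locked variant directly later in this commit. A generic sketch of the
pattern, with hypothetical names and a pthread mutex standing in for the
kernel mutex:

#include <pthread.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold struct_mutex; does the real work. */
static int remove_map_locked(void *map)
{
        (void)map;              /* ... unlink and free the map ... */
        return 0;
}

/* Convenience wrapper for callers that do not hold the lock. */
int remove_map(void *map)
{
        int ret;

        pthread_mutex_lock(&struct_mutex);
        ret = remove_map_locked(map);
        pthread_mutex_unlock(&struct_mutex);
        return ret;
}
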
sys/dev/drm/drm_crtc.c
index fe4ccf4..4146779 100644
@@ -45,103 +45,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
                                                        struct drm_mode_fb_cmd2 *r,
                                                        struct drm_file *file_priv);
 
-/**
- * drm_modeset_lock_all - take all modeset locks
- * @dev: drm device
- *
- * This function takes all modeset locks, suitable where a more fine-grained
- * scheme isn't (yet) implemented. Locks must be dropped with
- * drm_modeset_unlock_all.
- */
-void drm_modeset_lock_all(struct drm_device *dev)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_modeset_acquire_ctx *ctx;
-       int ret;
-
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-       if (WARN_ON(!ctx))
-               return;
-
-       mutex_lock(&config->mutex);
-
-       drm_modeset_acquire_init(ctx, 0);
-
-retry:
-       ret = drm_modeset_lock(&config->connection_mutex, ctx);
-       if (ret)
-               goto fail;
-       ret = drm_modeset_lock_all_crtcs(dev, ctx);
-       if (ret)
-               goto fail;
-
-       WARN_ON(config->acquire_ctx);
-
-       /* now we hold the locks, so now that it is safe, stash the
-        * ctx for drm_modeset_unlock_all():
-        */
-       config->acquire_ctx = ctx;
-
-       drm_warn_on_modeset_not_all_locked(dev);
-
-       return;
-
-fail:
-       if (ret == -EDEADLK) {
-               drm_modeset_backoff(ctx);
-               goto retry;
-       }
-}
-EXPORT_SYMBOL(drm_modeset_lock_all);
-
-/**
- * drm_modeset_unlock_all - drop all modeset locks
- * @dev: device
- *
- * This function drop all modeset locks taken by drm_modeset_lock_all.
- */
-void drm_modeset_unlock_all(struct drm_device *dev)
-{
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
-
-       if (WARN_ON(!ctx))
-               return;
-
-       config->acquire_ctx = NULL;
-       drm_modeset_drop_locks(ctx);
-       drm_modeset_acquire_fini(ctx);
-
-       kfree(ctx);
-
-       mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_modeset_unlock_all);
-
-/**
- * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
- * @dev: device
- *
- * Useful as a debug assert.
- */
-void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
-{
-       struct drm_crtc *crtc;
-
-       /* Locking is currently fubar in the panic handler. */
-#if 0
-       if (oops_in_progress)
-               return;
-#endif
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
-       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-}
-EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
-
 /* Avoid boilerplate.  I'm tired of typing. */
 #define DRM_ENUM_NAME_FN(fnname, list)                         \
        const char *fnname(int val)                             \
@@ -501,9 +404,6 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
        if (ret)
                goto out;
 
-       /* Grab the idr reference. */
-       drm_framebuffer_reference(fb);
-
        dev->mode_config.num_fb++;
        list_add(&fb->head, &dev->mode_config.fb_list);
 out:
@@ -513,10 +413,34 @@ out:
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
 
+/* dev->mode_config.fb_lock must be held! */
+static void __drm_framebuffer_unregister(struct drm_device *dev,
+                                        struct drm_framebuffer *fb)
+{
+       mutex_lock(&dev->mode_config.idr_mutex);
+       idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
+       mutex_unlock(&dev->mode_config.idr_mutex);
+
+       fb->base.id = 0;
+}
+
 static void drm_framebuffer_free(struct kref *kref)
 {
        struct drm_framebuffer *fb =
                        container_of(kref, struct drm_framebuffer, refcount);
+       struct drm_device *dev = fb->dev;
+
+       /*
+        * The lookup idr holds a weak reference, which has not necessarily been
+        * removed at this point. Check for that.
+        */
+       mutex_lock(&dev->mode_config.fb_lock);
+       if (fb->base.id) {
+               /* Mark fb as reaped and drop idr ref. */
+               __drm_framebuffer_unregister(dev, fb);
+       }
+       mutex_unlock(&dev->mode_config.fb_lock);
+
        fb->funcs->destroy(fb);
 }
 
@@ -553,8 +477,10 @@ struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
 
        mutex_lock(&dev->mode_config.fb_lock);
        fb = __drm_framebuffer_lookup(dev, id);
-       if (fb)
-               drm_framebuffer_reference(fb);
+       if (fb) {
+               if (!kref_get_unless_zero(&fb->refcount))
+                       fb = NULL;
+       }
        mutex_unlock(&dev->mode_config.fb_lock);
 
        return fb;
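
The switch from drm_framebuffer_reference() to kref_get_unless_zero() above
is the standard weak-reference lookup pattern: once the idr stops holding a
strong reference, a lookup can race against the final unref and must refuse
to revive a framebuffer whose refcount already reached zero. A minimal C11
model of that primitive, with hypothetical names (this is not the kernel's
kref code):

#include <stdatomic.h>
#include <stddef.h>

struct fb_obj {
        atomic_int refcount;    /* stands in for struct kref */
};

/* Take a reference only while the count is still non-zero. */
static struct fb_obj *fb_get_unless_zero(struct fb_obj *fb)
{
        int old = atomic_load(&fb->refcount);

        while (old != 0) {
                /* on failure, 'old' is reloaded with the current count */
                if (atomic_compare_exchange_weak(&fb->refcount, &old, old + 1))
                        return fb;      /* reference taken */
        }
        return NULL;                    /* object already dying */
}
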
@@ -598,19 +524,6 @@ static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
        kref_put(&fb->refcount, drm_framebuffer_free_bug);
 }
 
-/* dev->mode_config.fb_lock must be held! */
-static void __drm_framebuffer_unregister(struct drm_device *dev,
-                                        struct drm_framebuffer *fb)
-{
-       mutex_lock(&dev->mode_config.idr_mutex);
-       idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
-       mutex_unlock(&dev->mode_config.idr_mutex);
-
-       fb->base.id = 0;
-
-       __drm_framebuffer_unreference(fb);
-}
-
 /**
  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
  * @fb: fb to unregister
@@ -750,11 +663,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        crtc->funcs = funcs;
        crtc->invert_dimensions = false;
 
-       drm_modeset_lock_all(dev);
        drm_modeset_lock_init(&crtc->mutex);
-       /* dropped by _unlock_all(): */
-       drm_modeset_lock(&crtc->mutex, config->acquire_ctx);
-
        ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
        if (ret)
                goto out;
@@ -772,7 +681,6 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
                cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
 
  out:
-       drm_modeset_unlock_all(dev);
 
        return ret;
 }
@@ -838,6 +746,59 @@ static void drm_mode_remove(struct drm_connector *connector,
        drm_mode_destroy(connector->dev, mode);
 }
 
+/**
+ * drm_connector_get_cmdline_mode - reads the user's cmdline mode
+ * @connector: connector to query
+ *
+ * The kernel supports per-connector configuration of its consoles through
+ * use of the video= parameter. This function parses that option and
+ * extracts the user's specified mode (or enable/disable status) for a
+ * particular connector. This is typically only used during the early fbdev
+ * setup.
+ */
+static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
+{
+       struct drm_cmdline_mode *mode = &connector->cmdline_mode;
+       char *option = NULL;
+
+       if (fb_get_options(connector->name, &option))
+               return;
+
+       if (!drm_mode_parse_command_line_for_connector(option,
+                                                      connector,
+                                                      mode))
+               return;
+
+       if (mode->force) {
+               const char *s;
+
+               switch (mode->force) {
+               case DRM_FORCE_OFF:
+                       s = "OFF";
+                       break;
+               case DRM_FORCE_ON_DIGITAL:
+                       s = "ON - dig";
+                       break;
+               default:
+               case DRM_FORCE_ON:
+                       s = "ON";
+                       break;
+               }
+
+               DRM_INFO("forcing %s connector %s\n", connector->name, s);
+               connector->force = mode->force;
+       }
+
+       DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
+                     connector->name,
+                     mode->xres, mode->yres,
+                     mode->refresh_specified ? mode->refresh : 60,
+                     mode->rb ? " reduced blanking" : "",
+                     mode->margins ? " with margins" : "",
+                     mode->interlace ?  " interlaced" : "");
+}
+
 /**
  * drm_connector_init - Init a preallocated connector
  * @dev: DRM device
@@ -888,6 +849,8 @@ int drm_connector_init(struct drm_device *dev,
        connector->edid_blob_ptr = NULL;
        connector->status = connector_status_unknown;
 
+       drm_connector_get_cmdline_mode(connector);
+
        list_add_tail(&connector->head, &dev->mode_config.connector_list);
        dev->mode_config.num_connector++;
 
@@ -937,6 +900,29 @@ void drm_connector_cleanup(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_connector_cleanup);
 
+/**
+ * drm_connector_index - find the index of a registered connector
+ * @connector: connector to find index for
+ *
+ * Given a registered connector, return the index of that connector within a DRM
+ * device's list of connectors.
+ */
+unsigned int drm_connector_index(struct drm_connector *connector)
+{
+       unsigned int index = 0;
+       struct drm_connector *tmp;
+
+       list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
+               if (tmp == connector)
+                       return index;
+
+               index++;
+       }
+
+       BUG();
+}
+EXPORT_SYMBOL(drm_connector_index);
+
 /**
  * drm_connector_register - register a connector
  * @connector: the connector to register
@@ -1243,6 +1229,29 @@ void drm_plane_cleanup(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_plane_cleanup);
 
+/**
+ * drm_plane_index - find the index of a registered plane
+ * @plane: plane to find index for
+ *
+ * Given a registered plane, return the index of that plane within a DRM
+ * device's list of planes.
+ */
+unsigned int drm_plane_index(struct drm_plane *plane)
+{
+       unsigned int index = 0;
+       struct drm_plane *tmp;
+
+       list_for_each_entry(tmp, &plane->dev->mode_config.plane_list, head) {
+               if (tmp == plane)
+                       return index;
+
+               index++;
+       }
+
+       BUG();
+}
+EXPORT_SYMBOL(drm_plane_index);
+
 /**
  * drm_plane_force_disable - Forcibly disable a plane
  * @plane: plane to disable
@@ -1254,19 +1263,21 @@ EXPORT_SYMBOL(drm_plane_cleanup);
  */
 void drm_plane_force_disable(struct drm_plane *plane)
 {
-       struct drm_framebuffer *old_fb = plane->fb;
        int ret;
 
-       if (!old_fb)
+       if (!plane->fb)
                return;
 
+       plane->old_fb = plane->fb;
        ret = plane->funcs->disable_plane(plane);
        if (ret) {
                DRM_ERROR("failed to disable plane with busy fb\n");
+               plane->old_fb = NULL;
                return;
        }
        /* disconnect the plane from the fb and crtc: */
-       __drm_framebuffer_unreference(old_fb);
+       __drm_framebuffer_unreference(plane->old_fb);
+       plane->old_fb = NULL;
        plane->fb = NULL;
        plane->crtc = NULL;
 }
@@ -2231,33 +2242,29 @@ out:
  *
  * src_{x,y,w,h} are provided in 16.16 fixed point format
  */
-static int setplane_internal(struct drm_plane *plane,
-                            struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb,
-                            int32_t crtc_x, int32_t crtc_y,
-                            uint32_t crtc_w, uint32_t crtc_h,
-                            /* src_{x,y,w,h} values are 16.16 fixed point */
-                            uint32_t src_x, uint32_t src_y,
-                            uint32_t src_w, uint32_t src_h)
+static int __setplane_internal(struct drm_plane *plane,
+                              struct drm_crtc *crtc,
+                              struct drm_framebuffer *fb,
+                              int32_t crtc_x, int32_t crtc_y,
+                              uint32_t crtc_w, uint32_t crtc_h,
+                              /* src_{x,y,w,h} values are 16.16 fixed point */
+                              uint32_t src_x, uint32_t src_y,
+                              uint32_t src_w, uint32_t src_h)
 {
-       struct drm_device *dev = plane->dev;
-       struct drm_framebuffer *old_fb = NULL;
        int ret = 0;
        unsigned int fb_width, fb_height;
        int i;
 
        /* No fb means shut it down */
        if (!fb) {
-               drm_modeset_lock_all(dev);
-               old_fb = plane->fb;
+               plane->old_fb = plane->fb;
                ret = plane->funcs->disable_plane(plane);
                if (!ret) {
                        plane->crtc = NULL;
                        plane->fb = NULL;
                } else {
-                       old_fb = NULL;
+                       plane->old_fb = NULL;
                }
-               drm_modeset_unlock_all(dev);
                goto out;
        }
 
@@ -2297,8 +2304,7 @@ static int setplane_internal(struct drm_plane *plane,
                goto out;
        }
 
-       drm_modeset_lock_all(dev);
-       old_fb = plane->fb;
+       plane->old_fb = plane->fb;
        ret = plane->funcs->update_plane(plane, crtc, fb,
                                         crtc_x, crtc_y, crtc_w, crtc_h,
                                         src_x, src_y, src_w, src_h);
@@ -2307,18 +2313,37 @@ static int setplane_internal(struct drm_plane *plane,
                plane->fb = fb;
                fb = NULL;
        } else {
-               old_fb = NULL;
+               plane->old_fb = NULL;
        }
-       drm_modeset_unlock_all(dev);
 
 out:
        if (fb)
                drm_framebuffer_unreference(fb);
-       if (old_fb)
-               drm_framebuffer_unreference(old_fb);
+       if (plane->old_fb)
+               drm_framebuffer_unreference(plane->old_fb);
+       plane->old_fb = NULL;
 
        return ret;
+}
 
+static int setplane_internal(struct drm_plane *plane,
+                            struct drm_crtc *crtc,
+                            struct drm_framebuffer *fb,
+                            int32_t crtc_x, int32_t crtc_y,
+                            uint32_t crtc_w, uint32_t crtc_h,
+                            /* src_{x,y,w,h} values are 16.16 fixed point */
+                            uint32_t src_x, uint32_t src_y,
+                            uint32_t src_w, uint32_t src_h)
+{
+       int ret;
+
+       drm_modeset_lock_all(plane->dev);
+       ret = __setplane_internal(plane, crtc, fb,
+                                 crtc_x, crtc_y, crtc_w, crtc_h,
+                                 src_x, src_y, src_w, src_h);
+       drm_modeset_unlock_all(plane->dev);
+
+       return ret;
 }
 
 /**
@@ -2422,7 +2447,7 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
         * crtcs. Atomic modeset will have saner semantics ...
         */
        list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head)
-               tmp->old_fb = tmp->primary->fb;
+               tmp->primary->old_fb = tmp->primary->fb;
 
        fb = set->fb;
 
@@ -2435,8 +2460,9 @@ int drm_mode_set_config_internal(struct drm_mode_set *set)
        list_for_each_entry(tmp, &crtc->dev->mode_config.crtc_list, head) {
                if (tmp->primary->fb)
                        drm_framebuffer_reference(tmp->primary->fb);
-               if (tmp->old_fb)
-                       drm_framebuffer_unreference(tmp->old_fb);
+               if (tmp->primary->old_fb)
+                       drm_framebuffer_unreference(tmp->primary->old_fb);
+               tmp->primary->old_fb = NULL;
        }
 
        return ret;
@@ -2683,6 +2709,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
        int ret = 0;
 
        BUG_ON(!crtc->cursor);
+       WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL);
 
        /*
         * Obtain fb we'll be using (either new or existing) and take an extra
@@ -2702,11 +2729,9 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
                        fb = NULL;
                }
        } else {
-               mutex_lock(&dev->mode_config.mutex);
                fb = crtc->cursor->fb;
                if (fb)
                        drm_framebuffer_reference(fb);
-               mutex_unlock(&dev->mode_config.mutex);
        }
 
        if (req->flags & DRM_MODE_CURSOR_MOVE) {
@@ -2728,7 +2753,7 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
         * setplane_internal will take care of deref'ing either the old or new
         * framebuffer depending on success.
         */
-       ret = setplane_internal(crtc->cursor, crtc, fb,
+       ret = __setplane_internal(crtc->cursor, crtc, fb,
                                crtc_x, crtc_y, crtc_w, crtc_h,
                                0, 0, src_w, src_h);
 
@@ -2764,10 +2789,12 @@ static int drm_mode_cursor_common(struct drm_device *dev,
         * If this crtc has a universal cursor plane, call that plane's update
         * handler rather than using legacy cursor handlers.
         */
-       if (crtc->cursor)
-               return drm_mode_cursor_universal(crtc, req, file_priv);
+       drm_modeset_lock_crtc(crtc);
+       if (crtc->cursor) {
+               ret = drm_mode_cursor_universal(crtc, req, file_priv);
+               goto out;
+       }
 
-       drm_modeset_lock(&crtc->mutex, NULL);
        if (req->flags & DRM_MODE_CURSOR_BO) {
                if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) {
                        ret = -ENXIO;
@@ -2791,7 +2818,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
                }
        }
 out:
-       drm_modeset_unlock(&crtc->mutex);
+       drm_modeset_unlock_crtc(crtc);
 
        return ret;
 
@@ -3340,7 +3367,16 @@ void drm_fb_release(struct drm_file *priv)
        struct drm_device *dev = priv->dev;
        struct drm_framebuffer *fb, *tfb;
 
-       mutex_lock(&priv->fbs_lock);
+       /*
+        * When the file gets released that means no one else can access the fb
+        * list any more, so no need to grab fpriv->fbs_lock. And we need to
+        * avoid upsetting lockdep since the universal cursor code adds a
+        * framebuffer while holding mutex locks.
+        *
+        * Note that a real deadlock between fpriv->fbs_lock and the modeset
+        * locks is impossible here since no one else but this function can get
+        * at it any more.
+        */
        list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
 
                mutex_lock(&dev->mode_config.fb_lock);
@@ -3353,7 +3389,6 @@ void drm_fb_release(struct drm_file *priv)
                /* This will also drop the fpriv->fbs reference. */
                drm_framebuffer_remove(fb);
        }
-       mutex_unlock(&priv->fbs_lock);
 }
 
 /**
@@ -3465,9 +3500,10 @@ EXPORT_SYMBOL(drm_property_create_enum);
  * @flags: flags specifying the property type
  * @name: name of the property
  * @props: enumeration lists with property bitflags
- * @num_values: number of pre-defined values
+ * @num_props: size of the @props array
+ * @supported_bits: bitmask of all supported enumeration values
  *
- * This creates a new generic drm property which can then be attached to a drm
+ * This creates a new bitmask drm property which can then be attached to a drm
  * object with drm_object_attach_property. The returned property object must be
  * freed with drm_property_destroy.
  *
@@ -3480,19 +3516,28 @@ EXPORT_SYMBOL(drm_property_create_enum);
 struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
                                         int flags, const char *name,
                                         const struct drm_prop_enum_list *props,
-                                        int num_values)
+                                        int num_props,
+                                        uint64_t supported_bits)
 {
        struct drm_property *property;
-       int i, ret;
+       int i, ret, index = 0;
+       int num_values = hweight64(supported_bits);
 
        flags |= DRM_MODE_PROP_BITMASK;
 
        property = drm_property_create(dev, flags, name, num_values);
        if (!property)
                return NULL;
+       for (i = 0; i < num_props; i++) {
+               if (!(supported_bits & (1ULL << props[i].type)))
+                       continue;
 
-       for (i = 0; i < num_values; i++) {
-               ret = drm_property_add_enum(property, i,
+               if (WARN_ON(index >= num_values)) {
+                       drm_property_destroy(dev, property);
+                       return NULL;
+               }
+
+               ret = drm_property_add_enum(property, index++,
                                      props[i].type,
                                      props[i].name);
                if (ret) {
@@ -4118,12 +4163,25 @@ static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
        return ret;
 }
 
-static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
-                                     struct drm_property *property,
-                                     uint64_t value)
+/**
+ * drm_mode_plane_set_obj_prop - set the value of a property
+ * @plane: drm plane object to set property value for
+ * @property: property to set
+ * @value: value the property should be set to
+ *
+ * This function sets a given property on a given plane object. This function
+ * calls the driver's ->set_property callback and changes the software state of
+ * the property if the callback succeeds.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
+                               struct drm_property *property,
+                               uint64_t value)
 {
        int ret = -EINVAL;
-       struct drm_plane *plane = obj_to_plane(obj);
+       struct drm_mode_object *obj = &plane->base;
 
        if (plane->funcs->set_property)
                ret = plane->funcs->set_property(plane, property, value);
@@ -4132,6 +4190,7 @@ static int drm_mode_plane_set_obj_prop(struct drm_mode_object *obj,
 
        return ret;
 }
+EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
 
 /**
  * drm_mode_getproperty_ioctl - get the current value of a object's property
@@ -4270,7 +4329,8 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
                ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
                break;
        case DRM_MODE_OBJECT_PLANE:
-               ret = drm_mode_plane_set_obj_prop(arg_obj, property, arg->value);
+               ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
+                                                 property, arg->value);
                break;
        }
 
@@ -4499,7 +4559,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 {
        struct drm_mode_crtc_page_flip *page_flip = data;
        struct drm_crtc *crtc;
-       struct drm_framebuffer *fb = NULL, *old_fb = NULL;
+       struct drm_framebuffer *fb = NULL;
        struct drm_pending_vblank_event *e = NULL;
        int ret = -EINVAL;
 
@@ -4514,7 +4574,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        if (!crtc)
                return -ENOENT;
 
-       drm_modeset_lock(&crtc->mutex, NULL);
+       drm_modeset_lock_crtc(crtc);
        if (crtc->primary->fb == NULL) {
                /* The framebuffer is currently unbound, presumably
                 * due to a hotplug event, that userspace has not
@@ -4575,7 +4635,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 #endif
        }
 
-       old_fb = crtc->primary->fb;
+       crtc->primary->old_fb = crtc->primary->fb;
        ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
        if (ret) {
                if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -4585,7 +4645,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
                        kfree(e);
                }
                /* Keep the old fb, don't unref it. */
-               old_fb = NULL;
+               crtc->primary->old_fb = NULL;
        } else {
                /*
                 * Warn if the driver hasn't properly updated the crtc->fb
@@ -4601,9 +4661,10 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 out:
        if (fb)
                drm_framebuffer_unreference(fb);
-       if (old_fb)
-               drm_framebuffer_unreference(old_fb);
-       drm_modeset_unlock(&crtc->mutex);
+       if (crtc->primary->old_fb)
+               drm_framebuffer_unreference(crtc->primary->old_fb);
+       crtc->primary->old_fb = NULL;
+       drm_modeset_unlock_crtc(crtc);
 
        return ret;
 }
@@ -4619,9 +4680,14 @@ out:
 void drm_mode_config_reset(struct drm_device *dev)
 {
        struct drm_crtc *crtc;
+       struct drm_plane *plane;
        struct drm_encoder *encoder;
        struct drm_connector *connector;
 
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+               if (plane->funcs->reset)
+                       plane->funcs->reset(plane);
+
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
                if (crtc->funcs->reset)
                        crtc->funcs->reset(crtc);
@@ -5072,3 +5138,21 @@ void drm_mode_config_cleanup(struct drm_device *dev)
        drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
+                                                      unsigned int supported_rotations)
+{
+       static const struct drm_prop_enum_list props[] = {
+               { DRM_ROTATE_0,   "rotate-0" },
+               { DRM_ROTATE_90,  "rotate-90" },
+               { DRM_ROTATE_180, "rotate-180" },
+               { DRM_ROTATE_270, "rotate-270" },
+               { DRM_REFLECT_X,  "reflect-x" },
+               { DRM_REFLECT_Y,  "reflect-y" },
+       };
+
+       return drm_property_create_bitmask(dev, 0, "rotation",
+                                          props, ARRAY_SIZE(props),
+                                          supported_rotations);
+}
+EXPORT_SYMBOL(drm_mode_create_rotation_property);
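
With the reworked drm_property_create_bitmask() above, num_props describes
the whole enum table while supported_bits selects the subset a driver
actually exposes, so all drivers can share one table. A hypothetical
driver-side use of the new rotation property might look like the snippet
below, assuming a dev and plane pointer in scope and a plane that only
supports 0 and 180 degree rotation; drm_object_attach_property() is the
existing attach helper, and none of this snippet is part of the commit.

/* Hypothetical driver code, not part of this diff. */
struct drm_property *prop;

prop = drm_mode_create_rotation_property(dev,
                (1 << DRM_ROTATE_0) | (1 << DRM_ROTATE_180));
if (prop)
        drm_object_attach_property(&plane->base, prop,
                                   1 << DRM_ROTATE_0);
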
sys/dev/drm/drm_dma.c
index ec980db..76caf38 100644
@@ -1,4 +1,14 @@
-/*-
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- *
- * $FreeBSD: head/sys/dev/drm2/drm_dma.c 235783 2012-05-22 11:07:44Z kib $
  */
 
-/** @file drm_dma.c
- * Support code for DMA buffer management.
+#include <linux/export.h>
+#include <drm/drmP.h>
+#include "drm_legacy.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
  *
- * The implementation used to be significantly more complicated, but the
- * complexity has been moved into the drivers as different buffer management
- * schemes evolved.
+ * Allocate and initialize a drm_device_dma structure.
  */
+int drm_legacy_dma_setup(struct drm_device *dev)
+{
 
-#include <drm/drmP.h>
+       if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+           drm_core_check_feature(dev, DRIVER_MODESET)) {
+               return 0;
+       }
 
-int drm_dma_setup(struct drm_device *dev)
-{
+       dev->buf_use = 0;
+       atomic_set(&dev->buf_alloc, 0);
 
-       dev->dma = kmalloc(sizeof(*dev->dma), M_DRM,
-                          M_WAITOK | M_NULLOK | M_ZERO);
-       if (dev->dma == NULL)
-               return ENOMEM;
+       dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
+       if (!dev->dma)
+               return -ENOMEM;
 
        spin_init(&dev->dma_lock, "drmdma_lock");
 
        return 0;
 }
 
-void drm_dma_takedown(struct drm_device *dev)
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_legacy_dma_takedown(struct drm_device *dev)
 {
        drm_device_dma_t  *dma = dev->dma;
        int               i, j;
@@ -89,7 +110,7 @@ void drm_dma_takedown(struct drm_device *dev)
 }
 
 
-void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
+void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
 {
        if (!buf)
                return;
@@ -99,7 +120,8 @@ void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf)
        buf->used     = 0;
 }
 
-void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
+void drm_legacy_reclaim_buffers(struct drm_device *dev,
+                               struct drm_file *file_priv)
 {
        drm_device_dma_t *dma = dev->dma;
        int              i;
@@ -111,7 +133,7 @@ void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
                if (dma->buflist[i]->file_priv == file_priv) {
                        switch (dma->buflist[i]->list) {
                        case DRM_LIST_NONE:
-                               drm_free_buffer(dev, dma->buflist[i]);
+                               drm_legacy_free_buffer(dev, dma->buflist[i]);
                                break;
                        case DRM_LIST_WAIT:
                                dma->buflist[i]->list = DRM_LIST_RECLAIM;
@@ -123,16 +145,3 @@ void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
                }
        }
 }
-
-/* Call into the driver-specific DMA handler */
-int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-
-       if (dev->driver->dma_ioctl) {
-               /* shared code returns -errno */
-               return dev->driver->dma_ioctl(dev, data, file_priv);
-       } else {
-               DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
-               return -EINVAL;
-       }
-}
sys/dev/drm/drm_dp_helper.c
index b2dfc82..1b188e3 100644
@@ -252,3 +252,14 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
 }
 EXPORT_SYMBOL(drm_dp_dpcd_write);
 
+/**
+ * drm_dp_aux_unregister() - unregister an AUX adapter
+ * @aux: DisplayPort AUX channel
+ */
+void drm_dp_aux_unregister(struct drm_dp_aux *aux)
+{
+#if 0
+       i2c_del_adapter(&aux->ddc);
+#endif
+}
+EXPORT_SYMBOL(drm_dp_aux_unregister);
sys/dev/drm/drm_dragonfly.c
index 1e5fe6c..ea87ce9 100644
@@ -144,3 +144,59 @@ void drm_fini_pdev(struct pci_dev **pdev)
 
        kfree(*pdev);
 }
+
+/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
+ * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
+ * address for accessing them.  Cleaned up at unload.
+ */
+static int drm_alloc_resource(struct drm_device *dev, int resource)
+{
+       struct resource *res;
+       int rid;
+
+       DRM_LOCK_ASSERT(dev);
+
+       if (resource >= DRM_MAX_PCI_RESOURCE) {
+               DRM_ERROR("Resource %d too large\n", resource);
+               return 1;
+       }
+
+       if (dev->pcir[resource] != NULL) {
+               return 0;
+       }
+
+       DRM_UNLOCK(dev);
+       rid = PCIR_BAR(resource);
+       res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
+           RF_SHAREABLE);
+       DRM_LOCK(dev);
+       if (res == NULL) {
+               DRM_ERROR("Couldn't find resource 0x%x\n", resource);
+               return 1;
+       }
+
+       if (dev->pcir[resource] == NULL) {
+               dev->pcirid[resource] = rid;
+               dev->pcir[resource] = res;
+       }
+
+       return 0;
+}
+
+unsigned long drm_get_resource_start(struct drm_device *dev,
+                                    unsigned int resource)
+{
+       if (drm_alloc_resource(dev, resource) != 0)
+               return 0;
+
+       return rman_get_start(dev->pcir[resource]);
+}
+
+unsigned long drm_get_resource_len(struct drm_device *dev,
+                                  unsigned int resource)
+{
+       if (drm_alloc_resource(dev, resource) != 0)
+               return 0;
+
+       return rman_get_size(dev->pcir[resource]);
+}
sys/dev/drm/drm_drv.c
index 150355f..ccde8cd 100644
 #include <drm/drmP.h>
 #include <drm/drm_core.h>
 #include "drm_legacy.h"
+#include "drm_internal.h"
 
 unsigned int drm_debug = 0;    /* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
-unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
 
 unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
 
@@ -49,7 +50,7 @@ MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
 MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
 
@@ -68,7 +69,7 @@ struct class *drm_class;
 static struct dentry *drm_debugfs_root;
 #endif
 
-int drm_err(const char *func, const char *format, ...)
+void drm_err(const char *func, const char *format, ...)
 {
 #if 0
        struct va_format vaf;
@@ -86,7 +87,6 @@ int drm_err(const char *func, const char *format, ...)
 
        return r;
 #endif
-       return 0;
 }
 EXPORT_SYMBOL(drm_err);
 
@@ -139,7 +139,6 @@ EXPORT_SYMBOL(drm_master_get);
 static void drm_master_destroy(struct kref *kref)
 {
        struct drm_master *master = container_of(kref, struct drm_master, refcount);
-       struct drm_magic_entry *pt, *next;
        struct drm_device *dev = master->minor->dev;
        struct drm_map_list *r_list, *list_temp;
 
@@ -149,7 +148,7 @@ static void drm_master_destroy(struct kref *kref)
 
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
                if (r_list->master == master) {
-                       drm_rmmap_locked(dev, r_list->map);
+                       drm_legacy_rmmap_locked(dev, r_list->map);
                        r_list = NULL;
                }
        }
@@ -160,12 +159,6 @@ static void drm_master_destroy(struct kref *kref)
                master->unique_len = 0;
        }
 
-       list_for_each_entry_safe(pt, next, &master->magicfree, head) {
-               list_del(&pt->head);
-               drm_ht_remove_item(&master->magiclist, &pt->hash_item);
-               kfree(pt);
-       }
-
        drm_ht_remove(&master->magiclist);
 
        mutex_unlock(&dev->struct_mutex);
@@ -586,7 +579,7 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
                goto err_ht;
        }
 
-       if (driver->driver_features & DRIVER_GEM) {
+       if (drm_core_check_feature(dev, DRIVER_GEM)) {
                ret = drm_gem_init(dev);
                if (ret) {
                        DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
@@ -616,7 +609,7 @@ static void drm_dev_release(struct kref *ref)
 {
        struct drm_device *dev = container_of(ref, struct drm_device, ref);
 
-       if (dev->driver->driver_features & DRIVER_GEM)
+       if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_destroy(dev);
 
        drm_legacy_ctxbitmap_cleanup(dev);
@@ -750,7 +743,7 @@ void drm_dev_unregister(struct drm_device *dev)
        drm_vblank_cleanup(dev);
 
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
-               drm_rmmap(dev, r_list->map);
+               drm_legacy_rmmap(dev, r_list->map);
 
        drm_minor_unregister(dev, DRM_MINOR_LEGACY);
        drm_minor_unregister(dev, DRM_MINOR_RENDER);
@@ -1136,83 +1129,6 @@ drm_pci_id_list_t *drm_find_description(int vendor, int device,
        return NULL;
 }
 
-/**
- * Take down the DRM device.
- *
- * \param dev DRM device structure.
- *
- * Frees every resource in \p dev.
- *
- * \sa drm_device
- */
-int drm_lastclose(struct drm_device * dev)
-{
-       drm_magic_entry_t *pt, *next;
-
-       DRM_DEBUG("\n");
-
-       if (dev->driver->lastclose != NULL)
-               dev->driver->lastclose(dev);
-
-       if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
-               drm_irq_uninstall(dev);
-
-       DRM_LOCK(dev);
-       if (dev->unique) {
-               drm_free(dev->unique, M_DRM);
-               dev->unique = NULL;
-               dev->unique_len = 0;
-       }
-
-       /* Clear pid list */
-       if (dev->magicfree.next) {
-               list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
-                       list_del(&pt->head);
-                       drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
-                       kfree(pt);
-               }
-               drm_ht_remove(&dev->magiclist);
-       }
-       
-       /* Clear AGP information */
-       if (dev->agp) {
-               drm_agp_mem_t *entry;
-               drm_agp_mem_t *nexte;
-
-               /* Remove AGP resources, but leave dev->agp intact until
-                * drm_unload is called.
-                */
-               for (entry = dev->agp->memory; entry; entry = nexte) {
-                       nexte = entry->next;
-                       if (entry->bound)
-                               drm_agp_unbind_memory(entry->handle);
-                       drm_agp_free_memory(entry->handle);
-                       drm_free(entry, M_DRM);
-               }
-               dev->agp->memory = NULL;
-
-               if (dev->agp->acquired)
-                       drm_agp_release(dev);
-
-               dev->agp->acquired = 0;
-               dev->agp->enabled  = 0;
-       }
-       if (dev->sg != NULL) {
-               drm_sg_cleanup(dev->sg);
-               dev->sg = NULL;
-       }
-
-       drm_dma_takedown(dev);
-       if (dev->lock.hw_lock) {
-               dev->lock.hw_lock = NULL; /* SHM removed */
-               dev->lock.file_priv = NULL;
-               wakeup(&dev->lock.lock_queue);
-       }
-       DRM_UNLOCK(dev);
-
-       return 0;
-}
-
 static int drm_load(struct drm_device *dev)
 {
        int i, retcode;
@@ -1371,7 +1287,7 @@ void drm_cdevpriv_dtor(void *cd)
 
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
            !dev->driver->reclaim_buffers_locked)
-               drm_reclaim_buffers(dev, file_priv);
+               drm_legacy_reclaim_buffers(dev, file_priv);
 
        funsetown(&dev->buf_sigio);
 
@@ -1393,20 +1309,6 @@ void drm_cdevpriv_dtor(void *cd)
        DRM_UNLOCK(dev);
 }
 
-drm_local_map_t *drm_getsarea(struct drm_device *dev)
-{
-       struct drm_map_list *entry;
-
-       list_for_each_entry(entry, &dev->maplist, head) {
-               if (entry->map && entry->map->type == _DRM_SHM &&
-                   (entry->map->flags & _DRM_CONTAINS_LOCK)) {
-                       return entry->map;
-               }
-       }
-
-       return NULL;
-}
-
 int
 drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
     struct sysctl_oid *top)
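
Note on the drm_core_check_feature() conversions above: they replace open-coded tests of dev->driver->driver_features with the canonical helper. For reference, the helper is a one-line inline (a sketch of its Linux 3.18 form from drmP.h; the DragonFly copy may differ cosmetically):

static inline int drm_core_check_feature(struct drm_device *dev, int feature)
{
	return ((dev->driver->driver_features & feature) ? 1 : 0);
}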
index 4a2b181..4c3cfa4 100644 (file)
@@ -636,27 +636,27 @@ static const struct drm_display_mode edid_cea_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 6 - 1440x480i@60Hz */
-       { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
-                  1602, 1716, 0, 480, 488, 494, 525, 0,
+       /* 6 - 720(1440)x480i@60Hz */
+       { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+                  801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 7 - 1440x480i@60Hz */
-       { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
-                  1602, 1716, 0, 480, 488, 494, 525, 0,
+       /* 7 - 720(1440)x480i@60Hz */
+       { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+                  801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 8 - 1440x240@60Hz */
-       { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
-                  1602, 1716, 0, 240, 244, 247, 262, 0,
+       /* 8 - 720(1440)x240@60Hz */
+       { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+                  801, 858, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 9 - 1440x240@60Hz */
-       { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
-                  1602, 1716, 0, 240, 244, 247, 262, 0,
+       /* 9 - 720(1440)x240@60Hz */
+       { DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
+                  801, 858, 0, 240, 244, 247, 262, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -718,27 +718,27 @@ static const struct drm_display_mode edid_cea_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                        DRM_MODE_FLAG_INTERLACE),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 21 - 1440x576i@50Hz */
-       { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
-                  1590, 1728, 0, 576, 580, 586, 625, 0,
+       /* 21 - 720(1440)x576i@50Hz */
+       { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+                  795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 22 - 1440x576i@50Hz */
-       { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
-                  1590, 1728, 0, 576, 580, 586, 625, 0,
+       /* 22 - 720(1440)x576i@50Hz */
+       { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+                  795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 23 - 1440x288@50Hz */
-       { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
-                  1590, 1728, 0, 288, 290, 293, 312, 0,
+       /* 23 - 720(1440)x288@50Hz */
+       { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+                  795, 864, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 24 - 1440x288@50Hz */
-       { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
-                  1590, 1728, 0, 288, 290, 293, 312, 0,
+       /* 24 - 720(1440)x288@50Hz */
+       { DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
+                  795, 864, 0, 288, 290, 293, 312, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -841,17 +841,17 @@ static const struct drm_display_mode edid_cea_modes[] = {
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 44 - 1440x576i@100Hz */
-       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
-                  1590, 1728, 0, 576, 580, 586, 625, 0,
+       /* 44 - 720(1440)x576i@100Hz */
+       { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+                  795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 45 - 1440x576i@100Hz */
-       { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
-                  1590, 1728, 0, 576, 580, 586, 625, 0,
+       /* 45 - 720(1440)x576i@100Hz */
+       { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+                  795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
-                       DRM_MODE_FLAG_DBLCLK),
+                       DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
        /* 46 - 1920x1080i@120Hz */
        { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
@@ -874,15 +874,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 50 - 1440x480i@120Hz */
-       { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
-                  1602, 1716, 0, 480, 488, 494, 525, 0,
+       /* 50 - 720(1440)x480i@120Hz */
+       { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
+                  801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 51 - 1440x480i@120Hz */
-       { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
-                  1602, 1716, 0, 480, 488, 494, 525, 0,
+       /* 51 - 720(1440)x480i@120Hz */
+       { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
+                  801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -896,15 +896,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
                   796, 864, 0, 576, 581, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 54 - 1440x576i@200Hz */
-       { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
-                  1590, 1728, 0, 576, 580, 586, 625, 0,
+       /* 54 - 720(1440)x576i@200Hz */
+       { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+                  795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 55 - 1440x576i@200Hz */
-       { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
-                  1590, 1728, 0, 576, 580, 586, 625, 0,
+       /* 55 - 720(1440)x576i@200Hz */
+       { DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+                  795, 864, 0, 576, 580, 586, 625, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -918,15 +918,15 @@ static const struct drm_display_mode edid_cea_modes[] = {
                   798, 858, 0, 480, 489, 495, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
-       /* 58 - 1440x480i@240 */
-       { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
-                  1602, 1716, 0, 480, 488, 494, 525, 0,
+       /* 58 - 720(1440)x480i@240Hz */
+       { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
+                  801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
-       /* 59 - 1440x480i@240 */
-       { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
-                  1602, 1716, 0, 480, 488, 494, 525, 0,
+       /* 59 - 720(1440)x480i@240Hz */
+       { DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
+                  801, 858, 0, 480, 488, 494, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
                        DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
          .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
@@ -3447,10 +3447,10 @@ EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 /**
  * drm_assign_hdmi_deep_color_info - detect whether monitor supports
  * hdmi deep color modes and update drm_display_info if so.
- *
  * @edid: monitor EDID information
  * @info: Updated with maximum supported deep color bpc and color format
  *        if deep color supported.
+ * @connector: DRM connector, used only for debug output
  *
  * Parse the CEA extension according to CEA-861-B.
  * Return true if HDMI deep color supported, false if not or unknown.
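
The CEA mode-table corrections above track CEA-861: these VICs are 720-pixel-wide, pixel-repeated modes, so the stored pixel clock is halved (e.g. 27000 -> 13500 kHz for the 60Hz entries) and DRM_MODE_FLAG_DBLCLK marks the on-the-wire doubling to 1440. A standalone sanity check of entry 6 (illustrative C, not part of the patch):

#include <stdio.h>

int main(void)
{
	double clock_khz = 13500.0, htotal = 858.0, vtotal = 525.0;

	/* interlaced: vtotal covers a full frame, two fields per frame */
	double vrefresh = clock_khz * 1000.0 / (htotal * vtotal) * 2.0;

	printf("VIC 6 refresh: %.2f Hz\n", vrefresh);	/* ~59.94 */
	return 0;
}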
index 899c8b8..d3bff98 100644 (file)
@@ -123,7 +123,7 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 
        WARN_ON(!mutex_is_locked(&fb_helper->dev->mode_config.mutex));
        if (fb_helper->connector_count + 1 > fb_helper->connector_info_alloc_count) {
-               temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector) * (fb_helper->connector_count + 1), M_DRM, M_WAITOK);
+               temp = krealloc(fb_helper->connector_info, sizeof(struct drm_fb_helper_connector *) * (fb_helper->connector_count + 1), M_DRM, M_WAITOK);
                if (!temp)
                        return -ENOMEM;
 
@@ -168,60 +168,6 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
 
-static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
-{
-       struct drm_fb_helper_connector *fb_helper_conn;
-       int i;
-
-       for (i = 0; i < fb_helper->connector_count; i++) {
-               struct drm_cmdline_mode *mode;
-               struct drm_connector *connector;
-               char *option = NULL;
-
-               fb_helper_conn = fb_helper->connector_info[i];
-               connector = fb_helper_conn->connector;
-               mode = &fb_helper_conn->cmdline_mode;
-
-               /* do something on return - turn off connector maybe */
-               if (fb_get_options(connector->name, &option))
-                       continue;
-
-               if (drm_mode_parse_command_line_for_connector(option,
-                                                             connector,
-                                                             mode)) {
-                       if (mode->force) {
-                               const char *s;
-                               switch (mode->force) {
-                               case DRM_FORCE_OFF:
-                                       s = "OFF";
-                                       break;
-                               case DRM_FORCE_ON_DIGITAL:
-                                       s = "ON - dig";
-                                       break;
-                               default:
-                               case DRM_FORCE_ON:
-                                       s = "ON";
-                                       break;
-                               }
-
-                               DRM_INFO("forcing %s connector %s\n",
-                                        connector->name, s);
-                               connector->force = mode->force;
-                       }
-
-                       DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
-                                     connector->name,
-                                     mode->xres, mode->yres,
-                                     mode->refresh_specified ? mode->refresh : 60,
-                                     mode->rb ? " reduced blanking" : "",
-                                     mode->margins ? " with margins" : "",
-                                     mode->interlace ?  " interlaced" : "");
-               }
-
-       }
-       return 0;
-}
-
 #if 0
 static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
 {
@@ -344,10 +290,17 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
 
        drm_warn_on_modeset_not_all_locked(dev);
 
-       list_for_each_entry(plane, &dev->mode_config.plane_list, head)
+       list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
                if (plane->type != DRM_PLANE_TYPE_PRIMARY)
                        drm_plane_force_disable(plane);
 
+               if (dev->mode_config.rotation_property) {
+                       drm_mode_plane_set_obj_prop(plane,
+                                                   dev->mode_config.rotation_property,
+                                                   BIT(DRM_ROTATE_0));
+               }
+       }
+
        for (i = 0; i < fb_helper->crtc_count; i++) {
                struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
                struct drm_crtc *crtc = mode_set->crtc;
@@ -1052,7 +1005,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
                struct drm_cmdline_mode *cmdline_mode;
 
-               cmdline_mode = &fb_helper_conn->cmdline_mode;
+               cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
 
                if (cmdline_mode->bpp_specified) {
                        switch (cmdline_mode->bpp) {
@@ -1314,9 +1267,7 @@ EXPORT_SYMBOL(drm_has_preferred_mode);
 
 static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
 {
-       struct drm_cmdline_mode *cmdline_mode;
-       cmdline_mode = &fb_connector->cmdline_mode;
-       return cmdline_mode->specified;
+       return fb_connector->connector->cmdline_mode.specified;
 }
 
 struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
@@ -1326,7 +1277,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
        struct drm_display_mode *mode = NULL;
        bool prefer_non_interlace;
 
-       cmdline_mode = &fb_helper_conn->cmdline_mode;
+       cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
        if (cmdline_mode->specified == false)
                return mode;
 
@@ -1711,8 +1662,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
        struct drm_device *dev = fb_helper->dev;
        int count = 0;
 
-       drm_fb_helper_parse_command_line(fb_helper);
-
        mutex_lock(&dev->mode_config.mutex);
        count = drm_fb_helper_probe_connector_modes(fb_helper,
                                                    dev->mode_config.max_width,
index f312cd4..943a26a 100644 (file)
@@ -41,6 +41,7 @@
 #include <drm/drmP.h>
 #include <linux/module.h>
 #include "drm_legacy.h"
+#include "drm_internal.h"
 
 /* from BKL pushdown: note that nothing else serializes idr_find() */
 DEFINE_MUTEX(drm_global_mutex);
@@ -58,7 +59,7 @@ static int drm_setup(struct drm_device *dev)
        DRM_LOCK_ASSERT(dev);
 
        /* prebuild the SAREA */
-       i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+       i = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
            _DRM_CONTAINS_LOCK, &map);
        if (i != 0)
                return i;
@@ -69,7 +70,7 @@ static int drm_setup(struct drm_device *dev)
        dev->buf_use = 0;
 
        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
-               i = drm_dma_setup(dev);
+               i = drm_legacy_dma_setup(dev);
                if (i != 0)
                        return i;
        }
@@ -157,7 +158,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
        init_waitqueue_head(&priv->event_wait);
        priv->event_space = 4096; /* set aside 4k for event buffer */
 
-       if (dev->driver->driver_features & DRIVER_GEM)
+       if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_open(dev, priv);
 
        if (dev->driver->open) {
@@ -207,7 +208,7 @@ static void drm_unload(struct drm_device *dev)
        if (dev->devnode != NULL)
                destroy_dev(dev->devnode);
 
-       if (dev->driver->driver_features & DRIVER_GEM)
+       if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_destroy(dev);
 
        if (dev->agp && dev->agp->agp_mtrr) {
@@ -259,11 +260,105 @@ static void drm_unload(struct drm_device *dev)
        lockuninit(&dev->struct_mutex);
 }
 
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+       DRM_DEBUG("\n");
+
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
+
+       if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
+               drm_irq_uninstall(dev);
+
+       mutex_lock(&dev->struct_mutex);
+
+       if (dev->unique) {
+               drm_free(dev->unique, M_DRM);
+               dev->unique = NULL;
+               dev->unique_len = 0;
+       }
+
+       /* Clear AGP information */
+       if (dev->agp) {
+               drm_agp_mem_t *entry;
+               drm_agp_mem_t *nexte;
+
+               /* Remove AGP resources, but leave dev->agp intact until
+                * drm_unload is called.
+                */
+               for (entry = dev->agp->memory; entry; entry = nexte) {
+                       nexte = entry->next;
+                       if (entry->bound)
+                               drm_agp_unbind_memory(entry->handle);
+                       drm_agp_free_memory(entry->handle);
+                       drm_free(entry, M_DRM);
+               }
+               dev->agp->memory = NULL;
+
+               if (dev->agp->acquired)
+                       drm_agp_release(dev);
+
+               dev->agp->acquired = 0;
+               dev->agp->enabled  = 0;
+       }
+       if (dev->sg != NULL) {
+               drm_legacy_sg_cleanup(dev->sg);
+               dev->sg = NULL;
+       }
+
+       drm_legacy_dma_takedown(dev);
+
+       if (dev->lock.hw_lock) {
+               dev->lock.hw_lock = NULL; /* SHM removed */
+               dev->lock.file_priv = NULL;
+               wakeup(&dev->lock.lock_queue);
+       }
+
+       mutex_unlock(&dev->struct_mutex);
+
+       DRM_DEBUG("lastclose completed\n");
+       return 0;
+}
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
 int drm_release(device_t kdev)
 {
-       struct drm_device *dev;
+       struct drm_device *dev = device_get_softc(kdev);
+       struct drm_magic_entry *pt, *next;
+
+       mutex_lock(&drm_global_mutex);
+
+       /* Clear pid list */
+       if (dev->magicfree.next) {
+               list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
+                       list_del(&pt->head);
+                       drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
+                       kfree(pt);
+               }
+               drm_ht_remove(&dev->magiclist);
+       }
 
-       dev = device_get_softc(kdev);
        drm_unload(dev);
        if (dev->irqr) {
                bus_release_resource(dev->dev, SYS_RES_IRQ, dev->irqrid,
@@ -273,6 +368,9 @@ int drm_release(device_t kdev)
                        DRM_INFO("MSI released\n");
                }
        }
+
+       mutex_unlock(&drm_global_mutex);
+
        return (0);
 }
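
drm_lastclose() now lives in drm_fops.c and runs once the last reference to the device goes away: driver hook first, then IRQ, AGP, scatter/gather and DMA teardown under struct_mutex. A hedged sketch of how a driver opts into this path (the exampledrv_* names are hypothetical):

static void exampledrv_lastclose(struct drm_device *dev)
{
	/* e.g. restore the fbdev console configuration before teardown */
	DRM_DEBUG("exampledrv lastclose\n");
}

static struct drm_driver exampledrv_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.lastclose = exampledrv_lastclose,
	/* ... */
};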
 
index e005703..f2dea6c 100644 (file)
@@ -70,6 +70,8 @@
 #include <linux/module.h>
 #include <drm/drmP.h>
 #include <drm/drm_vma_manager.h>
+#include <drm/drm_gem.h>
+#include "drm_internal.h"
 
 /** @file drm_gem.c
  *
@@ -515,7 +517,7 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
        struct drm_gem_close *args = data;
        int ret;
 
-       if (!(dev->driver->driver_features & DRIVER_GEM))
+       if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;
 
        ret = drm_gem_handle_delete(file_priv, args->handle);
@@ -537,7 +539,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
        struct drm_gem_object *obj;
        int ret;
 
-       if (!(dev->driver->driver_features & DRIVER_GEM))
+       if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;
 
        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
@@ -593,7 +595,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
        int ret;
        u32 handle;
 
-       if (!(dev->driver->driver_features & DRIVER_GEM))
+       if (!drm_core_check_feature(dev, DRIVER_GEM))
                return -ENODEV;
 
        lockmgr(&dev->object_name_lock, LK_EXCLUSIVE);
diff --git a/sys/dev/drm/drm_internal.h b/sys/dev/drm/drm_internal.h
new file mode 100644 (file)
index 0000000..2efbd68
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *   Daniel Vetter <daniel.vetter@ffwll.ch>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* drm_gem.c */
+int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+                              struct drm_gem_object *obj,
+                              u32 *handlep);
index bae312e..eb010aa 100644 (file)
@@ -59,8 +59,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
 
        DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
@@ -84,12 +84,12 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
 
        DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
-       DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
+       DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
 
        DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
@@ -102,8 +102,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
-       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+       DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
        DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
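
The rows above only swap in the drm_legacy_* handler names; the DRM_IOCTL_DEF() shape is unchanged. One illustrative entry (matching the Linux 3.18 table; the flags gate AUTH/MASTER/ROOT_ONLY checks before the handler is called):

	DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),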
 
index 98584ad..9becab6 100644 (file)
  */
 #define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
 
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+                         struct timeval *tvblank, unsigned flags);
+
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
@@ -71,7 +75,8 @@
 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 {
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
-       u32 cur_vblank, diff, tslot, rc;
+       u32 cur_vblank, diff, tslot;
+       bool rc;
        struct timeval t_vblank;
 
        /*
@@ -100,9 +105,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
                          crtc, vblank->last, cur_vblank, diff);
        }
 
-       DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+       DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
                  crtc, diff);
 
+       if (diff == 0)
+               return;
+
        /* Reinitialize corresponding vblank timestamp if high-precision query
         * available. Skip this step if query unsupported or failed. Will
         * reinitialize delayed at next vblank interrupt in that case.
@@ -128,7 +136,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
        u32 vblcount;
        s64 diff_ns;
-       int vblrc;
+       bool vblrc;
        struct timeval tvblank;
        int count = DRM_TIMESTAMP_MAXRETRIES;
 
@@ -144,8 +152,15 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         * has been ticking all along until this time. This makes the
         * count account for the entire time between drm_vblank_on() and
         * drm_vblank_off().
+        *
+        * But only do this if precise vblank timestamps are available.
+        * Otherwise we might read a totally bogus timestamp since drivers
+        * lacking precise timestamp support rely upon sampling the system clock
+        * at vblank interrupt time. Which obviously won't work out well if the
+        * vblank interrupt is disabled.
         */
-       if (!vblank->enabled) {
+       if (!vblank->enabled &&
+           drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
                drm_update_vblank_count(dev, crtc);
                lockmgr(&dev->vblank_time_lock, LK_RELEASE);
                return;
@@ -193,7 +208,14 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
         * available. In that case we can't account for this and just
         * hope for the best.
         */
-       if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
+       if (vblrc && (abs64(diff_ns) > 1000000)) {
+               /* Store new timestamp in ringbuffer. */
+               vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+               /* Increment cooked vblank count. This also atomically commits
+                * the timestamp computed above.
+                */
+               smp_mb__before_atomic();
                atomic_inc(&vblank->count);
                smp_mb__after_atomic();
        }
@@ -236,7 +258,10 @@ void drm_vblank_cleanup(struct drm_device *dev)
                struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
 
                del_timer_sync(&vblank->disable_timer);
-               vblank_disable_fn((unsigned long)vblank);
+
+               lockmgr(&dev->vbl_lock, LK_EXCLUSIVE);
+               vblank_disable_and_save(dev, crtc);
+               lockmgr(&dev->vbl_lock, LK_RELEASE);
        }
 
        kfree(dev->vblank);
@@ -291,7 +316,7 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
        return 0;
 
 err:
-       drm_vblank_cleanup(dev);
+       dev->num_crtcs = 0;
        return ret;
 }
 EXPORT_SYMBOL(drm_vblank_init);
@@ -684,7 +709,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
         * within vblank area, counting down the number of lines until
         * start of scanout.
         */
-       invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+       invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK;
 
        /* Convert scanout position into elapsed time at raw_time query
         * since start of scanout at first display scanline. delta_ns
@@ -716,7 +741,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
 
        vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
        if (invbl)
-               vbl_status |= DRM_VBLANKTIME_INVBL;
+               vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
 
        return vbl_status;
 }
@@ -753,10 +778,11 @@ static struct timeval get_drm_timestamp(void)
  * call, i.e., it isn't very precisely locked to the true vblank.
  *
  * Returns:
- * Non-zero if timestamp is considered to be very precise, zero otherwise.
+ * True if timestamp is considered to be very precise, false otherwise.
  */
-u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
-                             struct timeval *tvblank, unsigned flags)
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+                         struct timeval *tvblank, unsigned flags)
 {
        int ret;
 
@@ -768,7 +794,7 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
                ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
                                                        tvblank, flags);
                if (ret > 0)
-                       return (u32) ret;
+                       return true;
        }
 
        /* GPU high precision timestamp query unsupported or failed.
@@ -776,9 +802,8 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
         */
        *tvblank = get_drm_timestamp();
 
-       return 0;
+       return false;
 }
-EXPORT_SYMBOL(drm_get_last_vbltimestamp);
 
 /**
  * drm_vblank_count - retrieve "cooked" vblank counter value
@@ -794,9 +819,9 @@ EXPORT_SYMBOL(drm_get_last_vbltimestamp);
  */
 u32 drm_vblank_count(struct drm_device *dev, int crtc)
 {
-       struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
-
-       return atomic_read(&vblank->count);
+       if (WARN_ON(crtc >= dev->num_crtcs))
+               return 0;
+       return atomic_read(&dev->vblank[crtc].count);
 }
 EXPORT_SYMBOL(drm_vblank_count);
 
@@ -819,6 +844,9 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
        u32 cur_vblank;
 
+       if (WARN_ON(crtc >= dev->num_crtcs))
+               return 0;
+
        /* Read timestamp from slot of _vblank_time ringbuffer
         * that corresponds to current vblank count. Retry if
         * count has incremented during readout. This works like
@@ -938,6 +966,9 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
        struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
        int ret = 0;
 
+       if (WARN_ON(crtc >= dev->num_crtcs))
+               return -EINVAL;
+
        lockmgr(&dev->vbl_lock, LK_EXCLUSIVE);
        /* Going from 0->1 means we have to enable interrupts again */
        if (atomic_add_return(1, &vblank->refcount) == 1) {
@@ -988,11 +1019,19 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 
        BUG_ON(atomic_read(&vblank->refcount) == 0);
 
+       if (WARN_ON(crtc >= dev->num_crtcs))
+               return;
+
        /* Last user schedules interrupt disable */
-       if (atomic_dec_and_test(&vblank->refcount) &&
-           (drm_vblank_offdelay > 0))
-               mod_timer(&vblank->disable_timer,
-                         jiffies + ((drm_vblank_offdelay * HZ)/1000));
+       if (atomic_dec_and_test(&vblank->refcount)) {
+               if (drm_vblank_offdelay == 0)
+                       return;
+               else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
+                       vblank_disable_fn((unsigned long)vblank);
+               else
+                       mod_timer(&vblank->disable_timer,
+                                 jiffies + ((drm_vblank_offdelay * HZ)/1000));
+       }
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
@@ -1011,6 +1050,50 @@ void drm_crtc_vblank_put(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_put);
 
+/**
+ * drm_wait_one_vblank - wait for one vblank
+ * @dev: DRM device
+ * @crtc: crtc index
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+{
+       int ret;
+       u32 last;
+
+       ret = drm_vblank_get(dev, crtc);
+       if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", crtc, ret))
+               return;
+
+       last = drm_vblank_count(dev, crtc);
+
+       ret = wait_event_timeout(dev->vblank[crtc].queue,
+                                last != drm_vblank_count(dev, crtc),
+                                msecs_to_jiffies(100));
+
+       WARN(ret == 0, "vblank wait timed out on crtc %i\n", crtc);
+
+       drm_vblank_put(dev, crtc);
+}
+EXPORT_SYMBOL(drm_wait_one_vblank);
+
+/**
+ * drm_crtc_wait_one_vblank - wait for one vblank
+ * @crtc: DRM crtc
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
+{
+       drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
+
 /**
  * drm_vblank_off - disable vblank events on a CRTC
  * @dev: DRM device
@@ -1117,9 +1200,12 @@ void drm_vblank_on(struct drm_device *dev, int crtc)
        vblank->last =
                (dev->driver->get_vblank_counter(dev, crtc) - 1) &
                dev->max_vblank_count;
-
-       /* re-enable interrupts if there's are users left */
-       if (atomic_read(&vblank->refcount) != 0)
+       /*
+        * re-enable interrupts if there are users left, or the
+        * user wishes vblank interrupts to be enabled all the time.
+        */
+       if (atomic_read(&vblank->refcount) != 0 ||
+           (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
                WARN_ON(drm_vblank_enable(dev, crtc));
        lockmgr(&dev->vbl_lock, LK_RELEASE);
 }
@@ -1172,6 +1258,10 @@ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
        /* vblank is not initialized (IRQ not installed ?), or has been freed */
        if (!dev->num_crtcs)
                return;
+
+       if (WARN_ON(crtc >= dev->num_crtcs))
+               return;
+
        /*
         * To avoid all the problems that might happen if interrupts
         * were enabled/disabled around or between these calls, we just
@@ -1271,6 +1361,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
                                  union drm_wait_vblank *vblwait,
                                  struct drm_file *file_priv)
 {
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
        struct drm_pending_vblank_event *e;
        struct timeval now;
        unsigned int seq;
@@ -1293,6 +1384,18 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 
        lockmgr(&dev->event_lock, LK_EXCLUSIVE);
 
+       /*
+        * drm_vblank_off() might have been called after we called
+        * drm_vblank_get(). drm_vblank_off() holds event_lock
+        * around the vblank disable, so no need for further locking.
+        * The reference from drm_vblank_get() protects against
+        * vblank disable from another source.
+        */
+       if (!vblank->enabled) {
+               ret = -EINVAL;
+               goto err_unlock;
+       }
+
        if (file_priv->event_space < sizeof e->event) {
                ret = -EBUSY;
                goto err_unlock;
@@ -1492,6 +1595,9 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 
        lockmgr(&dev->event_lock, LK_EXCLUSIVE);
 
+       if (WARN_ON(crtc >= dev->num_crtcs))
+               return false;
+
        /* Need timestamp lock to prevent concurrent execution with
         * vblank enable/disable, as this would cause inconsistent
         * or corrupted timestamps and vblank counts.
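
The new drm_wait_one_vblank()/drm_crtc_wait_one_vblank() helpers above give drivers a simple way to flush double-buffered hardware state. A minimal caller sketch (the function name is hypothetical):

static void example_flush_plane_update(struct drm_crtc *crtc)
{
	/* only valid while the vblank irq can fire on this crtc */
	drm_crtc_wait_one_vblank(crtc);

	/* double-buffered registers have latched by now */
}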
index d34f20a..af13878 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
+/*
+ * This file contains legacy interfaces that modern drm drivers
+ * should no longer be using. They cannot be removed as legacy
+ * drivers use them, and removing them would break the API.
+ */
+#include <linux/list.h>
+#include <drm/drm_legacy.h>
+
+struct agp_memory;
+
 struct drm_device;
 struct drm_file;
 
@@ -48,4 +58,42 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
 int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
 
+/*
+ * Generic Buffer Management
+ */
+
+#define DRM_MAP_HASH_OFFSET 0x10000000
+
+int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
+int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+
+/*
+ * AGP Support
+ */
+
+/*
+ * Generic Userspace Locking-API
+ */
+
+/* DMA support */
+int drm_legacy_dma_setup(struct drm_device *dev);
+void drm_legacy_dma_takedown(struct drm_device *dev);
+void drm_legacy_free_buffer(struct drm_device *dev,
+                           struct drm_buf * buf);
+void drm_legacy_reclaim_buffers(struct drm_device *dev,
+                               struct drm_file *filp);
+
+/* Scatter Gather Support */
+void drm_legacy_sg_cleanup(struct drm_sg_mem *entry);
+int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv);
+int drm_legacy_sg_free(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv);
+
 #endif /* __DRM_LEGACY_H__ */
index 8e45293..0611247 100644 (file)
  */
 
 #include <drm/drmP.h>
+#include "drm_legacy.h"
+
+#ifdef HAVE_PAGE_AGP
+# include <asm/agp.h>
+#else
+# ifdef __powerpc__
+#  define PAGE_AGP     __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+# else
+#  define PAGE_AGP     PAGE_KERNEL
+# endif
+#endif
 
 MALLOC_DEFINE(M_DRM, "m_drm", "DRM memory allocations");
 
index 815810b..c8c70aa 100644 (file)
@@ -1255,6 +1255,7 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
        if (!mode)
                return NULL;
 
+       mode->type |= DRM_MODE_TYPE_USERDEF;
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        return mode;
 }
index 0dc57d5..0a71b8f 100644 (file)
@@ -35,7 +35,7 @@
  * of extra utility/tracking out of our acquire-ctx.  This is provided
  * by drm_modeset_lock / drm_modeset_acquire_ctx.
  *
- * For basic principles of ww_mutex, see: Documentation/ww-mutex-design.txt
+ * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt
  *
  * The basic usage pattern is to:
  *
  */
 
 
+/**
+ * drm_modeset_lock_all - take all modeset locks
+ * @dev: drm device
+ *
+ * This function takes all modeset locks, suitable where a more fine-grained
+ * scheme isn't (yet) implemented. Locks must be dropped with
+ * drm_modeset_unlock_all.
+ */
+void drm_modeset_lock_all(struct drm_device *dev)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_modeset_acquire_ctx *ctx;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (WARN_ON(!ctx))
+               return;
+
+       mutex_lock(&config->mutex);
+
+       drm_modeset_acquire_init(ctx, 0);
+
+retry:
+       ret = drm_modeset_lock(&config->connection_mutex, ctx);
+       if (ret)
+               goto fail;
+       ret = drm_modeset_lock_all_crtcs(dev, ctx);
+       if (ret)
+               goto fail;
+
+       WARN_ON(config->acquire_ctx);
+
+       /* we now hold the locks, so it is safe to stash the
+        * ctx for drm_modeset_unlock_all():
+        */
+       config->acquire_ctx = ctx;
+
+       drm_warn_on_modeset_not_all_locked(dev);
+
+       return;
+
+fail:
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(ctx);
+               goto retry;
+       }
+}
+EXPORT_SYMBOL(drm_modeset_lock_all);
+
+/**
+ * drm_modeset_unlock_all - drop all modeset locks
+ * @dev: device
+ *
+ * This function drops all modeset locks taken by drm_modeset_lock_all.
+ */
+void drm_modeset_unlock_all(struct drm_device *dev)
+{
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
+
+       if (WARN_ON(!ctx))
+               return;
+
+       config->acquire_ctx = NULL;
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+
+       kfree(ctx);
+
+       mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_all);
+
+/**
+ * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx
+ * @crtc: drm crtc
+ *
+ * This function locks the given crtc using a hidden acquire context. This is
+ * necessary so that drivers internally using the atomic interfaces can grab
+ * further locks with the lock acquire context.
+ */
+void drm_modeset_lock_crtc(struct drm_crtc *crtc)
+{
+       struct drm_modeset_acquire_ctx *ctx;
+       int ret;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (WARN_ON(!ctx))
+               return;
+
+       drm_modeset_acquire_init(ctx, 0);
+
+retry:
+       ret = drm_modeset_lock(&crtc->mutex, ctx);
+       if (ret)
+               goto fail;
+
+       WARN_ON(crtc->acquire_ctx);
+
+       /* we now hold the locks, so it is safe to stash the
+        * ctx for drm_modeset_unlock_crtc():
+        */
+       crtc->acquire_ctx = ctx;
+
+       return;
+
+fail:
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(ctx);
+               goto retry;
+       }
+}
+EXPORT_SYMBOL(drm_modeset_lock_crtc);
+
+/**
+ * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
+ * @crtc: drm crtc
+ *
+ * Legacy ioctl operations like cursor updates or page flips only have per-crtc
+ * locking, and store the acquire ctx in the corresponding crtc. All other
+ * legacy operations take all locks and use a global acquire context. This
+ * function grabs the right one.
+ */
+struct drm_modeset_acquire_ctx *
+drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc)
+{
+       if (crtc->acquire_ctx)
+               return crtc->acquire_ctx;
+
+       WARN_ON(!crtc->dev->mode_config.acquire_ctx);
+
+       return crtc->dev->mode_config.acquire_ctx;
+}
+EXPORT_SYMBOL(drm_modeset_legacy_acquire_ctx);
+
+/**
+ * drm_modeset_unlock_crtc - drop crtc lock
+ * @crtc: drm crtc
+ *
+ * This drops the crtc lock acquired with drm_modeset_lock_crtc() and all other
+ * locks acquired through the hidden context.
+ */
+void drm_modeset_unlock_crtc(struct drm_crtc *crtc)
+{
+       struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx;
+
+       if (WARN_ON(!ctx))
+               return;
+
+       crtc->acquire_ctx = NULL;
+       drm_modeset_drop_locks(ctx);
+       drm_modeset_acquire_fini(ctx);
+
+       kfree(ctx);
+}
+EXPORT_SYMBOL(drm_modeset_unlock_crtc);
+
+/**
+ * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
+ * @dev: device
+ *
+ * Useful as a debug assert.
+ */
+void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
+{
+       struct drm_crtc *crtc;
+
+       /* Locking is currently fubar in the panic handler. */
+#if 0
+       if (oops_in_progress)
+               return;
+#endif
+
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+               WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+       WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+}
+EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
+
 /**
  * drm_modeset_acquire_init - initialize acquire context
  * @ctx: the acquire context
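
Typical pairing for the global lock helpers added above, as a hedged sketch (the example_* name is hypothetical):

static void example_global_update(struct drm_device *dev)
{
	drm_modeset_lock_all(dev);

	/* all crtc/connector state may be touched here */
	drm_warn_on_modeset_not_all_locked(dev);	/* debug assert */

	drm_modeset_unlock_all(dev);
}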
index 77d7aeb..81697f3 100644 (file)
@@ -1,5 +1,6 @@
-/*-
- * Copyright 2003 Eric Anholt.
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
- * AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * $FreeBSD: src/sys/dev/drm2/drm_pci.c,v 1.1 2012/05/22 11:07:44 kib Exp $
- */
-
-/**
- * \file drm_pci.h
- * \brief PCI consistent, DMA-accessible memory allocation.
- *
- * \author Eric Anholt <anholt@FreeBSD.org>
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_legacy.h>
 
 /**********************************************************************/
 /** \name PCI memory */
@@ -109,22 +102,30 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
        return dmah;
 }
 
-/**
- * \brief Free a DMA-accessible consistent memory block.
+/*
+ * Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
  */
-void
-drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
+void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
 {
        if (dmah == NULL)
                return;
 
        bus_dmamem_free(dmah->tag, dmah->vaddr, dmah->map);
        bus_dma_tag_destroy(dmah->tag);
-
-       drm_free(dmah, M_DRM);
 }
 
-/*@}*/
+/**
+ * drm_pci_free - Free a PCI consistent memory block
+ * @dev: DRM device
+ * @dmah: handle to memory block
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+       __drm_legacy_pci_free(dev, dmah);
+       kfree(dmah);
+}
 
 int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
 {
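
drm_pci_free() is now split so the core can release the memory block while keeping the descriptor alive (__drm_legacy_pci_free); driver-facing callers still free both. A lifecycle sketch (the size/alignment choice is illustrative):

static void example_dma_scratch(struct drm_device *dev)
{
	drm_dma_handle_t *dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);

	if (dmah == NULL)
		return;

	/* use dmah->vaddr / dmah->busaddr here */

	drm_pci_free(dev, dmah);	/* frees block and descriptor */
}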
index 15263a5..5e8e247 100644 (file)
@@ -82,6 +82,22 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
        return;
 }
 
+static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
+{
+       struct drm_display_mode *mode;
+
+       if (!connector->cmdline_mode.specified)
+               return 0;
+
+       mode = drm_mode_create_from_cmdline_mode(connector->dev,
+                                                &connector->cmdline_mode);
+       if (mode == NULL)
+               return 0;
+
+       drm_mode_probed_add(connector, mode);
+       return 1;
+}
+
 static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
                                                              uint32_t maxX, uint32_t maxY, bool merge_type_bits)
 {
@@ -141,6 +157,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 
        if (count == 0 && connector->status == connector_status_connected)
                count = drm_add_modes_noedid(connector, 1024, 768);
+       count += drm_helper_probe_add_cmdline_mode(connector);
        if (count == 0)
                goto prune;
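
With drm_fb_helper_parse_command_line() removed (see the drm_fb_helper.c diff above), the parsed video= option now lives in connector->cmdline_mode, and the helper added here injects it as a probed mode tagged DRM_MODE_TYPE_USERDEF. A hedged debug sketch reading the same fields:

static void example_dump_cmdline(struct drm_connector *connector)
{
	struct drm_cmdline_mode *m = &connector->cmdline_mode;

	if (m->specified)
		DRM_DEBUG_KMS("user asked for %dx%d on %s\n",
			      m->xres, m->yres, connector->name);
}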
 
index 7047ca0..631f5af 100644 (file)
@@ -293,3 +293,143 @@ void drm_rect_debug_print(const struct drm_rect *r, bool fixed_point)
                DRM_DEBUG_KMS("%dx%d%+d%+d\n", w, h, r->x1, r->y1);
 }
 EXPORT_SYMBOL(drm_rect_debug_print);
+
+/**
+ * drm_rect_rotate - Rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation to be applied
+ *
+ * Apply @rotation to the coordinates of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the untransformed coordinate
+ * space.
+ */
+void drm_rect_rotate(struct drm_rect *r,
+                    int width, int height,
+                    unsigned int rotation)
+{
+       struct drm_rect tmp;
+
+       if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+               tmp = *r;
+
+               if (rotation & BIT(DRM_REFLECT_X)) {
+                       r->x1 = width - tmp.x2;
+                       r->x2 = width - tmp.x1;
+               }
+
+               if (rotation & BIT(DRM_REFLECT_Y)) {
+                       r->y1 = height - tmp.y2;
+                       r->y2 = height - tmp.y1;
+               }
+       }
+
+       switch (rotation & 0xf) {
+       case BIT(DRM_ROTATE_0):
+               break;
+       case BIT(DRM_ROTATE_90):
+               tmp = *r;
+               r->x1 = tmp.y1;
+               r->x2 = tmp.y2;
+               r->y1 = width - tmp.x2;
+               r->y2 = width - tmp.x1;
+               break;
+       case BIT(DRM_ROTATE_180):
+               tmp = *r;
+               r->x1 = width - tmp.x2;
+               r->x2 = width - tmp.x1;
+               r->y1 = height - tmp.y2;
+               r->y2 = height - tmp.y1;
+               break;
+       case BIT(DRM_ROTATE_270):
+               tmp = *r;
+               r->x1 = height - tmp.y2;
+               r->x2 = height - tmp.y1;
+               r->y1 = tmp.x1;
+               r->y2 = tmp.x2;
+               break;
+       default:
+               break;
+       }
+}
+EXPORT_SYMBOL(drm_rect_rotate);
+
+/**
+ * drm_rect_rotate_inv - Inverse rotate the rectangle
+ * @r: rectangle to be rotated
+ * @width: Width of the coordinate space
+ * @height: Height of the coordinate space
+ * @rotation: Transformation whose inverse is to be applied
+ *
+ * Apply the inverse of @rotation to the coordinates
+ * of rectangle @r.
+ *
+ * @width and @height combined with @rotation define
+ * the location of the new origin.
+ *
+ * @width corresponds to the horizontal and @height
+ * to the vertical axis of the original untransformed
+ * coordinate space, so that you never have to flip
+ * them when doing a rotation and its inverse.
+ * That is, if you do:
+ *
+ * drm_rotate(&r, width, height, rotation);
+ * drm_rotate_inv(&r, width, height, rotation);
+ *
+ * you will always get back the original rectangle.
+ */
+void drm_rect_rotate_inv(struct drm_rect *r,
+                        int width, int height,
+                        unsigned int rotation)
+{
+       struct drm_rect tmp;
+
+       switch (rotation & 0xf) {
+       case BIT(DRM_ROTATE_0):
+               break;
+       case BIT(DRM_ROTATE_90):
+               tmp = *r;
+               r->x1 = width - tmp.y2;
+               r->x2 = width - tmp.y1;
+               r->y1 = tmp.x1;
+               r->y2 = tmp.x2;
+               break;
+       case BIT(DRM_ROTATE_180):
+               tmp = *r;
+               r->x1 = width - tmp.x2;
+               r->x2 = width - tmp.x1;
+               r->y1 = height - tmp.y2;
+               r->y2 = height - tmp.y1;
+               break;
+       case BIT(DRM_ROTATE_270):
+               tmp = *r;
+               r->x1 = tmp.y1;
+               r->x2 = tmp.y2;
+               r->y1 = height - tmp.x2;
+               r->y2 = height - tmp.x1;
+               break;
+       default:
+               break;
+       }
+
+       if (rotation & (BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y))) {
+               tmp = *r;
+
+               if (rotation & BIT(DRM_REFLECT_X)) {
+                       r->x1 = width - tmp.x2;
+                       r->x2 = width - tmp.x1;
+               }
+
+               if (rotation & BIT(DRM_REFLECT_Y)) {
+                       r->y1 = height - tmp.y2;
+                       r->y2 = height - tmp.y1;
+               }
+       }
+}
+EXPORT_SYMBOL(drm_rect_rotate_inv);
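For illustration, a minimal sketch of the round-trip property the comment
above promises (the transformation and rectangle values are assumed):

	/* Illustrative only: a transformation followed by its inverse. */
	struct drm_rect r = { .x1 = 10, .y1 = 20, .x2 = 110, .y2 = 70 };
	unsigned int rotation = BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_X);

	drm_rect_rotate(&r, 640, 480, rotation);     /* r is (410,530)-(460,630) */
	drm_rect_rotate_inv(&r, 640, 480, rotation); /* back to (10,20)-(110,70) */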
index 4e8c0d2..c625aa3 100644 (file)
  */
 
 #include <drm/drmP.h>
+#include "drm_legacy.h"
 
-int drm_sg_alloc(struct drm_device *dev, void *data,
-                struct drm_file *file_priv)
+int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
+                       struct drm_file *file_priv)
 {
        struct drm_scatter_gather *request = data;
        struct drm_sg_mem *entry;
@@ -55,7 +56,7 @@ int drm_sg_alloc(struct drm_device *dev, void *data,
        entry->vaddr = kmem_alloc_attr(&kernel_map, size, M_WAITOK | M_ZERO,
            0, BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);
        if (entry->vaddr == 0) {
-               drm_sg_cleanup(entry);
+               drm_legacy_sg_cleanup(entry);
                return (-ENOMEM);
        }
 
@@ -67,7 +68,7 @@ int drm_sg_alloc(struct drm_device *dev, void *data,
        DRM_LOCK(dev);
        if (dev->sg) {
                DRM_UNLOCK(dev);
-               drm_sg_cleanup(entry);
+               drm_legacy_sg_cleanup(entry);
                return (-EINVAL);
        }
        dev->sg = entry;
@@ -82,8 +83,7 @@ int drm_sg_alloc(struct drm_device *dev, void *data,
        return (0);
 }
 
-void
-drm_sg_cleanup(struct drm_sg_mem *entry)
+void drm_legacy_sg_cleanup(struct drm_sg_mem *entry)
 {
        if (entry == NULL)
                return;
@@ -97,8 +97,8 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
        return;
 }
 
-int
-drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_legacy_sg_free(struct drm_device *dev, void *data,
+                      struct drm_file *file_priv)
 {
        struct drm_scatter_gather *request = data;
        struct drm_sg_mem *entry;
@@ -113,7 +113,7 @@ drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
        DRM_DEBUG("free 0x%jx\n", (uintmax_t)entry->vaddr);
 
-       drm_sg_cleanup(entry);
+       drm_legacy_sg_cleanup(entry);
 
        return (0);
 }
index 8c45840..25963bb 100644 (file)
@@ -34,6 +34,8 @@
 #include <vm/vm_pager.h>
 
 #include <drm/drmP.h>
+#include <asm/pgtable.h>
+#include "drm_legacy.h"
 
 int drm_mmap(struct dev_mmap_args *ap)
 {
index 7503363..731a96b 100644 (file)
@@ -18,6 +18,7 @@ SRCS +=       i915_cmd_parser.c \
        i915_gem_tiling.c \
        i915_gem_userptr.c \
        i915_irq.c \
+       intel_lrc.c \
        intel_ringbuffer.c \
        intel_uncore.c
 
@@ -49,6 +50,7 @@ SRCS += \
        intel_crt.c \
        intel_ddi.c \
        intel_dp.c \
+       intel_dp_mst.c \
        intel_dsi_cmd.c \
        intel_dsi.c \
        intel_dsi_pll.c \
index f7e2024..9bc49ac 100644 (file)
 
 #define NS2501_REGC 0x0c
 
+enum {
+       MODE_640x480,
+       MODE_800x600,
+       MODE_1024x768,
+};
+
+struct ns2501_reg {
+       uint8_t offset;
+       uint8_t value;
+};
+
+/*
+ * Magic values based on what the BIOS on
+ * Fujitsu-Siemens Lifebook S6010 programs (1024x768 panel).
+ */
+static const struct ns2501_reg regs_1024x768[][86] = {
+       [MODE_640x480] = {
+               [0] = { .offset = 0x0a, .value = 0x81, },
+               [1] = { .offset = 0x18, .value = 0x07, },
+               [2] = { .offset = 0x19, .value = 0x00, },
+               [3] = { .offset = 0x1a, .value = 0x00, },
+               [4] = { .offset = 0x1b, .value = 0x11, },
+               [5] = { .offset = 0x1c, .value = 0x54, },
+               [6] = { .offset = 0x1d, .value = 0x03, },
+               [7] = { .offset = 0x1e, .value = 0x02, },
+               [8] = { .offset = 0xf3, .value = 0x90, },
+               [9] = { .offset = 0xf9, .value = 0x00, },
+               [10] = { .offset = 0xc1, .value = 0x90, },
+               [11] = { .offset = 0xc2, .value = 0x00, },
+               [12] = { .offset = 0xc3, .value = 0x0f, },
+               [13] = { .offset = 0xc4, .value = 0x03, },
+               [14] = { .offset = 0xc5, .value = 0x16, },
+               [15] = { .offset = 0xc6, .value = 0x00, },
+               [16] = { .offset = 0xc7, .value = 0x02, },
+               [17] = { .offset = 0xc8, .value = 0x02, },
+               [18] = { .offset = 0xf4, .value = 0x00, },
+               [19] = { .offset = 0x80, .value = 0xff, },
+               [20] = { .offset = 0x81, .value = 0x07, },
+               [21] = { .offset = 0x82, .value = 0x3d, },
+               [22] = { .offset = 0x83, .value = 0x05, },
+               [23] = { .offset = 0x94, .value = 0x00, },
+               [24] = { .offset = 0x95, .value = 0x00, },
+               [25] = { .offset = 0x96, .value = 0x05, },
+               [26] = { .offset = 0x97, .value = 0x00, },
+               [27] = { .offset = 0x9a, .value = 0x88, },
+               [28] = { .offset = 0x9b, .value = 0x00, },
+               [29] = { .offset = 0x98, .value = 0x00, },
+               [30] = { .offset = 0x99, .value = 0x00, },
+               [31] = { .offset = 0xf7, .value = 0x88, },
+               [32] = { .offset = 0xf8, .value = 0x0a, },
+               [33] = { .offset = 0x9c, .value = 0x24, },
+               [34] = { .offset = 0x9d, .value = 0x00, },
+               [35] = { .offset = 0x9e, .value = 0x25, },
+               [36] = { .offset = 0x9f, .value = 0x03, },
+               [37] = { .offset = 0xa0, .value = 0x28, },
+               [38] = { .offset = 0xa1, .value = 0x01, },
+               [39] = { .offset = 0xa2, .value = 0x28, },
+               [40] = { .offset = 0xa3, .value = 0x05, },
+               [41] = { .offset = 0xb6, .value = 0x09, },
+               [42] = { .offset = 0xb8, .value = 0x00, },
+               [43] = { .offset = 0xb9, .value = 0xa0, },
+               [44] = { .offset = 0xba, .value = 0x00, },
+               [45] = { .offset = 0xbb, .value = 0x20, },
+               [46] = { .offset = 0x10, .value = 0x00, },
+               [47] = { .offset = 0x11, .value = 0xa0, },
+               [48] = { .offset = 0x12, .value = 0x02, },
+               [49] = { .offset = 0x20, .value = 0x00, },
+               [50] = { .offset = 0x22, .value = 0x00, },
+               [51] = { .offset = 0x23, .value = 0x00, },
+               [52] = { .offset = 0x24, .value = 0x00, },
+               [53] = { .offset = 0x25, .value = 0x00, },
+               [54] = { .offset = 0x8c, .value = 0x10, },
+               [55] = { .offset = 0x8d, .value = 0x02, },
+               [56] = { .offset = 0x8e, .value = 0x10, },
+               [57] = { .offset = 0x8f, .value = 0x00, },
+               [58] = { .offset = 0x90, .value = 0xff, },
+               [59] = { .offset = 0x91, .value = 0x07, },
+               [60] = { .offset = 0x92, .value = 0xa0, },
+               [61] = { .offset = 0x93, .value = 0x02, },
+               [62] = { .offset = 0xa5, .value = 0x00, },
+               [63] = { .offset = 0xa6, .value = 0x00, },
+               [64] = { .offset = 0xa7, .value = 0x00, },
+               [65] = { .offset = 0xa8, .value = 0x00, },
+               [66] = { .offset = 0xa9, .value = 0x04, },
+               [67] = { .offset = 0xaa, .value = 0x70, },
+               [68] = { .offset = 0xab, .value = 0x4f, },
+               [69] = { .offset = 0xac, .value = 0x00, },
+               [70] = { .offset = 0xa4, .value = 0x84, },
+               [71] = { .offset = 0x7e, .value = 0x18, },
+               [72] = { .offset = 0x84, .value = 0x00, },
+               [73] = { .offset = 0x85, .value = 0x00, },
+               [74] = { .offset = 0x86, .value = 0x00, },
+               [75] = { .offset = 0x87, .value = 0x00, },
+               [76] = { .offset = 0x88, .value = 0x00, },
+               [77] = { .offset = 0x89, .value = 0x00, },
+               [78] = { .offset = 0x8a, .value = 0x00, },
+               [79] = { .offset = 0x8b, .value = 0x00, },
+               [80] = { .offset = 0x26, .value = 0x00, },
+               [81] = { .offset = 0x27, .value = 0x00, },
+               [82] = { .offset = 0xad, .value = 0x00, },
+               [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
+               [84] = { .offset = 0x41, .value = 0x00, },
+               [85] = { .offset = 0xc0, .value = 0x05, },
+       },
+       [MODE_800x600] = {
+               [0] = { .offset = 0x0a, .value = 0x81, },
+               [1] = { .offset = 0x18, .value = 0x07, },
+               [2] = { .offset = 0x19, .value = 0x00, },
+               [3] = { .offset = 0x1a, .value = 0x00, },
+               [4] = { .offset = 0x1b, .value = 0x19, },
+               [5] = { .offset = 0x1c, .value = 0x64, },
+               [6] = { .offset = 0x1d, .value = 0x02, },
+               [7] = { .offset = 0x1e, .value = 0x02, },
+               [8] = { .offset = 0xf3, .value = 0x90, },
+               [9] = { .offset = 0xf9, .value = 0x00, },
+               [10] = { .offset = 0xc1, .value = 0xd7, },
+               [11] = { .offset = 0xc2, .value = 0x00, },
+               [12] = { .offset = 0xc3, .value = 0xf8, },
+               [13] = { .offset = 0xc4, .value = 0x03, },
+               [14] = { .offset = 0xc5, .value = 0x1a, },
+               [15] = { .offset = 0xc6, .value = 0x00, },
+               [16] = { .offset = 0xc7, .value = 0x73, },
+               [17] = { .offset = 0xc8, .value = 0x02, },
+               [18] = { .offset = 0xf4, .value = 0x00, },
+               [19] = { .offset = 0x80, .value = 0x27, },
+               [20] = { .offset = 0x81, .value = 0x03, },
+               [21] = { .offset = 0x82, .value = 0x41, },
+               [22] = { .offset = 0x83, .value = 0x05, },
+               [23] = { .offset = 0x94, .value = 0x00, },
+               [24] = { .offset = 0x95, .value = 0x00, },
+               [25] = { .offset = 0x96, .value = 0x05, },
+               [26] = { .offset = 0x97, .value = 0x00, },
+               [27] = { .offset = 0x9a, .value = 0x88, },
+               [28] = { .offset = 0x9b, .value = 0x00, },
+               [29] = { .offset = 0x98, .value = 0x00, },
+               [30] = { .offset = 0x99, .value = 0x00, },
+               [31] = { .offset = 0xf7, .value = 0x88, },
+               [32] = { .offset = 0xf8, .value = 0x06, },
+               [33] = { .offset = 0x9c, .value = 0x23, },
+               [34] = { .offset = 0x9d, .value = 0x00, },
+               [35] = { .offset = 0x9e, .value = 0x25, },
+               [36] = { .offset = 0x9f, .value = 0x03, },
+               [37] = { .offset = 0xa0, .value = 0x28, },
+               [38] = { .offset = 0xa1, .value = 0x01, },
+               [39] = { .offset = 0xa2, .value = 0x28, },
+               [40] = { .offset = 0xa3, .value = 0x05, },
+               [41] = { .offset = 0xb6, .value = 0x09, },
+               [42] = { .offset = 0xb8, .value = 0x30, },
+               [43] = { .offset = 0xb9, .value = 0xc8, },
+               [44] = { .offset = 0xba, .value = 0x00, },
+               [45] = { .offset = 0xbb, .value = 0x20, },
+               [46] = { .offset = 0x10, .value = 0x20, },
+               [47] = { .offset = 0x11, .value = 0xc8, },
+               [48] = { .offset = 0x12, .value = 0x02, },
+               [49] = { .offset = 0x20, .value = 0x00, },
+               [50] = { .offset = 0x22, .value = 0x00, },
+               [51] = { .offset = 0x23, .value = 0x00, },
+               [52] = { .offset = 0x24, .value = 0x00, },
+               [53] = { .offset = 0x25, .value = 0x00, },
+               [54] = { .offset = 0x8c, .value = 0x10, },
+               [55] = { .offset = 0x8d, .value = 0x02, },
+               [56] = { .offset = 0x8e, .value = 0x04, },
+               [57] = { .offset = 0x8f, .value = 0x00, },
+               [58] = { .offset = 0x90, .value = 0xff, },
+               [59] = { .offset = 0x91, .value = 0x07, },
+               [60] = { .offset = 0x92, .value = 0xa0, },
+               [61] = { .offset = 0x93, .value = 0x02, },
+               [62] = { .offset = 0xa5, .value = 0x00, },
+               [63] = { .offset = 0xa6, .value = 0x00, },
+               [64] = { .offset = 0xa7, .value = 0x00, },
+               [65] = { .offset = 0xa8, .value = 0x00, },
+               [66] = { .offset = 0xa9, .value = 0x83, },
+               [67] = { .offset = 0xaa, .value = 0x40, },
+               [68] = { .offset = 0xab, .value = 0x32, },
+               [69] = { .offset = 0xac, .value = 0x00, },
+               [70] = { .offset = 0xa4, .value = 0x80, },
+               [71] = { .offset = 0x7e, .value = 0x18, },
+               [72] = { .offset = 0x84, .value = 0x00, },
+               [73] = { .offset = 0x85, .value = 0x00, },
+               [74] = { .offset = 0x86, .value = 0x00, },
+               [75] = { .offset = 0x87, .value = 0x00, },
+               [76] = { .offset = 0x88, .value = 0x00, },
+               [77] = { .offset = 0x89, .value = 0x00, },
+               [78] = { .offset = 0x8a, .value = 0x00, },
+               [79] = { .offset = 0x8b, .value = 0x00, },
+               [80] = { .offset = 0x26, .value = 0x00, },
+               [81] = { .offset = 0x27, .value = 0x00, },
+               [82] = { .offset = 0xad, .value = 0x00, },
+               [83] = { .offset = 0x08, .value = 0x30, }, /* 0x31 */
+               [84] = { .offset = 0x41, .value = 0x00, },
+               [85] = { .offset = 0xc0, .value = 0x07, },
+       },
+       [MODE_1024x768] = {
+               [0] = { .offset = 0x0a, .value = 0x81, },
+               [1] = { .offset = 0x18, .value = 0x07, },
+               [2] = { .offset = 0x19, .value = 0x00, },
+               [3] = { .offset = 0x1a, .value = 0x00, },
+               [4] = { .offset = 0x1b, .value = 0x11, },
+               [5] = { .offset = 0x1c, .value = 0x54, },
+               [6] = { .offset = 0x1d, .value = 0x03, },
+               [7] = { .offset = 0x1e, .value = 0x02, },
+               [8] = { .offset = 0xf3, .value = 0x90, },
+               [9] = { .offset = 0xf9, .value = 0x00, },
+               [10] = { .offset = 0xc1, .value = 0x90, },
+               [11] = { .offset = 0xc2, .value = 0x00, },
+               [12] = { .offset = 0xc3, .value = 0x0f, },
+               [13] = { .offset = 0xc4, .value = 0x03, },
+               [14] = { .offset = 0xc5, .value = 0x16, },
+               [15] = { .offset = 0xc6, .value = 0x00, },
+               [16] = { .offset = 0xc7, .value = 0x02, },
+               [17] = { .offset = 0xc8, .value = 0x02, },
+               [18] = { .offset = 0xf4, .value = 0x00, },
+               [19] = { .offset = 0x80, .value = 0xff, },
+               [20] = { .offset = 0x81, .value = 0x07, },
+               [21] = { .offset = 0x82, .value = 0x3d, },
+               [22] = { .offset = 0x83, .value = 0x05, },
+               [23] = { .offset = 0x94, .value = 0x00, },
+               [24] = { .offset = 0x95, .value = 0x00, },
+               [25] = { .offset = 0x96, .value = 0x05, },
+               [26] = { .offset = 0x97, .value = 0x00, },
+               [27] = { .offset = 0x9a, .value = 0x88, },
+               [28] = { .offset = 0x9b, .value = 0x00, },
+               [29] = { .offset = 0x98, .value = 0x00, },
+               [30] = { .offset = 0x99, .value = 0x00, },
+               [31] = { .offset = 0xf7, .value = 0x88, },
+               [32] = { .offset = 0xf8, .value = 0x0a, },
+               [33] = { .offset = 0x9c, .value = 0x24, },
+               [34] = { .offset = 0x9d, .value = 0x00, },
+               [35] = { .offset = 0x9e, .value = 0x25, },
+               [36] = { .offset = 0x9f, .value = 0x03, },
+               [37] = { .offset = 0xa0, .value = 0x28, },
+               [38] = { .offset = 0xa1, .value = 0x01, },
+               [39] = { .offset = 0xa2, .value = 0x28, },
+               [40] = { .offset = 0xa3, .value = 0x05, },
+               [41] = { .offset = 0xb6, .value = 0x09, },
+               [42] = { .offset = 0xb8, .value = 0x00, },
+               [43] = { .offset = 0xb9, .value = 0xa0, },
+               [44] = { .offset = 0xba, .value = 0x00, },
+               [45] = { .offset = 0xbb, .value = 0x20, },
+               [46] = { .offset = 0x10, .value = 0x00, },
+               [47] = { .offset = 0x11, .value = 0xa0, },
+               [48] = { .offset = 0x12, .value = 0x02, },
+               [49] = { .offset = 0x20, .value = 0x00, },
+               [50] = { .offset = 0x22, .value = 0x00, },
+               [51] = { .offset = 0x23, .value = 0x00, },
+               [52] = { .offset = 0x24, .value = 0x00, },
+               [53] = { .offset = 0x25, .value = 0x00, },
+               [54] = { .offset = 0x8c, .value = 0x10, },
+               [55] = { .offset = 0x8d, .value = 0x02, },
+               [56] = { .offset = 0x8e, .value = 0x10, },
+               [57] = { .offset = 0x8f, .value = 0x00, },
+               [58] = { .offset = 0x90, .value = 0xff, },
+               [59] = { .offset = 0x91, .value = 0x07, },
+               [60] = { .offset = 0x92, .value = 0xa0, },
+               [61] = { .offset = 0x93, .value = 0x02, },
+               [62] = { .offset = 0xa5, .value = 0x00, },
+               [63] = { .offset = 0xa6, .value = 0x00, },
+               [64] = { .offset = 0xa7, .value = 0x00, },
+               [65] = { .offset = 0xa8, .value = 0x00, },
+               [66] = { .offset = 0xa9, .value = 0x04, },
+               [67] = { .offset = 0xaa, .value = 0x70, },
+               [68] = { .offset = 0xab, .value = 0x4f, },
+               [69] = { .offset = 0xac, .value = 0x00, },
+               [70] = { .offset = 0xa4, .value = 0x84, },
+               [71] = { .offset = 0x7e, .value = 0x18, },
+               [72] = { .offset = 0x84, .value = 0x00, },
+               [73] = { .offset = 0x85, .value = 0x00, },
+               [74] = { .offset = 0x86, .value = 0x00, },
+               [75] = { .offset = 0x87, .value = 0x00, },
+               [76] = { .offset = 0x88, .value = 0x00, },
+               [77] = { .offset = 0x89, .value = 0x00, },
+               [78] = { .offset = 0x8a, .value = 0x00, },
+               [79] = { .offset = 0x8b, .value = 0x00, },
+               [80] = { .offset = 0x26, .value = 0x00, },
+               [81] = { .offset = 0x27, .value = 0x00, },
+               [82] = { .offset = 0xad, .value = 0x00, },
+               [83] = { .offset = 0x08, .value = 0x34, }, /* 0x35 */
+               [84] = { .offset = 0x41, .value = 0x00, },
+               [85] = { .offset = 0xc0, .value = 0x01, },
+       },
+};
+
+static const struct ns2501_reg regs_init[] = {
+       [0] = { .offset = 0x35, .value = 0xff, },
+       [1] = { .offset = 0x34, .value = 0x00, },
+       [2] = { .offset = 0x08, .value = 0x30, },
+};
+
 struct ns2501_priv {
-       //I2CDevRec d;
        bool quiet;
-       int reg_8_shadow;
-       int reg_8_set;
-       // Shadow registers for i915
-       int dvoc;
-       int pll_a;
-       int srcdim;
-       int fw_blc;
+       const struct ns2501_reg *regs;
 };
 
 #define NSPTR(d) ((NS2501Ptr)(d->DriverPrivate.ptr))
@@ -214,11 +495,9 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
                goto out;
        }
        ns->quiet = false;
-       ns->reg_8_set = 0;
-       ns->reg_8_shadow =
-           NS2501_8_PD | NS2501_8_BPAS | NS2501_8_VEN | NS2501_8_HEN;
 
        DRM_DEBUG_KMS("init ns2501 dvo controller successfully!\n");
+
        return true;
 
 out:
@@ -251,9 +530,9 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
         * of the panel in here so we could always accept it
         * by disabling the scaler.
         */
-       if ((mode->hdisplay == 800 && mode->vdisplay == 600) ||
-           (mode->hdisplay == 640 && mode->vdisplay == 480) ||
-           (mode->hdisplay == 1024 && mode->vdisplay == 768)) {
+       if ((mode->hdisplay == 640 && mode->vdisplay == 480 && mode->clock == 25175) ||
+           (mode->hdisplay == 800 && mode->vdisplay == 600 && mode->clock == 40000) ||
+           (mode->hdisplay == 1024 && mode->vdisplay == 768 && mode->clock == 65000)) {
                return MODE_OK;
        } else {
                return MODE_ONE_SIZE;   /* Is this a reasonable error? */
@@ -264,180 +543,30 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
                            struct drm_display_mode *mode,
                            struct drm_display_mode *adjusted_mode)
 {
-       bool ok;
-       int retries = 10;
        struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+       int mode_idx, i;
 
        DRM_DEBUG_KMS
            ("set mode (hdisplay=%d,htotal=%d,vdisplay=%d,vtotal=%d).\n",
             mode->hdisplay, mode->htotal, mode->vdisplay, mode->vtotal);
 
-       /*
-        * Where do I find the native resolution for which scaling is not required???
-        *
-        * First trigger the DVO on as otherwise the chip does not appear on the i2c
-        * bus.
-        */
-       do {
-               ok = true;
-
-               if (mode->hdisplay == 800 && mode->vdisplay == 600) {
-                       /* mode 277 */
-                       ns->reg_8_shadow &= ~NS2501_8_BPAS;
-                       DRM_DEBUG_KMS("switching to 800x600\n");
-
-                       /*
-                        * No, I do not know where this data comes from.
-                        * It is just what the video bios left in the DVO, so
-                        * I'm just copying it here over.
-                        * This also means that I cannot support any other modes
-                        * except the ones supported by the bios.
-                        */
-                       ok &= ns2501_writeb(dvo, 0x11, 0xc8);   // 0xc7 also works.
-                       ok &= ns2501_writeb(dvo, 0x1b, 0x19);
-                       ok &= ns2501_writeb(dvo, 0x1c, 0x62);   // VBIOS left 0x64 here, but 0x62 works nicer
-                       ok &= ns2501_writeb(dvo, 0x1d, 0x02);
-
-                       ok &= ns2501_writeb(dvo, 0x34, 0x03);
-                       ok &= ns2501_writeb(dvo, 0x35, 0xff);
+       if (mode->hdisplay == 640 && mode->vdisplay == 480)
+               mode_idx = MODE_640x480;
+       else if (mode->hdisplay == 800 && mode->vdisplay == 600)
+               mode_idx = MODE_800x600;
+       else if (mode->hdisplay == 1024 && mode->vdisplay == 768)
+               mode_idx = MODE_1024x768;
+       else
+               return;
 
-                       ok &= ns2501_writeb(dvo, 0x80, 0x27);
-                       ok &= ns2501_writeb(dvo, 0x81, 0x03);
-                       ok &= ns2501_writeb(dvo, 0x82, 0x41);
-                       ok &= ns2501_writeb(dvo, 0x83, 0x05);
+       /* Hopefully doing it every time won't hurt... */
+       for (i = 0; i < ARRAY_SIZE(regs_init); i++)
+               ns2501_writeb(dvo, regs_init[i].offset, regs_init[i].value);
 
-                       ok &= ns2501_writeb(dvo, 0x8d, 0x02);
-                       ok &= ns2501_writeb(dvo, 0x8e, 0x04);
-                       ok &= ns2501_writeb(dvo, 0x8f, 0x00);
+       ns->regs = regs_1024x768[mode_idx];
 
-                       ok &= ns2501_writeb(dvo, 0x90, 0xfe);   /* vertical. VBIOS left 0xff here, but 0xfe works better */
-                       ok &= ns2501_writeb(dvo, 0x91, 0x07);
-                       ok &= ns2501_writeb(dvo, 0x94, 0x00);
-                       ok &= ns2501_writeb(dvo, 0x95, 0x00);
-
-                       ok &= ns2501_writeb(dvo, 0x96, 0x00);
-
-                       ok &= ns2501_writeb(dvo, 0x99, 0x00);
-                       ok &= ns2501_writeb(dvo, 0x9a, 0x88);
-
-                       ok &= ns2501_writeb(dvo, 0x9c, 0x23);   /* Looks like first and last line of the image. */
-                       ok &= ns2501_writeb(dvo, 0x9d, 0x00);
-                       ok &= ns2501_writeb(dvo, 0x9e, 0x25);
-                       ok &= ns2501_writeb(dvo, 0x9f, 0x03);
-
-                       ok &= ns2501_writeb(dvo, 0xa4, 0x80);
-
-                       ok &= ns2501_writeb(dvo, 0xb6, 0x00);
-
-                       ok &= ns2501_writeb(dvo, 0xb9, 0xc8);   /* horizontal? */
-                       ok &= ns2501_writeb(dvo, 0xba, 0x00);   /* horizontal? */
-
-                       ok &= ns2501_writeb(dvo, 0xc0, 0x05);   /* horizontal? */
-                       ok &= ns2501_writeb(dvo, 0xc1, 0xd7);
-
-                       ok &= ns2501_writeb(dvo, 0xc2, 0x00);
-                       ok &= ns2501_writeb(dvo, 0xc3, 0xf8);
-
-                       ok &= ns2501_writeb(dvo, 0xc4, 0x03);
-                       ok &= ns2501_writeb(dvo, 0xc5, 0x1a);
-
-                       ok &= ns2501_writeb(dvo, 0xc6, 0x00);
-                       ok &= ns2501_writeb(dvo, 0xc7, 0x73);
-                       ok &= ns2501_writeb(dvo, 0xc8, 0x02);
-
-               } else if (mode->hdisplay == 640 && mode->vdisplay == 480) {
-                       /* mode 274 */
-                       DRM_DEBUG_KMS("switching to 640x480\n");
-                       /*
-                        * No, I do not know where this data comes from.
-                        * It is just what the video bios left in the DVO, so
-                        * I'm just copying it here over.
-                        * This also means that I cannot support any other modes
-                        * except the ones supported by the bios.
-                        */
-                       ns->reg_8_shadow &= ~NS2501_8_BPAS;
-
-                       ok &= ns2501_writeb(dvo, 0x11, 0xa0);
-                       ok &= ns2501_writeb(dvo, 0x1b, 0x11);
-                       ok &= ns2501_writeb(dvo, 0x1c, 0x54);
-                       ok &= ns2501_writeb(dvo, 0x1d, 0x03);
-
-                       ok &= ns2501_writeb(dvo, 0x34, 0x03);
-                       ok &= ns2501_writeb(dvo, 0x35, 0xff);
-
-                       ok &= ns2501_writeb(dvo, 0x80, 0xff);
-                       ok &= ns2501_writeb(dvo, 0x81, 0x07);
-                       ok &= ns2501_writeb(dvo, 0x82, 0x3d);
-                       ok &= ns2501_writeb(dvo, 0x83, 0x05);
-
-                       ok &= ns2501_writeb(dvo, 0x8d, 0x02);
-                       ok &= ns2501_writeb(dvo, 0x8e, 0x10);
-                       ok &= ns2501_writeb(dvo, 0x8f, 0x00);
-
-                       ok &= ns2501_writeb(dvo, 0x90, 0xff);   /* vertical */
-                       ok &= ns2501_writeb(dvo, 0x91, 0x07);
-                       ok &= ns2501_writeb(dvo, 0x94, 0x00);
-                       ok &= ns2501_writeb(dvo, 0x95, 0x00);
-
-                       ok &= ns2501_writeb(dvo, 0x96, 0x05);
-
-                       ok &= ns2501_writeb(dvo, 0x99, 0x00);
-                       ok &= ns2501_writeb(dvo, 0x9a, 0x88);
-
-                       ok &= ns2501_writeb(dvo, 0x9c, 0x24);
-                       ok &= ns2501_writeb(dvo, 0x9d, 0x00);
-                       ok &= ns2501_writeb(dvo, 0x9e, 0x25);
-                       ok &= ns2501_writeb(dvo, 0x9f, 0x03);
-
-                       ok &= ns2501_writeb(dvo, 0xa4, 0x84);
-
-                       ok &= ns2501_writeb(dvo, 0xb6, 0x09);
-
-                       ok &= ns2501_writeb(dvo, 0xb9, 0xa0);   /* horizontal? */
-                       ok &= ns2501_writeb(dvo, 0xba, 0x00);   /* horizontal? */
-
-                       ok &= ns2501_writeb(dvo, 0xc0, 0x05);   /* horizontal? */
-                       ok &= ns2501_writeb(dvo, 0xc1, 0x90);
-
-                       ok &= ns2501_writeb(dvo, 0xc2, 0x00);
-                       ok &= ns2501_writeb(dvo, 0xc3, 0x0f);
-
-                       ok &= ns2501_writeb(dvo, 0xc4, 0x03);
-                       ok &= ns2501_writeb(dvo, 0xc5, 0x16);
-
-                       ok &= ns2501_writeb(dvo, 0xc6, 0x00);
-                       ok &= ns2501_writeb(dvo, 0xc7, 0x02);
-                       ok &= ns2501_writeb(dvo, 0xc8, 0x02);
-
-               } else if (mode->hdisplay == 1024 && mode->vdisplay == 768) {
-                       /* mode 280 */
-                       DRM_DEBUG_KMS("switching to 1024x768\n");
-                       /*
-                        * This might or might not work, actually. I'm silently
-                        * assuming here that the native panel resolution is
-                        * 1024x768. If not, then this leaves the scaler disabled
-                        * generating a picture that is likely not the expected.
-                        *
-                        * Problem is that I do not know where to take the panel
-                        * dimensions from.
-                        *
-                        * Enable the bypass, scaling not required.
-                        *
-                        * The scaler registers are irrelevant here....
-                        *
-                        */
-                       ns->reg_8_shadow |= NS2501_8_BPAS;
-                       ok &= ns2501_writeb(dvo, 0x37, 0x44);
-               } else {
-                       /*
-                        * Data not known. Bummer!
-                        * Hopefully, the code should not go here
-                        * as mode_OK delivered no other modes.
-                        */
-                       ns->reg_8_shadow |= NS2501_8_BPAS;
-               }
-               ok &= ns2501_writeb(dvo, NS2501_REG8, ns->reg_8_shadow);
-       } while (!ok && retries--);
+       for (i = 0; i < 84; i++)
+               ns2501_writeb(dvo, ns->regs[i].offset, ns->regs[i].value);
 }
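
For illustration, the rewrite above replaces the open-coded register writes
with table-driven programming. A minimal sketch of that scheme follows; the
helper name ns2501_write_regs is hypothetical, as the driver open-codes the
loop in ns2501_mode_set():

	/*
	 * Hypothetical helper equivalent to the loop in ns2501_mode_set():
	 * program each offset/value pair of a mode's register table. Only
	 * the first 84 entries are written at mode-set time; entries 83-85
	 * (offsets 0x08, 0x41, 0xc0) are also sequenced by the dpms hook,
	 * which is why it sanity-checks their offsets first.
	 */
	static void ns2501_write_regs(struct intel_dvo_device *dvo,
				      const struct ns2501_reg *regs, int count)
	{
		int i;

		for (i = 0; i < count; i++)
			ns2501_writeb(dvo, regs[i].offset, regs[i].value);
	}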
 
 /* set the NS2501 power state */
@@ -448,60 +577,46 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
        if (!ns2501_readb(dvo, NS2501_REG8, &ch))
                return false;
 
-       if (ch & NS2501_8_PD)
-               return true;
-       else
-               return false;
+       return ch & NS2501_8_PD;
 }
 
 /* set the NS2501 power state */
 static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-       bool ok;
-       int retries = 10;
        struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
-       unsigned char ch;
 
        DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
-       ch = ns->reg_8_shadow;
+       if (enable) {
+               if (WARN_ON(ns->regs[83].offset != 0x08 ||
+                           ns->regs[84].offset != 0x41 ||
+                           ns->regs[85].offset != 0xc0))
+                       return;
 
-       if (enable)
-               ch |= NS2501_8_PD;
-       else
-               ch &= ~NS2501_8_PD;
-
-       if (ns->reg_8_set == 0 || ns->reg_8_shadow != ch) {
-               ns->reg_8_set = 1;
-               ns->reg_8_shadow = ch;
-
-               do {
-                       ok = true;
-                       ok &= ns2501_writeb(dvo, NS2501_REG8, ch);
-                       ok &=
-                           ns2501_writeb(dvo, 0x34,
-                                         enable ? 0x03 : 0x00);
-                       ok &=
-                           ns2501_writeb(dvo, 0x35,
-                                         enable ? 0xff : 0x00);
-               } while (!ok && retries--);
-       }
-}
+               ns2501_writeb(dvo, 0xc0, ns->regs[85].value | 0x08);
 
-static void ns2501_dump_regs(struct intel_dvo_device *dvo)
-{
-       uint8_t val;
-
-       ns2501_readb(dvo, NS2501_FREQ_LO, &val);
-       DRM_DEBUG_KMS("NS2501_FREQ_LO: 0x%02x\n", val);
-       ns2501_readb(dvo, NS2501_FREQ_HI, &val);
-       DRM_DEBUG_KMS("NS2501_FREQ_HI: 0x%02x\n", val);
-       ns2501_readb(dvo, NS2501_REG8, &val);
-       DRM_DEBUG_KMS("NS2501_REG8: 0x%02x\n", val);
-       ns2501_readb(dvo, NS2501_REG9, &val);
-       DRM_DEBUG_KMS("NS2501_REG9: 0x%02x\n", val);
-       ns2501_readb(dvo, NS2501_REGC, &val);
-       DRM_DEBUG_KMS("NS2501_REGC: 0x%02x\n", val);
+               ns2501_writeb(dvo, 0x41, ns->regs[84].value);
+
+               ns2501_writeb(dvo, 0x34, 0x01);
+               msleep(15);
+
+               ns2501_writeb(dvo, 0x08, 0x35);
+               if (!(ns->regs[83].value & NS2501_8_BPAS))
+                       ns2501_writeb(dvo, 0x08, 0x31);
+               msleep(200);
+
+               ns2501_writeb(dvo, 0x34, 0x03);
+
+               ns2501_writeb(dvo, 0xc0, ns->regs[85].value);
+       } else {
+               ns2501_writeb(dvo, 0x34, 0x01);
+               msleep(200);
+
+               ns2501_writeb(dvo, 0x08, 0x34);
+               msleep(15);
+
+               ns2501_writeb(dvo, 0x34, 0x00);
+       }
 }
 
 static void ns2501_destroy(struct intel_dvo_device *dvo)
@@ -521,6 +636,5 @@ struct intel_dvo_dev_ops ns2501_ops = {
        .mode_set = ns2501_mode_set,
        .dpms = ns2501_dpms,
        .get_hw_state = ns2501_get_hw_state,
-       .dump_regs = ns2501_dump_regs,
        .destroy = ns2501_destroy,
 };
index 6351e49..b2498e2 100644 (file)
@@ -849,8 +849,6 @@ finish:
  */
 bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
        if (!ring->needs_cmd_parser)
                return false;
 
@@ -859,7 +857,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
         * disabled. That will cause all of the parser's PPGTT checks to
         * fail. For now, disable parsing when PPGTT is off.
         */
-       if (!dev_priv->mm.aliasing_ppgtt)
+       if (!USES_PPGTT(ring->dev))
                return false;
 
        return (i915.enable_cmd_parser == 1);
index 4f5666e..dcd4de4 100644 (file)
  *
  */
 
+#include <linux/async.h>
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+#include <drm/drm_legacy.h>
 #include "i915_drv.h"
 #include "intel_drv.h"
 #include "intel_ringbuffer.h"
@@ -171,7 +173,7 @@ static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       dev_priv->sarea = drm_getsarea(dev);
+       dev_priv->sarea = drm_legacy_getsarea(dev);
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
                i915_dma_cleanup(dev);
@@ -986,7 +988,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
                value = HAS_WT(dev);
                break;
        case I915_PARAM_HAS_ALIASING_PPGTT:
-               value = dev_priv->mm.aliasing_ppgtt || USES_FULL_PPGTT(dev);
+               value = USES_PPGTT(dev);
                break;
        case I915_PARAM_HAS_WAIT_TIMEOUT:
                value = 1;
@@ -1349,10 +1351,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_irq;
 
-#if 0
-       INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
-#endif
-
        intel_modeset_gem_init(dev);
 
        /* Always safe in the mode setting case. */
@@ -1379,7 +1377,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
         * scanning against hotplug events. Hence do this first and ignore the
         * tiny window where we will lose hotplug notifications.
         */
-       intel_fbdev_initial_config(dev);
+       async_schedule(intel_fbdev_initial_config, dev_priv);
 
        drm_kms_helper_poll_init(dev);
 
@@ -1390,7 +1388,6 @@ cleanup_gem:
        i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
-       WARN_ON(dev_priv->mm.aliasing_ppgtt);
 cleanup_irq:
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1539,10 +1536,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
        info = (struct intel_device_info *)&dev_priv->info;
 
        if (IS_VALLEYVIEW(dev))
-               for_each_pipe(pipe)
+               for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 2;
        else
-               for_each_pipe(pipe)
+               for_each_pipe(dev_priv, pipe)
                        info->num_sprites[pipe] = 1;
 
        if (i915.disable_display) {
@@ -1615,9 +1612,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev->dev_private = dev_priv;
        dev_priv->dev = dev;
 
-       /* copy initial configuration to dev_priv->info */
+       /* Set up the write-once "constant" device info */
        device_info = (struct intel_device_info *)&dev_priv->info;
-       *device_info = *info;
+       memcpy(device_info, info, sizeof(dev_priv->info));
+       device_info->device_id = dev->pdev->device;
 
        lockinit(&dev_priv->irq_lock, "userirq", 0, LK_CANRECURSE);
        lockinit(&dev_priv->gpu_error.lock, "915err", 0, LK_CANRECURSE);
@@ -1671,7 +1669,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);
 
-       ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+       ret = drm_legacy_addmap(dev, base, size, _DRM_REGISTERS,
            _DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map);
 #endif
 
@@ -1685,15 +1683,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
                goto out_regs;
 
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = i915_kick_out_vgacon(dev_priv);
+               /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+                * otherwise the vga fbdev driver falls over. */
+               ret = i915_kick_out_firmware_fb(dev_priv);
                if (ret) {
-                       DRM_ERROR("failed to remove conflicting VGA console\n");
+                       DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
                        goto out_gtt;
                }
 
-               ret = i915_kick_out_firmware_fb(dev_priv);
+               ret = i915_kick_out_vgacon(dev_priv);
                if (ret) {
-                       DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+                       DRM_ERROR("failed to remove conflicting VGA console\n");
                        goto out_gtt;
                }
        }
@@ -1842,7 +1842,7 @@ out_mtrrfree:
        io_mapping_free(dev_priv->gtt.mappable);
 #endif
 out_gtt:
-       dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+       i915_global_gtt_cleanup(dev);
 out_regs:
        intel_uncore_fini(dev);
 free_priv:
@@ -1888,9 +1888,6 @@ int i915_driver_unload(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);
-#if 0
-               cancel_work_sync(&dev_priv->console_resume_work);
-#endif
 
                /*
                 * free the memory space allocated for the child device
@@ -1920,7 +1917,6 @@ int i915_driver_unload(struct drm_device *dev)
                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                i915_gem_context_fini(dev);
-               WARN_ON(dev_priv->mm.aliasing_ppgtt);
                mutex_unlock(&dev->struct_mutex);
 #if 0
                i915_gem_cleanup_stolen(dev);
@@ -1930,21 +1926,19 @@ int i915_driver_unload(struct drm_device *dev)
                        i915_free_hws(dev);
        }
 
-       WARN_ON(!list_empty(&dev_priv->vm_list));
-
        drm_vblank_cleanup(dev);
 
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
 
        bus_generic_detach(dev->dev);
-       drm_rmmap(dev, dev_priv->mmio_map);
+       drm_legacy_rmmap(dev, dev_priv->mmio_map);
 
        destroy_workqueue(dev_priv->dp_wq);
        destroy_workqueue(dev_priv->wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
 
-       dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
+       i915_global_gtt_cleanup(dev);
 
        intel_uncore_fini(dev);
 #if 0
@@ -2010,6 +2004,9 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
+
+       if (drm_core_check_feature(dev, DRIVER_MODESET))
+               intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -2041,34 +2038,34 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 #if 0
index 712b339..16a41b9 100644 (file)
@@ -483,6 +483,10 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
        if (i915.semaphores >= 0)
                return i915.semaphores;
 
+       /* TODO: make semaphores and Execlists play nicely together */
+       if (i915.enable_execlists)
+               return false;
+
        /* Until we get further testing... */
        if (IS_GEN8(dev))
                return false;
@@ -526,6 +530,13 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
        drm_modeset_unlock_all(dev);
 }
 
+
+#if 0
+static int intel_suspend_complete(struct drm_i915_private *dev_priv);
+static int intel_resume_prepare(struct drm_i915_private *dev_priv,
+                               bool rpm_resume);
+#endif
+
 static int i915_drm_freeze(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -643,30 +654,20 @@ int i915_suspend(device_t kdev)
 }
 
 #if 0
-void intel_console_resume(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private,
-                            console_resume_work);
-       struct drm_device *dev = dev_priv->dev;
-
-       console_lock();
-       intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
-       console_unlock();
-}
-
 static int i915_drm_thaw_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
 
-       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               hsw_disable_pc8(dev_priv);
+       ret = intel_resume_prepare(dev_priv, false);
+       if (ret)
+               DRM_ERROR("Resume prepare failed: %d,Continuing resume\n", ret);
 
        intel_uncore_early_sanitize(dev, true);
        intel_uncore_sanitize(dev);
        intel_power_domains_init_hw(dev_priv);
 
-       return 0;
+       return ret;
 }
 #endif
 
@@ -719,19 +720,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
 
        intel_opregion_init(dev);
 
-       /*
-        * The console lock can be pretty contented on resume due
-        * to all the printk activity.  Try to keep it out of the hot
-        * path of resume if possible.
-        */
-#if 0
-       if (console_trylock()) {
-               intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
-               console_unlock();
-       } else {
-               schedule_work(&dev_priv->console_resume_work);
-       }
-#endif
+       intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
 
        mutex_lock(&dev_priv->modeset_restore_lock);
        dev_priv->modeset_restore = MODESET_DONE;
@@ -861,7 +850,13 @@ int i915_reset(struct drm_device *dev)
                        !dev_priv->ums.mm_suspended) {
                dev_priv->ums.mm_suspended = 0;
 
+               /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+               dev_priv->gpu_error.reload_in_reset = true;
+
                ret = i915_gem_init_hw(dev);
+
+               dev_priv->gpu_error.reload_in_reset = false;
+
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        DRM_ERROR("Failed hw init on reset %d\n", ret);
@@ -882,8 +877,6 @@ int i915_reset(struct drm_device *dev)
                 */
                if (INTEL_INFO(dev)->gen > 5)
                        intel_reset_gt_powersave(dev);
-
-               intel_hpd_init(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }
@@ -943,6 +936,7 @@ static int i915_pm_suspend_late(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct drm_device *drm_dev = pci_get_drvdata(pdev);
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
+       int ret;
 
        /*
         * We have a suspend ordering issue with the snd-hda driver also
@@ -956,13 +950,16 @@ static int i915_pm_suspend_late(struct device *dev)
        if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
-               hsw_enable_pc8(dev_priv);
+       ret = intel_suspend_complete(dev_priv);
 
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
+       if (ret)
+               DRM_ERROR("Suspend complete failed: %d\n", ret);
+       else {
+               pci_disable_device(pdev);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
 
-       return 0;
+       return ret;
 }
 
 static int i915_pm_resume(struct device *dev)
@@ -987,6 +984,15 @@ static int i915_pm_freeze(struct device *dev)
        return i915_drm_freeze(drm_dev);
 }
 
+static int i915_pm_freeze_late(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct drm_device *drm_dev = pci_get_drvdata(pdev);
+       struct drm_i915_private *dev_priv = drm_dev->dev_private;
+
+       return intel_suspend_complete(dev_priv);
+}
+
 static int i915_pm_thaw(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -1003,23 +1009,26 @@ static int i915_pm_poweroff(struct device *dev)
        return i915_drm_freeze(drm_dev);
 }
 
-static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
+static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
 {
        hsw_enable_pc8(dev_priv);
 
        return 0;
 }
 
-static int snb_runtime_resume(struct drm_i915_private *dev_priv)
+static int snb_resume_prepare(struct drm_i915_private *dev_priv,
+                               bool rpm_resume)
 {
        struct drm_device *dev = dev_priv->dev;
 
-       intel_init_pch_refclk(dev);
+       if (rpm_resume)
+               intel_init_pch_refclk(dev);
 
        return 0;
 }
 
-static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
+static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
+                               bool rpm_resume)
 {
        hsw_disable_pc8(dev_priv);
 
@@ -1317,7 +1326,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
 
-static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
+static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
 {
        u32 mask;
        int err;
@@ -1357,7 +1366,8 @@ err1:
        return err;
 }
 
-static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
+static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
+                               bool rpm_resume)
 {
        struct drm_device *dev = dev_priv->dev;
        int err;
@@ -1382,8 +1392,10 @@ static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
 
        vlv_check_no_gt_access(dev_priv);
 
-       intel_init_clock_gating(dev);
-       i915_gem_restore_fences(dev);
+       if (rpm_resume) {
+               intel_init_clock_gating(dev);
+               i915_gem_restore_fences(dev);
+       }
 
        return ret;
 }
@@ -1398,7 +1410,9 @@ static int intel_runtime_suspend(struct device *device)
        if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
                return -ENODEV;
 
-       WARN_ON(!HAS_RUNTIME_PM(dev));
+       if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+               return -ENODEV;
+
        assert_force_wake_inactive(dev_priv);
 
        DRM_DEBUG_KMS("Suspending device\n");
@@ -1435,17 +1449,7 @@ static int intel_runtime_suspend(struct device *device)
        cancel_work_sync(&dev_priv->rps.work);
        intel_runtime_pm_disable_interrupts(dev);
 
-       if (IS_GEN6(dev)) {
-               ret = 0;
-       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-               ret = hsw_runtime_suspend(dev_priv);
-       } else if (IS_VALLEYVIEW(dev)) {
-               ret = vlv_runtime_suspend(dev_priv);
-       } else {
-               ret = -ENODEV;
-               WARN_ON(1);
-       }
-
+       ret = intel_suspend_complete(dev_priv);
        if (ret) {
                DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
                intel_runtime_pm_restore_interrupts(dev);
@@ -1457,13 +1461,29 @@ static int intel_runtime_suspend(struct device *device)
        dev_priv->pm.suspended = true;
 
        /*
-        * current versions of firmware which depend on this opregion
-        * notification have repurposed the D1 definition to mean
-        * "runtime suspended" vs. what you would normally expect (D3)
-        * to distinguish it from notifications that might be sent
-        * via the suspend path.
+        * FIXME: We really should find a document that references the arguments
+        * used below!
         */
-       intel_opregion_notify_adapter(dev, PCI_D1);
+       if (IS_HASWELL(dev)) {
+               /*
+                * current versions of firmware which depend on this opregion
+                * notification have repurposed the D1 definition to mean
+                * "runtime suspended" vs. what you would normally expect (D3)
+                * to distinguish it from notifications that might be sent via
+                * the suspend path.
+                */
+               intel_opregion_notify_adapter(dev, PCI_D1);
+       } else {
+               /*
+                * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+                * being detected, and the call we do at intel_runtime_resume()
+                * won't be able to restore them. Since PCI_D3hot matches the
+                * actual specification and appears to be working, use it. Let's
+                * assume the other non-Haswell platforms will stay the same as
+                * Broadwell.
+                */
+               intel_opregion_notify_adapter(dev, PCI_D3hot);
+       }
 
        DRM_DEBUG_KMS("Device suspended\n");
        return 0;
@@ -1476,24 +1496,15 @@ static int intel_runtime_resume(struct device *device)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       WARN_ON(!HAS_RUNTIME_PM(dev));
+       if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
+               return -ENODEV;
 
        DRM_DEBUG_KMS("Resuming device\n");
 
        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;
 
-       if (IS_GEN6(dev)) {
-               ret = snb_runtime_resume(dev_priv);
-       } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
-               ret = hsw_runtime_resume(dev_priv);
-       } else if (IS_VALLEYVIEW(dev)) {
-               ret = vlv_runtime_resume(dev_priv);
-       } else {
-               WARN_ON(1);
-               ret = -ENODEV;
-       }
-
+       ret = intel_resume_prepare(dev_priv, true);
        /*
         * No point in rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
@@ -1512,6 +1523,48 @@ static int intel_runtime_resume(struct device *device)
        return ret;
 }
 
+/*
+ * This function implements the functionality common to the runtime and
+ * system suspend sequences.
+ */
+static int intel_suspend_complete(struct drm_i915_private *dev_priv)
+{
+       struct drm_device *dev = dev_priv->dev;
+       int ret;
+
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               ret = hsw_suspend_complete(dev_priv);
+       else if (IS_VALLEYVIEW(dev))
+               ret = vlv_suspend_complete(dev_priv);
+       else
+               ret = 0;
+
+       return ret;
+}
+
+/*
+ * This function implements functionality common to the runtime and
+ * system resume sequences. The rpm_resume argument selects between
+ * the two code paths.
+ */
+static int intel_resume_prepare(struct drm_i915_private *dev_priv,
+                               bool rpm_resume)
+{
+       struct drm_device *dev = dev_priv->dev;
+       int ret;
+
+       if (IS_GEN6(dev))
+               ret = snb_resume_prepare(dev_priv, rpm_resume);
+       else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               ret = hsw_resume_prepare(dev_priv, rpm_resume);
+       else if (IS_VALLEYVIEW(dev))
+               ret = vlv_resume_prepare(dev_priv, rpm_resume);
+       else
+               ret = 0;
+
+       return ret;
+}
+
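A note on the split: intel_runtime_resume() above calls intel_resume_prepare(dev_priv, true), while the system resume path is expected to pass false. A minimal illustration of the two callers (the wrapper names here are hypothetical, not the driver's actual entry points):

    /* Hypothetical wrappers, for illustration only. */
    static int runtime_resume_example(struct drm_i915_private *dev_priv)
    {
            /* rpm_resume = true: include the runtime-PM-only steps. */
            return intel_resume_prepare(dev_priv, true);
    }

    static int system_resume_example(struct drm_i915_private *dev_priv)
    {
            /* rpm_resume = false: skip work only runtime resume needs. */
            return intel_resume_prepare(dev_priv, false);
    }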
 static const struct dev_pm_ops i915_pm_ops = {
        .suspend = i915_pm_suspend,
        .resume = i915_pm_resume,
index 2fff216..976829c 100644 (file)
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
+#include "intel_lrc.h"
 #include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
 #include <linux/backlight.h>
 #include <linux/hashtable.h>
 #include <linux/kref.h>
 /* General customization:
  */
 
-#define DRIVER_AUTHOR          "Tungsten Graphics, Inc."
-
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20140725"
+#define DRIVER_DATE            "20140905"
 
 enum i915_pipe {
        INVALID_PIPE = -1,
@@ -165,7 +167,10 @@ enum hpd_pin {
         I915_GEM_DOMAIN_INSTRUCTION | \
         I915_GEM_DOMAIN_VERTEX)
 
-#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
+#define for_each_pipe(__dev_priv, __p) \
+       for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_plane(pipe, p) \
+       for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
 #define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
 
 #define for_each_crtc(dev, crtc) \
@@ -174,6 +179,11 @@ enum hpd_pin {
 #define for_each_intel_crtc(dev, intel_crtc) \
        list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
+#define for_each_intel_encoder(dev, intel_encoder)             \
+       list_for_each_entry(intel_encoder,                      \
+                           &(dev)->mode_config.encoder_list,   \
+                           base.head)
+
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
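for_each_intel_encoder() replaces the open-coded list walk used throughout the driver. A sketch of a call site before and after (the hot_plug invocation is illustrative):

    struct intel_encoder *encoder;

    /* Before: open-coded iteration over the encoder list. */
    list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
            if (encoder->hot_plug)
                    encoder->hot_plug(encoder);

    /* After: the same loop through the new helper. */
    for_each_intel_encoder(dev, encoder)
            if (encoder->hot_plug)
                    encoder->hot_plug(encoder);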
@@ -200,10 +210,13 @@ enum intel_dpll_id {
 #define I915_NUM_PLLS 2
 
 struct intel_dpll_hw_state {
+       /* i9xx, pch plls */
        uint32_t dpll;
        uint32_t dpll_md;
        uint32_t fp0;
        uint32_t fp1;
+
+       /* hsw, bdw */
        uint32_t wrpll;
 };
 
@@ -280,7 +293,7 @@ struct intel_overlay;
 struct intel_overlay_error_state;
 
 struct drm_i915_master_private {
-       drm_local_map_t *sarea;
+       struct drm_local_map *sarea;
        struct _drm_i915_sarea *sarea_priv;
 };
 #define I915_FENCE_REG_NONE -1
@@ -390,6 +403,7 @@ struct drm_i915_error_state {
                pid_t pid;
                char comm[TASK_COMM_LEN];
        } ring[I915_NUM_RINGS];
+
        struct drm_i915_error_buffer {
                u32 size;
                u32 name;
@@ -408,6 +422,7 @@ struct drm_i915_error_state {
        } **active_bo, **pinned_bo;
 
        u32 *active_bo_count, *pinned_bo_count;
+       u32 vm_count;
 };
 
 struct intel_connector;
@@ -553,6 +568,7 @@ struct intel_uncore {
 
 struct intel_device_info {
        u32 display_mmio_offset;
+       u16 device_id;
        u8 num_pipes:3;
        u8 num_sprites[I915_MAX_PIPES];
        u8 gen;
@@ -617,13 +633,21 @@ struct intel_context {
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
        struct i915_ctx_hang_stats hang_stats;
-       struct i915_address_space *vm;
+       struct i915_hw_ppgtt *ppgtt;
 
+       /* Legacy ring buffer submission */
        struct {
                struct drm_i915_gem_object *rcs_state;
                bool initialized;
        } legacy_hw_ctx;
 
+       /* Execlists */
+       bool rcs_initialized;
+       struct {
+               struct drm_i915_gem_object *state;
+               struct intel_ringbuffer *ringbuf;
+       } engine[I915_NUM_RINGS];
+
        struct list_head link;
 };
 
@@ -637,6 +661,8 @@ struct i915_fbc {
        struct drm_mm_node compressed_fb;
        struct drm_mm_node *compressed_llb;
 
+       bool false_color;
+
        struct intel_fbc_work {
                struct delayed_work work;
                struct drm_crtc *crtc;
@@ -690,6 +716,7 @@ enum intel_sbi_destination {
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
+#define QUIRK_PIPEB_FORCE (1<<4)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -1155,6 +1182,7 @@ struct i915_gem_mm {
 };
 
 struct drm_i915_error_state_buf {
+       struct drm_i915_private *i915;
        unsigned bytes;
        unsigned size;
        int err;
@@ -1227,6 +1255,9 @@ struct i915_gpu_error {
 
        /* For missed irq/seqno simulation. */
        unsigned int test_irq_rings;
+
+       /* Used to prevent i915_gem_check_wedge() returning -EAGAIN during gpu reset */
+       bool reload_in_reset;
 };
 
 enum modeset_restore {
@@ -1236,6 +1267,12 @@ enum modeset_restore {
 };
 
 struct ddi_vbt_port_info {
+       /*
+        * This is an index in the HDMI/DVI DDI buffer translation table.
+        * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
+        * populate this field.
+        */
+#define HDMI_LEVEL_SHIFT_UNKNOWN       0xff
        uint8_t hdmi_level_shift;
 
        uint8_t supports_dvi:1;
@@ -1465,7 +1502,6 @@ struct drm_i915_private {
        u32 pipestat_irq_mask[I915_MAX_PIPES];
 
        struct work_struct hotplug_work;
-       bool enable_hotplug_processing;
        struct {
                unsigned long hpd_last_jiffies;
                int hpd_cnt;
@@ -1492,6 +1528,9 @@ struct drm_i915_private {
        /* LVDS info */
        bool no_aux_handshake;
 
+       /* protects panel power sequencer state */
+       struct lock pps_mutex;
+
        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -1544,6 +1583,20 @@ struct drm_i915_private {
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
+       /*
+        * Workarounds are currently applied at different places, and work is
+        * ongoing to consolidate them, so the exact count is not clear at
+        * this point; use a max value for now.
+        */
+#define I915_MAX_WA_REGS  16
+       struct {
+               u32 addr;
+               u32 value;
+               /* bitmask representing WA bits */
+               u32 mask;
+       } intel_wa_regs[I915_MAX_WA_REGS];
+       u32 num_wa_regs;
+
        /* Reclocking support */
        bool render_reclock_avail;
        bool lvds_downclock_avail;
@@ -1579,14 +1632,9 @@ struct drm_i915_private {
 #ifdef CONFIG_DRM_I915_FBDEV
        /* list of fbdev register on this device */
        struct intel_fbdev *fbdev;
+       struct work_struct fbdev_suspend_work;
 #endif
 
-       /*
-        * The console may be contended at resume, but we don't
-        * want it to block on it.
-        */
-       struct work_struct console_resume_work;
-
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
 
@@ -1632,12 +1680,28 @@ struct drm_i915_private {
         */
        struct workqueue_struct *dp_wq;
 
+       uint32_t bios_vgacntr;
+
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
        /* Old ums support infrastructure, same warning applies. */
        struct i915_ums_state ums;
 
+       /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
+       struct {
+               int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
+                                 struct intel_engine_cs *ring,
+                                 struct intel_context *ctx,
+                                 struct drm_i915_gem_execbuffer2 *args,
+                                 struct list_head *vmas,
+                                 struct drm_i915_gem_object *batch_obj,
+                                 u64 exec_start, u32 flags);
+               int (*init_rings)(struct drm_device *dev);
+               void (*cleanup_ring)(struct intel_engine_cs *ring);
+               void (*stop_ring)(struct intel_engine_cs *ring);
+       } gt;
+
        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
@@ -1779,13 +1843,6 @@ struct drm_i915_gem_object {
         * Only honoured if hardware has relevant pte bit
         */
        unsigned long gt_ro:1;
-
-       /*
-        * Is the GPU currently using a fence to access this buffer,
-        */
-       unsigned int pending_fenced_gpu_access:1;
-       unsigned int fenced_gpu_access:1;
-
        unsigned int cache_level:3;
 
        unsigned int has_aliasing_ppgtt_mapping:1;
@@ -1823,7 +1880,7 @@ struct drm_i915_gem_object {
        struct drm_file *pin_filp;
 
        /** for phy allocated objects */
-       drm_dma_handle_t *phys_handle;
+       struct drm_dma_handle *phys_handle;
 
        union {
                struct i915_gem_userptr {
@@ -1989,51 +2046,62 @@ struct drm_i915_cmd_table {
        int count;
 };
 
-#define INTEL_INFO(dev)        (&to_i915(dev)->info)
+/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
+#define __I915__(p) ({ \
+       const struct drm_i915_private *__p; \
+       if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
+               __p = (const struct drm_i915_private *)p; \
+       else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
+               __p = to_i915((const struct drm_device *)p); \
+       __p; \
+})
+
+#define INTEL_INFO(p)  (&__I915__(p)->info)
+#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
 
-#define IS_I830(dev)           ((dev)->pdev->device == 0x3577)
-#define IS_845G(dev)           ((dev)->pdev->device == 0x2562)
+#define IS_I830(dev)           (INTEL_DEVID(dev) == 0x3577)
+#define IS_845G(dev)           (INTEL_DEVID(dev) == 0x2562)
 #define IS_I85X(dev)           (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)          ((dev)->pdev->device == 0x2572)
+#define IS_I865G(dev)          (INTEL_DEVID(dev) == 0x2572)
 #define IS_I915G(dev)          (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)         ((dev)->pdev->device == 0x2592)
-#define IS_I945G(dev)          ((dev)->pdev->device == 0x2772)
+#define IS_I915GM(dev)         (INTEL_DEVID(dev) == 0x2592)
+#define IS_I945G(dev)          (INTEL_DEVID(dev) == 0x2772)
 #define IS_I945GM(dev)         (INTEL_INFO(dev)->is_i945gm)
 #define IS_BROADWATER(dev)     (INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)      (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)           ((dev)->pdev->device == 0x2A42)
+#define IS_GM45(dev)           (INTEL_DEVID(dev) == 0x2A42)
 #define IS_G4X(dev)            (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)     ((dev)->pdev->device == 0xa001)
-#define IS_PINEVIEW_M(dev)     ((dev)->pdev->device == 0xa011)
+#define IS_PINEVIEW_G(dev)     (INTEL_DEVID(dev) == 0xa001)
+#define IS_PINEVIEW_M(dev)     (INTEL_DEVID(dev) == 0xa011)
 #define IS_PINEVIEW(dev)       (INTEL_INFO(dev)->is_pineview)
 #define IS_G33(dev)            (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_M(dev)     ((dev)->pdev->device == 0x0046)
+#define IS_IRONLAKE_M(dev)     (INTEL_DEVID(dev) == 0x0046)
 #define IS_IVYBRIDGE(dev)      (INTEL_INFO(dev)->is_ivybridge)
-#define IS_IVB_GT1(dev)                ((dev)->pdev->device == 0x0156 || \
-                                (dev)->pdev->device == 0x0152 || \
-                                (dev)->pdev->device == 0x015a)
-#define IS_SNB_GT1(dev)                ((dev)->pdev->device == 0x0102 || \
-                                (dev)->pdev->device == 0x0106 || \
-                                (dev)->pdev->device == 0x010A)
+#define IS_IVB_GT1(dev)                (INTEL_DEVID(dev) == 0x0156 || \
+                                INTEL_DEVID(dev) == 0x0152 || \
+                                INTEL_DEVID(dev) == 0x015a)
+#define IS_SNB_GT1(dev)                (INTEL_DEVID(dev) == 0x0102 || \
+                                INTEL_DEVID(dev) == 0x0106 || \
+                                INTEL_DEVID(dev) == 0x010A)
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_CHERRYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
 #define IS_BROADWELL(dev)      (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_MOBILE(dev)         (INTEL_INFO(dev)->is_mobile)
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
-                                ((dev)->pdev->device & 0xFF00) == 0x0C00)
+                                (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
-                                (((dev)->pdev->device & 0xf) == 0x2  || \
-                                ((dev)->pdev->device & 0xf) == 0x6 || \
-                                ((dev)->pdev->device & 0xf) == 0xe))
+                                ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
+                                (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+                                (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_HSW_ULT(dev)                (IS_HASWELL(dev) && \
-                                ((dev)->pdev->device & 0xFF00) == 0x0A00)
+                                (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
 #define IS_ULT(dev)            (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
 #define IS_HSW_GT3(dev)                (IS_HASWELL(dev) && \
-                                ((dev)->pdev->device & 0x00F0) == 0x0020)
+                                (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 /* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev)                ((dev)->pdev->device == 0x0A0E || \
-                                (dev)->pdev->device == 0x0A1E)
+#define IS_HSW_ULX(dev)                (INTEL_DEVID(dev) == 0x0A0E || \
+                                INTEL_DEVID(dev) == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
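Because __I915__() dispatches on the pointee type, every macro layered on INTEL_INFO()/INTEL_DEVID() now accepts either a struct drm_device * or a struct drm_i915_private *. A small sketch (the helper function is hypothetical):

    static bool is_haswell_example(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;

            /* Both calls expand to the same INTEL_INFO() lookup. */
            return IS_HASWELL(dev) && IS_HASWELL(dev_priv);
    }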
 
 /*
@@ -2065,10 +2133,11 @@ struct drm_i915_cmd_table {
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_HW_CONTEXTS(dev)   (INTEL_INFO(dev)->gen >= 6)
+#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
 #define HAS_ALIASING_PPGTT(dev)        (INTEL_INFO(dev)->gen >= 6)
 #define HAS_PPGTT(dev)         (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
-#define USES_PPGTT(dev)                intel_enable_ppgtt(dev, false)
-#define USES_FULL_PPGTT(dev)   intel_enable_ppgtt(dev, true)
+#define USES_PPGTT(dev)                (i915.enable_ppgtt)
+#define USES_FULL_PPGTT(dev)   (i915.enable_ppgtt == 2)
 
 #define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
@@ -2152,6 +2221,7 @@ struct i915_params {
        int enable_rc6;
        int enable_fbc;
        int enable_ppgtt;
+       int enable_execlists;
        int enable_psr;
        unsigned int preliminary_hw_support;
        int disable_power_well;
@@ -2198,8 +2268,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 
-extern void intel_console_resume(struct work_struct *work);
-
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2246,6 +2314,20 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
+void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+                                       struct intel_engine_cs *ring);
+void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+                                        struct drm_file *file,
+                                        struct intel_engine_cs *ring,
+                                        struct drm_i915_gem_object *obj);
+int i915_gem_ringbuffer_submission(struct drm_device *dev,
+                                  struct drm_file *file,
+                                  struct intel_engine_cs *ring,
+                                  struct intel_context *ctx,
+                                  struct drm_i915_gem_execbuffer2 *args,
+                                  struct list_head *vmas,
+                                  struct drm_i915_gem_object *batch_obj,
+                                  u64 exec_start, u32 flags);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2280,6 +2362,12 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+                             long target,
+                             unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
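The flags select which object lists i915_gem_shrink() walks and whether reclaim is restricted to purgeable objects. The shrinker call sites later in this patch combine them like this (nr_pages stands in for the caller's target):

    unsigned long freed;

    /* First pass: purgeable objects only, bound or unbound. */
    freed = i915_gem_shrink(dev_priv, nr_pages,
                            I915_SHRINK_BOUND |
                            I915_SHRINK_UNBOUND |
                            I915_SHRINK_PURGEABLE);

    /* Second pass: reclaim anything at all if that was not enough. */
    if (freed < nr_pages)
            freed += i915_gem_shrink(dev_priv, nr_pages - freed,
                                     I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);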
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2393,6 +2481,7 @@ void i915_gem_reset(struct drm_device *dev);
 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
+int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
@@ -2465,7 +2554,7 @@ static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
 }
 
 /* Some GGTT VM helpers */
-#define obj_to_ggtt(obj) \
+#define i915_obj_to_ggtt(obj) \
        (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
 static inline bool i915_is_ggtt(struct i915_address_space *vm)
 {
@@ -2474,21 +2563,30 @@ static inline bool i915_is_ggtt(struct i915_address_space *vm)
        return vm == ggtt;
 }
 
+static inline struct i915_hw_ppgtt *
+i915_vm_to_ppgtt(struct i915_address_space *vm)
+{
+       WARN_ON(i915_is_ggtt(vm));
+
+       return container_of(vm, struct i915_hw_ppgtt, base);
+}
+
+
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+       return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
 }
 
 static inline unsigned long
 i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+       return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
 }
 
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+       return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
 }
 
 static inline int __must_check
@@ -2496,7 +2594,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
                      uint32_t alignment,
                      unsigned flags)
 {
-       return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+       return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+                                  alignment, flags | PIN_GLOBAL);
 }
 
 static inline int
@@ -2508,7 +2607,6 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
-#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
@@ -2520,6 +2618,8 @@ int i915_switch_context(struct intel_engine_cs *ring,
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
+struct drm_i915_gem_object *
+i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
 static inline void i915_gem_context_reference(struct intel_context *ctx)
 {
        kref_get(&ctx->ref);
@@ -2540,8 +2640,6 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);
 
-/* i915_gem_render_state.c */
-int i915_gem_render_state_init(struct intel_engine_cs *ring);
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
                                          struct i915_address_space *vm,
@@ -2609,6 +2707,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
 int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
                            const struct i915_error_state_file_priv *error);
 int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+                             struct drm_i915_private *i915,
                              size_t count, loff_t pos);
 static inline void i915_error_state_buf_release(
        struct drm_i915_error_state_buf *eb)
@@ -2623,7 +2722,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
 void i915_destroy_error_state(struct drm_device *dev);
 
 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
-const char *i915_cache_level_str(int type);
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(void);
@@ -2717,6 +2816,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
+extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
index 244d455..473be59 100644 (file)
@@ -80,7 +80,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);
 
-static unsigned long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
 static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1092,7 +1091,13 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
                if (i915_terminally_wedged(error))
                        return -EIO;
 
-               return -EAGAIN;
+               /*
+                * Check if GPU Reset is in progress - we need intel_ring_begin
+                * to work properly to reinit the hw state while the gpu is
+                * still marked as reset-in-progress. Handle this with a flag.
+                */
+               if (!error->reload_in_reset)
+                       return -EAGAIN;
        }
 
        return 0;
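For this to work, the reset path must raise the flag around hardware re-initialisation so that intel_ring_begin() can proceed while reset is still marked in progress; a sketch of the expected usage, assuming the i915_reset() sequence from upstream Linux 3.18:

    /* Inside the GPU reset path, around ring re-initialisation. */
    dev_priv->gpu_error.reload_in_reset = true;
    ret = i915_gem_init_hw(dev);    /* intel_ring_begin() no longer -EAGAINs */
    dev_priv->gpu_error.reload_in_reset = false;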
@@ -1607,7 +1612,10 @@ retry:
         */
        if (*mres != NULL) {
                oldm = *mres;
-               vm_page_remove(oldm);
+               if ((oldm->flags & PG_BUSY) == 0)
+                       kprintf("i915_gem_fault: Page was not busy\n");
+               else
+                       vm_page_remove(oldm);
                *mres = NULL;
        } else {
                oldm = NULL;
@@ -1897,7 +1905,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
         * offsets on purgeable objects by truncating it and marking it purged,
         * which prevents userspace from ever using that object again.
         */
-       i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+       i915_gem_shrink(dev_priv,
+                       obj->base.size >> PAGE_SHIFT,
+                       I915_SHRINK_BOUND |
+                       I915_SHRINK_UNBOUND |
+                       I915_SHRINK_PURGEABLE);
        ret = drm_gem_create_mmap_offset(&obj->base);
        if (ret != -ENOSPC)
                goto out;
@@ -2100,12 +2112,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static unsigned long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
-                 bool purgeable_only)
+unsigned long
+i915_gem_shrink(struct drm_i915_private *dev_priv,
+               long target, unsigned flags)
 {
-       struct list_head still_in_list;
-       struct drm_i915_gem_object *obj;
+       const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
        unsigned long count = 0;
 
        /*
@@ -2127,62 +2138,68 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equals 0.
         */
-       INIT_LIST_HEAD(&still_in_list);
-       while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-               obj = list_first_entry(&dev_priv->mm.unbound_list,
-                                      typeof(*obj), global_list);
-               list_move_tail(&obj->global_list, &still_in_list);
+       if (flags & I915_SHRINK_UNBOUND) {
+               struct list_head still_in_list;
 
-               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                       continue;
+               INIT_LIST_HEAD(&still_in_list);
+               while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
+                       struct drm_i915_gem_object *obj;
 
-               drm_gem_object_reference(&obj->base);
+                       obj = list_first_entry(&dev_priv->mm.unbound_list,
+                                              typeof(*obj), global_list);
+                       list_move_tail(&obj->global_list, &still_in_list);
 
-               if (i915_gem_object_put_pages(obj) == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
+                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                               continue;
+
+                       drm_gem_object_reference(&obj->base);
 
-               drm_gem_object_unreference(&obj->base);
+                       if (i915_gem_object_put_pages(obj) == 0)
+                               count += obj->base.size >> PAGE_SHIFT;
+
+                       drm_gem_object_unreference(&obj->base);
+               }
+               list_splice(&still_in_list, &dev_priv->mm.unbound_list);
        }
-       list_splice(&still_in_list, &dev_priv->mm.unbound_list);
 
-       INIT_LIST_HEAD(&still_in_list);
-       while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
-               struct i915_vma *vma, *v;
+       if (flags & I915_SHRINK_BOUND) {
+               struct list_head still_in_list;
 
-               obj = list_first_entry(&dev_priv->mm.bound_list,
-                                      typeof(*obj), global_list);
-               list_move_tail(&obj->global_list, &still_in_list);
+               INIT_LIST_HEAD(&still_in_list);
+               while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+                       struct drm_i915_gem_object *obj;
+                       struct i915_vma *vma, *v;
 
-               if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-                       continue;
+                       obj = list_first_entry(&dev_priv->mm.bound_list,
+                                              typeof(*obj), global_list);
+                       list_move_tail(&obj->global_list, &still_in_list);
 
-               drm_gem_object_reference(&obj->base);
+                       if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+                               continue;
 
-               list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
-                       if (i915_vma_unbind(vma))
-                               break;
+                       drm_gem_object_reference(&obj->base);
 
-               if (i915_gem_object_put_pages(obj) == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
+                       list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+                               if (i915_vma_unbind(vma))
+                                       break;
+
+                       if (i915_gem_object_put_pages(obj) == 0)
+                               count += obj->base.size >> PAGE_SHIFT;
 
-               drm_gem_object_unreference(&obj->base);
+                       drm_gem_object_unreference(&obj->base);
+               }
+               list_splice(&still_in_list, &dev_priv->mm.bound_list);
        }
-       list_splice(&still_in_list, &dev_priv->mm.bound_list);
 
        return count;
 }
 
-static unsigned long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
-{
-       return __i915_gem_shrink(dev_priv, target, true);
-}
-
 static unsigned long
 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
        i915_gem_evict_everything(dev_priv->dev);
-       return __i915_gem_shrink(dev_priv, LONG_MAX, false);
+       return i915_gem_shrink(dev_priv, LONG_MAX,
+                              I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 }
 
 static int
@@ -2214,7 +2231,11 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        for (i = 0; i < page_count; i++) {
                page = shmem_read_mapping_page(vm_obj, i);
                if (IS_ERR(page)) {
-                       i915_gem_purge(dev_priv, page_count);
+                       i915_gem_shrink(dev_priv,
+                                       page_count,
+                                       I915_SHRINK_BOUND |
+                                       I915_SHRINK_UNBOUND |
+                                       I915_SHRINK_PURGEABLE);
                        page = shmem_read_mapping_page(vm_obj, i);
                }
                if (IS_ERR(page)) {
@@ -2297,8 +2318,6 @@ static void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        u32 seqno = intel_ring_get_seqno(ring);
 
        BUG_ON(ring == NULL);
@@ -2317,19 +2336,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
        list_move_tail(&obj->ring_list, &ring->active_list);
 
        obj->last_read_seqno = seqno;
-
-       if (obj->fenced_gpu_access) {
-               obj->last_fenced_seqno = seqno;
-
-               /* Bump MRU to take account of the delayed flush */
-               if (obj->fence_reg != I915_FENCE_REG_NONE) {
-                       struct drm_i915_fence_reg *reg;
-
-                       reg = &dev_priv->fence_regs[obj->fence_reg];
-                       list_move_tail(&reg->lru_list,
-                                      &dev_priv->mm.fence_list);
-               }
-       }
 }
 
 void i915_vma_move_to_active(struct i915_vma *vma,
@@ -2365,7 +2371,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
        obj->base.write_domain = 0;
 
        obj->last_fenced_seqno = 0;
-       obj->fenced_gpu_access = false;
 
        obj->active = 0;
        drm_gem_object_unreference(&obj->base);
@@ -2463,10 +2468,21 @@ int __i915_add_request(struct intel_engine_cs *ring,
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct drm_i915_gem_request *request;
+       struct intel_ringbuffer *ringbuf;
        u32 request_ring_position, request_start;
        int ret;
 
-       request_start = intel_ring_get_tail(ring->buffer);
+       request = ring->preallocated_lazy_request;
+       if (WARN_ON(request == NULL))
+               return -ENOMEM;
+
+       if (i915.enable_execlists) {
+               struct intel_context *ctx = request->ctx;
+               ringbuf = ctx->engine[ring->id].ringbuf;
+       } else
+               ringbuf = ring->buffer;
+
+       request_start = intel_ring_get_tail(ringbuf);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
@@ -2474,24 +2490,32 @@ int __i915_add_request(struct intel_engine_cs *ring,
         * is that the flush _must_ happen before the next request, no matter
         * what.
         */
-       ret = intel_ring_flush_all_caches(ring);
-       if (ret)
-               return ret;
-
-       request = ring->preallocated_lazy_request;
-       if (WARN_ON(request == NULL))
-               return -ENOMEM;
+       if (i915.enable_execlists) {
+               ret = logical_ring_flush_all_caches(ringbuf);
+               if (ret)
+                       return ret;
+       } else {
+               ret = intel_ring_flush_all_caches(ring);
+               if (ret)
+                       return ret;
+       }
 
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request_ring_position = intel_ring_get_tail(ring->buffer);
+       request_ring_position = intel_ring_get_tail(ringbuf);
 
-       ret = ring->add_request(ring);
-       if (ret)
-               return ret;
+       if (i915.enable_execlists) {
+               ret = ring->emit_request(ringbuf);
+               if (ret)
+                       return ret;
+       } else {
+               ret = ring->add_request(ring);
+               if (ret)
+                       return ret;
+       }
 
        request->seqno = intel_ring_get_seqno(ring);
        request->ring = ring;
@@ -2506,12 +2530,14 @@ int __i915_add_request(struct intel_engine_cs *ring,
         */
        request->batch_obj = obj;
 
-       /* Hold a reference to the current context so that we can inspect
-        * it later in case a hangcheck error event fires.
-        */
-       request->ctx = ring->last_context;
-       if (request->ctx)
-               i915_gem_context_reference(request->ctx);
+       if (!i915.enable_execlists) {
+               /* Hold a reference to the current context so that we can inspect
+                * it later in case a hangcheck error event fires.
+                */
+               request->ctx = ring->last_context;
+               if (request->ctx)
+                       i915_gem_context_reference(request->ctx);
+       }
 
        request->emitted_jiffies = jiffies;
        list_add_tail(&request->list, &ring->request_list);
@@ -2682,6 +2708,18 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                i915_gem_free_request(request);
        }
 
+       while (!list_empty(&ring->execlist_queue)) {
+               struct intel_ctx_submit_request *submit_req;
+
+               submit_req = list_first_entry(&ring->execlist_queue,
+                               struct intel_ctx_submit_request,
+                               execlist_link);
+               list_del(&submit_req->execlist_link);
+               intel_runtime_pm_put(dev_priv);
+               i915_gem_context_unreference(submit_req->ctx);
+               kfree(submit_req);
+       }
+
        /* These may not have been flush before the reset, do so now */
        kfree(ring->preallocated_lazy_request);
        ring->preallocated_lazy_request = NULL;
@@ -2766,6 +2804,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
+               struct intel_ringbuffer *ringbuf;
 
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
@@ -2775,12 +2814,24 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                        break;
 
                trace_i915_gem_request_retire(ring, request->seqno);
+
+               /* This is one of the few common intersection points
+                * between legacy ringbuffer submission and execlists:
+                * we need to tell them apart in order to find the correct
+                * ringbuffer to which the request belongs.
+                */
+               if (i915.enable_execlists) {
+                       struct intel_context *ctx = request->ctx;
+                       ringbuf = ctx->engine[ring->id].ringbuf;
+               } else
+                       ringbuf = ring->buffer;
+
                /* We know the GPU must have read the request to have
                 * sent us the seqno + interrupt, so use the position
                 * of tail of the request to update the last known position
                 * of the GPU head.
                 */
-               ring->buffer->last_retired_head = request->tail;
+               ringbuf->last_retired_head = request->tail;
 
                i915_gem_free_request(request);
        }
@@ -3050,6 +3101,9 @@ int i915_vma_unbind(struct i915_vma *vma)
         * cause memory corruption through use-after-free.
         */
 
+       /* Throw away the active reference before moving to the unbound list */
+       i915_gem_object_retire(obj);
+
        if (i915_is_ggtt(vma->vm)) {
                i915_gem_object_finish_gtt(obj);
 
@@ -3064,9 +3118,8 @@ int i915_vma_unbind(struct i915_vma *vma)
        vma->unbind_vma(vma);
 
        list_del_init(&vma->mm_list);
-       /* Avoid an unnecessary call to unbind on rebind. */
        if (i915_is_ggtt(vma->vm))
-               obj->map_and_fenceable = true;
+               obj->map_and_fenceable = false;
 
        drm_mm_remove_node(&vma->node);
        i915_gem_vma_destroy(vma);
@@ -3095,9 +3148,11 @@ int i915_gpu_idle(struct drm_device *dev)
 
        /* Flush everything onto the inactive list. */
        for_each_ring(ring, dev_priv, i) {
-               ret = i915_switch_context(ring, ring->default_context);
-               if (ret)
-                       return ret;
+               if (!i915.enable_execlists) {
+                       ret = i915_switch_context(ring, ring->default_context);
+                       if (ret)
+                               return ret;
+               }
 
                ret = intel_ring_idle(ring);
                if (ret)
@@ -3311,7 +3366,6 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
                obj->last_fenced_seqno = 0;
        }
 
-       obj->fenced_gpu_access = false;
        return 0;
 }
 
@@ -3418,6 +3472,9 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
                        return 0;
                }
        } else if (enable) {
+               if (WARN_ON(!obj->map_and_fenceable))
+                       return -EINVAL;
+
                reg = i915_find_fence_reg(dev);
                if (IS_ERR(reg))
                        return PTR_ERR(reg);
@@ -3439,17 +3496,20 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-static bool i915_gem_valid_gtt_space(struct drm_device *dev,
-                                    struct drm_mm_node *gtt_space,
+static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
                                     unsigned long cache_level)
 {
+       struct drm_mm_node *gtt_space = &vma->node;
        struct drm_mm_node *other;
 
-       /* On non-LLC machines we have to be careful when putting differing
-        * types of snoopable memory together to avoid the prefetcher
-        * crossing memory domains and dying.
+       /*
+        * On some machines we have to be careful when putting differing types
+        * of snoopable memory together to avoid the prefetcher crossing memory
+        * domains and dying. During vm initialisation, we decide whether or not
+        * these constraints apply and set the drm_mm.color_adjust
+        * appropriately.
         */
-       if (HAS_LLC(dev))
+       if (vma->vm->mm.color_adjust == NULL)
                return true;
 
        if (!drm_mm_node_allocated(gtt_space))
@@ -3587,8 +3647,7 @@ search_free:
 
                goto err_free_vma;
        }
-       if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
-                                             obj->cache_level))) {
+       if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
                ret = -EINVAL;
                goto err_remove_node;
        }
@@ -3728,11 +3787,12 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (!i915_gem_obj_bound_any(obj))
+       if (vma == NULL)
                return -EINVAL;
 
        if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3774,13 +3834,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
                                            old_write_domain);
 
        /* And bump the LRU for this access */
-       if (i915_gem_object_is_inactive(obj)) {
-               struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
-               if (vma)
-                       list_move_tail(&vma->mm_list,
-                                      &dev_priv->gtt.base.inactive_list);
-
-       }
+       if (i915_gem_object_is_inactive(obj))
+               list_move_tail(&vma->mm_list,
+                              &dev_priv->gtt.base.inactive_list);
 
        return 0;
 }
@@ -3801,7 +3857,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
        }
 
        list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-               if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+               if (!i915_gem_valid_gtt_space(vma, cache_level)) {
                        ret = i915_vma_unbind(vma);
                        if (ret)
                                return ret;
@@ -3944,9 +4000,6 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
 
-       if (list_empty(&obj->vma_list))
-               return false;
-
        vma = i915_gem_obj_to_ggtt(obj);
        if (!vma)
                return false;
@@ -4472,8 +4525,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
-       /* Avoid an unnecessary call to unbind on the first bind. */
-       obj->map_and_fenceable = true;
 
        i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
 }
@@ -4644,12 +4695,18 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
+       struct i915_address_space *vm = NULL;
        WARN_ON(vma->node.allocated);
 
        /* Keep the vma as a placeholder in the execbuffer reservation lists */
        if (!list_empty(&vma->exec_list))
                return;
 
+       vm = vma->vm;
+
+       if (!i915_is_ggtt(vm))
+               i915_ppgtt_put(i915_vm_to_ppgtt(vm));
+
        list_del(&vma->vma_link);
 
        kfree(vma);
@@ -4663,7 +4720,7 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
        int i;
 
        for_each_ring(ring, dev_priv, i)
-               intel_stop_ring_buffer(ring);
+               dev_priv->gt.stop_ring(ring);
 }
 
 int
@@ -4785,11 +4842,46 @@ intel_enable_blt(struct drm_device *dev)
        return true;
 }
 
-static int i915_gem_init_rings(struct drm_device *dev)
+static void init_unused_ring(struct drm_device *dev, u32 base)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       I915_WRITE(RING_CTL(base), 0);
+       I915_WRITE(RING_HEAD(base), 0);
+       I915_WRITE(RING_TAIL(base), 0);
+       I915_WRITE(RING_START(base), 0);
+}
+
+static void init_unused_rings(struct drm_device *dev)
+{
+       if (IS_I830(dev)) {
+               init_unused_ring(dev, PRB1_BASE);
+               init_unused_ring(dev, SRB0_BASE);
+               init_unused_ring(dev, SRB1_BASE);
+               init_unused_ring(dev, SRB2_BASE);
+               init_unused_ring(dev, SRB3_BASE);
+       } else if (IS_GEN2(dev)) {
+               init_unused_ring(dev, SRB0_BASE);
+               init_unused_ring(dev, SRB1_BASE);
+       } else if (IS_GEN3(dev)) {
+               init_unused_ring(dev, PRB1_BASE);
+               init_unused_ring(dev, PRB2_BASE);
+       }
+}
+
+int i915_gem_init_rings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       /*
+        * At least 830 can leave some of the unused rings
+        * "active" (i.e. head != tail) after resume, which
+        * will prevent C3 entry. Make sure all unused rings
+        * are totally idle.
+        */
+       init_unused_rings(dev);
+
        ret = intel_init_render_ring_buffer(dev);
        if (ret)
                return ret;
@@ -4870,7 +4962,7 @@ i915_gem_init_hw(struct drm_device *dev)
 
        i915_gem_init_swizzling(dev);
 
-       ret = i915_gem_init_rings(dev);
+       ret = dev_priv->gt.init_rings(dev);
        if (ret)
                return ret;
 
@@ -4888,6 +4980,14 @@ i915_gem_init_hw(struct drm_device *dev)
        if (ret && ret != -EIO) {
                DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
+
+               return ret;
+       }
+
+       ret = i915_ppgtt_init_hw(dev);
+       if (ret && ret != -EIO) {
+               DRM_ERROR("PPGTT enable failed %d\n", ret);
+               i915_gem_cleanup_ringbuffer(dev);
        }
 
        return ret;
@@ -4898,6 +4998,9 @@ int i915_gem_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
+                       i915.enable_execlists);
+
        mutex_lock(&dev->struct_mutex);
 
        if (IS_VALLEYVIEW(dev)) {
@@ -4908,7 +5011,24 @@ int i915_gem_init(struct drm_device *dev)
                        DRM_DEBUG_DRIVER("allow wake ack timed out\n");
        }
 
-       i915_gem_init_userptr(dev);
+       if (!i915.enable_execlists) {
+               dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
+               dev_priv->gt.init_rings = i915_gem_init_rings;
+               dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
+               dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+       } else {
+               dev_priv->gt.do_execbuf = intel_execlists_submission;
+               dev_priv->gt.init_rings = intel_logical_rings_init;
+               dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+               dev_priv->gt.stop_ring = intel_logical_ring_stop;
+       }
+
+       ret = i915_gem_init_userptr(dev);
+       if (ret) {
+               mutex_unlock(&dev->struct_mutex);
+               return ret;
+       }
+
        i915_gem_init_global_gtt(dev);
 
        ret = i915_gem_context_init(dev);
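Once dev_priv->gt is populated, generic code stops branching on i915.enable_execlists and simply calls through the table, as other hunks in this patch already do:

    /* Submission-agnostic call sites, taken from this patch. */
    ret = dev_priv->gt.init_rings(dev);     /* in i915_gem_init_hw() */

    for_each_ring(ring, dev_priv, i)
            dev_priv->gt.stop_ring(ring);   /* in i915_gem_stop_ringbuffers() */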
@@ -4943,7 +5063,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        int i;
 
        for_each_ring(ring, dev_priv, i)
-               intel_cleanup_ring_buffer(ring);
+               dev_priv->gt.cleanup_ring(ring);
 }
 
 int
@@ -5281,9 +5401,7 @@ unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (!dev_priv->mm.aliasing_ppgtt ||
-           vm == &dev_priv->mm.aliasing_ppgtt->base)
-               vm = &dev_priv->gtt.base;
+       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
@@ -5324,9 +5442,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
        struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       if (!dev_priv->mm.aliasing_ppgtt ||
-           vm == &dev_priv->mm.aliasing_ppgtt->base)
-               vm = &dev_priv->gtt.base;
+       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
        BUG_ON(list_empty(&o->vma_list));
 
@@ -5350,14 +5466,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
        if (!i915_gem_shrinker_lock(dev, &unlock))
                return SHRINK_STOP;
 
-       freed = i915_gem_purge(dev_priv, sc->nr_to_scan);
-       if (freed < sc->nr_to_scan)
-               freed += __i915_gem_shrink(dev_priv,
-                                          sc->nr_to_scan - freed,
-                                          false);
+       freed = i915_gem_shrink(dev_priv,
+                               sc->nr_to_scan,
+                               I915_SHRINK_BOUND |
+                               I915_SHRINK_UNBOUND |
+                               I915_SHRINK_PURGEABLE);
        if (freed < sc->nr_to_scan)
-               freed += i915_gem_shrink_all(dev_priv);
-
+               freed += i915_gem_shrink(dev_priv,
+                                        sc->nr_to_scan - freed,
+                                        I915_SHRINK_BOUND |
+                                        I915_SHRINK_UNBOUND);
        if (unlock)
                mutex_unlock(&dev->struct_mutex);
 
@@ -5369,14 +5487,8 @@ struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
 
-       /* This WARN has probably outlived its usefulness (callers already
-        * WARN if they don't find the GGTT vma they expect). When removing,
-        * remember to remove the pre-check in is_pin_display() as well */
-       if (WARN_ON(list_empty(&obj->vma_list)))
-               return NULL;
-
        vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
-       if (vma->vm != obj_to_ggtt(obj))
+       if (vma->vm != i915_obj_to_ggtt(obj))
                return NULL;
 
        return vma;
index ae5976d..74dda24 100644 (file)
 #define GEN6_CONTEXT_ALIGN (64<<10)
 #define GEN7_CONTEXT_ALIGN 4096
 
-static void do_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
-{
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm = &ppgtt->base;
-
-       if (ppgtt == dev_priv->mm.aliasing_ppgtt ||
-           (list_empty(&vm->active_list) && list_empty(&vm->inactive_list))) {
-               ppgtt->base.cleanup(&ppgtt->base);
-               return;
-       }
-
-       /*
-        * Make sure vmas are unbound before we take down the drm_mm
-        *
-        * FIXME: Proper refcounting should take care of this, this shouldn't be
-        * needed at all.
-        */
-       if (!list_empty(&vm->active_list)) {
-               struct i915_vma *vma;
-
-               list_for_each_entry(vma, &vm->active_list, mm_list)
-                       if (WARN_ON(list_empty(&vma->vma_link) ||
-                                   list_is_singular(&vma->vma_link)))
-                               break;
-
-               i915_gem_evict_vm(&ppgtt->base, true);
-       } else {
-               i915_gem_retire_requests(dev);
-               i915_gem_evict_vm(&ppgtt->base, false);
-       }
-
-       ppgtt->base.cleanup(&ppgtt->base);
-}
-
-static void ppgtt_release(struct kref *kref)
-{
-       struct i915_hw_ppgtt *ppgtt =
-               container_of(kref, struct i915_hw_ppgtt, ref);
-
-       do_ppgtt_cleanup(ppgtt);
-       kfree(ppgtt);
-}
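The open-coded cleanup above is replaced by plain refcounting: i915_gem_context_free() now just calls i915_ppgtt_put(), and the release callback in i915_gem_gtt takes care of eviction and teardown. A sketch of the helper pair this file now relies on, assuming the kref-based definitions from i915_gem_gtt.h:

    /* Assumed helpers (see i915_gem_gtt.h); shown here for context. */
    static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
    {
            if (ppgtt)
                    kref_get(&ppgtt->ref);
    }

    static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
    {
            if (ppgtt)
                    kref_put(&ppgtt->ref, i915_ppgtt_release);
    }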
-
 static size_t get_context_alignment(struct drm_device *dev)
 {
        if (IS_GEN6(dev))
@@ -179,24 +135,20 @@ static int get_context_size(struct drm_device *dev)
 void i915_gem_context_free(struct kref *ctx_ref)
 {
        struct intel_context *ctx = container_of(ctx_ref,
-                                                  typeof(*ctx), ref);
-       struct i915_hw_ppgtt *ppgtt = NULL;
+                                                typeof(*ctx), ref);
 
-       if (ctx->legacy_hw_ctx.rcs_state) {
-               /* We refcount even the aliasing PPGTT to keep the code symmetric */
-               if (USES_PPGTT(ctx->legacy_hw_ctx.rcs_state->base.dev))
-                       ppgtt = ctx_to_ppgtt(ctx);
-       }
+       if (i915.enable_execlists)
+               intel_lr_context_free(ctx);
+
+       i915_ppgtt_put(ctx->ppgtt);
 
-       if (ppgtt)
-               kref_put(&ppgtt->ref, ppgtt_release);
        if (ctx->legacy_hw_ctx.rcs_state)
                drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
        list_del(&ctx->link);
        kfree(ctx);
 }
 
-static struct drm_i915_gem_object *
+struct drm_i915_gem_object *
 i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 {
        struct drm_i915_gem_object *obj;
@@ -226,29 +178,9 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
        return obj;
 }
 
-static struct i915_hw_ppgtt *
-create_vm_for_ctx(struct drm_device *dev, struct intel_context *ctx)
-{
-       struct i915_hw_ppgtt *ppgtt;
-       int ret;
-
-       ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
-       if (!ppgtt)
-               return ERR_PTR(-ENOMEM);
-
-       ret = i915_gem_init_ppgtt(dev, ppgtt);
-       if (ret) {
-               kfree(ppgtt);
-               return ERR_PTR(ret);
-       }
-
-       ppgtt->ctx = ctx;
-       return ppgtt;
-}
-
 static struct intel_context *
 __create_hw_context(struct drm_device *dev,
-                 struct drm_i915_file_private *file_priv)
+                   struct drm_i915_file_private *file_priv)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *ctx;
@@ -301,11 +233,9 @@ err_out:
  */
 static struct intel_context *
 i915_gem_create_context(struct drm_device *dev,
-                       struct drm_i915_file_private *file_priv,
-                       bool create_vm)
+                       struct drm_i915_file_private *file_priv)
 {
        const bool is_global_default_ctx = file_priv == NULL;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *ctx;
        int ret = 0;
 
@@ -331,34 +261,18 @@ i915_gem_create_context(struct drm_device *dev,
                }
        }
 
-       if (create_vm) {
-               struct i915_hw_ppgtt *ppgtt = create_vm_for_ctx(dev, ctx);
+       if (USES_FULL_PPGTT(dev)) {
+               struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
 
                if (IS_ERR_OR_NULL(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        ret = PTR_ERR(ppgtt);
                        goto err_unpin;
-               } else
-                       ctx->vm = &ppgtt->base;
-
-               /* This case is reserved for the global default context and
-                * should only happen once. */
-               if (is_global_default_ctx) {
-                       if (WARN_ON(dev_priv->mm.aliasing_ppgtt)) {
-                               ret = -EEXIST;
-                               goto err_unpin;
-                       }
-
-                       dev_priv->mm.aliasing_ppgtt = ppgtt;
                }
-       } else if (USES_PPGTT(dev)) {
-               /* For platforms which only have aliasing PPGTT, we fake the
-                * address space and refcounting. */
-               ctx->vm = &dev_priv->mm.aliasing_ppgtt->base;
-               kref_get(&dev_priv->mm.aliasing_ppgtt->ref);
-       } else
-               ctx->vm = &dev_priv->gtt.base;
+
+               ctx->ppgtt = ppgtt;
+       }
 
        return ctx;
 
@@ -375,34 +289,23 @@ void i915_gem_context_reset(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       /* Prevent the hardware from restoring the last context (which hung) on
-        * the next switch */
+       /* In execlists mode we will unreference the context when the execlist
+        * queue is cleared and the requests destroyed.
+        */
+       if (i915.enable_execlists)
+               return;
+
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
-               struct intel_context *dctx = ring->default_context;
                struct intel_context *lctx = ring->last_context;
 
-               /* Do a fake switch to the default context */
-               if (lctx == dctx)
-                       continue;
-
-               if (!lctx)
-                       continue;
+               if (lctx) {
+                       if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
+                               i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
-               if (dctx->legacy_hw_ctx.rcs_state && i == RCS) {
-                       WARN_ON(i915_gem_obj_ggtt_pin(dctx->legacy_hw_ctx.rcs_state,
-                                                     get_context_alignment(dev), 0));
-                       /* Fake a finish/inactive */
-                       dctx->legacy_hw_ctx.rcs_state->base.write_domain = 0;
-                       dctx->legacy_hw_ctx.rcs_state->active = 0;
+                       i915_gem_context_unreference(lctx);
+                       ring->last_context = NULL;
                }
-
-               if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-                       i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
-
-               i915_gem_context_unreference(lctx);
-               i915_gem_context_reference(dctx);
-               ring->last_context = dctx;
        }
 }
 
@@ -417,7 +320,11 @@ int i915_gem_context_init(struct drm_device *dev)
        if (WARN_ON(dev_priv->ring[RCS].default_context))
                return 0;
 
-       if (HAS_HW_CONTEXTS(dev)) {
+       if (i915.enable_execlists) {
+               /* NB: intentionally left blank. We will allocate our own
+                * backing objects as we need them, thank you very much */
+               dev_priv->hw_context_size = 0;
+       } else if (HAS_HW_CONTEXTS(dev)) {
                dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
                if (dev_priv->hw_context_size > (1<<20)) {
                        DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
@@ -426,18 +333,23 @@ int i915_gem_context_init(struct drm_device *dev)
                }
        }
 
-       ctx = i915_gem_create_context(dev, NULL, USES_PPGTT(dev));
+       ctx = i915_gem_create_context(dev, NULL);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context (error %ld)\n",
                          PTR_ERR(ctx));
                return PTR_ERR(ctx);
        }
 
-       /* NB: RCS will hold a ref for all rings */
-       for (i = 0; i < I915_NUM_RINGS; i++)
-               dev_priv->ring[i].default_context = ctx;
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-       DRM_DEBUG_DRIVER("%s context support initialized\n", dev_priv->hw_context_size ? "HW" : "fake");
+               /* NB: RCS will hold a ref for all rings */
+               ring->default_context = ctx;
+       }
+
+       DRM_DEBUG_DRIVER("%s context support initialized\n",
+                       i915.enable_execlists ? "LR" :
+                       dev_priv->hw_context_size ? "HW" : "fake");
        return 0;
 }
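
A side note on the hw_context_size computation above: round_up(..., 4096) pads the hardware context image to a whole page before the 1 MiB sanity check. A standalone sketch of the power-of-two rounding trick (hypothetical helper name):

    #include <assert.h>
    #include <stddef.h>

    /* Round x up to a multiple of y, where y is a power of two. */
    static size_t round_up_pow2(size_t x, size_t y)
    {
            return (x + y - 1) & ~(y - 1);
    }

    int main(void)
    {
            assert(round_up_pow2(70000, 4096) == 73728);   /* 18 pages */
            assert(round_up_pow2(4096, 4096) == 4096);     /* already aligned */
            assert(round_up_pow2(70000, 4096) < (1 << 20)); /* passes size check */
            return 0;
    }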
 
@@ -489,19 +401,11 @@ int i915_gem_context_enable(struct drm_i915_private *dev_priv)
        struct intel_engine_cs *ring;
        int ret, i;
 
-       /* This is the only place the aliasing PPGTT gets enabled, which means
-        * it has to happen before we bail on reset */
-       if (dev_priv->mm.aliasing_ppgtt) {
-               struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-               ppgtt->enable(ppgtt);
-       }
+       BUG_ON(!dev_priv->ring[RCS].default_context);
 
-       /* FIXME: We should make this work, even in reset */
-       if (i915_reset_in_progress(&dev_priv->gpu_error))
+       if (i915.enable_execlists)
                return 0;
 
-       BUG_ON(!dev_priv->ring[RCS].default_context);
-
        for_each_ring(ring, dev_priv, i) {
                ret = i915_switch_context(ring, ring->default_context);
                if (ret)
@@ -527,7 +431,7 @@ int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
        idr_init(&file_priv->context_idr);
 
        mutex_lock(&dev->struct_mutex);
-       ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+       ctx = i915_gem_create_context(dev, file_priv);
        mutex_unlock(&dev->struct_mutex);
 
        if (IS_ERR(ctx)) {
@@ -563,6 +467,7 @@ mi_set_context(struct intel_engine_cs *ring,
               struct intel_context *new_context,
               u32 hw_flags)
 {
+       u32 flags = hw_flags | MI_MM_SPACE_GTT;
        const int num_rings =
                /* Use an extended w/a on ivb+ if signalling from other rings */
                i915_semaphore_is_enabled(ring->dev) ?
@@ -581,6 +486,9 @@ mi_set_context(struct intel_engine_cs *ring,
                        return ret;
        }
 
+       /* These flags are for resource streamer on HSW+ */
+       if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
+               flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
        len = 4;
        if (INTEL_INFO(ring->dev)->gen >= 7)
@@ -610,10 +518,7 @@ mi_set_context(struct intel_engine_cs *ring,
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
        intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->legacy_hw_ctx.rcs_state) |
-                       MI_MM_SPACE_GTT |
-                       MI_SAVE_EXT_STATE_EN |
-                       MI_RESTORE_EXT_STATE_EN |
-                       hw_flags);
+                       flags);
        /*
         * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
         * WaMiSetContext_Hang:snb,ivb,vlv
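
The flags word emitted with MI_SET_CONTEXT above is assembled earlier in the function: MI_MM_SPACE_GTT is always set, and the extended-state save/restore bits are added only on pre-gen8, non-Haswell parts, since on HSW+ those bit positions drive the resource streamer instead. A compilable sketch of that composition (illustrative bit values, not the real command encoding):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit values only; not the real MI_SET_CONTEXT encoding. */
    #define MI_MM_SPACE_GTT          (1u << 8)
    #define MI_SAVE_EXT_STATE_EN     (1u << 3)
    #define MI_RESTORE_EXT_STATE_EN  (1u << 2)

    static uint32_t mi_set_context_flags(uint32_t hw_flags, int is_haswell,
                                         int gen)
    {
            uint32_t flags = hw_flags | MI_MM_SPACE_GTT;

            /* Pre-gen8, non-Haswell: these bits request extended state
             * save/restore. On HSW+ they drive the resource streamer,
             * so they stay clear there. */
            if (!is_haswell && gen < 8)
                    flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

            return flags;
    }

    int main(void)
    {
            printf("gen6: %#x\n", mi_set_context_flags(0, 0, 6));  /* 0x10c */
            printf("hsw:  %#x\n", mi_set_context_flags(0, 1, 7));  /* 0x100 */
            return 0;
    }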
@@ -646,7 +551,6 @@ static int do_switch(struct intel_engine_cs *ring,
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_context *from = ring->last_context;
-       struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
        u32 hw_flags = 0;
        bool uninitialized = false;
        int ret, i;
@@ -674,8 +578,8 @@ static int do_switch(struct intel_engine_cs *ring,
         */
        from = ring->last_context;
 
-       if (USES_FULL_PPGTT(ring->dev)) {
-               ret = ppgtt->switch_mm(ppgtt, ring, false);
+       if (to->ppgtt) {
+               ret = to->ppgtt->switch_mm(to->ppgtt, ring);
                if (ret)
                        goto unpin_out;
        }
@@ -755,6 +659,12 @@ done:
        ring->last_context = to;
 
        if (uninitialized) {
+               if (ring->init_context) {
+                       ret = ring->init_context(ring);
+                       if (ret)
+                               DRM_ERROR("ring init context: %d\n", ret);
+               }
+
                ret = i915_gem_render_state_init(ring);
                if (ret)
                        DRM_ERROR("init render state: %d\n", ret);
@@ -775,14 +685,19 @@ unpin_out:
  *
  * The context life cycle is simple. The context refcount is incremented on
  * create and decremented on destroy. If the context is in use by the GPU,
- * it will have a refoucnt > 1. This allows us to destroy the context abstract
+ * it will have a refcount > 1. This allows us to destroy the context abstract
  * object while letting the normal object tracking destroy the backing BO.
+ *
+ * This function should not be used in execlists mode.  Instead the context is
+ * switched by writing to the ELSP and requests keep a reference to their
+ * context.
  */
 int i915_switch_context(struct intel_engine_cs *ring,
                        struct intel_context *to)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
+       WARN_ON(i915.enable_execlists);
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
        if (to->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
@@ -798,9 +713,9 @@ int i915_switch_context(struct intel_engine_cs *ring,
        return do_switch(ring, to);
 }
 
-static bool hw_context_enabled(struct drm_device *dev)
+static bool contexts_enabled(struct drm_device *dev)
 {
-       return to_i915(dev)->hw_context_size;
+       return i915.enable_execlists || to_i915(dev)->hw_context_size;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -811,14 +726,14 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        struct intel_context *ctx;
        int ret;
 
-       if (!hw_context_enabled(dev))
+       if (!contexts_enabled(dev))
                return -ENODEV;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       ctx = i915_gem_create_context(dev, file_priv, USES_FULL_PPGTT(dev));
+       ctx = i915_gem_create_context(dev, file_priv);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
diff --git a/sys/dev/drm/i915/i915_gem_evict.c b/sys/dev/drm/i915/i915_gem_evict.c
index bbf4b12..886ff2e 100644
@@ -243,7 +243,7 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct i915_address_space *vm;
+       struct i915_address_space *vm, *v;
        bool lists_empty = true;
        int ret;
 
@@ -270,7 +270,7 @@ i915_gem_evict_everything(struct drm_device *dev)
        i915_gem_retire_requests(dev);
 
        /* Having flushed everything, unbind() should never raise an error */
-       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+       list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
                WARN_ON(i915_gem_evict_vm(vm, false));
 
        return 0;
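
The switch to list_for_each_entry_safe() matters because evicting everything can drop the last reference on a PPGTT and unlink its address space from vm_list mid-walk; the safe variant caches the next pointer before the loop body runs. The same idea in a plain C sketch (hypothetical node type):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int v;
            struct node *next;
    };

    int main(void)
    {
            struct node *head = NULL, *n, *next;
            int i;

            for (i = 0; i < 3; i++) {
                    n = malloc(sizeof(*n));
                    if (!n)
                            return 1;
                    n->v = i;
                    n->next = head;
                    head = n;
            }

            /* "Safe" traversal: grab the next pointer before the body
             * runs, so the current node may be freed without derailing
             * the walk. */
            for (n = head; n != NULL; n = next) {
                    next = n->next;
                    printf("evicting %d\n", n->v);
                    free(n);
            }
            return 0;
    }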
diff --git a/sys/dev/drm/i915/i915_gem_execbuffer.c b/sys/dev/drm/i915/i915_gem_execbuffer.c
index 65e9aaf..b6091bf 100644
@@ -35,6 +35,7 @@
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
+#define  __EXEC_OBJECT_NEEDS_MAP (1<<29)
 #define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)
 
 #define BATCH_OFFSET_BIAS (256*1024)
@@ -94,7 +95,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
               struct i915_address_space *vm,
               struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = vm->dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret;
@@ -129,20 +129,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
        i = 0;
        while (!list_empty(&objects)) {
                struct i915_vma *vma;
-               struct i915_address_space *bind_vm = vm;
-
-               if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
-                   USES_FULL_PPGTT(vm->dev)) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-
-               /* If we have secure dispatch, or the userspace assures us that
-                * they know what they're doing, use the GGTT VM.
-                */
-               if (((args->flags & I915_EXEC_SECURE) &&
-                   (i == (args->buffer_count - 1))))
-                       bind_vm = &dev_priv->gtt.base;
 
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
@@ -156,7 +142,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
-               vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
+               vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
@@ -307,7 +293,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint64_t delta = reloc->delta + target_offset;
-       uint32_t __iomem *reloc_entry;
+       uint64_t offset;
        void __iomem *reloc_page;
        int ret;
 
@@ -320,25 +306,24 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
                return ret;
 
        /* Map the page containing the relocation we're going to perform.  */
-       reloc->offset += i915_gem_obj_ggtt_offset(obj);
+       offset = i915_gem_obj_ggtt_offset(obj);
+       offset += reloc->offset;
        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
-                       reloc->offset & ~PAGE_MASK);
-       reloc_entry = (uint32_t __iomem *)
-               ((char *)reloc_page + offset_in_page(reloc->offset));
-       iowrite32(lower_32_bits(delta), reloc_entry);
+                                             offset & ~PAGE_MASK);
+       iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
        if (INTEL_INFO(dev)->gen >= 8) {
-               reloc_entry += 1;
+               offset += sizeof(uint32_t);
 
-               if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
+               if (offset_in_page(offset) == 0) {
                        io_mapping_unmap_atomic(reloc_page);
-                       reloc_page = io_mapping_map_atomic_wc(
-                                       dev_priv->gtt.mappable,
-                                       reloc->offset + sizeof(uint32_t));
-                       reloc_entry = reloc_page;
+                       reloc_page =
+                               io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+                                                        offset);
                }
 
-               iowrite32(upper_32_bits(delta), reloc_entry);
+               iowrite32(upper_32_bits(delta),
+                         reloc_page + offset_in_page(offset));
        }
 
        io_mapping_unmap_atomic(reloc_page);
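
The rewritten relocation path tracks a 64-bit GGTT offset and, on gen8+, writes the upper 32 bits as a second dword, remapping if that second write crosses a page boundary. A userspace sketch of just the offset arithmetic (hypothetical values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define lower_32_bits(v) ((uint32_t)(v))
    #define upper_32_bits(v) ((uint32_t)((v) >> 32))
    #define offset_in_page(o) ((unsigned)((o) & (PAGE_SIZE - 1)))

    int main(void)
    {
            uint64_t delta  = 0x123456789abcull;   /* 64-bit reloc value */
            uint64_t offset = 0x1000ffcull;        /* last dword of a page */

            printf("low  dword at page offset %#x: %#x\n",
                   offset_in_page(offset), lower_32_bits(delta));

            offset += sizeof(uint32_t);
            if (offset_in_page(offset) == 0)
                    printf("second dword crossed into the next page; remap\n");

            printf("high dword at page offset %#x: %#x\n",
                   offset_in_page(offset), upper_32_bits(delta));
            return 0;
    }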
@@ -534,14 +519,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
        return ret;
 }
 
-static int
-need_reloc_mappable(struct i915_vma *vma)
-{
-       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
-               i915_is_ggtt(vma->vm);
-}
-
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_engine_cs *ring,
@@ -549,20 +526,12 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 {
        struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-       bool need_fence;
        uint64_t flags;
        int ret;
 
        flags = 0;
-
-       need_fence =
-               has_fenced_gpu_access &&
-               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-               obj->tiling_mode != I915_TILING_NONE;
-       if (need_fence || need_reloc_mappable(vma))
+       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
                flags |= PIN_MAPPABLE;
-
        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
                flags |= PIN_GLOBAL;
        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -574,17 +543,13 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
        entry->flags |= __EXEC_OBJECT_HAS_PIN;
 
-       if (has_fenced_gpu_access) {
-               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-                       ret = i915_gem_object_get_fence(obj);
-                       if (ret)
-                               return ret;
-
-                       if (i915_gem_object_pin_fence(obj))
-                               entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+       if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+               ret = i915_gem_object_get_fence(obj);
+               if (ret)
+                       return ret;
 
-                       obj->pending_fenced_gpu_access = true;
-               }
+               if (i915_gem_object_pin_fence(obj))
+                       entry->flags |= __EXEC_OBJECT_HAS_FENCE;
        }
 
        if (entry->offset != vma->node.start) {
@@ -601,26 +566,40 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 }
 
 static bool
-eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
+need_reloc_mappable(struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
-       struct drm_i915_gem_object *obj = vma->obj;
-       bool need_fence, need_mappable;
 
-       need_fence =
-               has_fenced_gpu_access &&
-               entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-               obj->tiling_mode != I915_TILING_NONE;
-       need_mappable = need_fence || need_reloc_mappable(vma);
+       if (entry->relocation_count == 0)
+               return false;
+
+       if (!i915_is_ggtt(vma->vm))
+               return false;
+
+       /* See also use_cpu_reloc() */
+       if (HAS_LLC(vma->obj->base.dev))
+               return false;
 
-       WARN_ON((need_mappable || need_fence) &&
+       if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return false;
+
+       return true;
+}
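
Restated as a standalone predicate, the early-return chain above reads like a truth table; a minimal test harness (hypothetical boolean inputs; the real code derives them from the vma and object):

    #include <assert.h>
    #include <stdbool.h>

    static bool needs_mappable_reloc(int reloc_count, bool is_ggtt,
                                     bool has_llc, bool cpu_write_domain)
    {
            if (reloc_count == 0)
                    return false;      /* nothing to patch */
            if (!is_ggtt)
                    return false;      /* only GGTT relocs go via the GTT */
            if (has_llc)
                    return false;      /* CPU relocations are cheap with LLC */
            if (cpu_write_domain)
                    return false;      /* already writable via the CPU */
            return true;
    }

    int main(void)
    {
            assert(needs_mappable_reloc(1, true, false, false));
            assert(!needs_mappable_reloc(0, true, false, false));
            assert(!needs_mappable_reloc(1, false, false, false));
            assert(!needs_mappable_reloc(1, true, true, false));
            return 0;
    }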
+
+static bool
+eb_vma_misplaced(struct i915_vma *vma)
+{
+       struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
+       struct drm_i915_gem_object *obj = vma->obj;
+
+       WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
               !i915_is_ggtt(vma->vm));
 
        if (entry->alignment &&
            vma->node.start & (entry->alignment - 1))
                return true;
 
-       if (need_mappable && !obj->map_and_fenceable)
+       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
                return true;
 
        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
@@ -642,9 +621,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;
 
-       if (list_empty(vmas))
-               return 0;
-
        i915_gem_retire_requests_ring(ring);
 
        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -658,20 +634,21 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                obj = vma->obj;
                entry = vma->exec_entry;
 
+               if (!has_fenced_gpu_access)
+                       entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
                need_fence =
-                       has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
                need_mappable = need_fence || need_reloc_mappable(vma);
 
-               if (need_mappable)
+               if (need_mappable) {
+                       entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
                        list_move(&vma->exec_list, &ordered_vmas);
-               else
+               } else
                        list_move_tail(&vma->exec_list, &ordered_vmas);
 
                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
-               obj->pending_fenced_gpu_access = false;
        }
        list_splice(&ordered_vmas, vmas);
 
@@ -696,7 +673,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
-                       if (eb_vma_misplaced(vma, has_fenced_gpu_access))
+                       if (eb_vma_misplaced(vma))
                                ret = i915_vma_unbind(vma);
                        else
                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
@@ -744,9 +721,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
        int i, total, ret;
        unsigned count = args->buffer_count;
 
-       if (WARN_ON(list_empty(&eb->vmas)))
-               return 0;
-
        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
 
        /* We may process another execbuffer during the unlock... */
@@ -890,18 +864,24 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
 }
 
 static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+validate_exec_list(struct drm_device *dev,
+                  struct drm_i915_gem_exec_object2 *exec,
                   int count)
 {
-       int i;
        unsigned relocs_total = 0;
        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
+       unsigned invalid_flags;
+       int i;
+
+       invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
+       if (USES_FULL_PPGTT(dev))
+               invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
        for (i = 0; i < count; i++) {
                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
                int length; /* limited by fault_in_pages_readable() */
 
-               if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
+               if (exec[i].flags & invalid_flags)
                        return -EINVAL;
 
                /* First check for malicious input causing overflow in
@@ -953,16 +933,26 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                return ERR_PTR(-EIO);
        }
 
+       if (i915.enable_execlists && !ctx->engine[ring->id].state) {
+               int ret = intel_lr_context_deferred_create(ctx, ring);
+               if (ret) {
+                       DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
+                       return ERR_PTR(ret);
+               }
+       }
+
        return ctx;
 }
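
The deferred LRC creation above is a create-on-first-use pattern: per-engine context state is only allocated when an execbuffer actually targets that engine with that context. A generic sketch of the shape (hypothetical types, single-threaded):

    #include <stdlib.h>

    struct engine_ctx_state { void *state; };

    static int create_state(struct engine_ctx_state *e)
    {
            e->state = malloc(64);                 /* stand-in allocation */
            return e->state ? 0 : -1;
    }

    /* Allocate the per-engine state only on first use, propagating any
     * creation failure to the caller, as the deferred-create hunk does. */
    static int ensure_state(struct engine_ctx_state *e)
    {
            if (e->state == NULL && create_state(e) != 0)
                    return -1;
            return 0;
    }

    int main(void)
    {
            struct engine_ctx_state e = { 0 };
            if (ensure_state(&e))                  /* first use: allocates */
                    return 1;
            if (ensure_state(&e))                  /* second use: no-op */
                    return 1;
            free(e.state);
            return 0;
    }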
 
-static void
+void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_engine_cs *ring)
 {
+       u32 seqno = intel_ring_get_seqno(ring);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, vmas, exec_list) {
+               struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;
@@ -971,24 +961,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
-               obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
-                       obj->last_write_seqno = intel_ring_get_seqno(ring);
+                       obj->last_write_seqno = seqno;
 
                        intel_fb_obj_invalidate(obj, ring);
 
                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
                }
+               if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+                       obj->last_fenced_seqno = seqno;
+                       if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+                               struct drm_i915_private *dev_priv = to_i915(ring->dev);
+                               list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+                                              &dev_priv->mm.fence_list);
+                       }
+               }
 
                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
 }
 
-static void
+void
 i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_engine_cs *ring,
@@ -1028,14 +1025,14 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
        return 0;
 }
 
-static int
-legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
-                            struct intel_engine_cs *ring,
-                            struct intel_context *ctx,
-                            struct drm_i915_gem_execbuffer2 *args,
-                            struct list_head *vmas,
-                            struct drm_i915_gem_object *batch_obj,
-                            u64 exec_start, u32 flags)
+int
+i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
+                              struct intel_engine_cs *ring,
+                              struct intel_context *ctx,
+                              struct drm_i915_gem_execbuffer2 *args,
+                              struct list_head *vmas,
+                              struct drm_i915_gem_object *batch_obj,
+                              u64 exec_start, u32 flags)
 {
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1256,7 +1253,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (!i915_gem_check_execbuffer(args))
                return -EINVAL;
 
-       ret = validate_exec_list(exec, args->buffer_count);
+       ret = validate_exec_list(dev, exec, args->buffer_count);
        if (ret)
                return ret;
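
validate_exec_list() now takes the device so it can widen the set of rejected flags: with full PPGTT, EXEC_OBJECT_NEEDS_GTT is treated as invalid user input. A compilable sketch of that mask-building check (illustrative bit values, not the real uapi encoding):

    #include <assert.h>
    #include <errno.h>
    #include <stdint.h>

    /* Illustrative bit values; not the real uapi encoding. */
    #define EXEC_OBJECT_NEEDS_FENCE (1u << 0)
    #define EXEC_OBJECT_NEEDS_GTT   (1u << 1)
    #define KNOWN_FLAGS (EXEC_OBJECT_NEEDS_FENCE | EXEC_OBJECT_NEEDS_GTT)

    static int check_flags(uint32_t flags, int uses_full_ppgtt)
    {
            uint32_t invalid = ~KNOWN_FLAGS;       /* all unknown bits */

            if (uses_full_ppgtt)                   /* config-dependent bits */
                    invalid |= EXEC_OBJECT_NEEDS_GTT;

            return (flags & invalid) ? -EINVAL : 0;
    }

    int main(void)
    {
            assert(check_flags(EXEC_OBJECT_NEEDS_GTT, 0) == 0);
            assert(check_flags(EXEC_OBJECT_NEEDS_GTT, 1) == -EINVAL);
            assert(check_flags(1u << 5, 0) == -EINVAL);
            return 0;
    }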
 
@@ -1317,8 +1314,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        i915_gem_context_reference(ctx);
 
-       vm = ctx->vm;
-       if (!USES_FULL_PPGTT(dev))
+       if (ctx->ppgtt)
+               vm = &ctx->ppgtt->base;
+       else
                vm = &dev_priv->gtt.base;
 
        eb = eb_create(args);
@@ -1385,25 +1383,36 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
-       if (flags & I915_DISPATCH_SECURE &&
-           !batch_obj->has_global_gtt_mapping) {
-               /* When we have multiple VMs, we'll need to make sure that we
-                * allocate space first */
-               struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
-               BUG_ON(!vma);
-               vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
-       }
+       if (flags & I915_DISPATCH_SECURE) {
+               /*
+                * So on first glance it looks freaky that we pin the batch here
+                * outside of the reservation loop. But:
+                * - The batch is already pinned into the relevant ppgtt, so we
+                *   already have the backing storage fully allocated.
+                * - No other BO uses the global gtt (well contexts, but meh),
+                *   so we don't really have issues with multiple objects not
+                *   fitting due to fragmentation.
+                * So this is actually safe.
+                */
+               ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+               if (ret)
+                       goto err;
 
-       if (flags & I915_DISPATCH_SECURE)
                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
-       else
+       } else
                exec_start += i915_gem_obj_offset(batch_obj, vm);
 
-       ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
-                       args, &eb->vmas, batch_obj, exec_start, flags);
-       if (ret)
-               goto err;
+       ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
+                                     &eb->vmas, batch_obj, exec_start, flags);
 
+       /*
+        * FIXME: We crucially rely upon the active tracking for the (ppgtt)
+        * batch vma for correctness. To make this less ugly and fragile, it
+        * needs to be adjusted to also track the ggtt batch vma properly as
+        * active.
+        */
+       if (flags & I915_DISPATCH_SECURE)
+               i915_gem_object_ggtt_unpin(batch_obj);
 err:
        /* the request owns the ref now */
        i915_gem_context_unreference(ctx);
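
The secure-dispatch path now brackets submission with an explicit GGTT pin and unpin, so the mapping is released whether or not the submission itself succeeded. The control flow, reduced to a userspace sketch (stub functions standing in for the GGTT calls):

    #include <stdio.h>

    static int pin(void)    { printf("pin batch into GGTT\n"); return 0; }
    static void unpin(void) { printf("unpin batch\n"); }
    static int submit(void) { printf("submit\n"); return 0; }

    static int dispatch_secure(void)
    {
            int ret = pin();
            if (ret)
                    return ret;        /* nothing to undo yet */

            ret = submit();
            unpin();                   /* released on success and failure */
            return ret;
    }

    int main(void)
    {
            return dispatch_secure();
    }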
diff --git a/sys/dev/drm/i915/i915_gem_gtt.c b/sys/dev/drm/i915/i915_gem_gtt.c
index 48920e9..19935a8 100644
 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
 
-bool intel_enable_ppgtt(struct drm_device *dev, bool full)
-{
-       if (i915.enable_ppgtt == 0)
-               return false;
-
-       if (i915.enable_ppgtt == 1 && full)
-               return false;
-
-       return true;
-}
-
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 {
        if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev))
@@ -79,7 +68,6 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
                           enum i915_cache_level cache_level,
                           u32 flags);
 static void ppgtt_unbind_vma(struct i915_vma *vma);
-static int gen8_ppgtt_enable(struct i915_hw_ppgtt *ppgtt);
 
 static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
                                             enum i915_cache_level level,
@@ -217,19 +205,12 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
 
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
-                          uint64_t val, bool synchronous)
+                          uint64_t val)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
        int ret;
 
        BUG_ON(entry >= 4);
 
-       if (synchronous) {
-               I915_WRITE(GEN8_RING_PDP_UDW(ring, entry), val >> 32);
-               I915_WRITE(GEN8_RING_PDP_LDW(ring, entry), (u32)val);
-               return 0;
-       }
-
        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;
@@ -246,8 +227,7 @@ static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
 }
 
 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring,
-                         bool synchronous)
+                         struct intel_engine_cs *ring)
 {
        int i, ret;
 
@@ -256,7 +236,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
        for (i = used_pd - 1; i >= 0; i--) {
                dma_addr_t addr = ppgtt->pd_dma_addr[i];
-               ret = gen8_write_pdp(ring, i, addr, synchronous);
+               ret = gen8_write_pdp(ring, i, addr);
                if (ret)
                        return ret;
        }
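
gen8_mm_switch() above walks the used page directories from the top down and writes each 64-bit pointer as an upper/lower register pair; on gen8, each of the four PDP entries spans 1 GiB of GPU address space. A sketch of just that loop shape (stub emit function; not the real LRI/MMIO encoding):

    #include <stdint.h>
    #include <stdio.h>

    /* Stub for the two register writes; not the real LRI/MMIO encoding. */
    static void write_pdp(int entry, uint64_t val)
    {
            printf("PDP%d UDW <- %#x\n", entry, (unsigned)(val >> 32));
            printf("PDP%d LDW <- %#x\n", entry, (unsigned)val);
    }

    int main(void)
    {
            uint64_t pd_dma_addr[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
            int used_pd = 4;           /* 4 entries cover 4 GiB */
            int i;

            for (i = used_pd - 1; i >= 0; i--)
                    write_pdp(i, pd_dma_addr[i]);
            return 0;
    }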
@@ -405,9 +385,6 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
 
-       list_del(&vm->global_link);
-       drm_mm_takedown(&vm->mm);
-
        gen8_ppgtt_unmap_pages(ppgtt);
        gen8_ppgtt_free(ppgtt);
 }
@@ -617,7 +594,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
                kunmap_atomic(pd_vaddr);
        }
 
-       ppgtt->enable = gen8_ppgtt_enable;
        ppgtt->switch_mm = gen8_mm_switch;
        ppgtt->base.clear_range = gen8_ppgtt_clear_range;
        ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
@@ -726,29 +702,10 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                        struct intel_engine_cs *ring,
-                        bool synchronous)
+                        struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       /* If we're in reset, we can assume the GPU is sufficiently idle to
-        * manually frob these bits. Ideally we could use the ring functions,
-        * except our error handling makes it quite difficult (can't use
-        * intel_ring_begin, ring->flush, or intel_ring_advance)
-        *
-        * FIXME: We should try not to special case reset
-        */
-       if (synchronous ||
-           i915_reset_in_progress(&dev_priv->gpu_error)) {
-               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-               POSTING_READ(RING_PP_DIR_BASE(ring));
-               return 0;
-       }
-
        /* NB: TLBs must be flushed and invalidated before a switch */
        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
@@ -770,29 +727,10 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring,
-                         bool synchronous)
+                         struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ppgtt->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       /* If we're in reset, we can assume the GPU is sufficiently idle to
-        * manually frob these bits. Ideally we could use the ring functions,
-        * except our error handling makes it quite difficult (can't use
-        * intel_ring_begin, ring->flush, or intel_ring_advance)
-        *
-        * FIXME: We should try not to special case reset
-        */
-       if (synchronous ||
-           i915_reset_in_progress(&dev_priv->gpu_error)) {
-               WARN_ON(ppgtt != dev_priv->mm.aliasing_ppgtt);
-               I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
-               I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
-               POSTING_READ(RING_PP_DIR_BASE(ring));
-               return 0;
-       }
-
        /* NB: TLBs must be flushed and invalidated before a switch */
        ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        if (ret)
@@ -821,14 +759,11 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
-                         struct intel_engine_cs *ring,
-                         bool synchronous)