drm/i915: Update to Linux 4.6
author François Tigeot <ftigeot@wolfpond.org>
Fri, 6 Jan 2017 08:46:52 +0000 (10:46 +0200)
committer François Tigeot <ftigeot@wolfpond.org>
Fri, 6 Jan 2017 09:49:43 +0000 (10:49 +0100)
* Skylake and Kabylake support improvements

* FBC (FrameBuffer Compression) now enabled by default on Haswell and
  Broadwell GPUs

* PSR (Panel Self Refresh) support improved, now enabled by default on
  Valleyview, CherryView, Haswell and Broadwell

* Improved DSI panel support

* HDMI hotplug fixes

* Various bugfixes everywhere

99 files changed:
sys/conf/files
sys/dev/drm/drm_atomic.c
sys/dev/drm/drm_atomic_helper.c
sys/dev/drm/drm_bridge.c
sys/dev/drm/drm_crtc.c
sys/dev/drm/drm_crtc_helper.c
sys/dev/drm/drm_dp_helper.c
sys/dev/drm/drm_dp_mst_topology.c
sys/dev/drm/drm_edid.c
sys/dev/drm/drm_encoder_slave.c
sys/dev/drm/drm_fb_helper.c
sys/dev/drm/drm_fops.c
sys/dev/drm/drm_irq.c
sys/dev/drm/drm_mipi_dsi.c
sys/dev/drm/drm_modes.c
sys/dev/drm/drm_probe_helper.c
sys/dev/drm/i915/Makefile
sys/dev/drm/i915/i915_dma.c
sys/dev/drm/i915/i915_drv.c
sys/dev/drm/i915/i915_drv.h
sys/dev/drm/i915/i915_gem.c
sys/dev/drm/i915/i915_gem_context.c
sys/dev/drm/i915/i915_gem_evict.c
sys/dev/drm/i915/i915_gem_execbuffer.c
sys/dev/drm/i915/i915_gem_fence.c
sys/dev/drm/i915/i915_gem_gtt.c
sys/dev/drm/i915/i915_gem_gtt.h
sys/dev/drm/i915/i915_gem_shrinker.c
sys/dev/drm/i915/i915_gem_stolen.c
sys/dev/drm/i915/i915_gem_userptr.c
sys/dev/drm/i915/i915_gpu_error.c [new file with mode: 0644]
sys/dev/drm/i915/i915_guc_reg.h
sys/dev/drm/i915/i915_guc_submission.c
sys/dev/drm/i915/i915_irq.c
sys/dev/drm/i915/i915_params.c
sys/dev/drm/i915/i915_params.h [new file with mode: 0644]
sys/dev/drm/i915/i915_reg.h
sys/dev/drm/i915/i915_suspend.c
sys/dev/drm/i915/i915_sysfs.c
sys/dev/drm/i915/i915_trace.h
sys/dev/drm/i915/intel_atomic.c
sys/dev/drm/i915/intel_atomic_plane.c
sys/dev/drm/i915/intel_audio.c
sys/dev/drm/i915/intel_bios.c
sys/dev/drm/i915/intel_bios.h
sys/dev/drm/i915/intel_crt.c
sys/dev/drm/i915/intel_csr.c
sys/dev/drm/i915/intel_ddi.c
sys/dev/drm/i915/intel_display.c
sys/dev/drm/i915/intel_dp.c
sys/dev/drm/i915/intel_dp_mst.c
sys/dev/drm/i915/intel_drv.h
sys/dev/drm/i915/intel_dsi.c
sys/dev/drm/i915/intel_dsi.h
sys/dev/drm/i915/intel_dsi_panel_vbt.c
sys/dev/drm/i915/intel_dsi_pll.c
sys/dev/drm/i915/intel_fbc.c
sys/dev/drm/i915/intel_fbdev.c
sys/dev/drm/i915/intel_guc.h
sys/dev/drm/i915/intel_guc_fwif.h
sys/dev/drm/i915/intel_guc_loader.c
sys/dev/drm/i915/intel_hdmi.c
sys/dev/drm/i915/intel_lrc.c
sys/dev/drm/i915/intel_lrc.h
sys/dev/drm/i915/intel_lvds.c
sys/dev/drm/i915/intel_overlay.c
sys/dev/drm/i915/intel_pm.c
sys/dev/drm/i915/intel_psr.c
sys/dev/drm/i915/intel_ringbuffer.c
sys/dev/drm/i915/intel_ringbuffer.h
sys/dev/drm/i915/intel_runtime_pm.c
sys/dev/drm/i915/intel_sdvo.c
sys/dev/drm/i915/intel_sdvo_regs.h
sys/dev/drm/i915/intel_sideband.c
sys/dev/drm/i915/intel_sprite.c
sys/dev/drm/i915/intel_tv.c
sys/dev/drm/i915/intel_uncore.c
sys/dev/drm/include/drm/drmP.h
sys/dev/drm/include/drm/drm_atomic.h
sys/dev/drm/include/drm/drm_atomic_helper.h
sys/dev/drm/include/drm/drm_crtc.h
sys/dev/drm/include/drm/drm_crtc_helper.h
sys/dev/drm/include/drm/drm_dp_aux_dev.h [new file with mode: 0644]
sys/dev/drm/include/drm/drm_dp_helper.h
sys/dev/drm/include/drm/drm_edid.h
sys/dev/drm/include/drm/drm_fb_helper.h
sys/dev/drm/include/drm/drm_gem.h
sys/dev/drm/include/drm/drm_mipi_dsi.h
sys/dev/drm/include/drm/drm_modeset_helper_vtables.h [new file with mode: 0644]
sys/dev/drm/include/drm/drm_modeset_lock.h
sys/dev/drm/include/drm/drm_of.h
sys/dev/drm/include/drm/drm_plane_helper.h
sys/dev/drm/include/drm/i915_pciids.h
sys/dev/drm/include/linux/cache.h
sys/dev/drm/include/uapi_drm/i915_drm.h
sys/dev/drm/radeon/atombios_crtc.c
sys/dev/drm/radeon/radeon_connectors.c
sys/dev/drm/radeon/radeon_legacy_crtc.c
sys/dev/drm/radeon/radeon_legacy_encoders.c

index d4738ae..40add8a 100644
@@ -2123,6 +2123,7 @@ dev/drm/i915/i915_gem_render_state.c      optional i915 drm
 dev/drm/i915/i915_gem_shrinker.c       optional i915 drm
 dev/drm/i915/i915_gem_tiling.c         optional i915 drm
 dev/drm/i915/i915_gem_userptr.c                optional i915 drm
+dev/drm/i915/i915_gpu_error.c          optional i915 drm
 dev/drm/i915/i915_guc_submission.c     optional i915 drm
 dev/drm/i915/i915_irq.c                        optional i915 drm
 dev/drm/i915/i915_params.c             optional i915 drm
index 657586e..2d27c1e 100644
@@ -28,6 +28,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
+#include <uapi_drm/drm_mode.h>
 #include <drm/drm_plane_helper.h>
 
 #include "drm_crtc_internal.h"
@@ -67,8 +68,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
         */
        state->allow_modeset = true;
 
-       state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
-
        state->crtcs = kcalloc(dev->mode_config.num_crtc,
                               sizeof(*state->crtcs), GFP_KERNEL);
        if (!state->crtcs)
@@ -85,16 +84,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
                                      sizeof(*state->plane_states), GFP_KERNEL);
        if (!state->plane_states)
                goto fail;
-       state->connectors = kcalloc(state->num_connector,
-                                   sizeof(*state->connectors),
-                                   GFP_KERNEL);
-       if (!state->connectors)
-               goto fail;
-       state->connector_states = kcalloc(state->num_connector,
-                                         sizeof(*state->connector_states),
-                                         GFP_KERNEL);
-       if (!state->connector_states)
-               goto fail;
 
        state->dev = dev;
 
@@ -389,6 +378,58 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
 }
 EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
 
+/**
+ * drm_atomic_replace_property_blob - replace a blob property
+ * @blob: a pointer to the member blob to be replaced
+ * @new_blob: the new blob to replace with
+ * @replaced: whether the blob has been replaced
+ *
+ * Helper to swap @blob for @new_blob, dropping the reference held on the old
+ * blob and taking a reference on the new one; it does not return a value.
+ */
+static void
+drm_atomic_replace_property_blob(struct drm_property_blob **blob,
+                                struct drm_property_blob *new_blob,
+                                bool *replaced)
+{
+       struct drm_property_blob *old_blob = *blob;
+
+       if (old_blob == new_blob)
+               return;
+
+       if (old_blob)
+               drm_property_unreference_blob(old_blob);
+       if (new_blob)
+               drm_property_reference_blob(new_blob);
+       *blob = new_blob;
+       *replaced = true;
+
+       return;
+}
+
+static int
+drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
+                                        struct drm_property_blob **blob,
+                                        uint64_t blob_id,
+                                        ssize_t expected_size,
+                                        bool *replaced)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_property_blob *new_blob = NULL;
+
+       if (blob_id != 0) {
+               new_blob = drm_property_lookup_blob(dev, blob_id);
+               if (new_blob == NULL)
+                       return -EINVAL;
+               if (expected_size > 0 && expected_size != new_blob->length)
+                       return -EINVAL;
+       }
+
+       drm_atomic_replace_property_blob(blob, new_blob, replaced);
+
+       return 0;
+}
+
 /**
  * drm_atomic_crtc_set_property - set property on CRTC
  * @crtc: the drm CRTC to set a property on
@@ -411,6 +452,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
 {
        struct drm_device *dev = crtc->dev;
        struct drm_mode_config *config = &dev->mode_config;
+       bool replaced = false;
        int ret;
 
        if (property == config->prop_active)
@@ -421,8 +463,31 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
                drm_property_unreference_blob(mode);
                return ret;
-       }
-       else if (crtc->funcs->atomic_set_property)
+       } else if (property == config->degamma_lut_property) {
+               ret = drm_atomic_replace_property_blob_from_id(crtc,
+                                       &state->degamma_lut,
+                                       val,
+                                       -1,
+                                       &replaced);
+               state->color_mgmt_changed = replaced;
+               return ret;
+       } else if (property == config->ctm_property) {
+               ret = drm_atomic_replace_property_blob_from_id(crtc,
+                                       &state->ctm,
+                                       val,
+                                       sizeof(struct drm_color_ctm),
+                                       &replaced);
+               state->color_mgmt_changed = replaced;
+               return ret;
+       } else if (property == config->gamma_lut_property) {
+               ret = drm_atomic_replace_property_blob_from_id(crtc,
+                                       &state->gamma_lut,
+                                       val,
+                                       -1,
+                                       &replaced);
+               state->color_mgmt_changed = replaced;
+               return ret;
+       } else if (crtc->funcs->atomic_set_property)
                return crtc->funcs->atomic_set_property(crtc, state, property, val);
        else
                return -EINVAL;
@@ -458,6 +523,12 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
                *val = state->active;
        else if (property == config->prop_mode_id)
                *val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+       else if (property == config->degamma_lut_property)
+               *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
+       else if (property == config->ctm_property)
+               *val = (state->ctm) ? state->ctm->base.id : 0;
+       else if (property == config->gamma_lut_property)
+               *val = (state->gamma_lut) ? state->gamma_lut->base.id : 0;
        else if (crtc->funcs->atomic_get_property)
                return crtc->funcs->atomic_get_property(crtc, state, property, val);
        else
@@ -825,19 +896,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
 
        index = drm_connector_index(connector);
 
-       /*
-        * Construction of atomic state updates can race with a connector
-        * hot-add which might overflow. In this case flip the table and just
-        * restart the entire ioctl - no one is fast enough to livelock a cpu
-        * with physical hotplug events anyway.
-        *
-        * Note that we only grab the indexes once we have the right lock to
-        * prevent hotplug/unplugging of connectors. So removal is no problem,
-        * at most the array is a bit too large.
-        */
        if (index >= state->num_connector) {
-               DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
-               return ERR_PTR(-EAGAIN);
+               struct drm_connector **c;
+               struct drm_connector_state **cs;
+               int alloc = max(index + 1, config->num_connector);
+
+               c = krealloc(state->connectors, alloc * sizeof(*state->connectors), M_DRM, M_WAITOK);
+               if (!c)
+                       return ERR_PTR(-ENOMEM);
+
+               state->connectors = c;
+               memset(&state->connectors[state->num_connector], 0,
+                      sizeof(*state->connectors) * (alloc - state->num_connector));
+
+               cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), M_DRM, M_WAITOK);
+               if (!cs)
+                       return ERR_PTR(-ENOMEM);
+
+               state->connector_states = cs;
+               memset(&state->connector_states[state->num_connector], 0,
+                      sizeof(*state->connector_states) * (alloc - state->num_connector));
+               state->num_connector = alloc;
        }
 
        if (state->connector_states[index])
@@ -1341,18 +1420,6 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
 }
 EXPORT_SYMBOL(drm_atomic_async_commit);
 
-#ifdef __DragonFly__
-/*
- * The Linux layer version of kfree() is a macro and can't be called
- * directly via a function pointer
- */
-static void
-drm_atomic_event_destroy(struct drm_pending_event *e)
-{
-       kfree(e);
-}
-#endif
-
 /*
 * The big monster ioctl
  */
@@ -1361,48 +1428,23 @@ static struct drm_pending_vblank_event *create_vblank_event(
                struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
 {
        struct drm_pending_vblank_event *e = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       if (file_priv->event_space < sizeof e->event) {
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               goto out;
-       }
-       file_priv->event_space -= sizeof e->event;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       int ret;
 
        e = kzalloc(sizeof *e, GFP_KERNEL);
-       if (e == NULL) {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               file_priv->event_space += sizeof e->event;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               goto out;
-       }
+       if (!e)
+               return NULL;
 
        e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
-       e->event.base.length = sizeof e->event;
+       e->event.base.length = sizeof(e->event);
        e->event.user_data = user_data;
-       e->base.event = &e->event.base;
-       e->base.file_priv = file_priv;
-#ifdef __DragonFly__
-       e->base.destroy = drm_atomic_event_destroy;
-#else
-       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-#endif
 
-out:
-       return e;
-}
-
-static void destroy_vblank_event(struct drm_device *dev,
-               struct drm_file *file_priv, struct drm_pending_vblank_event *e)
-{
-       unsigned long flags;
+       ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+       if (ret) {
+               kfree(e);
+               return NULL;
+       }
 
-       spin_lock_irqsave(&dev->event_lock, flags);
-       file_priv->event_space += sizeof e->event;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-       kfree(e);
+       return e;
 }
 
 static int atomic_set_prop(struct drm_atomic_state *state,
@@ -1664,8 +1706,7 @@ out:
                        if (!crtc_state->event)
                                continue;
 
-                       destroy_vblank_event(dev, file_priv,
-                                            crtc_state->event);
+                       drm_event_cancel_free(dev, &crtc_state->event->base);
                }
        }
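
A minimal userspace sketch (not part of the patch) of driving the new GAMMA_LUT CRTC property handled above, using libdrm; fd, crtc_id and gamma_lut_prop_id are assumed to have been looked up elsewhere, the 256-entry table assumes the CRTC advertises a GAMMA_LUT_SIZE of 256, and a libdrm new enough to ship struct drm_color_lut is required:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Upload a linear 256-entry gamma ramp as a GAMMA_LUT blob. */
    static int set_linear_gamma(int fd, uint32_t crtc_id, uint32_t gamma_lut_prop_id)
    {
            struct drm_color_lut lut[256];
            uint32_t blob_id;
            int i, ret;

            memset(lut, 0, sizeof(lut));
            for (i = 0; i < 256; i++)
                    lut[i].red = lut[i].green = lut[i].blue = i * 0x101;

            ret = drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id);
            if (ret)
                    return ret;

            ret = drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
                                           gamma_lut_prop_id, blob_id);
            drmModeDestroyPropertyBlob(fd, blob_id);
            return ret;
    }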
 
index ab31806..971851c 100644
@@ -67,7 +67,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
        struct drm_crtc_state *crtc_state;
 
        if (plane->state->crtc) {
-               crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
+               crtc_state = drm_atomic_get_existing_crtc_state(state,
+                                                               plane->state->crtc);
 
                if (WARN_ON(!crtc_state))
                        return;
@@ -76,8 +77,8 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
        }
 
        if (plane_state->crtc) {
-               crtc_state =
-                       state->crtc_states[drm_crtc_index(plane_state->crtc)];
+               crtc_state = drm_atomic_get_existing_crtc_state(state,
+                                                               plane_state->crtc);
 
                if (WARN_ON(!crtc_state))
                        return;
@@ -86,110 +87,185 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
        }
 }
 
-static bool
-check_pending_encoder_assignment(struct drm_atomic_state *state,
-                                struct drm_encoder *new_encoder)
+static int handle_conflicting_encoders(struct drm_atomic_state *state,
+                                      bool disable_conflicting_encoders)
 {
-       struct drm_connector *connector;
        struct drm_connector_state *conn_state;
-       int i;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       unsigned encoder_mask = 0;
+       int i, ret;
 
+       /*
+        * First loop, find all newly assigned encoders from the connectors
+        * part of the state. If the same encoder is assigned to multiple
+        * connectors bail out.
+        */
        for_each_connector_in_state(state, connector, conn_state, i) {
-               if (conn_state->best_encoder != new_encoder)
+               const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+               struct drm_encoder *new_encoder;
+
+               if (!conn_state->crtc)
+                       continue;
+
+               if (funcs->atomic_best_encoder)
+                       new_encoder = funcs->atomic_best_encoder(connector, conn_state);
+               else
+                       new_encoder = funcs->best_encoder(connector);
+
+               if (new_encoder) {
+                       if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
+                               DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] on [CONNECTOR:%d:%s] already assigned\n",
+                                       new_encoder->base.id, new_encoder->name,
+                                       connector->base.id, connector->name);
+
+                               return -EINVAL;
+                       }
+
+                       encoder_mask |= 1 << drm_encoder_index(new_encoder);
+               }
+       }
+
+       if (!encoder_mask)
+               return 0;
+
+       /*
+        * Second loop, iterate over all connectors not part of the state.
+        *
+        * If a conflicting encoder is found and disable_conflicting_encoders
+        * is not set, an error is returned. Userspace can provide a solution
+        * through the atomic ioctl.
+        *
+        * If the flag is set conflicting connectors are removed from the crtc
+        * and the crtc is disabled if no encoder is left. This preserves
+        * compatibility with the legacy set_config behavior.
+        */
+       drm_for_each_connector(connector, state->dev) {
+               struct drm_crtc_state *crtc_state;
+
+               if (drm_atomic_get_existing_connector_state(state, connector))
+                       continue;
+
+               encoder = connector->state->best_encoder;
+               if (!encoder || !(encoder_mask & (1 << drm_encoder_index(encoder))))
                        continue;
 
-               /* encoder already assigned and we're trying to re-steal it! */
-               if (connector->state->best_encoder != conn_state->best_encoder)
-                       return false;
+               if (!disable_conflicting_encoders) {
+                       DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s] by [CONNECTOR:%d:%s]\n",
+                                        encoder->base.id, encoder->name,
+                                        connector->state->crtc->base.id,
+                                        connector->state->crtc->name,
+                                        connector->base.id, connector->name);
+                       return -EINVAL;
+               }
+
+               conn_state = drm_atomic_get_connector_state(state, connector);
+               if (IS_ERR(conn_state))
+                       return PTR_ERR(conn_state);
+
+               DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n",
+                                encoder->base.id, encoder->name,
+                                conn_state->crtc->base.id, conn_state->crtc->name,
+                                connector->base.id, connector->name);
+
+               crtc_state = drm_atomic_get_existing_crtc_state(state, conn_state->crtc);
+
+               ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+               if (ret)
+                       return ret;
+
+               if (!crtc_state->connector_mask) {
+                       ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
+                                                               NULL);
+                       if (ret < 0)
+                               return ret;
+
+                       crtc_state->active = false;
+               }
        }
 
-       return true;
+       return 0;
 }
 
-static struct drm_crtc *
-get_current_crtc_for_encoder(struct drm_device *dev,
-                            struct drm_encoder *encoder)
+static void
+set_best_encoder(struct drm_atomic_state *state,
+                struct drm_connector_state *conn_state,
+                struct drm_encoder *encoder)
 {
-       struct drm_mode_config *config = &dev->mode_config;
-       struct drm_connector *connector;
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
 
-       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+       if (conn_state->best_encoder) {
+               /* Unset the encoder_mask in the old crtc state. */
+               crtc = conn_state->connector->state->crtc;
 
-       drm_for_each_connector(connector, dev) {
-               if (connector->state->best_encoder != encoder)
-                       continue;
+               /* A NULL crtc is an error here because we should have
+                *  duplicated a NULL best_encoder when crtc was NULL.
+                * As an exception restoring duplicated atomic state
+                * during resume is allowed, so don't warn when
+                * best_encoder is equal to encoder we intend to set.
+                */
+               WARN_ON(!crtc && encoder != conn_state->best_encoder);
+               if (crtc) {
+                       crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+                       crtc_state->encoder_mask &=
+                               ~(1 << drm_encoder_index(conn_state->best_encoder));
+               }
+       }
 
-               return connector->state->crtc;
+       if (encoder) {
+               crtc = conn_state->crtc;
+               WARN_ON(!crtc);
+               if (crtc) {
+                       crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+                       crtc_state->encoder_mask |=
+                               1 << drm_encoder_index(encoder);
+               }
        }
 
-       return NULL;
+       conn_state->best_encoder = encoder;
 }
 
-static int
+static void
 steal_encoder(struct drm_atomic_state *state,
-             struct drm_encoder *encoder,
-             struct drm_crtc *encoder_crtc)
+             struct drm_encoder *encoder)
 {
-       struct drm_mode_config *config = &state->dev->mode_config;
        struct drm_crtc_state *crtc_state;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
-       int ret;
-
-       /*
-        * We can only steal an encoder coming from a connector, which means we
-        * must already hold the connection_mutex.
-        */
-       WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+       int i;
 
-       DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
-                        encoder->base.id, encoder->name,
-                        encoder_crtc->base.id, encoder_crtc->name);
+       for_each_connector_in_state(state, connector, connector_state, i) {
+               struct drm_crtc *encoder_crtc;
 
-       crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
-       if (IS_ERR(crtc_state))
-               return PTR_ERR(crtc_state);
+               if (connector_state->best_encoder != encoder)
+                       continue;
 
-       crtc_state->connectors_changed = true;
+               encoder_crtc = connector->state->crtc;
 
-       list_for_each_entry(connector, &config->connector_list, head) {
-               if (connector->state->best_encoder != encoder)
-                       continue;
+               DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], stealing it\n",
+                                encoder->base.id, encoder->name,
+                                encoder_crtc->base.id, encoder_crtc->name);
 
-               DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
-                                connector->base.id,
-                                connector->name);
+               set_best_encoder(state, connector_state, NULL);
 
-               connector_state = drm_atomic_get_connector_state(state,
-                                                                connector);
-               if (IS_ERR(connector_state))
-                       return PTR_ERR(connector_state);
+               crtc_state = drm_atomic_get_existing_crtc_state(state, encoder_crtc);
+               crtc_state->connectors_changed = true;
 
-               ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
-               if (ret)
-                       return ret;
-               connector_state->best_encoder = NULL;
+               return;
        }
-
-       return 0;
 }
 
 static int
-update_connector_routing(struct drm_atomic_state *state, int conn_idx)
+update_connector_routing(struct drm_atomic_state *state,
+                        struct drm_connector *connector,
+                        struct drm_connector_state *connector_state)
 {
        const struct drm_connector_helper_funcs *funcs;
        struct drm_encoder *new_encoder;
-       struct drm_crtc *encoder_crtc;
-       struct drm_connector *connector;
-       struct drm_connector_state *connector_state;
        struct drm_crtc_state *crtc_state;
-       int idx, ret;
-
-       connector = state->connectors[conn_idx];
-       connector_state = state->connector_states[conn_idx];
-
-       if (!connector)
-               return 0;
 
        DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
                         connector->base.id,
@@ -197,16 +273,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 
        if (connector->state->crtc != connector_state->crtc) {
                if (connector->state->crtc) {
-                       idx = drm_crtc_index(connector->state->crtc);
-
-                       crtc_state = state->crtc_states[idx];
+                       crtc_state = drm_atomic_get_existing_crtc_state(state, connector->state->crtc);
                        crtc_state->connectors_changed = true;
                }
 
                if (connector_state->crtc) {
-                       idx = drm_crtc_index(connector_state->crtc);
-
-                       crtc_state = state->crtc_states[idx];
+                       crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
                        crtc_state->connectors_changed = true;
                }
        }
@@ -216,7 +288,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                                connector->base.id,
                                connector->name);
 
-               connector_state->best_encoder = NULL;
+               set_best_encoder(state, connector_state, NULL);
 
                return 0;
        }
@@ -245,6 +317,8 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        }
 
        if (new_encoder == connector_state->best_encoder) {
+               set_best_encoder(state, connector_state, new_encoder);
+
                DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
                                 connector->base.id,
                                 connector->name,
@@ -256,33 +330,11 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                return 0;
        }
 
-       if (!check_pending_encoder_assignment(state, new_encoder)) {
-               DRM_DEBUG_ATOMIC("Encoder for [CONNECTOR:%d:%s] already assigned\n",
-                                connector->base.id,
-                                connector->name);
-               return -EINVAL;
-       }
-
-       encoder_crtc = get_current_crtc_for_encoder(state->dev,
-                                                   new_encoder);
+       steal_encoder(state, new_encoder);
 
-       if (encoder_crtc) {
-               ret = steal_encoder(state, new_encoder, encoder_crtc);
-               if (ret) {
-                       DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
-                                        connector->base.id,
-                                        connector->name);
-                       return ret;
-               }
-       }
-
-       if (WARN_ON(!connector_state->crtc))
-               return -EINVAL;
+       set_best_encoder(state, connector_state, new_encoder);
 
-       connector_state->best_encoder = new_encoder;
-       idx = drm_crtc_index(connector_state->crtc);
-
-       crtc_state = state->crtc_states[idx];
+       crtc_state = drm_atomic_get_existing_crtc_state(state, connector_state->crtc);
        crtc_state->connectors_changed = true;
 
        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
@@ -323,8 +375,8 @@ mode_fixup(struct drm_atomic_state *state)
                if (!conn_state->crtc || !conn_state->best_encoder)
                        continue;
 
-               crtc_state =
-                       state->crtc_states[drm_crtc_index(conn_state->crtc)];
+               crtc_state = drm_atomic_get_existing_crtc_state(state,
+                                                               conn_state->crtc);
 
                /*
                 * Each encoder has at most one connector (since we always steal
@@ -445,13 +497,18 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                }
        }
 
+       ret = handle_conflicting_encoders(state, state->legacy_set_config);
+       if (ret)
+               return ret;
+
        for_each_connector_in_state(state, connector, connector_state, i) {
                /*
                 * This only sets crtc->mode_changed for routing changes,
                 * drivers must set crtc->mode_changed themselves when connector
                 * properties need to be updated.
                 */
-               ret = update_connector_routing(state, i);
+               ret = update_connector_routing(state, connector,
+                                              connector_state);
                if (ret)
                        return ret;
        }
@@ -617,14 +674,14 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
                const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
-               struct drm_crtc_state *old_crtc_state;
 
                /* Shut down everything that's in the changeset and currently
                 * still on. So need to check the old, saved state. */
                if (!old_conn_state->crtc)
                        continue;
 
-               old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
+               old_crtc_state = drm_atomic_get_existing_crtc_state(old_state,
+                                                                   old_conn_state->crtc);
 
                if (!old_crtc_state->active ||
                    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
@@ -1496,7 +1553,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
 {
        int i;
 
-       for (i = 0; i < dev->mode_config.num_connector; i++) {
+       for (i = 0; i < state->num_connector; i++) {
                struct drm_connector *connector = state->connectors[i];
 
                if (!connector)
@@ -1722,28 +1779,18 @@ static int update_output_state(struct drm_atomic_state *state,
        struct drm_crtc_state *crtc_state;
        struct drm_connector *connector;
        struct drm_connector_state *conn_state;
-       int ret, i, j;
+       int ret, i;
 
        ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                               state->acquire_ctx);
        if (ret)
                return ret;
 
-       /* First grab all affected connector/crtc states. */
-       for (i = 0; i < set->num_connectors; i++) {
-               conn_state = drm_atomic_get_connector_state(state,
-                                                           set->connectors[i]);
-               if (IS_ERR(conn_state))
-                       return PTR_ERR(conn_state);
-       }
-
-       for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               ret = drm_atomic_add_affected_connectors(state, crtc);
-               if (ret)
-                       return ret;
-       }
+       /* First disable all connectors on the target crtc. */
+       ret = drm_atomic_add_affected_connectors(state, set->crtc);
+       if (ret)
+               return ret;
 
-       /* Then recompute connector->crtc links and crtc enabling state. */
        for_each_connector_in_state(state, connector, conn_state, i) {
                if (conn_state->crtc == set->crtc) {
                        ret = drm_atomic_set_crtc_for_connector(conn_state,
@@ -1751,16 +1798,19 @@ static int update_output_state(struct drm_atomic_state *state,
                        if (ret)
                                return ret;
                }
+       }
 
-               for (j = 0; j < set->num_connectors; j++) {
-                       if (set->connectors[j] == connector) {
-                               ret = drm_atomic_set_crtc_for_connector(conn_state,
-                                                                       set->crtc);
-                               if (ret)
-                                       return ret;
-                               break;
-                       }
-               }
+       /* Then set all connectors from set->connectors on the target crtc */
+       for (i = 0; i < set->num_connectors; i++) {
+               conn_state = drm_atomic_get_connector_state(state,
+                                                           set->connectors[i]);
+               if (IS_ERR(conn_state))
+                       return PTR_ERR(conn_state);
+
+               ret = drm_atomic_set_crtc_for_connector(conn_state,
+                                                       set->crtc);
+               if (ret)
+                       return ret;
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
@@ -1803,6 +1853,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
        if (!state)
                return -ENOMEM;
 
+       state->legacy_set_config = true;
        state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
 retry:
        ret = __drm_atomic_helper_set_config(set, state);
@@ -2449,8 +2500,12 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
  */
 void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 {
-       if (crtc->state)
+       if (crtc->state) {
                drm_property_unreference_blob(crtc->state->mode_blob);
+               drm_property_unreference_blob(crtc->state->degamma_lut);
+               drm_property_unreference_blob(crtc->state->ctm);
+               drm_property_unreference_blob(crtc->state->gamma_lut);
+       }
        kfree(crtc->state);
        crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
 
@@ -2474,10 +2529,17 @@ void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
 
        if (state->mode_blob)
                drm_property_reference_blob(state->mode_blob);
+       if (state->degamma_lut)
+               drm_property_reference_blob(state->degamma_lut);
+       if (state->ctm)
+               drm_property_reference_blob(state->ctm);
+       if (state->gamma_lut)
+               drm_property_reference_blob(state->gamma_lut);
        state->mode_changed = false;
        state->active_changed = false;
        state->planes_changed = false;
        state->connectors_changed = false;
+       state->color_mgmt_changed = false;
        state->event = NULL;
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
@@ -2518,6 +2580,9 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
                                            struct drm_crtc_state *state)
 {
        drm_property_unreference_blob(state->mode_blob);
+       drm_property_unreference_blob(state->degamma_lut);
+       drm_property_unreference_blob(state->ctm);
+       drm_property_unreference_blob(state->gamma_lut);
 }
 EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
 
@@ -2552,8 +2617,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
        kfree(plane->state);
        plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
 
-       if (plane->state)
+       if (plane->state) {
                plane->state->plane = plane;
+               plane->state->rotation = BIT(DRM_ROTATE_0);
+       }
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
@@ -2829,3 +2896,98 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
        kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @start: first entry in the tables to update
+ * @size: size of the tables
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that support color management through the DEGAMMA_LUT/GAMMA_LUT
+ * properties.
+ */
+void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
+                                       u16 *red, u16 *green, u16 *blue,
+                                       uint32_t start, uint32_t size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+       struct drm_atomic_state *state;
+       struct drm_crtc_state *crtc_state;
+       struct drm_property_blob *blob = NULL;
+       struct drm_color_lut *blob_data;
+       int i, ret = 0;
+
+       state = drm_atomic_state_alloc(crtc->dev);
+       if (!state)
+               return;
+
+       blob = drm_property_create_blob(dev,
+                                       sizeof(struct drm_color_lut) * size,
+                                       NULL);
+       if (IS_ERR(blob)) {
+               ret = PTR_ERR(blob);
+               blob = NULL;
+               goto fail;
+       }
+
+       /* Prepare GAMMA_LUT with the legacy values. */
+       blob_data = (struct drm_color_lut *) blob->data;
+       for (i = 0; i < size; i++) {
+               blob_data[i].red = red[i];
+               blob_data[i].green = green[i];
+               blob_data[i].blue = blue[i];
+       }
+
+       state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
+retry:
+       crtc_state = drm_atomic_get_crtc_state(state, crtc);
+       if (IS_ERR(crtc_state)) {
+               ret = PTR_ERR(crtc_state);
+               goto fail;
+       }
+
+       /* Reset DEGAMMA_LUT and CTM properties. */
+       ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+                       config->degamma_lut_property, 0);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+                       config->ctm_property, 0);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_crtc_set_property(crtc, crtc_state,
+                       config->gamma_lut_property, blob->base.id);
+       if (ret)
+               goto fail;
+
+       ret = drm_atomic_commit(state);
+       if (ret)
+               goto fail;
+
+       /* Driver takes ownership of state on successful commit. */
+
+       drm_property_unreference_blob(blob);
+
+       return;
+fail:
+       if (ret == -EDEADLK)
+               goto backoff;
+
+       drm_atomic_state_free(state);
+       drm_property_unreference_blob(blob);
+
+       return;
+backoff:
+       drm_atomic_state_clear(state);
+       drm_atomic_legacy_backoff(state);
+
+       goto retry;
+}
+EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
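
As a usage sketch (not part of the patch), an atomic driver that exposes the DEGAMMA_LUT/CTM/GAMMA_LUT properties could reuse the new helper for the legacy gamma ioctl roughly like this; the example_crtc_funcs name is a placeholder, the helper assignments are the standard atomic ones:

    #include <drm/drm_crtc.h>
    #include <drm/drm_atomic_helper.h>

    static const struct drm_crtc_funcs example_crtc_funcs = {
            .gamma_set              = drm_atomic_helper_legacy_gamma_set,
            .set_config             = drm_atomic_helper_set_config,
            .page_flip              = drm_atomic_helper_page_flip,
            .destroy                = drm_crtc_cleanup,
            .reset                  = drm_atomic_helper_crtc_reset,
            .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
    };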
index b10410e..f016194 100644
@@ -185,7 +185,8 @@ void drm_bridge_disable(struct drm_bridge *bridge)
 
        drm_bridge_disable(bridge->next);
 
-       bridge->funcs->disable(bridge);
+       if (bridge->funcs->disable)
+               bridge->funcs->disable(bridge);
 }
 EXPORT_SYMBOL(drm_bridge_disable);
 
@@ -205,7 +206,8 @@ void drm_bridge_post_disable(struct drm_bridge *bridge)
        if (!bridge)
                return;
 
-       bridge->funcs->post_disable(bridge);
+       if (bridge->funcs->post_disable)
+               bridge->funcs->post_disable(bridge);
 
        drm_bridge_post_disable(bridge->next);
 }
@@ -255,7 +257,8 @@ void drm_bridge_pre_enable(struct drm_bridge *bridge)
 
        drm_bridge_pre_enable(bridge->next);
 
-       bridge->funcs->pre_enable(bridge);
+       if (bridge->funcs->pre_enable)
+               bridge->funcs->pre_enable(bridge);
 }
 EXPORT_SYMBOL(drm_bridge_pre_enable);
 
@@ -275,7 +278,8 @@ void drm_bridge_enable(struct drm_bridge *bridge)
        if (!bridge)
                return;
 
-       bridge->funcs->enable(bridge);
+       if (bridge->funcs->enable)
+               bridge->funcs->enable(bridge);
 
        drm_bridge_enable(bridge->next);
 }
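
With the NULL checks added above, the drm_bridge_funcs hooks become optional, so a bridge driver that only needs an enable hook can register a table like this sketch (the example_* names are hypothetical):

    static void example_bridge_enable(struct drm_bridge *bridge)
    {
            /* power up the downstream encoder chip here */
    }

    static const struct drm_bridge_funcs example_bridge_funcs = {
            .enable = example_bridge_enable,
            /* .pre_enable, .disable and .post_disable intentionally left unset */
    };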
index f8b7b25..ceded8a 100644
@@ -434,9 +434,7 @@ EXPORT_SYMBOL(drm_framebuffer_init);
 static void __drm_framebuffer_unregister(struct drm_device *dev,
                                         struct drm_framebuffer *fb)
 {
-       mutex_lock(&dev->mode_config.idr_mutex);
-       idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
-       mutex_unlock(&dev->mode_config.idr_mutex);
+       drm_mode_object_put(dev, &fb->base);
 
        fb->base.id = 0;
 }
@@ -928,7 +926,7 @@ int drm_connector_init(struct drm_device *dev,
                goto out_put;
        }
        connector->name =
-               drm_asprintf(GFP_KERNEL, "%s-%d",
+               kasprintf(GFP_KERNEL, "%s-%d",
                          drm_connector_enum_list[connector_type].name,
                          connector->connector_type_id);
        if (!connector->name) {
@@ -1156,6 +1154,29 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_encoder_init);
 
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+       unsigned int index = 0;
+       struct drm_encoder *tmp;
+
+       drm_for_each_encoder(tmp, encoder->dev) {
+               if (tmp == encoder)
+                       return index;
+
+               index++;
+       }
+
+       BUG();
+}
+EXPORT_SYMBOL(drm_encoder_index);
+
 /**
  * drm_encoder_cleanup - cleans up an initialised encoder
  * @encoder: encoder to cleanup
@@ -1508,6 +1529,41 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
                return -ENOMEM;
        dev->mode_config.prop_mode_id = prop;
 
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB,
+                       "DEGAMMA_LUT", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.degamma_lut_property = prop;
+
+       prop = drm_property_create_range(dev,
+                       DRM_MODE_PROP_IMMUTABLE,
+                       "DEGAMMA_LUT_SIZE", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.degamma_lut_size_property = prop;
+
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB,
+                       "CTM", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.ctm_property = prop;
+
+       prop = drm_property_create(dev,
+                       DRM_MODE_PROP_BLOB,
+                       "GAMMA_LUT", 0);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.gamma_lut_property = prop;
+
+       prop = drm_property_create_range(dev,
+                       DRM_MODE_PROP_IMMUTABLE,
+                       "GAMMA_LUT_SIZE", 0, UINT_MAX);
+       if (!prop)
+               return -ENOMEM;
+       dev->mode_config.gamma_lut_size_property = prop;
+
        return 0;
 }
 
@@ -5194,18 +5250,6 @@ out:
        return ret;
 }
 
-#ifdef __DragonFly__
-/*
- * The Linux layer version of kfree() is a macro and can't be called
- * directly via a function pointer
- */
-static void
-drm_crtc_event_destroy(struct drm_pending_event *e)
-{
-       kfree(e);
-}
-#endif
-
 /**
  * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
  * @dev: DRM device
@@ -5231,7 +5275,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        struct drm_crtc *crtc;
        struct drm_framebuffer *fb = NULL;
        struct drm_pending_vblank_event *e = NULL;
-       unsigned long flags;
        int ret = -EINVAL;
 
        if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -5282,45 +5325,26 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        }
 
        if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               ret = -ENOMEM;
-               spin_lock_irqsave(&dev->event_lock, flags);
-               if (file_priv->event_space < sizeof(e->event)) {
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
-                       goto out;
-               }
-               file_priv->event_space -= sizeof(e->event);
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-
-               e = kzalloc(sizeof(*e), GFP_KERNEL);
-               if (e == NULL) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
-                       file_priv->event_space += sizeof(e->event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (!e) {
+                       ret = -ENOMEM;
                        goto out;
                }
-
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
                e->event.base.length = sizeof(e->event);
                e->event.user_data = page_flip->user_data;
-               e->base.event = &e->event.base;
-               e->base.file_priv = file_priv;
-#ifdef __DragonFly__
-               e->base.destroy = drm_crtc_event_destroy;
-#else
-               e->base.destroy =
-                       (void (*) (struct drm_pending_event *)) kfree;
-#endif
+               ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+               if (ret) {
+                       kfree(e);
+                       goto out;
+               }
        }
 
        crtc->primary->old_fb = crtc->primary->fb;
        ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
        if (ret) {
-               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
-                       file_priv->event_space += sizeof(e->event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
-                       kfree(e);
-               }
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+                       drm_event_cancel_free(dev, &e->base);
                /* Keep the old fb, don't unref it. */
                crtc->primary->old_fb = NULL;
        } else {
@@ -5703,6 +5727,48 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
+/**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       if (plane == 0)
+               return width;
+
+       return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       if (plane == 0)
+               return height;
+
+       return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
+
 /**
  * drm_rotation_simplify() - Try to simplify the rotation
  * @rotation: Rotation to be simplified
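
A small sketch (not from the patch) of the two new format helpers above, assuming an NV12 framebuffer: NV12 is subsampled 2x2, so plane 0 keeps the full size and plane 1 (CbCr) is halved in both directions:

    #include <drm/drm_crtc.h>
    #include <drm/drm_fourcc.h>

    static void example_nv12_plane_sizes(void)
    {
            int w0 = drm_format_plane_width(1920, DRM_FORMAT_NV12, 0);  /* 1920 */
            int h0 = drm_format_plane_height(1080, DRM_FORMAT_NV12, 0); /* 1080 */
            int w1 = drm_format_plane_width(1920, DRM_FORMAT_NV12, 1);  /*  960 */
            int h1 = drm_format_plane_height(1080, DRM_FORMAT_NV12, 1); /*  540 */

            (void)w0; (void)h0; (void)w1; (void)h1;
    }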
index b1cfd3a..8fec16e 100644
@@ -73,8 +73,6 @@
  * &drm_crtc_helper_funcs, struct &drm_encoder_helper_funcs and struct
  * &drm_connector_helper_funcs.
  */
-MODULE_AUTHOR("David Airlie, Jesse Barnes");
-MODULE_DESCRIPTION("DRM KMS helper");
 
 /**
  * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
@@ -223,6 +221,15 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
  * disconnected connectors. Then it will disable all unused encoders and CRTCs
  * either by calling their disable callback if available or by calling their
  * dpms callback with DRM_MODE_DPMS_OFF.
+ *
+ * NOTE:
+ *
+ * This function is part of the legacy modeset helper library and will cause
+ * major confusion with atomic drivers. This is because atomic helpers guarantee
+ * to never call ->disable() hooks on a disabled function, or ->enable() hooks
+ * on an already enabled function. drm_helper_disable_unused_functions() on the other
+ * hand throws such guarantees into the wind and calls disable hooks
+ * unconditionally on unused functions.
  */
 void drm_helper_disable_unused_functions(struct drm_device *dev)
 {
@@ -331,16 +338,21 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                }
 
                encoder_funcs = encoder->helper_private;
-               if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
-                                                     adjusted_mode))) {
-                       DRM_DEBUG_KMS("Encoder fixup failed\n");
-                       goto done;
+               if (encoder_funcs->mode_fixup) {
+                       if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+                                                             adjusted_mode))) {
+                               DRM_DEBUG_KMS("Encoder fixup failed\n");
+                               goto done;
+                       }
                }
        }
 
-       if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
-               DRM_DEBUG_KMS("CRTC fixup failed\n");
-               goto done;
+       if (crtc_funcs->mode_fixup) {
+               if (!(ret = crtc_funcs->mode_fixup(crtc, mode,
+                                               adjusted_mode))) {
+                       DRM_DEBUG_KMS("CRTC fixup failed\n");
+                       goto done;
+               }
        }
        DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
@@ -581,8 +593,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                if (set->crtc->primary->fb == NULL) {
                        DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
                        mode_changed = true;
-               } else if (set->fb == NULL) {
-                       mode_changed = true;
                } else if (set->fb->pixel_format !=
                           set->crtc->primary->fb->pixel_format) {
                        mode_changed = true;
@@ -593,7 +603,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        if (set->x != set->crtc->x || set->y != set->crtc->y)
                fb_changed = true;
 
-       if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+       if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
                DRM_DEBUG_KMS("modes are different, full mode set\n");
                drm_mode_debug_printmodeline(&set->crtc->mode);
                drm_mode_debug_printmodeline(set->mode);
@@ -985,15 +995,15 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
        if (crtc->funcs->atomic_duplicate_state)
                crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
        else {
-               crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
-               if (!crtc_state)
-                       return -ENOMEM;
-               if (crtc->state)
-                       __drm_atomic_helper_crtc_duplicate_state(crtc, crtc_state);
-               else
-                       crtc_state->crtc = crtc;
+               if (!crtc->state)
+                       drm_atomic_helper_crtc_reset(crtc);
+
+               crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
        }
 
+       if (!crtc_state)
+               return -ENOMEM;
+
        crtc_state->planes_changed = true;
        crtc_state->mode_changed = true;
        ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
@@ -1014,11 +1024,11 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
        ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
 
 out:
-       if (crtc->funcs->atomic_destroy_state)
-               crtc->funcs->atomic_destroy_state(crtc, crtc_state);
-       else {
-               __drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
-               kfree(crtc_state);
+       if (crtc_state) {
+               if (crtc->funcs->atomic_destroy_state)
+                       crtc->funcs->atomic_destroy_state(crtc, crtc_state);
+               else
+                       drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
        }
 
        return ret;
@@ -1069,3 +1079,36 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
        return drm_plane_helper_commit(plane, plane_state, old_fb);
 }
 EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
+
+/**
+ * drm_helper_crtc_enable_color_mgmt - enable color management properties
+ * @crtc: DRM CRTC
+ * @degamma_lut_size: the size of the degamma lut (before CSC)
+ * @gamma_lut_size: the size of the gamma lut (after CSC)
+ *
+ * This function lets the driver enable the color correction properties on a
+ * CRTC: three blob properties (degamma LUT, CSC matrix and gamma LUT) that
+ * userspace can set, and two size properties that expose the LUT sizes.
+ */
+void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
+                                      int degamma_lut_size,
+                                      int gamma_lut_size)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+
+       drm_object_attach_property(&crtc->base,
+                                  config->degamma_lut_property, 0);
+       drm_object_attach_property(&crtc->base,
+                                  config->ctm_property, 0);
+       drm_object_attach_property(&crtc->base,
+                                  config->gamma_lut_property, 0);
+
+       drm_object_attach_property(&crtc->base,
+                                  config->degamma_lut_size_property,
+                                  degamma_lut_size);
+       drm_object_attach_property(&crtc->base,
+                                  config->gamma_lut_size_property,
+                                  gamma_lut_size);
+}
+EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
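
A sketch (not part of the patch) of a driver turning the new properties on from its CRTC setup path, reusing the hypothetical example_crtc_funcs table sketched after the drm_atomic_helper.c hunks above; the 33/256 LUT sizes are arbitrary illustration values:

    static int example_crtc_setup(struct drm_device *dev, struct drm_crtc *crtc,
                                  struct drm_plane *primary, struct drm_plane *cursor)
    {
            int ret;

            ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
                                            &example_crtc_funcs, NULL);
            if (ret)
                    return ret;

            /* the legacy gamma ioctl sizes its tables from this */
            drm_mode_crtc_set_gamma_size(crtc, 256);
            drm_helper_crtc_enable_color_mgmt(crtc, 33, 256);

            return 0;
    }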
index 241bf1f..ed0be0f 100644
@@ -27,6 +27,7 @@
 #include <linux/sched.h>
 #include <linux/i2c.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drm_dp_aux_dev.h>
 #include <drm/drmP.h>
 
 /**
@@ -177,7 +178,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
 {
        struct drm_dp_aux_msg msg;
        unsigned int retry;
-       int err;
+       int err = 0;
 
        memset(&msg, 0, sizeof(msg));
        msg.address = offset;
@@ -185,6 +186,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
        msg.buffer = buffer;
        msg.size = size;
 
+       mutex_lock(&aux->hw_mutex);
+
        /*
         * The specification doesn't give any recommendation on how often to
         * retry native transactions. We used to retry 7 times like for
@@ -193,25 +196,24 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
         */
        for (retry = 0; retry < 32; retry++) {
 
-               mutex_lock(&aux->hw_mutex);
                err = aux->transfer(aux, &msg);
-               mutex_unlock(&aux->hw_mutex);
                if (err < 0) {
                        if (err == -EBUSY)
                                continue;
 
-                       return err;
+                       goto unlock;
                }
 
 
                switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) {
                case DP_AUX_NATIVE_REPLY_ACK:
                        if (err < size)
-                               return -EPROTO;
-                       return err;
+                               err = -EPROTO;
+                       goto unlock;
 
                case DP_AUX_NATIVE_REPLY_NACK:
-                       return -EIO;
+                       err = -EIO;
+                       goto unlock;
 
                case DP_AUX_NATIVE_REPLY_DEFER:
                        usleep_range(AUX_RETRY_INTERVAL, AUX_RETRY_INTERVAL + 100);
@@ -220,7 +222,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
        }
 
        DRM_DEBUG_KMS("too many retries, giving up\n");
-       return -EIO;
+       err = -EIO;
+
+unlock:
+       mutex_unlock(&aux->hw_mutex);
+       return err;
 }
 
 /**
@@ -541,9 +547,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
        int max_retries = max(7, drm_dp_i2c_retry_count(msg, dp_aux_i2c_speed_khz));
 
        for (retry = 0, defer_i2c = 0; retry < (max_retries + defer_i2c); retry++) {
-               mutex_lock(&aux->hw_mutex);
                ret = aux->transfer(aux, msg);
-               mutex_unlock(&aux->hw_mutex);
                if (ret < 0) {
                        if (ret == -EBUSY)
                                continue;
@@ -681,6 +685,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 
        memset(&msg, 0, sizeof(msg));
 
+       mutex_lock(&aux->hw_mutex);
+
        for (i = 0; i < num; i++) {
                msg.address = msgs[i].addr;
                drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -735,6 +741,8 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
        msg.size = 0;
        (void)drm_dp_i2c_do_msg(aux, &msg);
 
+       mutex_unlock(&aux->hw_mutex);
+
        return err;
 }
 
@@ -751,6 +759,8 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
  */
 int drm_dp_aux_register(struct drm_dp_aux *aux)
 {
+       int ret;
+
        lockinit(&aux->hw_mutex, "ahwm", 0, LK_CANRECURSE);
 
        aux->ddc.algo = &drm_dp_i2c_algo;
@@ -769,7 +779,17 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
        strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
                sizeof(aux->ddc.name));
 
-       return i2c_add_adapter(&aux->ddc);
+       ret = drm_dp_aux_register_devnode(aux);
+       if (ret)
+               return ret;
+
+       ret = i2c_add_adapter(&aux->ddc);
+       if (ret) {
+               drm_dp_aux_unregister_devnode(aux);
+               return ret;
+       }
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_dp_aux_register);
 
@@ -779,6 +799,7 @@ EXPORT_SYMBOL(drm_dp_aux_register);
  */
 void drm_dp_aux_unregister(struct drm_dp_aux *aux)
 {
+       drm_dp_aux_unregister_devnode(aux);
        i2c_del_adapter(&aux->ddc);
 }
 EXPORT_SYMBOL(drm_dp_aux_unregister);
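A hedged sketch of the driver side of this interface (names invented): with the hunks above, drm_dp_aux_register() now also creates the AUX character device, and the core holds aux->hw_mutex around entire native and i2c-over-AUX transactions, so the driver's transfer hook no longer needs to serialize itself.

    static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
                                        struct drm_dp_aux_msg *msg)
    {
            /* Drive the hardware AUX transaction here; called with hw_mutex held. */
            return msg->size;
    }

    static int example_aux_init(struct drm_dp_aux *aux, struct device *dev)
    {
            aux->name = "example AUX";
            aux->dev = dev;
            aux->transfer = example_aux_transfer;

            /* Registers the devnode first, then the i2c adapter; unwinds on error. */
            return drm_dp_aux_register(aux);
    }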
index d4ee03c..8a3244a 100644 (file)
@@ -1673,13 +1673,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
+       port = drm_dp_get_validated_port_ref(mgr, port);
+       if (!port)
+               return -EINVAL;
+
        port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
        if (!mstb) {
                mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-               if (!mstb)
+               if (!mstb) {
+                       drm_dp_put_port(port);
                        return -EINVAL;
+               }
        }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1708,6 +1714,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
        kfree(txmsg);
 fail_put:
        drm_dp_put_mst_branch_device(mstb);
+       drm_dp_put_port(port);
        return ret;
 }
 
@@ -1790,6 +1797,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                req_payload.start_slot = cur_slots;
                if (mgr->proposed_vcpis[i]) {
                        port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+                       port = drm_dp_get_validated_port_ref(mgr, port);
+                       if (!port) {
+                               mutex_unlock(&mgr->payload_lock);
+                               return -EINVAL;
+                       }
                        req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
                        req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
                } else {
@@ -1817,6 +1829,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
                        mgr->payloads[i].payload_state = req_payload.payload_state;
                }
                cur_slots += req_payload.num_slots;
+
+               if (port)
+                       drm_dp_put_port(port);
        }
 
        for (i = 0; i < mgr->max_payloads; i++) {
@@ -2122,6 +2137,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
 
        if (mgr->mst_primary) {
                int sret;
+               u8 guid[16];
+
                sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
                if (sret != DP_RECEIVER_CAP_SIZE) {
                        DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2136,6 +2153,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
                        ret = -1;
                        goto out_unlock;
                }
+
+               /* Some hubs forget their guids after they resume */
+               sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+               if (sret != 16) {
+                       DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+                       ret = -1;
+                       goto out_unlock;
+               }
+               drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
                ret = 0;
        } else
                ret = -1;
index aea9d9a..e642656 100644 (file)
@@ -207,7 +207,7 @@ static const struct drm_display_mode drm_dmt_modes[] = {
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
        /* 0x0f - 1024x768@43Hz, interlace */
        { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
-                  1208, 1264, 0, 768, 768, 772, 817, 0,
+                  1208, 1264, 0, 768, 768, 776, 817, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
                   DRM_MODE_FLAG_INTERLACE) },
        /* 0x10 - 1024x768@60Hz */
@@ -524,12 +524,12 @@ static const struct drm_display_mode edid_est_modes[] = {
                   720, 840, 0, 480, 481, 484, 500, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
-                  704,  832, 0, 480, 489, 491, 520, 0,
+                  704,  832, 0, 480, 489, 492, 520, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
        { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
                   768,  864, 0, 480, 483, 486, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
-       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+       { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
                   752, 800, 0, 480, 490, 492, 525, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
        { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
@@ -541,7 +541,7 @@ static const struct drm_display_mode edid_est_modes[] = {
        { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
                   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
-       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+       { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
                   1136, 1312, 0,  768, 769, 772, 800, 0,
                   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
        { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
@@ -1476,6 +1476,33 @@ struct edid *drm_get_edid_iic(struct drm_connector *connector,
        return drm_do_get_edid(connector, drm_do_probe_ddc_edid_iic, adapter);
 }
 
+/**
+ * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
+ * @connector: connector we're probing
+ * @adapter: I2C adapter to use for DDC
+ *
+ * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
+ * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
+ * switch DDC to the GPU which is retrieving EDID.
+ *
+ * Return: Pointer to valid EDID or %NULL if we couldn't find any.
+ */
+#if 0
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+                                    struct i2c_adapter *adapter)
+{
+       struct pci_dev *pdev = connector->dev->pdev;
+       struct edid *edid;
+
+       vga_switcheroo_lock_ddc(pdev);
+       edid = drm_get_edid(connector, adapter);
+       vga_switcheroo_unlock_ddc(pdev);
+
+       return edid;
+}
+EXPORT_SYMBOL(drm_get_edid_switcheroo);
+#endif
+
 /**
  * drm_edid_duplicate - duplicate an EDID and the extensions
  * @edid: EDID to duplicate
@@ -2297,7 +2324,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
 {
        int i, j, m, modes = 0;
        struct drm_display_mode *mode;
-       u8 *est = ((u8 *)timing) + 5;
+       u8 *est = ((u8 *)timing) + 6;
 
        for (i = 0; i < 6; i++) {
                for (j = 7; j >= 0; j--) {
@@ -3354,7 +3381,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
        u8 *cea;
        u8 *name;
        u8 *db;
-       int sad_count = 0;
+       int total_sad_count = 0;
        int mnl;
        int dbl;
 
@@ -3368,6 +3395,7 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
 
        name = NULL;
        drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+       /* max: 13 bytes EDID, 16 bytes ELD */
        for (mnl = 0; name && mnl < 13; mnl++) {
                if (name[mnl] == 0x0a)
                        break;
@@ -3396,11 +3424,15 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
                        dbl = cea_db_payload_len(db);
 
                        switch (cea_db_tag(db)) {
+                               int sad_count;
+
                        case AUDIO_BLOCK:
                                /* Audio Data Block, contains SADs */
-                               sad_count = dbl / 3;
-                               if (dbl >= 1)
-                                       memcpy(eld + 20 + mnl, &db[1], dbl);
+                               sad_count = min(dbl / 3, 15 - total_sad_count);
+                               if (sad_count >= 1)
+                                       memcpy(eld + 20 + mnl + total_sad_count * 3,
+                                              &db[1], sad_count * 3);
+                               total_sad_count += sad_count;
                                break;
                        case SPEAKER_BLOCK:
                                /* Speaker Allocation Data Block */
@@ -3417,10 +3449,13 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
                        }
                }
        }
-       eld[5] |= sad_count << 4;
-       eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+       eld[5] |= total_sad_count << 4;
+
+       eld[DRM_ELD_BASELINE_ELD_LEN] =
+               DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
 
-       DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+       DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
+                     drm_eld_size(eld), total_sad_count);
 }
 EXPORT_SYMBOL(drm_edid_to_eld);
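For a rough feel of the new ELD size computation (assuming, as in the upstream helper, that drm_eld_calc_baseline_block_size() counts 16 fixed baseline bytes plus the monitor name length plus 3 bytes per SAD): a 10-byte monitor name with two SADs gives 16 + 10 + 2*3 = 32 bytes, so eld[DRM_ELD_BASELINE_ELD_LEN] becomes DIV_ROUND_UP(32, 4) = 8 four-byte blocks.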
 
index 67d5e46..4bf12ee 100644 (file)
@@ -142,6 +142,9 @@ bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
 {
+       if (!get_slave_funcs(encoder)->mode_fixup)
+               return true;
+
        return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
 }
 EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
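A short sketch of what the new NULL check permits (the slave encoder names are invented; the callback signature follows drm_encoder_slave_funcs): a bridge driver may now omit .mode_fixup entirely and the helper simply accepts the mode.

    static void example_slave_mode_set(struct drm_encoder *encoder,
                                       struct drm_display_mode *mode,
                                       struct drm_display_mode *adjusted_mode)
    {
            /* Program the external encoder for adjusted_mode here. */
    }

    static struct drm_encoder_slave_funcs example_slave_funcs = {
            .mode_set = example_slave_mode_set,
            /* .mode_fixup left NULL: drm_i2c_encoder_mode_fixup() now returns true. */
    };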
index ccbc554..baa7396 100644 (file)
@@ -101,21 +101,17 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
 {
        struct drm_device *dev = fb_helper->dev;
        struct drm_connector *connector;
-       int i;
+       int i, ret;
 
        if (!drm_fbdev_emulation)
                return 0;
 
        mutex_lock(&dev->mode_config.mutex);
        drm_for_each_connector(connector, dev) {
-               struct drm_fb_helper_connector *fb_helper_connector;
+               ret = drm_fb_helper_add_one_connector(fb_helper, connector);
 
-               fb_helper_connector = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
-               if (!fb_helper_connector)
+               if (ret)
                        goto fail;
-
-               fb_helper_connector->connector = connector;
-               fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
        }
        mutex_unlock(&dev->mode_config.mutex);
        return 0;
@@ -127,7 +123,7 @@ fail:
        fb_helper->connector_count = 0;
        mutex_unlock(&dev->mode_config.mutex);
 
-       return -ENOMEM;
+       return ret;
 }
 EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
 
@@ -2038,13 +2034,13 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
        width = dev->mode_config.max_width;
        height = dev->mode_config.max_height;
 
-       crtcs = kcalloc(dev->mode_config.num_connector,
+       crtcs = kcalloc(fb_helper->connector_count,
                        sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
-       modes = kcalloc(dev->mode_config.num_connector,
+       modes = kcalloc(fb_helper->connector_count,
                        sizeof(struct drm_display_mode *), GFP_KERNEL);
-       offsets = kcalloc(dev->mode_config.num_connector,
+       offsets = kcalloc(fb_helper->connector_count,
                          sizeof(struct drm_fb_offset), GFP_KERNEL);
-       enabled = kcalloc(dev->mode_config.num_connector,
+       enabled = kcalloc(fb_helper->connector_count,
                          sizeof(bool), GFP_KERNEL);
        if (!crtcs || !modes || !enabled || !offsets) {
                DRM_ERROR("Memory allocation failed\n");
@@ -2058,9 +2054,9 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
              fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
                                               offsets,
                                               enabled, width, height))) {
-               memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
-               memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
-               memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
+               memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
+               memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
+               memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
 
                if (!drm_target_cloned(fb_helper, modes, offsets,
                                       enabled, width, height) &&
@@ -2140,6 +2136,27 @@ out:
  * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
  * values for the fbdev info structure.
  *
+ * HANG DEBUGGING:
+ *
+ * When you have fbcon support built-in or already loaded, this function will do
+ * a full modeset to setup the fbdev console. Due to locking misdesign in the
+ * VT/fbdev subsystem that entire modeset sequence has to be done while holding
+ * console_lock. Until console_unlock is called no dmesg lines will be sent out
+ * to consoles, not even serial console. This means when your driver crashes,
+ * you will see absolutely nothing else but a system stuck in this function,
+ * with no further output. Any kind of printk() you place within your own driver
+ * or in the drm core modeset code will also never show up.
+ *
+ * Standard debug practice is to run the fbcon setup without taking the
+ * console_lock as a hack, to be able to see backtraces and crashes on the
+ * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
+ * cmdline option.
+ *
+ * The other option is to just disable fbdev emulation since very likely the
+ * first modeset from userspace will crash in the same way, and is even easier to
+ * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
+ * kernel cmdline option.
+ *
  * RETURNS:
  * Zero if everything went ok, nonzero otherwise.
  */
@@ -2224,9 +2241,9 @@ EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
  * but the module doesn't depend on any fb console symbols.  At least
  * attempt to load fbcon to avoid leaving the system without a usable console.
  */
-#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
-static int __init drm_fb_helper_modinit(void)
+int __init drm_fb_helper_modinit(void)
 {
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
        const char *name = "fbcon";
        struct module *fbcon;
 
@@ -2236,8 +2253,7 @@ static int __init drm_fb_helper_modinit(void)
 
        if (!fbcon)
                request_module_nowait(name);
+#endif
        return 0;
 }
-
-module_init(drm_fb_helper_modinit);
-#endif
+EXPORT_SYMBOL(drm_fb_helper_modinit);
index 8ea4927..4e5016e 100644 (file)
 #include "drm_legacy.h"
 #include "drm_internal.h"
 
-/* from BKL pushdown: note that nothing else serializes idr_find() */
+/* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
-EXPORT_SYMBOL(drm_global_mutex);
+
+/**
+ * DOC: file operations
+ *
+ * Drivers must define the file operations structure that forms the DRM
+ * userspace API entry point, even though most of those operations are
+ * implemented in the DRM core. The mandatory functions are drm_open(),
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
+ * Drivers which implement private ioctls that require 32/64 bit compatibility
+ * support must provide their own .compat_ioctl() handler that processes
+ * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ *
+ * In addition drm_read() and drm_poll() provide support for DRM events. DRM
+ * events are a generic and extensible means to send asynchronous events to
+ * userspace through the file descriptor. They are used to send vblank event and
+ * page flip completions by the KMS API. But drivers can also use it for their
+ * own needs, e.g. to signal completion of rendering.
+ *
+ * The memory mapping implementation will vary depending on how the driver
+ * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
+ * function, modern drivers should use one of the provided memory-manager
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ *
+ * No other file operations are supported by the DRM userspace API. Overall the
+ * following is an example #file_operations structure:
+ *
+ *     static const struct file_operations example_drm_fops = {
+ *             .owner = THIS_MODULE,
+ *             .open = drm_open,
+ *             .release = drm_release,
+ *             .unlocked_ioctl = drm_ioctl,
+ *     #ifdef CONFIG_COMPAT
+ *             .compat_ioctl = drm_compat_ioctl,
+ *     #endif
+ *             .poll = drm_poll,
+ *             .read = drm_read,
+ *             .llseek = no_llseek,
+ *             .mmap = drm_gem_mmap,
+ *     };
+ */
 
 extern drm_pci_id_list_t *drm_find_description(int vendor, int device,
     drm_pci_id_list_t *idlist);
@@ -95,8 +134,20 @@ static int drm_setup(struct drm_device *dev)
 #define DRIVER_SOFTC(unit) \
        ((struct drm_device *)devclass_get_softc(drm_devclass, unit))
 
-int
-drm_open(struct dev_open_args *ap)
+/**
+ * drm_open - open method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function must be used by drivers as their .open() #file_operations
+ * method. It looks up the correct DRM device and instantiates all the per-file
+ * resources for it.
+ *
+ * RETURNS:
+ *
+ * 0 on success or negative errno value on falure.
+ */
+int drm_open(struct dev_open_args *ap)
 {
        struct cdev *kdev = ap->a_head.a_dev;
        int flags = ap->a_oflags;
@@ -126,8 +177,35 @@ drm_open(struct dev_open_args *ap)
 
        return (retcode);
 }
+EXPORT_SYMBOL(drm_open);
 
-/* drm_open_helper is called whenever a process opens /dev/drm. */
+/*
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+
+/*
+ * drm_new_set_master - Allocate a new master object and become master for the
+ * associated master realm.
+ *
+ * @dev: The associated device.
+ * @fpriv: File private identifying the client.
+ *
+ * This function must be called with dev::struct_mutex held.
+ * Returns negative error code on failure. Zero on success.
+ */
+
+/*
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param minor acquired minor-object.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and adds it into the doubly-linked list in \p dev.
+ */
 int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
                    struct drm_device *dev, struct file *fp)
 {
@@ -153,7 +231,11 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
        /* for compatibility root is always authenticated */
        priv->authenticated = capable(CAP_SYS_ADMIN);
 
+       INIT_LIST_HEAD(&priv->lhead);
        INIT_LIST_HEAD(&priv->fbs);
+       lockinit(&priv->fbs_lock, "dpfl", 0, LK_CANRECURSE);
+       INIT_LIST_HEAD(&priv->blobs);
+       INIT_LIST_HEAD(&priv->pending_event_list);
        INIT_LIST_HEAD(&priv->event_list);
        init_waitqueue_head(&priv->event_wait);
        priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -185,7 +267,13 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
        return retcode;
 }
 
-/**
+/*
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in its lastclose function.
+ */
+
+/*
  * Take down the DRM device.
  *
  * \param dev DRM device structure.
@@ -231,16 +319,17 @@ int drm_lastclose(struct drm_device * dev)
 }
 
 /**
- * Release file.
+ * drm_release - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
  *
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .release() #file_operations
+ * method. It frees any resources associated with the open file, and if this is
+ * the last open file for the DRM device also proceeds to call drm_lastclose().
+ *
+ * RETURNS:
  *
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
+ * Always succeeds and returns 0.
  */
 int drm_release(device_t kdev)
 {
@@ -328,6 +417,7 @@ int drm_release(device_t kdev)
 
        return (0);
 }
+EXPORT_SYMBOL(drm_release);
 
 static bool
 drm_dequeue_event(struct drm_device *dev, struct drm_file *file_priv,
@@ -356,8 +446,33 @@ out:
        return ret;
 }
 
-int
-drm_read(struct dev_read_args *ap)
+/**
+ * drm_read - read method for DRM file
+ * @filp: file pointer
+ * @buffer: userspace destination pointer for the read
+ * @count: count in bytes to read
+ * @offset: offset to read
+ *
+ * This function must be used by drivers as their .read() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
+ * must set the .llseek() #file_operation to no_llseek(). Polling support is
+ * provided by drm_poll().
+ *
+ * This function will only ever read a full event. Therefore userspace must
+ * supply a big enough buffer to fit any event to ensure forward progress. Since
+ * the maximum event space is currently 4K it's recommended to just use that for
+ * safety.
+ *
+ * RETURNS:
+ *
+ * Number of bytes read (always aligned to full events, and can be 0) or a
+ * negative error code on failure.
+ */
+int drm_read(struct dev_read_args *ap)
 {
        struct cdev *kdev = ap->a_head.a_dev;
        struct uio *uio = ap->a_uio;
@@ -390,6 +505,23 @@ drm_read(struct dev_read_args *ap)
        return (error);
 }
 
+/**
+ * drm_poll - poll method for DRM file
+ * @filp: file pointer
+ * @wait: poll waiter table
+ *
+ * This function must be used by drivers as their .poll() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion this
+ * means all modern display drivers must use it.
+ *
+ * See also drm_read().
+ *
+ * RETURNS:
+ *
+ * Mask of POLL flags indicating the current status of the file.
+ */
+
 static int
 drmfilt(struct knote *kn, long hint)
 {
@@ -459,3 +591,164 @@ drm_kqfilter(struct dev_kqfilter_args *ap)
 
        return (0);
 }
+
+#ifdef __DragonFly__
+/*
+ * The Linux layer version of kfree() is a macro and can't be called
+ * directly via a function pointer
+ */
+static void
+drm_event_destroy(struct drm_pending_event *e)
+{
+       kfree(e);
+}
+#endif
+
+/**
+ * drm_event_reserve_init_locked - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * This is the locked version of drm_event_reserve_init() for callers which
+ * already hold dev->event_lock.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init_locked(struct drm_device *dev,
+                                 struct drm_file *file_priv,
+                                 struct drm_pending_event *p,
+                                 struct drm_event *e)
+{
+       if (file_priv->event_space < e->length)
+               return -ENOMEM;
+
+       file_priv->event_space -= e->length;
+
+       p->event = e;
+       p->file_priv = file_priv;
+
+       /* we *could* pass this in as arg, but everyone uses kfree: */
+#ifdef __DragonFly__
+       p->destroy = drm_event_destroy;
+#else
+       p->destroy = (void (*) (struct drm_pending_event *)) kfree;
+#endif
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_event_reserve_init_locked);
+
+/**
+ * drm_event_reserve_init - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * Callers which already hold dev->event_lock should use
+ * drm_event_reserve_init_locked() instead.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init(struct drm_device *dev,
+                          struct drm_file *file_priv,
+                          struct drm_pending_event *p,
+                          struct drm_event *e)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_event_reserve_init);
+
+/**
+ * drm_event_cancel_free - free a DRM event and release its space
+ * @dev: DRM device
+ * @p: tracking structure for the pending event
+ *
+ * This function frees the event @p initialized with drm_event_reserve_init()
+ * and releases any allocated space.
+ */
+void drm_event_cancel_free(struct drm_device *dev,
+                          struct drm_pending_event *p)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&dev->event_lock, flags);
+       p->file_priv->event_space += p->event->length;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       p->destroy(p);
+}
+EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock, see drm_send_event() for the unlocked version.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+       assert_spin_locked(&dev->event_lock);
+
+       list_add_tail(&e->link,
+                     &e->file_priv->event_list);
+       wake_up_interruptible(&e->file_priv->event_wait);
+#ifdef __DragonFly__
+       KNOTE(&e->file_priv->dkq.ki_note, 0);
+#endif
+
+}
+EXPORT_SYMBOL(drm_send_event_locked);
+
+/**
+ * drm_send_event - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. This function acquires dev->event_lock,
+ * see drm_send_event_locked() for callers which already hold this lock.
+ */
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->event_lock, irqflags);
+       drm_send_event_locked(dev, e);
+       spin_unlock_irqrestore(&dev->event_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_send_event);
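A hedged usage sketch of the new event helpers (the driver structure and event type are hypothetical): reserve the event space up front, then either send the event on completion or cancel it if the ioctl bails out before anything was queued.

    struct example_pending_event {
            struct drm_pending_event base;  /* must be the first member */
            struct drm_event event;
    };

    static int example_queue_event(struct drm_device *dev, struct drm_file *file_priv)
    {
            struct example_pending_event *e;
            int ret;

            e = kzalloc(sizeof(*e), GFP_KERNEL);
            if (!e)
                    return -ENOMEM;

            e->event.type = 0x80000000;     /* hypothetical driver-private type */
            e->event.length = sizeof(e->event);

            ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
            if (ret) {
                    kfree(e);
                    return ret;
            }

            /* If a later step fails: drm_event_cancel_free(dev, &e->base); */
            drm_send_event(dev, &e->base);
            return 0;
    }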
index 743e0cd..2828217 100644 (file)
@@ -1031,18 +1031,12 @@ static void send_vblank_event(struct drm_device *dev,
                struct drm_pending_vblank_event *e,
                unsigned long seq, struct timeval *now)
 {
-       assert_spin_locked(&dev->event_lock);
-
        e->event.sequence = seq;
        e->event.tv_sec = now->tv_sec;
        e->event.tv_usec = now->tv_usec;
 
-       list_add_tail(&e->base.link,
-                     &e->base.file_priv->event_list);
-       wake_up_interruptible(&e->base.file_priv->event_wait);
-#ifdef __DragonFly__
-       KNOTE(&e->base.file_priv->dkq.ki_note, 0);
-#endif
+       drm_send_event_locked(dev, &e->base);
+
        trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
                                         e->event.sequence);
 }
@@ -1639,18 +1633,6 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-#ifdef __DragonFly__
-/*
- * The Linux layer version of kfree() is a macro and can't be called
- * directly via a function pointer
- */
-static void
-drm_vblank_event_destroy(struct drm_pending_event *e)
-{
-       kfree(e);
-}
-#endif
-
 static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
                                  union drm_wait_vblank *vblwait,
                                  struct drm_file *file_priv)
@@ -1673,13 +1655,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
        e->event.base.type = DRM_EVENT_VBLANK;
        e->event.base.length = sizeof(e->event);
        e->event.user_data = vblwait->request.signal;
-       e->base.event = &e->event.base;
-       e->base.file_priv = file_priv;
-#ifdef __DragonFly__
-       e->base.destroy = drm_vblank_event_destroy;
-#else
-       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-#endif
 
        spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -1695,12 +1670,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
                goto err_unlock;
        }
 
-       if (file_priv->event_space < sizeof(e->event)) {
-               ret = -EBUSY;
+       ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+                                           &e->event.base);
+
+       if (ret)
                goto err_unlock;
-       }
 
-       file_priv->event_space -= sizeof(e->event);
        seq = drm_vblank_count_and_time(dev, pipe, &now);
 
        if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
index f33ff16..5cbd4ab 100644 (file)
 #if 0
 static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
 {
-       return of_driver_match_device(dev, drv);
+       struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+
+       /* attempt OF style match */
+       if (of_driver_match_device(dev, drv))
+               return 1;
+
+       /* compare DSI device and driver names */
+       if (!strcmp(dsi->name, drv->name))
+               return 1;
+
+       return 0;
 }
 
 static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -129,14 +139,20 @@ static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
        return device_add(&dsi->dev);
 }
 
+#if IS_ENABLED(CONFIG_OF)
 static struct mipi_dsi_device *
 of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
 {
-       struct mipi_dsi_device *dsi;
        struct device *dev = host->dev;
+       struct mipi_dsi_device_info info = { };
        int ret;
        u32 reg;
 
+       if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+               dev_err(dev, "modalias failure on %s\n", node->full_name);
+               return ERR_PTR(-EINVAL);
+       }
+
        ret = of_property_read_u32(node, "reg", &reg);
        if (ret) {
                dev_err(dev, "device node %s has no valid reg property: %d\n",
@@ -144,32 +160,111 @@ of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
                return ERR_PTR(-EINVAL);
        }
 
-       if (reg > 3) {
-               dev_err(dev, "device node %s has invalid reg property: %u\n",
-                       node->full_name, reg);
+       info.channel = reg;
+       info.node = of_node_get(node);
+
+       return mipi_dsi_device_register_full(host, &info);
+}
+#else
+static struct mipi_dsi_device *
+of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
+{
+       return ERR_PTR(-ENODEV);
+}
+#endif
+
+/**
+ * mipi_dsi_device_register_full - create a MIPI DSI device
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device using the device information provided by the
+ * mipi_dsi_device_info template.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or a pointer encoded
+ * with an error
+ */
+struct mipi_dsi_device *
+mipi_dsi_device_register_full(struct mipi_dsi_host *host,
+                             const struct mipi_dsi_device_info *info)
+{
+       struct mipi_dsi_device *dsi;
+       struct device *dev = host->dev;
+       int ret;
+
+       if (!info) {
+               dev_err(dev, "invalid mipi_dsi_device_info pointer\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       if (info->channel > 3) {
+               dev_err(dev, "invalid virtual channel: %u\n", info->channel);
                return ERR_PTR(-EINVAL);
        }
 
        dsi = mipi_dsi_device_alloc(host);
        if (IS_ERR(dsi)) {
-               dev_err(dev, "failed to allocate DSI device %s: %ld\n",
-                       node->full_name, PTR_ERR(dsi));
+               dev_err(dev, "failed to allocate DSI device %ld\n",
+                       PTR_ERR(dsi));
                return dsi;
        }
 
-       dsi->dev.of_node = of_node_get(node);
-       dsi->channel = reg;
+       dsi->dev.of_node = info->node;
+       dsi->channel = info->channel;
+       strlcpy(dsi->name, info->type, sizeof(dsi->name));
 
        ret = mipi_dsi_device_add(dsi);
        if (ret) {
-               dev_err(dev, "failed to add DSI device %s: %d\n",
-                       node->full_name, ret);
+               dev_err(dev, "failed to add DSI device %d\n", ret);
                kfree(dsi);
                return ERR_PTR(ret);
        }
 
        return dsi;
 }
+EXPORT_SYMBOL(mipi_dsi_device_register_full);
+
+/**
+ * mipi_dsi_device_unregister - unregister MIPI DSI device
+ * @dsi: DSI peripheral device
+ */
+void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
+{
+       device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_device_unregister);
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+/**
+ * of_find_mipi_dsi_host_by_node() - find the MIPI DSI host matching a
+ *                                  device tree node
+ * @node: device tree node
+ *
+ * Returns:
+ * A pointer to the MIPI DSI host corresponding to @node or NULL if no
+ * such device exists (or has not been registered yet).
+ */
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+       struct mipi_dsi_host *host;
+
+       mutex_lock(&host_lock);
+
+       list_for_each_entry(host, &host_list, list) {
+               if (host->dev->of_node == node) {
+                       mutex_unlock(&host_lock);
+                       return host;
+               }
+       }
+
+       mutex_unlock(&host_lock);
+
+       return NULL;
+}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
 
 int mipi_dsi_host_register(struct mipi_dsi_host *host)
 {
@@ -182,6 +277,10 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
                of_mipi_dsi_device_add(host, node);
        }
 
+       mutex_lock(&host_lock);
+       list_add_tail(&host->list, &host_list);
+       mutex_unlock(&host_lock);
+
        return 0;
 }
 EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -190,7 +289,7 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
 {
        struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
 
-       device_unregister(&dsi->dev);
+       mipi_dsi_device_unregister(dsi);
 
        return 0;
 }
@@ -198,6 +297,10 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
 void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
 {
        device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+       mutex_lock(&host_lock);
+       list_del_init(&host->list);
+       mutex_unlock(&host_lock);
 }
 EXPORT_SYMBOL(mipi_dsi_host_unregister);
 #endif
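A small sketch of the non-OF registration path added above (panel name and channel are made up): a host driver can describe a peripheral with mipi_dsi_device_info and hand it to mipi_dsi_device_register_full().

    static struct mipi_dsi_device *example_attach_panel(struct mipi_dsi_host *host)
    {
            struct mipi_dsi_device_info info = {
                    .type = "example-panel",
                    .channel = 0,           /* virtual channels 0-3 are accepted */
                    .node = NULL,
            };

            return mipi_dsi_device_register_full(host, &info);
    }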
index d491756..87e4872 100644 (file)
@@ -1367,8 +1367,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
        }
 done:
        if (i >= 0) {
-               printk(KERN_WARNING
-                       "parse error at position %i in video mode '%s'\n",
+               pr_warn("[drm] parse error at position %i in video mode '%s'\n",
                        i, name);
                mode->specified = false;
                return false;
index e0568f6..7c1e6db 100644 (file)
@@ -354,10 +354,14 @@ static void output_poll_execute(struct work_struct *work)
        struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
        struct drm_connector *connector;
        enum drm_connector_status old_status;
-       bool repoll = false, changed = false;
+       bool repoll = false, changed;
+
+       /* Pick up any changes detected by the probe functions. */
+       changed = dev->mode_config.delayed_event;
+       dev->mode_config.delayed_event = false;
 
        if (!drm_kms_helper_poll)
-               return;
+               goto out;
 
        mutex_lock(&dev->mode_config.mutex);
        drm_for_each_connector(connector, dev) {
@@ -384,6 +388,24 @@ static void output_poll_execute(struct work_struct *work)
                if (old_status != connector->status) {
                        const char *old, *new;
 
+                       /*
+                        * The poll work sets force=false when calling detect so
+                        * that drivers can avoid doing disruptive tests (e.g.
+                        * when load detect cycles could cause flickering on
+                        * other, running displays). This bears the risk that we
+                        * flip-flop between unknown here in the poll work and
+                        * the real state when userspace forces a full detect
+                        * call after receiving a hotplug event due to this
+                        * change.
+                        *
+                        * Hence clamp an unknown detect status to the old
+                        * value.
+                        */
+                       if (connector->status == connector_status_unknown) {
+                               connector->status = old_status;
+                               continue;
+                       }
+
                        old = drm_get_connector_status_name(old_status);
                        new = drm_get_connector_status_name(connector->status);
 
@@ -399,6 +421,7 @@ static void output_poll_execute(struct work_struct *work)
 
        mutex_unlock(&dev->mode_config.mutex);
 
+out:
        if (changed)
                drm_kms_helper_hotplug_event(dev);
 
index 6cb3469..19eff46 100644 (file)
@@ -24,6 +24,7 @@ SRCS +=       i915_cmd_parser.c \
        i915_gem_stolen.c \
        i915_gem_tiling.c \
        i915_gem_userptr.c \
+       i915_gpu_error.c \
        intel_lrc.c \
        intel_mocs.c \
        intel_ringbuffer.c \
index bd6e979..cdaf247 100644 (file)
@@ -375,13 +375,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
        ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
        if (ret)
                goto cleanup_vga_client;
-
-       /* Initialise stolen first so that we may reserve preallocated
-        * objects for the BIOS to KMS transition.
-        */
-       ret = i915_gem_init_stolen(dev);
-       if (ret)
-               goto cleanup_vga_switcheroo;
 #endif
 
        intel_power_domains_init_hw(dev_priv, false);
@@ -390,7 +383,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
        ret = intel_irq_install(dev_priv);
        if (ret)
-               goto cleanup_gem_stolen;
+               goto cleanup_csr;
 
        intel_setup_gmbus(dev);
 
@@ -444,10 +437,9 @@ cleanup_irq:
        intel_guc_ucode_fini(dev);
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev);
-cleanup_gem_stolen:
-       i915_gem_cleanup_stolen(dev);
+cleanup_csr:
+       intel_csr_ucode_fini(dev_priv);
 #if 0
-cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -806,7 +798,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
                     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
                        DRM_INFO("Display fused off, disabling\n");
                        info->num_pipes = 0;
+               } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+                       DRM_INFO("PipeC fused off\n");
+                       info->num_pipes -= 1;
+               }
+       } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+               u32 dfsm = I915_READ(SKL_DFSM);
+               u8 disabled_mask = 0;
+               bool invalid;
+               int num_bits;
+
+               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+                       disabled_mask |= BIT(PIPE_A);
+               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+                       disabled_mask |= BIT(PIPE_B);
+               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+                       disabled_mask |= BIT(PIPE_C);
+
+               num_bits = hweight8(disabled_mask);
+
+               switch (disabled_mask) {
+               case BIT(PIPE_A):
+               case BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_C):
+                       invalid = true;
+                       break;
+               default:
+                       invalid = false;
                }
+
+               if (num_bits > info->num_pipes || invalid)
+                       DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+                                 disabled_mask);
+               else
+                       info->num_pipes -= num_bits;
        }
 
        /* Initialize slice/subslice/EU info */
@@ -845,6 +871,98 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
        }
 }
 
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+       /*
+        * The i915 workqueue is primarily used for batched retirement of
+        * requests (and thus managing bo) once the task has been completed
+        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * need high-priority retirement, such as waiting for an explicit
+        * bo.
+        *
+        * It is also used for periodic low-priority events, such as
+        * idle-timers and recording error state.
+        *
+        * All tasks on the workqueue are expected to acquire the dev mutex
+        * so there is no point in running more than one instance of the
+        * workqueue at any time.  Use an ordered one.
+        */
+       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+       if (dev_priv->wq == NULL)
+               goto out_err;
+
+       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+       if (dev_priv->hotplug.dp_wq == NULL)
+               goto out_free_wq;
+
+       dev_priv->gpu_error.hangcheck_wq =
+               alloc_ordered_workqueue("i915-hangcheck", 0);
+       if (dev_priv->gpu_error.hangcheck_wq == NULL)
+               goto out_free_dp_wq;
+
+       return 0;
+
+out_free_dp_wq:
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_free_wq:
+       destroy_workqueue(dev_priv->wq);
+out_err:
+       DRM_ERROR("Failed to allocate workqueues.\n");
+
+       return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
+       destroy_workqueue(dev_priv->wq);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mmio_bar;
+       int mmio_size;
+
+       mmio_bar = IS_GEN2(dev) ? 1 : 0;
+       /*
+        * Before gen4, the registers and the GTT are behind different BARs.
+        * However, from gen4 onwards, the registers and the GTT are shared
+        * in the same BAR, so we want to restrict this ioremap from
+        * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+        * the register BAR remains the same size for all the earlier
+        * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+        */
+       if (INTEL_INFO(dev)->gen < 5)
+               mmio_size = 512 * 1024;
+       else
+               mmio_size = 2 * 1024 * 1024;
+       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+       if (dev_priv->regs == NULL) {
+               DRM_ERROR("failed to map registers\n");
+
+               return -EIO;
+       }
+
+       /* Try to make sure MCHBAR is enabled before poking at it */
+       intel_setup_mchbar(dev);
+
+       return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+#if 0
+       struct drm_i915_private *dev_priv = to_i915(dev);
+#endif
+
+       intel_teardown_mchbar(dev);
+#if 0
+       pci_iounmap(dev->pdev, dev_priv->regs);
+#endif
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
@@ -860,7 +978,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_i915_private *dev_priv;
        struct intel_device_info *info, *device_info;
-       int ret = 0, mmio_bar, mmio_size;
+       int ret = 0;
        uint32_t aperture_size;
 
        /* XXX: struct pci_dev */
@@ -888,6 +1006,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        lockinit(&dev_priv->modeset_restore_lock, "i915mrl", 0, LK_CANRECURSE);
        lockinit(&dev_priv->av_mutex, "i915am", 0, LK_CANRECURSE);
 
+       ret = i915_workqueues_init(dev_priv);
+       if (ret < 0)
+               goto out_free_priv;
+
        intel_pm_setup(dev);
 
        intel_runtime_pm_get(dev_priv);
@@ -906,28 +1028,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
-               goto free_priv;
+               goto out_runtime_pm_put;
        }
 
-       mmio_bar = IS_GEN2(dev) ? 1 : 0;
-       /* Before gen4, the registers and the GTT are behind different BARs.
-        * However, from gen4 onwards, the registers and the GTT are shared
-        * in the same BAR, so we want to restrict this ioremap from
-        * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-        * the register BAR remains the same size for all the earlier
-        * generations up to Ironlake.
-        */
-       if (info->gen < 5)
-               mmio_size = 512*1024;
-       else
-               mmio_size = 2*1024*1024;
-
-       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-       if (!dev_priv->regs) {
-               DRM_ERROR("failed to map registers\n");
-               ret = -EIO;
+       ret = i915_mmio_setup(dev);
+       if (ret < 0)
                goto put_bridge;
-       }
 
        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);
@@ -936,7 +1042,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        ret = i915_gem_gtt_init(dev);
        if (ret)
-               goto out_freecsr;
+               goto out_uncore_fini;
 
        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
@@ -984,49 +1090,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
                                              aperture_size);
 
-       /* The i915 workqueue is primarily used for batched retirement of
-        * requests (and thus managing bo) once the task has been completed
-        * by the GPU. i915_gem_retire_requests() is called directly when we
-        * need high-priority retirement, such as waiting for an explicit
-        * bo.
-        *
-        * It is also used for periodic low-priority events, such as
-        * idle-timers and recording error state.
-        *
-        * All tasks on the workqueue are expected to acquire the dev mutex
-        * so there is no point in running more than one instance of the
-        * workqueue at any time.  Use an ordered one.
-        */
-       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-       if (dev_priv->wq == NULL) {
-               DRM_ERROR("Failed to create our workqueue.\n");
-               ret = -ENOMEM;
-               goto out_mtrrfree;
-       }
-
-       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-       if (dev_priv->hotplug.dp_wq == NULL) {
-               DRM_ERROR("Failed to create our dp workqueue.\n");
-               ret = -ENOMEM;
-               goto out_freewq;
-       }
-
-       dev_priv->gpu_error.hangcheck_wq =
-               alloc_ordered_workqueue("i915-hangcheck", 0);
-       if (dev_priv->gpu_error.hangcheck_wq == NULL) {
-               DRM_ERROR("Failed to create our hangcheck workqueue.\n");
-               ret = -ENOMEM;
-               goto out_freedpwq;
-       }
-
        intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);
 
-       /* Try to make sure MCHBAR is enabled before poking at it */
-       intel_setup_mchbar(dev);
        intel_opregion_setup(dev);
 
-       i915_gem_load(dev);
+       i915_gem_load_init(dev);
+       i915_gem_shrinker_init(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
@@ -1094,34 +1164,34 @@ out_power_well:
        intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
 out_gem_unload:
+       i915_gem_shrinker_cleanup(dev_priv);
+
+#if 0
+       if (dev->pdev->msi_enabled)
+               pci_disable_msi(dev->pdev);
+#endif
 
        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
-       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-out_freedpwq:
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_freewq:
-       destroy_workqueue(dev_priv->wq);
-out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
 #if 0
        io_mapping_free(dev_priv->gtt.mappable);
 #endif
 out_gtt:
        i915_global_gtt_cleanup(dev);
-out_freecsr:
-       intel_csr_ucode_fini(dev_priv);
+out_uncore_fini:
        intel_uncore_fini(dev);
-#if 0
-       pci_iounmap(dev->pdev, dev_priv->regs);
-#endif
+       i915_mmio_cleanup(dev);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
-free_priv:
 
+       i915_gem_load_cleanup(dev);
+out_runtime_pm_put:
        intel_runtime_pm_put(dev_priv);
-
+       i915_workqueues_cleanup(dev_priv);
+out_free_priv:
        kfree(dev_priv);
+
        return ret;
 }
 
@@ -1146,10 +1216,9 @@ int i915_driver_unload(struct drm_device *dev)
 
        i915_teardown_sysfs(dev);
 
-#if 0
-       WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-       unregister_shrinker(&dev_priv->mm.shrinker);
+       i915_gem_shrinker_cleanup(dev_priv);
 
+#if 0
        io_mapping_free(dev_priv->gtt.mappable);
 #endif
        arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1181,6 +1250,8 @@ int i915_driver_unload(struct drm_device *dev)
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 #endif
 
+       intel_csr_ucode_fini(dev_priv);
+
        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 #if 0
@@ -1201,26 +1272,17 @@ int i915_driver_unload(struct drm_device *dev)
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);
-       i915_gem_cleanup_stolen(dev);
-
-       intel_csr_ucode_fini(dev_priv);
 
-       intel_teardown_mchbar(dev);
-
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-       destroy_workqueue(dev_priv->wq);
-       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
 
        i915_global_gtt_cleanup(dev);
 
        intel_uncore_fini(dev);
-#if 0
-       if (dev_priv->regs != NULL)
-               pci_iounmap(dev->pdev, dev_priv->regs);
-#endif
+       i915_mmio_cleanup(dev);
 
+       i915_gem_load_cleanup(dev);
        pci_dev_put(dev_priv->bridge_dev);
+       i915_workqueues_cleanup(dev_priv);
        kfree(dev_priv);
 
        return 0;
@@ -1263,8 +1325,6 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
-
-       intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1299,7 +1359,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE,  drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -1334,8 +1394,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
 #if 0
        DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
 #endif
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
 };
 
 int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
index ae58953..8628d53 100644 (file)
@@ -610,13 +610,7 @@ static int i915_drm_suspend(struct drm_device *dev)
 
        intel_suspend_gt_powersave(dev);
 
-       /*
-        * Disable CRTCs directly since we want to preserve sw state
-        * for _thaw. Also, power gate the CRTC power wells.
-        */
-       drm_modeset_lock_all(dev);
        intel_display_suspend(dev);
-       drm_modeset_unlock_all(dev);
 
 #if 0
        intel_dp_mst_suspend(dev);
@@ -780,12 +774,10 @@ static int i915_drm_resume(struct drm_device *dev)
                dev_priv->display.hpd_irq_setup(dev);
        spin_unlock_irq(&dev_priv->irq_lock);
 
-       drm_modeset_lock_all(dev);
-       intel_display_resume(dev);
-       drm_modeset_unlock_all(dev);
-
        intel_dp_mst_resume(dev);
 
+       intel_display_resume(dev);
+
        /*
         * ... but also need to make sure that hotplug processing
         * doesn't cause havoc. Like in the driver load code we don't
@@ -829,7 +821,37 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
+
+       /*
+        * Note that we need to set the power state explicitly, since we
+        * powered off the device during freeze and the PCI core won't power
+        * it back up for us during thaw. Powering off the device during
+        * freeze is not a hard requirement though, and during the
+        * suspend/resume phases the PCI core makes sure we get here with the
+        * device powered on. So in case we change our freeze logic and keep
+        * the device powered we can also remove the following set power state
+        * call.
+        */
 #if 0
+       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       if (ret) {
+               DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+               goto out;
+       }
+
+       /*
+        * Note that pci_enable_device() first enables any parent bridge
+        * device and only then sets the power state for this device. The
+        * bridge enabling is a nop though, since bridge devices are resumed
+        * first. The order of enabling power and enabling the device is
+        * imposed by the PCI core as described above, so here we preserve the
+        * same order for the freeze/thaw phases.
+        *
+        * TODO: eventually we should remove pci_disable_device() /
+        * pci_enable_device() from suspend/resume. Due to how they
+        * depend on the device enable refcount we can't rely on them
+        * to disable/enable the device for us anyway.
+        */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
@@ -1128,7 +1150,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
         */
        broxton_init_cdclk(dev);
        broxton_ddi_phy_init(dev);
-       intel_prepare_ddi(dev);
 
        return 0;
 }
@@ -1387,8 +1408,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                return 0;
 
        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-                       wait_for_on ? "on" : "off",
-                       I915_READ(VLV_GTLC_PW_STATUS));
+                     onoff(wait_for_on),
+                     I915_READ(VLV_GTLC_PW_STATUS));
 
        /*
         * RC6 transitioning can be delayed up to 2 msec (see
@@ -1397,7 +1418,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
        err = wait_for(COND, 3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
-                         wait_for_on ? "on" : "off");
+                         onoff(wait_for_on));
 
        return err;
 #undef COND
@@ -1408,7 +1429,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
        if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;
 
-       DRM_ERROR("GT register access while GT waking disabled\n");
+       DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
 
@@ -1553,6 +1574,10 @@ static int intel_runtime_suspend(struct device *device)
 
        enable_rpm_wakeref_asserts(dev_priv);
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+
+       if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+               DRM_ERROR("Unclaimed access detected prior to suspending\n");
+
        dev_priv->pm.suspended = true;
 
        /*
@@ -1601,6 +1626,8 @@ static int intel_runtime_resume(struct device *device)
 
        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;
+       if (intel_uncore_unclaimed_mmio(dev_priv))
+               DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
        intel_guc_resume(dev);
 
index 9b2aa8d..c988686 100644 (file)
@@ -34,6 +34,7 @@
 #include <uapi_drm/drm_fourcc.h>
 
 #include <drm/drmP.h>
+#include "i915_params.h"
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -65,7 +66,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20151218"
+#define DRIVER_DATE            "20160229"
 
 #undef WARN_ON
 /* Many gcc seem to not see through this and fall over :( */
                BUILD_BUG_ON(__i915_warn_cond); \
        WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 #endif
 
 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
 
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
                             (long) (x), __func__);
  */
 #define I915_STATE_WARN(condition, format...) ({                       \
        int __ret_warn_on = !!(condition);                              \
-       if (unlikely(__ret_warn_on)) {                                  \
-               if (i915.verbose_state_checks)                          \
-                       WARN(1, format);                                \
-               else                                                    \
+       if (unlikely(__ret_warn_on))                                    \
+               if (!WARN(i915.verbose_state_checks, format))           \
                        DRM_ERROR(format);                              \
-       }                                                               \
        unlikely(__ret_warn_on);                                        \
 })
 
-#define I915_STATE_WARN_ON(condition) ({                               \
-       int __ret_warn_on = !!(condition);                              \
-       if (unlikely(__ret_warn_on)) {                                  \
-               if (i915.verbose_state_checks)                          \
-                       WARN(1, "WARN_ON(" #condition ")\n");           \
-               else                                                    \
-                       DRM_ERROR("WARN_ON(" #condition ")\n");         \
-       }                                                               \
-       unlikely(__ret_warn_on);                                        \
-})
+#define I915_STATE_WARN_ON(x)                                          \
+       I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
 static inline const char *yesno(bool v)
 {
        return v ? "yes" : "no";
 }
 
+static inline const char *onoff(bool v)
+{
+       return v ? "on" : "off";
+}
+
 enum i915_pipe {
        INVALID_PIPE = -1,
        PIPE_A = 0,
@@ -273,6 +268,9 @@ struct i915_hotplug {
 
 #define for_each_pipe(__dev_priv, __p) \
        for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+       for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+               for_each_if ((__mask) & (1 << (__p)))
 #define for_each_plane(__dev_priv, __pipe, __p)                                \
        for ((__p) = 0;                                                 \
             (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
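A hedged usage sketch for the new for_each_pipe_masked() iterator added above; the surrounding helper and its mask are hypothetical, only the macro itself comes from this change:

        /* Illustration only: walk the pipes selected by 'pipe_mask' and
         * skip the rest via for_each_if(). */
        static void example_handle_pipes(struct drm_i915_private *dev_priv,
                                         unsigned int pipe_mask)
        {
                enum i915_pipe pipe;

                for_each_pipe_masked(dev_priv, pipe, pipe_mask)
                        DRM_DEBUG_KMS("servicing pipe %c\n", pipe_name(pipe));
        }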
@@ -346,7 +344,7 @@ struct drm_i915_file_private {
                unsigned boosts;
        } rps;
 
-       struct intel_engine_cs *bsd_ring;
+       unsigned int bsd_ring;
 };
 
 enum intel_dpll_id {
@@ -640,6 +638,7 @@ struct drm_i915_display_funcs {
                          struct dpll *best_clock);
        int (*compute_pipe_wm)(struct intel_crtc *crtc,
                               struct drm_atomic_state *state);
+       void (*program_watermarks)(struct intel_crtc_state *cstate);
        void (*update_wm)(struct drm_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -664,9 +663,6 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *req,
                          uint32_t flags);
-       void (*update_primary_plane)(struct drm_crtc *crtc,
-                                    struct drm_framebuffer *fb,
-                                    int x, int y);
        void (*hpd_irq_setup)(struct drm_device *dev);
        /* clock updates for mode set */
        /* cursor updates */
@@ -733,6 +729,8 @@ struct intel_uncore {
                i915_reg_t reg_post;
                u32 val_reset;
        } fw_domain[FW_DOMAIN_ID_COUNT];
+
+       int unclaimed_mmio_check;
 };
 
 /* Iterate over initialised fw domains */
@@ -897,6 +895,9 @@ struct intel_context {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
                int pin_count;
+               struct i915_vma *lrc_vma;
+               u64 lrc_desc;
+               uint32_t *lrc_reg_state;
        } engine[I915_NUM_RINGS];
 
        struct list_head link;
@@ -910,16 +911,15 @@ enum fb_op_origin {
        ORIGIN_DIRTYFB,
 };
 
-struct i915_fbc {
+struct intel_fbc {
        /* This is always the inner lock when overlapping with struct_mutex and
         * it's the outer lock when overlapping with stolen_lock. */
        struct lock lock;
        unsigned threshold;
-       unsigned int fb_id;
        unsigned int possible_framebuffer_bits;
        unsigned int busy_bits;
+       unsigned int visible_pipes_mask;
        struct intel_crtc *crtc;
-       int y;
 
        struct drm_mm_node compressed_fb;
        struct drm_mm_node *compressed_llb;
@@ -929,18 +929,52 @@ struct i915_fbc {
        bool enabled;
        bool active;
 
+       struct intel_fbc_state_cache {
+               struct {
+                       unsigned int mode_flags;
+                       uint32_t hsw_bdw_pixel_rate;
+               } crtc;
+
+               struct {
+                       unsigned int rotation;
+                       int src_w;
+                       int src_h;
+                       bool visible;
+               } plane;
+
+               struct {
+                       u64 ilk_ggtt_offset;
+                       uint32_t pixel_format;
+                       unsigned int stride;
+                       int fence_reg;
+                       unsigned int tiling_mode;
+               } fb;
+       } state_cache;
+
+       struct intel_fbc_reg_params {
+               struct {
+                       enum i915_pipe pipe;
+                       enum plane plane;
+                       unsigned int fence_y_offset;
+               } crtc;
+
+               struct {
+                       u64 ggtt_offset;
+                       uint32_t pixel_format;
+                       unsigned int stride;
+                       int fence_reg;
+               } fb;
+
+               int cfb_size;
+       } params;
+
        struct intel_fbc_work {
                bool scheduled;
+               u32 scheduled_vblank;
                struct work_struct work;
-               struct drm_framebuffer *fb;
-               unsigned long enable_jiffies;
        } work;
 
        const char *no_fbc_reason;
-
-       bool (*is_active)(struct drm_i915_private *dev_priv);
-       void (*activate)(struct intel_crtc *crtc);
-       void (*deactivate)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -980,6 +1014,7 @@ struct i915_psr {
        unsigned busy_frontbuffer_bits;
        bool psr2_support;
        bool aux_frame_sync;
+       bool link_standby;
 };
 
 enum intel_pch {
@@ -1311,7 +1346,7 @@ struct i915_gem_mm {
        bool busy;
 
        /* the indicator for dispatch video commands on two BSD rings */
-       int bsd_ring_dispatch_index;
+       unsigned int bsd_ring_dispatch_index;
 
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
@@ -1497,7 +1532,7 @@ struct intel_vbt_data {
                u8 seq_version;
                u32 size;
                u8 *data;
-               u8 *sequence[MIPI_SEQ_MAX];
+               const u8 *sequence[MIPI_SEQ_MAX];
        } dsi;
 
        int crt_ddc_pin;
@@ -1669,11 +1704,18 @@ struct i915_wa_reg {
        u32 mask;
 };
 
-#define I915_MAX_WA_REGS 16
+/*
+ * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
+ * allowing it for RCS as we don't foresee any requirement of having
+ * a whitelist for other engines. When it is really required for
+ * other engines, the limit will need to be increased.
+ */
+#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
 
 struct i915_workarounds {
        struct i915_wa_reg reg[I915_MAX_WA_REGS];
        u32 count;
+       u32 hw_whitelist_count[I915_NUM_RINGS];
 };
 
 struct i915_virtual_gpu {
@@ -1771,7 +1813,7 @@ struct drm_i915_private {
        u32 pipestat_irq_mask[I915_MAX_PIPES];
 
        struct i915_hotplug hotplug;
-       struct i915_fbc fbc;
+       struct intel_fbc fbc;
        struct i915_drrs drrs;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
@@ -1795,7 +1837,7 @@ struct drm_i915_private {
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_boot_cdclk;
-       unsigned int cdclk_freq, max_cdclk_freq;
+       unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
        unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
        unsigned int czclk_freq;
@@ -1820,6 +1862,7 @@ struct drm_i915_private {
 
        enum modeset_restore modeset_restore;
        struct lock modeset_restore_lock;
+       struct drm_atomic_state *modeset_restore_state;
 
        struct list_head vm_list; /* Global list of all address spaces */
        struct i915_gtt gtt; /* VM representing the global address space */
@@ -1840,8 +1883,13 @@ struct drm_i915_private {
        struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
 #endif
 
+       /* dpll and cdclk state is protected by connection_mutex */
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+
+       unsigned int active_crtcs;
+       unsigned int min_pixclk[I915_MAX_PIPES];
+
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
        struct i915_workarounds workarounds;
@@ -1942,6 +1990,7 @@ struct drm_i915_private {
                };
 
                uint8_t max_level;
+
        } wm;
 
        struct i915_runtime_pm pm;
@@ -1958,6 +2007,8 @@ struct drm_i915_private {
                void (*stop_ring)(struct intel_engine_cs *ring);
        } gt;
 
+       struct intel_context *kernel_context;
+
        bool edp_low_vswing;
 
        /* perform PHY state sanity checks? */
@@ -2282,9 +2333,9 @@ struct drm_i915_gem_request {
 
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -2591,6 +2642,12 @@ struct drm_i915_cmd_table {
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)         (IS_I830(dev) || IS_845G(dev))
+
+/* WaRsDisableCoarsePowerGating:skl,bxt */
+#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
+                                                IS_SKL_GT3(dev) || \
+                                                IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2680,44 +2737,7 @@ extern int i915_max_ioctl;
 extern int i915_suspend_switcheroo(device_t kdev);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-/* i915_params.c */
-struct i915_params {
-       int modeset;
-       int panel_ignore_lid;
-       int semaphores;
-       int lvds_channel_mode;
-       int panel_use_ssc;
-       int vbt_sdvo_panel_type;
-       int enable_rc6;
-       int enable_dc;
-       int enable_fbc;
-       int enable_ppgtt;
-       int enable_execlists;
-       int enable_psr;
-       unsigned int preliminary_hw_support;
-       int disable_power_well;
-       int enable_ips;
-       int invert_brightness;
-       int enable_cmd_parser;
-       /* leave bools at the end to not create holes */
-       bool enable_hangcheck;
-       bool fastboot;
-       bool prefault_disable;
-       bool load_detect_test;
-       int  reset;
-       bool disable_display;
-       bool disable_vtd_wa;
-       bool enable_guc_submission;
-       int guc_log_level;
-       int use_mmio_flip;
-       int mmio_debug;
-       bool verbose_state_checks;
-       bool nuclear_pageflip;
-       int edp_vswing;
-};
-extern struct i915_params i915 __read_mostly;
-
-                               /* i915_dma.c */
+/* i915_dma.c */
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2760,7 +2780,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
                                        bool restore_forcewake);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_check_errors(struct drm_device *dev);
+extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 extern void intel_uncore_fini(struct drm_device *dev);
 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
@@ -2882,7 +2903,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-void i915_gem_load(struct drm_device *dev);
+void i915_gem_load_init(struct drm_device *dev);
+void i915_gem_load_cleanup(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -3150,18 +3172,11 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 /* Some GGTT VM helpers */
 #define i915_obj_to_ggtt(obj) \
        (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
-static inline bool i915_is_ggtt(struct i915_address_space *vm)
-{
-       struct i915_address_space *ggtt =
-               &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
-       return vm == ggtt;
-}
 
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
        WARN_ON(i915_is_ggtt(vm));
-
        return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
@@ -3299,6 +3314,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_ACTIVE 0x8
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 
 
 /* i915_gem_tiling.c */
@@ -3471,16 +3487,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg);
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum i915_pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
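The dedicated vlv_gpio_nc_* and vlv_gps_core_* wrappers removed above give way to the generic port-based sideband accessors. A hedged equivalent of the old GPIO NC read/write pair, assuming the IOSF_PORT_GPIO_NC define from i915_reg.h:

        /* Sketch only; the port id macro is an assumption, not shown in this diff. */
        u32 val = vlv_iosf_sb_read(dev_priv, IOSF_PORT_GPIO_NC, reg);
        vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, reg, val | 1);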
index 1bc7154..6082a59 100644 (file)
@@ -137,10 +137,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
        pinned = 0;
        mutex_lock(&dev->struct_mutex);
-       list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+       list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
                if (vma->pin_count)
                        pinned += vma->node.size;
-       list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+       list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
                if (vma->pin_count)
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                drm_clflush_virt_range(vaddr, PAGE_SIZE);
                kunmap_atomic(src);
 
-               page_cache_release(page);
+               put_page(page);
                vaddr += PAGE_SIZE;
        }
 
@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
                        set_page_dirty(page);
                        if (obj->madv == I915_MADV_WILLNEED)
                                mark_page_accessed(page);
-                       page_cache_release(page);
+                       put_page(page);
                        vaddr += PAGE_SIZE;
                }
                obj->dirty = 0;
@@ -273,7 +273,7 @@ drop_pages(struct drm_i915_gem_object *obj)
        int ret;
 
        drm_gem_object_reference(&obj->base);
-       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
                if (i915_vma_unbind(vma))
                        break;
 
@@ -494,7 +494,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
        *needs_clflush = 0;
 
 #if 0
-       if (!obj->base.filp)
+       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
                return -EINVAL;
 #endif
 
@@ -1212,7 +1212,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
        unsigned long timeout_expire;
-       s64 before, now;
+       s64 before = 0; /* Only to silence a compiler warning. */
        int ret, sl_timeout = 1;
 
        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1232,14 +1232,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        return -ETIME;
 
                timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+               /*
+                * Record current time in case interrupted by signal, or wedged.
+                */
+               before = ktime_get_raw_ns();
        }
 
        if (INTEL_INFO(dev_priv)->gen >= 6)
                gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
-       /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(req);
-       before = ktime_get_raw_ns();
 
        /* Optimistic spin for the next jiffie before touching IRQs */
 #if 0
@@ -1312,11 +1315,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                ring->irq_put(ring);
 
 out:
-       now = ktime_get_raw_ns();
        trace_i915_gem_request_wait_end(req);
 
        if (timeout) {
-               s64 tres = *timeout - (now - before);
+               s64 tres = *timeout - (ktime_get_raw_ns() - before);
 
                *timeout = tres < 0 ? 0 : tres;
 
@@ -2533,7 +2535,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
        list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
        i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 
-       list_move_tail(&vma->mm_list, &vma->vm->active_list);
+       list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
@@ -2571,9 +2573,9 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
        list_move_tail(&obj->global_list,
                       &to_i915(obj->base.dev)->mm.bound_list);
 
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
-               if (!list_empty(&vma->mm_list))
-                       list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!list_empty(&vma->vm_link))
+                       list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        }
 
        i915_gem_request_assign(&obj->last_fenced_req, NULL);
@@ -2797,10 +2799,8 @@ void i915_gem_request_free(struct kref *req_ref)
                i915_gem_request_remove_from_client(req);
 
        if (ctx) {
-               if (i915.enable_execlists) {
-                       if (ctx != req->ring->default_context)
-                               intel_lr_context_unpin(req);
-               }
+               if (i915.enable_execlists && ctx != req->i915->kernel_context)
+                       intel_lr_context_unpin(ctx, req->ring);
 
                i915_gem_context_unreference(ctx);
        }
@@ -2808,9 +2808,10 @@ void i915_gem_request_free(struct kref *req_ref)
        kfree(req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+                        struct intel_context *ctx,
+                        struct drm_i915_gem_request **req_out)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
        struct drm_i915_gem_request *req;
@@ -2873,6 +2874,31 @@ err:
        return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct intel_context *ctx)
+{
+       struct drm_i915_gem_request *req;
+       int err;
+
+       if (ctx == NULL)
+               ctx = to_i915(engine->dev)->kernel_context;
+       err = __i915_gem_request_alloc(engine, ctx, &req);
+       return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
        intel_ring_reserved_space_cancel(req->ringbuf);
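For reference, the calling convention introduced by the new i915_gem_request_alloc() above, in a minimal sketch matching the converted call sites later in this diff (passing a NULL context selects the kernel context):

        struct drm_i915_gem_request *req;

        /* ERR_PTR-style error handling replaces the old int return value. */
        req = i915_gem_request_alloc(ring, NULL);
        if (IS_ERR(req))
                return PTR_ERR(req);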
@@ -3064,11 +3090,9 @@ i915_gem_retire_requests(struct drm_device *dev)
                i915_gem_retire_requests_ring(ring);
                idle &= list_empty(&ring->request_list);
                if (i915.enable_execlists) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&ring->execlist_lock, flags);
+                       spin_lock_irq(&ring->execlist_lock);
                        idle &= list_empty(&ring->execlist_queue);
-                       spin_unlock_irqrestore(&ring->execlist_lock, flags);
+                       spin_unlock_irq(&ring->execlist_lock);
 
                        intel_execlists_retire_requests(ring);
                }
@@ -3290,9 +3314,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                        return 0;
 
                if (*to_req == NULL) {
-                       ret = i915_gem_request_alloc(to, to->default_context, to_req);
-                       if (ret)
-                               return ret;
+                       struct drm_i915_gem_request *req;
+
+                       req = i915_gem_request_alloc(to, NULL);
+                       if (IS_ERR(req))
+                               return PTR_ERR(req);
+
+                       *to_req = req;
                }
 
                trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3409,7 +3437,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
 
-       if (list_empty(&vma->vma_link))
+       if (list_empty(&vma->obj_link))
                return 0;
 
        if (!drm_mm_node_allocated(&vma->node)) {
@@ -3428,8 +3456,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
                        return ret;
        }
 
-       if (i915_is_ggtt(vma->vm) &&
-           vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+       if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
                i915_gem_object_finish_gtt(obj);
 
                /* release the fence reg _after_ flushing */
@@ -3443,8 +3470,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
        vma->vm->unbind_vma(vma);
        vma->bound = 0;
 
-       list_del_init(&vma->mm_list);
-       if (i915_is_ggtt(vma->vm)) {
+       list_del_init(&vma->vm_link);
+       if (vma->is_ggtt) {
                if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
                        obj->map_and_fenceable = false;
                } else if (vma->ggtt_view.pages) {
@@ -3492,9 +3519,9 @@ int i915_gpu_idle(struct drm_device *dev)
                if (!i915.enable_execlists) {
                        struct drm_i915_gem_request *req;
 
-                       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-                       if (ret)
-                               return ret;
+                       req = i915_gem_request_alloc(ring, NULL);
+                       if (IS_ERR(req))
+                               return PTR_ERR(req);
 
                        ret = i915_switch_context(req);
                        if (ret) {
@@ -3701,7 +3728,7 @@ search_free:
                goto err_remove_node;
 
        list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-       list_add_tail(&vma->mm_list, &vm->inactive_list);
+       list_add_tail(&vma->vm_link, &vm->inactive_list);
 
        return vma;
 
@@ -3866,7 +3893,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        /* And bump the LRU for this access */
        vma = i915_gem_obj_to_ggtt(obj);
        if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-               list_move_tail(&vma->mm_list,
+               list_move_tail(&vma->vm_link,
                               &to_i915(obj->base.dev)->gtt.base.inactive_list);
 
        return 0;
@@ -3901,7 +3928,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
         * catch the issue of the CS prefetch crossing page boundaries and
         * reading an invalid PTE on older architectures.
         */
-       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
                if (!drm_mm_node_allocated(&vma->node))
                        continue;
 
@@ -3964,7 +3991,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                         */
                }
 
-               list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        if (!drm_mm_node_allocated(&vma->node))
                                continue;
 
@@ -3974,7 +4001,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                }
        }
 
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
                vma->node.color = cache_level;
        obj->cache_level = cache_level;
 
@@ -4448,10 +4475,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto unref;
 
-       BUILD_BUG_ON(I915_NUM_RINGS > 16);
-       args->busy = obj->active << 16;
-       if (obj->last_write_req)
-               args->busy |= obj->last_write_req->ring->id;
+       args->busy = 0;
+       if (obj->active) {
+               int i;
+
+               for (i = 0; i < I915_NUM_RINGS; i++) {
+                       struct drm_i915_gem_request *req;
+
+                       req = obj->last_read_req[i];
+                       if (req)
+                               args->busy |= 1 << (16 + req->ring->exec_id);
+               }
+               if (obj->last_write_req)
+                       args->busy |= obj->last_write_req->ring->exec_id;
+       }
 
 unref:
        drm_gem_object_unreference(&obj->base);
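A hedged sketch of how the reworked busy report above can be decoded, with the field layout inferred purely from the loop that builds it: read activity in the upper 16 bits, the writing engine's exec_id in the lower ones.

        /* Illustration only; these helpers are not part of this commit. */
        static inline bool example_busy_reading(u32 busy, unsigned int exec_id)
        {
                return busy & (1u << (16 + exec_id));
        }

        static inline unsigned int example_busy_writer(u32 busy)
        {
                return busy & 0xffff;
        }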
@@ -4646,7 +4683,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
        trace_i915_gem_object_destroy(obj);
 
-       list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+       list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
                int ret;
 
                vma->pin_count = 0;
@@ -4705,7 +4742,7 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm)
 {
        struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
                    vma->vm == vm)
                        return vma;
@@ -4722,7 +4759,7 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
        if (WARN_ONCE(!view, "no view specified"))
                return ERR_PTR(-EINVAL);
 
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
                if (vma->vm == ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
@@ -4731,19 +4768,16 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 
 void i915_gem_vma_destroy(struct i915_vma *vma)
 {
-       struct i915_address_space *vm = NULL;
        WARN_ON(vma->node.allocated);
 
        /* Keep the vma as a placeholder in the execbuffer reservation lists */
        if (!list_empty(&vma->exec_list))
                return;
 
-       vm = vma->vm;
+       if (!vma->is_ggtt)
+               i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
-       if (!i915_is_ggtt(vm))
-               i915_ppgtt_put(i915_vm_to_ppgtt(vm));
-
-       list_del(&vma->vma_link);
+       list_del(&vma->obj_link);
 
        kfree(vma);
 }
@@ -4967,7 +5001,7 @@ i915_gem_init_hw(struct drm_device *dev)
         */
        init_unused_rings(dev);
 
-       BUG_ON(!dev_priv->ring[RCS].default_context);
+       BUG_ON(!dev_priv->kernel_context);
 
        ret = i915_ppgtt_init_hw(dev);
        if (ret) {
@@ -5004,10 +5038,9 @@ i915_gem_init_hw(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                struct drm_i915_gem_request *req;
 
-               WARN_ON(!ring->default_context);
-
-               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-               if (ret) {
+               req = i915_gem_request_alloc(ring, NULL);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
                        i915_gem_cleanup_ringbuffer(dev);
                        goto out;
                }
@@ -5130,7 +5163,7 @@ init_ring_lists(struct intel_engine_cs *ring)
 }
 
 void
-i915_gem_load(struct drm_device *dev)
+i915_gem_load_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
@@ -5180,11 +5213,20 @@ i915_gem_load(struct drm_device *dev)
 
        dev_priv->mm.interruptible = true;
 
-       i915_gem_shrinker_init(dev_priv);
-
        lockinit(&dev_priv->fb_tracking.lock, "drmftl", 0, LK_CANRECURSE);
 }
 
+void i915_gem_load_cleanup(struct drm_device *dev)
+{
+#if 0
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
+#endif
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5255,6 +5297,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        spin_init(&file_priv->mm.lock, "i915_priv");
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
+       file_priv->bsd_ring = -1;
+
        ret = i915_gem_context_open(dev, file);
        if (ret)
                kfree(file_priv);
@@ -5297,8 +5341,8 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-       list_for_each_entry(vma, &o->vma_list, vma_link) {
-               if (i915_is_ggtt(vma->vm) &&
+       list_for_each_entry(vma, &o->vma_list, obj_link) {
+               if (vma->is_ggtt &&
                    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
                if (vma->vm == vm)
@@ -5316,7 +5360,7 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, obj_link)
                if (vma->vm == ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
@@ -5330,8 +5374,8 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &o->vma_list, vma_link) {
-               if (i915_is_ggtt(vma->vm) &&
+       list_for_each_entry(vma, &o->vma_list, obj_link) {
+               if (vma->is_ggtt &&
                    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
                if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -5347,7 +5391,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
        struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, obj_link)
                if (vma->vm == ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
@@ -5360,7 +5404,7 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
 {
        struct i915_vma *vma;
 
-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, obj_link)
                if (drm_mm_node_allocated(&vma->node))
                        return true;
 
@@ -5377,8 +5421,8 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
        BUG_ON(list_empty(&o->vma_list));
 
-       list_for_each_entry(vma, &o->vma_list, vma_link) {
-               if (i915_is_ggtt(vma->vm) &&
+       list_for_each_entry(vma, &o->vma_list, obj_link) {
+               if (vma->is_ggtt &&
                    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
                if (vma->vm == vm)
@@ -5390,7 +5434,7 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
        struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
+       list_for_each_entry(vma, &obj->vma_list, obj_link)
                if (vma->pin_count > 0)
                        return true;
 
index c25083c..5dd84e1 100644 (file)
@@ -142,7 +142,7 @@ static void i915_gem_context_clean(struct intel_context *ctx)
                return;
 
        list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-                                mm_list) {
+                                vm_link) {
                if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
                        break;
        }
@@ -321,6 +321,18 @@ err_destroy:
        return ERR_PTR(ret);
 }
 
+static void i915_gem_context_unpin(struct intel_context *ctx,
+                                  struct intel_engine_cs *engine)
+{
+       if (i915.enable_execlists) {
+               intel_lr_context_unpin(ctx, engine);
+       } else {
+               if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
+                       i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+               i915_gem_context_unreference(ctx);
+       }
+}
+
 void i915_gem_context_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -329,40 +341,31 @@ void i915_gem_context_reset(struct drm_device *dev)
        if (i915.enable_execlists) {
                struct intel_context *ctx;
 
-               list_for_each_entry(ctx, &dev_priv->context_list, link) {
+               list_for_each_entry(ctx, &dev_priv->context_list, link)
                        intel_lr_context_reset(dev, ctx);
-               }
-
-               return;
        }
 
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
-               struct intel_context *lctx = ring->last_context;
-
-               if (lctx) {
-                       if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-                               i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
-                       i915_gem_context_unreference(lctx);
+               if (ring->last_context) {
+                       i915_gem_context_unpin(ring->last_context, ring);
                        ring->last_context = NULL;
                }
-
-               /* Force the GPU state to be reinitialised on enabling */
-               if (ring->default_context)
-                       ring->default_context->legacy_hw_ctx.initialized = false;
        }
+
+       /* Force the GPU state to be reinitialised on enabling */
+       dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *ctx;
-       int i;
 
        /* Init should only be called once per module load. Eventually the
         * restriction on the context_disabled check can be loosened. */
-       if (WARN_ON(dev_priv->ring[RCS].default_context))
+       if (WARN_ON(dev_priv->kernel_context))
                return 0;
 
        if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +395,7 @@ int i915_gem_context_init(struct drm_device *dev)
                return PTR_ERR(ctx);
        }
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-               /* NB: RCS will hold a ref for all rings */
-               ring->default_context = ctx;
-       }
+       dev_priv->kernel_context = ctx;
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                        i915.enable_execlists ? "LR" :
@@ -408,7 +406,7 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+       struct intel_context *dctx = dev_priv->kernel_context;
        int i;
 
        if (dctx->legacy_hw_ctx.rcs_state) {
@@ -424,28 +422,21 @@ void i915_gem_context_fini(struct drm_device *dev)
                 * to offset the do_switch part, so that i915_gem_context_unreference()
                 * can then free the base object correctly. */
                WARN_ON(!dev_priv->ring[RCS].last_context);
-               if (dev_priv->ring[RCS].last_context == dctx) {
-                       /* Fake switch to NULL context */
-                       WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
-                       i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-                       i915_gem_context_unreference(dctx);
-                       dev_priv->ring[RCS].last_context = NULL;
-               }
 
                i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
        }
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
+       for (i = I915_NUM_RINGS; --i >= 0;) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-               if (ring->last_context)
-                       i915_gem_context_unreference(ring->last_context);
-
-               ring->default_context = NULL;
-               ring->last_context = NULL;
+               if (ring->last_context) {
+                       i915_gem_context_unpin(ring->last_context, ring);
+                       ring->last_context = NULL;
+               }
        }
 
        i915_gem_context_unreference(dctx);
+       dev_priv->kernel_context = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
@@ -864,6 +855,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
        if (!contexts_enabled(dev))
                return -ENODEV;
 
+       if (args->pad != 0)
+               return -EINVAL;
+
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
@@ -887,6 +881,9 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        struct intel_context *ctx;
        int ret;
 
+       if (args->pad != 0)
+               return -EINVAL;
+
        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
                return -ENOENT;
 
index 07c6e4d..ea1f8d1 100644 (file)
@@ -116,7 +116,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 
 search_again:
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+       list_for_each_entry(vma, &vm->inactive_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
@@ -125,7 +125,7 @@ search_again:
                goto none;
 
        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(vma, &vm->active_list, mm_list) {
+       list_for_each_entry(vma, &vm->active_list, vm_link) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }
@@ -270,7 +270,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
                WARN_ON(!list_empty(&vm->active_list));
        }
 
-       list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+       list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
                if (vma->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));
 
index a353a9d..6b59d4a 100644 (file)
@@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
                return eb->lut[handle];
        } else {
                struct hlist_head *head;
-               struct hlist_node *node;
+               struct i915_vma *vma;
 
                head = &eb->buckets[handle & eb->and];
-               hlist_for_each(node, head) {
-                       struct i915_vma *vma;
-
-                       vma = hlist_entry(node, struct i915_vma, exec_node);
+               hlist_for_each_entry(vma, head, exec_node) {
                        if (vma->exec_handle == handle)
                                return vma;
                }
@@ -671,7 +668,7 @@ need_reloc_mappable(struct i915_vma *vma)
        if (entry->relocation_count == 0)
                return false;
 
-       if (!i915_is_ggtt(vma->vm))
+       if (!vma->is_ggtt)
                return false;
 
        /* See also use_cpu_reloc() */
@@ -690,8 +687,7 @@ eb_vma_misplaced(struct i915_vma *vma)
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        struct drm_i915_gem_object *obj = vma->obj;
 
-       WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
-              !i915_is_ggtt(vma->vm));
+       WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
 
        if (entry->alignment &&
            vma->node.start & (entry->alignment - 1))
@@ -1311,6 +1307,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        exec_start = params->batch_obj_vm_offset +
                     params->args_batch_start_offset;
 
+       if (exec_len == 0)
+               exec_len = params->batch_obj->base.size;
+
        ret = ring->dispatch_execbuffer(params->request,
                                        exec_start, exec_len,
                                        params->dispatch_flags);
@@ -1327,33 +1326,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
  */
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
-                                 struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       /* Check whether the file_priv is using one ring */
-       if (file_priv->bsd_ring)
-               return file_priv->bsd_ring->id;
-       else {
-               /* If no, use the ping-pong mechanism to select one ring */
-               int ring_id;
-
-               mutex_lock(&dev->struct_mutex);
-               if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
-                       ring_id = VCS;
-                       dev_priv->mm.bsd_ring_dispatch_index = 1;
-               } else {
-                       ring_id = VCS2;
-                       dev_priv->mm.bsd_ring_dispatch_index = 0;
-               }
-               file_priv->bsd_ring = &dev_priv->ring[ring_id];
-               mutex_unlock(&dev->struct_mutex);
-               return ring_id;
+       /* Check whether the file_priv has already selected one ring. */
+       if ((int)file_priv->bsd_ring < 0) {
+               /* If not, use the ping-pong mechanism to select one. */
+               mutex_lock(&dev_priv->dev->struct_mutex);
+               file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+               dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+               mutex_unlock(&dev_priv->dev->struct_mutex);
        }
+
+       return file_priv->bsd_ring;
 }
 
 static struct drm_i915_gem_object *
@@ -1376,6 +1365,64 @@ eb_get_batch(struct eb_vmas *eb)
        return vma->obj;
 }
 
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+       [I915_EXEC_DEFAULT]     = RCS,
+       [I915_EXEC_RENDER]      = RCS,
+       [I915_EXEC_BLT]         = BCS,
+       [I915_EXEC_BSD]         = VCS,
+       [I915_EXEC_VEBOX]       = VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+              struct drm_file *file,
+              struct drm_i915_gem_execbuffer2 *args,
+              struct intel_engine_cs **ring)
+{
+       unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+       if (user_ring_id > I915_USER_RINGS) {
+               DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+               return -EINVAL;
+       }
+
+       if ((user_ring_id != I915_EXEC_BSD) &&
+           ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+               DRM_DEBUG("execbuf with non bsd ring but with invalid "
+                         "bsd dispatch flags: %d\n", (int)(args->flags));
+               return -EINVAL;
+       }
+
+       if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+               unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+               if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+                       bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+               } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+                          bsd_idx <= I915_EXEC_BSD_RING2) {
+                       bsd_idx >>= I915_EXEC_BSD_SHIFT;
+                       bsd_idx--;
+               } else {
+                       DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+                                 bsd_idx);
+                       return -EINVAL;
+               }
+
+               *ring = &dev_priv->ring[_VCS(bsd_idx)];
+       } else {
+               *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+       }
+
+       if (!intel_ring_initialized(*ring)) {
+               DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
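For illustration, a minimal userspace sketch of how the selection logic above is exercised (assumes libdrm; the helper name submit_on_vcs2 and the fd/batch_handle parameters are hypothetical, not part of this patch):

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    static int submit_on_vcs2(int fd, uint32_t batch_handle, uint32_t batch_len)
    {
            struct drm_i915_gem_exec_object2 exec = { .handle = batch_handle };
            struct drm_i915_gem_execbuffer2 execbuf;

            memset(&execbuf, 0, sizeof(execbuf));
            execbuf.buffers_ptr = (uintptr_t)&exec;
            execbuf.buffer_count = 1;
            execbuf.batch_len = batch_len;
            /* I915_EXEC_BSD alone now takes the ping-pong default chosen by
             * gen8_dispatch_bsd_ring(); I915_EXEC_BSD_RING1/RING2 pin the
             * batch to a specific VCS engine. */
            execbuf.flags = I915_EXEC_BSD | I915_EXEC_BSD_RING2;

            return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }

On hardware without a second BSD engine, eb_select_ring() above simply falls back to the single VCS ring via user_ring_map[].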
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
@@ -1383,6 +1430,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_i915_gem_exec_object2 *exec)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *req = NULL;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1410,51 +1458,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (args->flags & I915_EXEC_IS_PINNED)
                dispatch_flags |= I915_DISPATCH_PINNED;
 
-       if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
-               DRM_DEBUG("execbuf with unknown ring: %d\n",
-                         (int)(args->flags & I915_EXEC_RING_MASK));
-               return -EINVAL;
-       }
-
-       if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
-           ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
-               DRM_DEBUG("execbuf with non bsd ring but with invalid "
-                       "bsd dispatch flags: %d\n", (int)(args->flags));
-               return -EINVAL;
-       } 
-
-       if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
-               ring = &dev_priv->ring[RCS];
-       else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
-               if (HAS_BSD2(dev)) {
-                       int ring_id;
-
-                       switch (args->flags & I915_EXEC_BSD_MASK) {
-                       case I915_EXEC_BSD_DEFAULT:
-                               ring_id = gen8_dispatch_bsd_ring(dev, file);
-                               ring = &dev_priv->ring[ring_id];
-                               break;
-                       case I915_EXEC_BSD_RING1:
-                               ring = &dev_priv->ring[VCS];
-                               break;
-                       case I915_EXEC_BSD_RING2:
-                               ring = &dev_priv->ring[VCS2];
-                               break;
-                       default:
-                               DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
-                                         (int)(args->flags & I915_EXEC_BSD_MASK));
-                               return -EINVAL;
-                       }
-               } else
-                       ring = &dev_priv->ring[VCS];
-       } else
-               ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
-       if (!intel_ring_initialized(ring)) {
-               DRM_DEBUG("execbuf with invalid ring: %d\n",
-                         (int)(args->flags & I915_EXEC_RING_MASK));
-               return -EINVAL;
-       }
+       ret = eb_select_ring(dev_priv, file, args, &ring);
+       if (ret)
+               return ret;
 
        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1601,11 +1607,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
        /* Allocate a request for this batch buffer nice and early. */
-       ret = i915_gem_request_alloc(ring, ctx, &params->request);
-       if (ret)
+       req = i915_gem_request_alloc(ring, ctx);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
                goto err_batch_unpin;
+       }
 
-       ret = i915_gem_request_add_to_client(params->request, file);
+       ret = i915_gem_request_add_to_client(req, file);
        if (ret)
                goto err_batch_unpin;
 
@@ -1621,6 +1629,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        params->dispatch_flags          = dispatch_flags;
        params->batch_obj               = batch_obj;
        params->ctx                     = ctx;
+       params->request                 = req;
 
        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
@@ -1644,8 +1653,8 @@ err:
         * must be freed again. If it was submitted then it is being tracked
         * on the active request list and no clean up is required here.
         */
-       if (ret && params->request)
-               i915_gem_request_cancel(params->request);
+       if (ret && !IS_ERR_OR_NULL(req))
+               i915_gem_request_cancel(req);
 
        mutex_unlock(&dev->struct_mutex);
 
index e145a7e..6c87eaa 100644
@@ -35,8 +35,8 @@
  * set of these objects.
  *
  * Fences are used to detile GTT memory mappings. They're also connected to the
- * hardware frontbuffer render tracking and hence interract with frontbuffer
- * conmpression. Furthermore on older platforms fences are required for tiled
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
  * objects used by the display engine. They can also be used by the render
  * engine - they're required for blitter commands and are optional for render
  * commands. But on gen4+ both display (with the exception of fbc) and rendering
@@ -47,8 +47,8 @@
  *
  * Finally note that because fences are such a restricted resource they're
  * dynamically associated with objects. Furthermore fence state is committed to
- * the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
- * explictly call i915_gem_object_get_fence() to synchronize fencing status
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
  * for cpu access. Also note that some code wants an unfenced view, for those
  * cases the fence can be removed forcefully with i915_gem_object_put_fence().
  *
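A minimal kernel-side sketch of the pattern described above (illustrative only, not part of this patch; assumes struct_mutex is held and obj is a bound, tiled GEM object):

    int ret;

    /* Commit lazy fence state before fenced (detiled) CPU access. */
    ret = i915_gem_object_get_fence(obj);
    if (ret)
            return ret;

    /* ... access the object through its fenced GTT mapping ... */

    /* Callers that want an unfenced view afterwards can drop it again. */
    ret = i915_gem_object_put_fence(obj);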
@@ -528,7 +528,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
  * required.
  *
  * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
+ * 17 is not just a page offset, so as we page an object out and back in,
  * individual pages in it will have different bit 17 addresses, resulting in
  * each 64 bytes being swapped with its neighbor!
  *
index c0d7e04..056d58f 100644
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+       .type = I915_GGTT_VIEW_NORMAL,
+};
 const struct i915_ggtt_view i915_ggtt_view_rotated = {
-        .type = I915_GGTT_VIEW_ROTATED
+       .type = I915_GGTT_VIEW_ROTATED,
 };
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@@ -1275,8 +1277,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
                gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
                        /* Same reasoning as pd */
                        WARN_ON(!pt);
-                       if (pt == NULL)         /* XXX dillon hack */
-                               continue;       /* XXX dillon hack */
                        WARN_ON(!pd_len);
                        WARN_ON(!gen8_pte_count(pd_start, pd_len));
 
@@ -2135,6 +2135,25 @@ static void i915_address_space_init(struct i915_address_space *vm,
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
+static void gtt_write_workarounds(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* This function is for gtt related workarounds. This function is
+        * called on driver load and after a GPU reset, so you can place
+        * workarounds here even if they get overwritten by GPU reset.
+        */
+       /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+       else if (IS_CHERRYVIEW(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+       else if (IS_SKYLAKE(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+       else if (IS_BROXTON(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+}
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2151,6 +2170,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
+       gtt_write_workarounds(dev);
+
        /* In the case of execlists, PPGTT is enabled by the context descriptor
         * and the PDPs are contained within the context itself.  We don't
         * need to do anything here. */
@@ -2753,7 +2774,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
                }
                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
+               list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
        }
 
        /* Clear any non-preallocated blocks */
@@ -2833,6 +2854,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
                ppgtt->base.cleanup(&ppgtt->base);
        }
 
+       i915_gem_cleanup_stolen(dev);
+
        if (drm_mm_initialized(&vm->mm)) {
                if (intel_vgpu_active(dev))
                        intel_vgt_deballoon();
@@ -3205,12 +3228,21 @@ int i915_gem_gtt_init(struct drm_device *dev)
        }
 
        gtt->base.dev = dev;
+       gtt->base.is_ggtt = true;
 
        ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
                             &gtt->mappable_base, &gtt->mappable_end);
        if (ret)
                return ret;
 
+       /*
+        * Initialise stolen early so that we may reserve preallocated
+        * objects for the BIOS to KMS transition.
+        */
+       ret = i915_gem_init_stolen(dev);
+       if (ret)
+               goto out_gtt_cleanup;
+
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %luM\n",
                 gtt->base.total >> 20);
@@ -3230,6 +3262,11 @@ int i915_gem_gtt_init(struct drm_device *dev)
        DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
        return 0;
+
+out_gtt_cleanup:
+       gtt->base.cleanup(&dev_priv->gtt.base);
+
+       return ret;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -3252,7 +3289,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
        vm = &dev_priv->gtt.base;
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
                flush = false;
-               list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               list_for_each_entry(vma, &obj->vma_list, obj_link) {
                        if (vma->vm != vm)
                                continue;
 
@@ -3308,19 +3345,20 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);
 
-       INIT_LIST_HEAD(&vma->vma_link);
-       INIT_LIST_HEAD(&vma->mm_list);
+       INIT_LIST_HEAD(&vma->vm_link);
+       INIT_LIST_HEAD(&vma->obj_link);
        INIT_LIST_HEAD(&vma->exec_list);
        vma->vm = vm;
        vma->obj = obj;
+       vma->is_ggtt = i915_is_ggtt(vm);
 
        if (i915_is_ggtt(vm))
                vma->ggtt_view = *ggtt_view;
-
-       list_add_tail(&vma->vma_link, &obj->vma_list);
-       if (!i915_is_ggtt(vm))
+       else
                i915_ppgtt_get(i915_vm_to_ppgtt(vm));
 
+       list_add_tail(&vma->obj_link, &obj->vma_list);
+
        return vma;
 }
 
@@ -3361,8 +3399,9 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 }
 
 static struct scatterlist *
-rotate_pages(dma_addr_t *in, unsigned int offset,
+rotate_pages(const dma_addr_t *in, unsigned int offset,
             unsigned int width, unsigned int height,
+            unsigned int stride,
             struct sg_table *st, struct scatterlist *sg)
 {
        unsigned int column, row;
@@ -3374,7 +3413,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
        }
 
        for (column = 0; column < width; column++) {
-               src_idx = width * (height - 1) + column;
+               src_idx = stride * (height - 1) + column;
                for (row = 0; row < height; row++) {
                        st->nents++;
                        /* We don't need the pages, but need to initialize
@@ -3385,7 +3424,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
                        sg_dma_address(sg) = in[offset + src_idx];
                        sg_dma_len(sg) = PAGE_SIZE;
                        sg = sg_next(sg);
-                       src_idx -= width;
+                       src_idx -= stride;
                }
        }
 
@@ -3393,10 +3432,9 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
 }
 
 static struct sg_table *
-intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
                          struct drm_i915_gem_object *obj)
 {
-       struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
        unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
        unsigned int size_pages_uv;
        struct sg_page_iter sg_iter;
@@ -3438,6 +3476,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
        /* Rotate the pages. */
        sg = rotate_pages(page_addr_list, 0,
                     rot_info->width_pages, rot_info->height_pages,
+                    rot_info->width_pages,
                     st, NULL);
 
        /* Append the UV plane if NV12. */
@@ -3453,6 +3492,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
                rotate_pages(page_addr_list, uv_start_page,
                             rot_info->width_pages_uv,
                             rot_info->height_pages_uv,
+                            rot_info->width_pages_uv,
                             st, sg);
        }
 
@@ -3534,7 +3574,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
                vma->ggtt_view.pages = vma->obj->pages;
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                vma->ggtt_view.pages =
-                       intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+                       intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
                vma->ggtt_view.pages =
                        intel_partial_pages(&vma->ggtt_view, vma->obj);
@@ -3590,13 +3630,9 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                return 0;
 
        if (vma->bound == 0 && vma->vm->allocate_va_range) {
-               trace_i915_va_alloc(vma->vm,
-                                   vma->node.start,
-                                   vma->node.size,
-                                   VM_TO_TRACE_NAME(vma->vm));
-
                /* XXX: i915_vma_pin() will fix this +- hack */
                vma->pin_count++;
+               trace_i915_va_alloc(vma);
                ret = vma->vm->allocate_va_range(vma->vm,
                                                 vma->node.start,
                                                 vma->node.size);
@@ -3628,7 +3664,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
        if (view->type == I915_GGTT_VIEW_NORMAL) {
                return obj->base.size;
        } else if (view->type == I915_GGTT_VIEW_ROTATED) {
-               return view->params.rotation_info.size;
+               return view->params.rotated.size;
        } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
                return view->params.partial.size << PAGE_SHIFT;
        } else {
index ff44321..7b17697 100644
@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
@@ -156,7 +155,7 @@ struct i915_ggtt_view {
                        u64 offset;
                        unsigned int size;
                } partial;
-               struct intel_rotation_info rotation_info;
+               struct intel_rotation_info rotated;
        } params;
 
        struct sg_table *pages;
@@ -184,6 +183,7 @@ struct i915_vma {
 #define GLOBAL_BIND    (1<<0)
 #define LOCAL_BIND     (1<<1)
        unsigned int bound : 4;
+       bool is_ggtt : 1;
 
        /**
         * Support different GGTT views into the same object.
@@ -195,9 +195,9 @@ struct i915_vma {
        struct i915_ggtt_view ggtt_view;
 
        /** This object's place on the active/inactive lists */
-       struct list_head mm_list;
+       struct list_head vm_link;
 
-       struct list_head vma_link; /* Link in the object's VMA list */
+       struct list_head obj_link; /* Link in the object's VMA list */
 
        /** This vma's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;
@@ -276,6 +276,8 @@ struct i915_address_space {
        u64 start;              /* Start offset always 0 for dri2 */
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
 
+       bool is_ggtt;
+
        struct i915_page_scratch *scratch_page;
        struct i915_page_table *scratch_pt;
        struct i915_page_directory *scratch_pd;
@@ -331,6 +333,8 @@ struct i915_address_space {
                        u32 flags);
 };
 
+#define i915_is_ggtt(V) ((V)->is_ggtt)
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -343,6 +347,8 @@ struct i915_gtt {
 
        size_t stolen_size;             /* Total size of stolen memory */
        size_t stolen_usable_size;      /* Total size minus BIOS reserved */
+       size_t stolen_reserved_base;
+       size_t stolen_reserved_size;
        u64 mappable_end;               /* End offset that we can CPU map */
        struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */
@@ -417,7 +423,7 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
                                      uint32_t pde_shift)
 {
-       const uint64_t mask = ~((1 << pde_shift) - 1);
+       const uint64_t mask = ~((1ULL << pde_shift) - 1);
        uint64_t end;
 
        WARN_ON(length == 0);
index fb84da5..6f1a716 100644
@@ -46,6 +46,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 }
 #endif
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+       int count = 0;
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (drm_mm_node_allocated(&vma->node))
+                       count++;
+               if (vma->pin_count)
+                       count++;
+       }
+
+       return count;
+}
+
+static bool swap_available(void)
+{
+       return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+       /* Only report true if by unbinding the object and putting its pages
+        * we can actually make forward progress towards freeing physical
+        * pages.
+        *
+        * If the pages are pinned for any other reason than being bound
+        * to the GPU, simply unbinding from the GPU is not going to succeed
+        * in releasing our pin count on the pages themselves.
+        */
+       if (obj->pages_pin_count != num_vma_bound(obj))
+               return false;
+
+       /* We can only return physical pages to the system if we can either
+        * discard the contents (because the user has marked them as being
+        * purgeable) or if we can move their contents out to swap.
+        */
+       return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -128,11 +168,14 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
                                continue;
 
+                       if (!can_release_pages(obj))
+                               continue;
+
                        drm_gem_object_reference(&obj->base);
 
                        /* For the unbound phase, this should be a no-op! */
                        list_for_each_entry_safe(vma, v,
-                                                &obj->vma_list, vma_link)
+                                                &obj->vma_list, obj_link)
                                if (i915_vma_unbind(vma))
                                        break;
 
@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
        return true;
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-       struct i915_vma *vma;
-       int count = 0;
-
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
-               if (drm_mm_node_allocated(&vma->node))
-                       count++;
-               if (vma->pin_count)
-                       count++;
-       }
-
-       return count;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                        count += obj->base.size >> PAGE_SHIFT;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+               if (!obj->active && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
@@ -341,9 +369,23 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-       register_shrinker(&dev_priv->mm.shrinker);
+       WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-       register_oom_notifier(&dev_priv->mm.oom_notifier);
+       WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+#endif
+}
+
+/**
+ * i915_gem_shrinker_cleanup - Clean up i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function unregisters the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+{
+#if 0
+       WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+       unregister_shrinker(&dev_priv->mm.shrinker);
 #endif
 }
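A hedged sketch of the intended pairing (the exact call sites in the driver load/unload paths are an assumption, not shown in this hunk):

    /* driver load */
    i915_gem_shrinker_init(dev_priv);

    /* ... driver lifetime ... */

    /* driver unload, before dev_priv is torn down */
    i915_gem_shrinker_cleanup(dev_priv);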
index 40727d1..0c7cd77 100644
@@ -461,6 +461,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
                return 0;
        }
 
+       dev_priv->gtt.stolen_reserved_base = reserved_base;
+       dev_priv->gtt.stolen_reserved_size = reserved_size;
+
        /* It is possible for the reserved area to end before the end of stolen
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;
@@ -572,6 +575,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        if (obj->pages == NULL)
                goto cleanup;
 
+       obj->get_page.sg = obj->pages->sgl;
+       obj->get_page.last = 0;
+
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;
 
@@ -635,6 +641,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
        if (!drm_mm_initialized(&dev_priv->mm.stolen))
                return NULL;
 
+       lockdep_assert_held(&dev->struct_mutex);
+
        DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
                        stolen_offset, gtt_offset, size);
 
@@ -692,7 +700,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
                vma->bound |= GLOBAL_BIND;
                __i915_vma_set_map_and_fenceable(vma);
-               list_add_tail(&vma->mm_list, &ggtt->inactive_list);
+               list_add_tail(&vma->vm_link, &ggtt->inactive_list);
        }
 
        list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
index 6ffa09f..e25c666 100644
@@ -45,21 +45,18 @@ struct i915_mmu_notifier {
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
-       struct list_head linear;
-       bool has_linear;
 };
 
 struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
+       struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
-       struct drm_i915_gem_object *obj;
        struct work_struct work;
-       bool active;
-       bool is_linear;
+       bool attached;
 };
 
-static void __cancel_userptr__worker(struct work_struct *work)
+static void cancel_userptr(struct work_struct *work)
 {
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
@@ -77,7 +74,7 @@ static void __cancel_userptr__worker(struct work_struct *work)
                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;
 
-               list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
                        int ret = i915_vma_unbind(vma);
                        WARN_ON(ret && ret != -EIO);
                }
@@ -90,24 +87,22 @@ static void __cancel_userptr__worker(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
-static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+static void add_object(struct i915_mmu_object *mo)
 {
-       unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
-
-       /* The mmu_object is released late when destroying the
-        * GEM object so it is entirely possible to gain a
-        * reference on an object in the process of being freed
-        * since our serialisation is via the spinlock and not
-        * the struct_mutex - and consequently use it after it
-        * is freed and then double free it.
-        */
-       if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-               schedule_work(&mo->work);
-               /* only schedule one work packet to avoid the refleak */
-               mo->active = false;
-       }
+       if (mo->attached)
+               return;
+
+       interval_tree_insert(&mo->it, &mo->mn->objects);
+       mo->attached = true;
+}
 
-       return end;
+static void del_object(struct i915_mmu_object *mo)
+{
+       if (!mo->attached)
+               return;
+
+       interval_tree_remove(&mo->it, &mo->mn->objects);
+       mo->attached = false;
 }
 
 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -118,28 +113,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
+       struct interval_tree_node *it;
+       LIST_HEAD(cancelled);
+
+       if (RB_EMPTY_ROOT(&mn->objects))
+               return;
 
        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;
 
        spin_lock(&mn->lock);
-       if (mn->has_linear) {
-               list_for_each_entry(mo, &mn->linear, link) {
-                       if (mo->it.last < start || mo->it.start > end)
-                               continue;
-
-                       cancel_userptr(mo);
-               }
-       } else {
-               struct interval_tree_node *it;
+       it = interval_tree_iter_first(&mn->objects, start, end);
+       while (it) {
+               /* The mmu_object is released late when destroying the
+                * GEM object so it is entirely possible to gain a
+                * reference on an object in the process of being freed
+                * since our serialisation is via the spinlock and not
+                * the struct_mutex - and consequently use it after it
+                * is freed and then double free it. To prevent that
+                * use-after-free we only acquire a reference on the
+                * object if it is not in the process of being destroyed.
+                */
+               mo = container_of(it, struct i915_mmu_object, it);
+               if (kref_get_unless_zero(&mo->obj->base.refcount))
+                       schedule_work(&mo->work);
 
-               it = interval_tree_iter_first(&mn->objects, start, end);
-               while (it) {
-                       mo = container_of(it, struct i915_mmu_object, it);
-                       start = cancel_userptr(mo);
-                       it = interval_tree_iter_next(it, start, end);
-               }
+               list_add(&mo->link, &cancelled);
+               it = interval_tree_iter_next(it, start, end);
        }
+       list_for_each_entry(mo, &cancelled, link)
+               del_object(mo);
        spin_unlock(&mn->lock);
 }
 
@@ -160,8 +163,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
-       INIT_LIST_HEAD(&mn->linear);
-       mn->has_linear = false;
 
         /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
@@ -173,85 +174,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        return mn;
 }
 
-static int
-i915_mmu_notifier_add(struct drm_device *dev,
-                     struct i915_mmu_notifier *mn,
-                     struct i915_mmu_object *mo)
-{
-       struct interval_tree_node *it;
-       int ret = 0;
-
-       /* By this point we have already done a lot of expensive setup that
-        * we do not want to repeat just because the caller (e.g. X) has a
-        * signal pending (and partly because of that expensive setup, X
-        * using an interrupt timer is likely to get stuck in an EINTR loop).
-        */
-       mutex_lock(&dev->struct_mutex);
-
-       /* Make sure we drop the final active reference (and thereby
-        * remove the objects from the interval tree) before we do
-        * the check for overlapping objects.
-        */
-       i915_gem_retire_requests(dev);
-
-       spin_lock(&mn->lock);
-       it = interval_tree_iter_first(&mn->objects,
-                                     mo->it.start, mo->it.last);
-       if (it) {
-               struct drm_i915_gem_object *obj;
-
-               /* We only need to check the first object in the range as it
-                * either has cancelled gup work queued and we need to
-                * return back to the user to give time for the gup-workers
-                * to flush their object references upon which the object will
-                * be removed from the interval-tree, or the the range is
-                * still in use by another client and the overlap is invalid.
-                *
-                * If we do have an overlap, we cannot use the interval tree
-                * for fast range invalidation.
-                */
-
-               obj = container_of(it, struct i915_mmu_object, it)->obj;
-               if (!obj->userptr.workers)
-                       mn->has_linear = mo->is_linear = true;
-               else
-                       ret = -EAGAIN;
-       } else
-               interval_tree_insert(&mo->it, &mn->objects);
-
-       if (ret == 0)
-               list_add(&mo->link, &mn->linear);
-
-       spin_unlock(&mn->lock);
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
-{
-       struct i915_mmu_object *mo;
-
-       list_for_each_entry(mo, &mn->linear, link)
-               if (mo->is_linear)
-                       return true;
-
-       return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
-                     struct i915_mmu_object *mo)
-{
-       spin_lock(&mn->lock);
-       list_del(&mo->link);
-       if (mo->is_linear)
-               mn->has_linear = i915_mmu_notifier_has_linear(mn);
-       else
-               interval_tree_remove(&mo->it, &mn->objects);
-       spin_unlock(&mn->lock);
-}
-
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -261,7 +183,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
        if (mo == NULL)
                return;
 
-       i915_mmu_notifier_del(mo->mn, mo);
+       spin_lock(&mo->mn->lock);
+       del_object(mo);
+       spin_unlock(&mo->mn->lock);
        kfree(mo);
 
        obj->userptr.mmu_object = NULL;
@@ -295,7 +219,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 {
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;
-       int ret;
 
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
@@ -312,16 +235,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                return -ENOMEM;
 
        mo->mn = mn;
-       mo->it.start = obj->userptr.ptr;
-       mo->it.last = mo->it.start + obj->base.size - 1;
        mo->obj = obj;
-       INIT_WORK(&mo->work, __cancel_userptr__worker);
-
-       ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
-       if (ret) {
-               kfree(mo);
-               return ret;
-       }
+       mo->it.start = obj->userptr.ptr;
+       mo->it.last = obj->userptr.ptr + obj->base.size - 1;
+       INIT_WORK(&mo->work, cancel_userptr);
 
        obj->userptr.mmu_object = mo;
        return 0;
@@ -553,8 +470,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
-       if (!value || !work_pending(&obj->userptr.mmu_object->work))
-               obj->userptr.mmu_object->active = value;
+       if (!value)
+               del_object(obj->userptr.mmu_object);
+       else if (!work_pending(&obj->userptr.mmu_object->work))
+               add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
@@ -583,19 +502,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;
 
-               down_read(&mm->mmap_sem);
-               while (pinned < npages) {
-                       ret = get_user_pages(work->task, mm,
-                                            obj->userptr.ptr + pinned * PAGE_SIZE,
-                                            npages - pinned,
-                                            !obj->userptr.read_only, 0,
-                                            pvec + pinned, NULL);
-                       if (ret < 0)
-                               break;
-
-                       pinned += ret;
+               ret = -EFAULT;
+               if (atomic_inc_not_zero(&mm->mm_users)) {
+                       down_read(&mm->mmap_sem);
+                       while (pinned < npages) {
+                               ret = get_user_pages_remote
+                                       (work->task, mm,
+                                        obj->userptr.ptr + pinned * PAGE_SIZE,
+                                        npages - pinned,
+                                        !obj->userptr.read_only, 0,
+                                        pvec + pinned, NULL);
+                               if (ret < 0)
+                                       break;
+
+                               pinned += ret;
+                       }
+                       up_read(&mm->mmap_sem);
+                       mmput(mm);
                }
-               up_read(&mm->mmap_sem);
        }
 
        mutex_lock(&dev->struct_mutex);
@@ -765,7 +689,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
                        set_page_dirty(page);
 
                mark_page_accessed(page);
-               page_cache_release(page);
+               put_page(page);
        }
        obj->dirty = 0;
 
diff --git a/sys/dev/drm/i915/i915_gpu_error.c b/sys/dev/drm/i915/i915_gpu_error.c
new file mode 100644
index 0000000..bab1227
--- /dev/null
+++ b/sys/dev/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1436 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *    Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include "i915_drv.h"
+
+#if 0
+static const char *ring_str(int ring)
+{
+       switch (ring) {
+       case RCS: return "render";
+       case VCS: return "bsd";
+       case BCS: return "blt";
+       case VECS: return "vebox";
+       case VCS2: return "bsd2";
+       default: return "";
+       }
+}
+
+static const char *pin_flag(int pinned)
+{
+       if (pinned > 0)
+               return " P";
+       else if (pinned < 0)
+               return " p";
+       else
+               return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+       switch (tiling) {
+       default:
+       case I915_TILING_NONE: return "";
+       case I915_TILING_X: return " X";
+       case I915_TILING_Y: return " Y";
+       }
+}
+
+static const char *dirty_flag(int dirty)
+{
+       return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+       return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+
+       if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+               e->err = -ENOSPC;
+               return false;
+       }
+
+       if (e->bytes == e->size - 1 || e->err)
+               return false;
+
+       return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+                             unsigned len)
+{
+       if (e->pos + len <= e->start) {
+               e->pos += len;
+               return false;
+       }
+
+       /* First vsnprintf needs to fit in its entirety for memmove */
+       if (len >= e->size) {
+               e->err = -EIO;
+               return false;
+       }
+
+       return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+                                unsigned len)
+{
+       /* If this is the first printf in this window, adjust it so that the
+        * start position matches the start of the buffer
+        */
+
+       if (e->pos < e->start) {
+               const size_t off = e->start - e->pos;
+
+               /* Should not happen but be paranoid */
+               if (off > len || e->bytes) {
+                       e->err = -EIO;
+                       return;
+               }
+
+               memmove(e->buf, e->buf + off, len - off);
+               e->bytes = len - off;
+               e->pos = e->start;
+               return;
+       }
+
+       e->bytes += len;
+       e->pos += len;
+}
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+                              const char *f, va_list args)
+{
+       unsigned len;
+
+       if (!__i915_error_ok(e))
+               return;
+
+       /* Seek the first printf which hits the start position */
+       if (e->pos < e->start) {
+               va_list tmp;
+
+               va_copy(tmp, args);
+               len = vsnprintf(NULL, 0, f, tmp);
+               va_end(tmp);
+
+               if (!__i915_error_seek(e, len))
+                       return;
+       }
+
+       len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+       if (len >= e->size - e->bytes)
+               len = e->size - e->bytes - 1;
+
+       __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+                           const char *str)
+{
+       unsigned len;
+
+       if (!__i915_error_ok(e))
+               return;
+
+       len = strlen(str);
+
+       /* Seek the first printf which hits the start position */
+       if (e->pos < e->start) {
+               if (!__i915_error_seek(e, len))
+                       return;
+       }
+
+       if (len >= e->size - e->bytes)
+               len = e->size - e->bytes - 1;
+       memcpy(e->buf + e->bytes, str, len);
+
+       __i915_error_advance(e, len);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+                               const char *name,
+                               struct drm_i915_error_buffer *err,
+                               int count)
+{
+       int i;
+
+       err_printf(m, "  %s [%d]:\n", name, count);
+
+       while (count--) {
+               err_printf(m, "    %08x_%08x %8u %02x %02x [ ",
+                          upper_32_bits(err->gtt_offset),
+                          lower_32_bits(err->gtt_offset),
+                          err->size,
+                          err->read_domains,
+                          err->write_domain);
+               for (i = 0; i < I915_NUM_RINGS; i++)
+                       err_printf(m, "%02x ", err->rseqno[i]);
+
+               err_printf(m, "] %02x", err->wseqno);
+               err_puts(m, pin_flag(err->pinned));
+               err_puts(m, tiling_flag(err->tiling));
+               err_puts(m, dirty_flag(err->dirty));
+               err_puts(m, purgeable_flag(err->purgeable));
+               err_puts(m, err->userptr ? " userptr" : "");
+               err_puts(m, err->ring != -1 ? " " : "");
+               err_puts(m, ring_str(err->ring));
+               err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
+
+               if (err->name)
+                       err_printf(m, " (name: %d)", err->name);
+               if (err->fence_reg != I915_FENCE_REG_NONE)
+                       err_printf(m, " (fence: %d)", err->fence_reg);
+
+               err_puts(m, "\n");
+               err++;
+       }
+}
+
+static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+{
+       switch (a) {
+       case HANGCHECK_IDLE:
+               return "idle";
+       case HANGCHECK_WAIT:
+               return "wait";
+       case HANGCHECK_ACTIVE:
+               return "active";
+       case HANGCHECK_ACTIVE_LOOP:
+               return "active (loop)";
+       case HANGCHECK_KICK:
+               return "kick";
+       case HANGCHECK_HUNG:
+               return "hung";
+       }
+
+       return "unknown";
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+                                 struct drm_device *dev,
+                                 struct drm_i915_error_state *error,
+                                 int ring_idx)
+{
+       struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+
+       if (!ring->valid)
+               return;
+
+       err_printf(m, "%s command stream:\n", ring_str(ring_idx));
+       err_printf(m, "  START: 0x%08x\n", ring->start);
+       err_printf(m, "  HEAD:  0x%08x\n", ring->head);
+       err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
+       err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
+       err_printf(m, "  HWS:   0x%08x\n", ring->hws);
+       err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
+       err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
+       err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
+       err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
+       if (INTEL_INFO(dev)->gen >= 4) {
+               err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
+               err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
+               err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
+       }
+       err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
+       err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
+                  lower_32_bits(ring->faddr));
+       if (INTEL_INFO(dev)->gen >= 6) {
+               err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
+               err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
+               err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
+                          ring->semaphore_mboxes[0],
+                          ring->semaphore_seqno[0]);
+               err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
+                          ring->semaphore_mboxes[1],
+                          ring->semaphore_seqno[1]);
+               if (HAS_VEBOX(dev)) {
+                       err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
+                                  ring->semaphore_mboxes[2],
+                                  ring->semaphore_seqno[2]);
+               }
+       }
+       if (USES_PPGTT(dev)) {
+               err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+
+               if (INTEL_INFO(dev)->gen >= 8) {
+                       int i;
+                       for (i = 0; i < 4; i++)
+                               err_printf(m, "  PDP%d: 0x%016llx\n",
+                                          i, ring->vm_info.pdp[i]);
+               } else {
+                       err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
+                                  ring->vm_info.pp_dir_base);
+               }
+       }
+       err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
+       err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
+       err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
+       err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+       err_printf(m, "  hangcheck: %s [%d]\n",
+                  hangcheck_action_to_str(ring->hangcheck_action),
+                  ring->hangcheck_score);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+       va_list args;
+
+       va_start(args, f);
+       i915_error_vprintf(e, f, args);
+       va_end(args);
+}
+
+static void print_error_obj(struct drm_i915_error_state_buf *m,
+                           struct drm_i915_error_object *obj)
+{
+       int page, offset, elt;
+
+       for (page = offset = 0; page < obj->page_count; page++) {
+               for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+                       err_printf(m, "%08x :  %08x\n", offset,
+                                  obj->pages[page][elt]);
+                       offset += 4;
+               }
+       }
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+                           const struct i915_error_state_file_priv *error_priv)
+{
+       struct drm_device *dev = error_priv->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error = error_priv->error;
+       struct drm_i915_error_object *obj;
+       int i, j, offset, elt;
+       int max_hangcheck_score;
+
+       if (!error) {
+               err_printf(m, "no error state collected\n");
+               goto out;
+       }
+
+       err_printf(m, "%s\n", error->error_msg);
+       err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+                  error->time.tv_usec);
+       err_printf(m, "Kernel: " UTS_RELEASE "\n");
+       max_hangcheck_score = 0;
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               if (error->ring[i].hangcheck_score > max_hangcheck_score)
+                       max_hangcheck_score = error->ring[i].hangcheck_score;
+       }
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               if (error->ring[i].hangcheck_score == max_hangcheck_score &&
+                   error->ring[i].pid != -1) {
+                       err_printf(m, "Active process (on ring %s): %s [%d]\n",
+                                  ring_str(i),
+                                  error->ring[i].comm,
+                                  error->ring[i].pid);
+               }
+       }
+       err_printf(m, "Reset count: %u\n", error->reset_count);
+       err_printf(m, "Suspend count: %u\n", error->suspend_count);
+       err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+       err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+       err_printf(m, "PCI Subsystem: %04x:%04x\n",
+                  dev->pdev->subsystem_vendor,
+                  dev->pdev->subsystem_device);
+       err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
+
+       if (HAS_CSR(dev)) {
+               struct intel_csr *csr = &dev_priv->csr;
+
+               err_printf(m, "DMC loaded: %s\n",
+                          yesno(csr->dmc_payload != NULL));
+               err_printf(m, "DMC fw version: %d.%d\n",
+                          CSR_VERSION_MAJOR(csr->version),
+                          CSR_VERSION_MINOR(csr->version));
+       }
+
+       err_printf(m, "EIR: 0x%08x\n", error->eir);
+       err_printf(m, "IER: 0x%08x\n", error->ier);
+       if (INTEL_INFO(dev)->gen >= 8) {
+               for (i = 0; i < 4; i++)
+                       err_printf(m, "GTIER gt %d: 0x%08x\n", i,
+                                  error->gtier[i]);
+       } else if (HAS_PCH_SPLIT(dev) || IS_VALLEYVIEW(dev))
+               err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
+       err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+       err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+       err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+       err_printf(m, "CCID: 0x%08x\n", error->ccid);
+       err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+
+       for (i = 0; i < dev_priv->num_fence_regs; i++)
+               err_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
+
+       for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+               err_printf(m, "  INSTDONE_%d: 0x%08x\n", i,
+                          error->extra_instdone[i]);
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               err_printf(m, "ERROR: 0x%08x\n", error->error);
+
+               if (INTEL_INFO(dev)->gen >= 8)
+                       err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
+                                  error->fault_data1, error->fault_data0);
+
+               err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+       }
+
+       if (INTEL_INFO(dev)->gen == 7)
+               err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+               i915_ring_error_state(m, dev, error, i);
+
+       for (i = 0; i < error->vm_count; i++) {
+               err_printf(m, "vm[%d]\n", i);
+
+               print_error_buffers(m, "Active",
+                                   error->active_bo[i],
+                                   error->active_bo_count[i]);
+
+               print_error_buffers(m, "Pinned",
+                                   error->pinned_bo[i],
+                                   error->pinned_bo_count[i]);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               obj = error->ring[i].batchbuffer;
+               if (obj) {
+                       err_puts(m, dev_priv->ring[i].name);
+                       if (error->ring[i].pid != -1)
+                               err_printf(m, " (submitted by %s [%d])",
+                                          error->ring[i].comm,
+                                          error->ring[i].pid);
+                       err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
+                                  upper_32_bits(obj->gtt_offset),
+                                  lower_32_bits(obj->gtt_offset));
+                       print_error_obj(m, obj);
+               }
+
+               obj = error->ring[i].wa_batchbuffer;
+               if (obj) {
+                       err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  lower_32_bits(obj->gtt_offset));
+                       print_error_obj(m, obj);
+               }
+
+               if (error->ring[i].num_requests) {
+                       err_printf(m, "%s --- %d requests\n",
+                                  dev_priv->ring[i].name,
+                                  error->ring[i].num_requests);
+                       for (j = 0; j < error->ring[i].num_requests; j++) {
+                               err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+                                          error->ring[i].requests[j].seqno,
+                                          error->ring[i].requests[j].jiffies,
+                                          error->ring[i].requests[j].tail);
+                       }
+               }
+
+               if ((obj = error->ring[i].ringbuffer)) {
+                       err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  lower_32_bits(obj->gtt_offset));
+                       print_error_obj(m, obj);
+               }
+
+               if ((obj = error->ring[i].hws_page)) {
+                       u64 hws_offset = obj->gtt_offset;
+                       u32 *hws_page = &obj->pages[0][0];
+
+                       if (i915.enable_execlists) {
+                               hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
+                               hws_page = &obj->pages[LRC_PPHWSP_PN][0];
+                       }
+                       err_printf(m, "%s --- HW Status = 0x%08llx\n",
+                                  dev_priv->ring[i].name, hws_offset);
+                       offset = 0;
+                       for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                               err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                          offset,
+                                          hws_page[elt],
+                                          hws_page[elt+1],
+                                          hws_page[elt+2],
+                                          hws_page[elt+3]);
+                               offset += 16;
+                       }
+               }
+
+               if ((obj = error->ring[i].ctx)) {
+                       err_printf(m, "%s --- HW Context = 0x%08x\n",
+                                  dev_priv->ring[i].name,
+                                  lower_32_bits(obj->gtt_offset));
+                       print_error_obj(m, obj);
+               }
+       }
+
+       if ((obj = error->semaphore_obj)) {
+               err_printf(m, "Semaphore page = 0x%08x\n",
+                          lower_32_bits(obj->gtt_offset));
+               for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+                       err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+                                  elt * 4,
+                                  obj->pages[0][elt],
+                                  obj->pages[0][elt+1],
+                                  obj->pages[0][elt+2],
+                                  obj->pages[0][elt+3]);
+               }
+       }
+
+       if (error->overlay)
+               intel_overlay_print_error_state(m, error->overlay);
+
+       if (error->display)
+               intel_display_print_error_state(m, dev, error->display);
+
+out:
+       if (m->bytes == 0 && m->err)
+               return m->err;
+
+       return 0;
+}
+
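+/*
+ * Prepare the buffer used to format the error state, retrying with
+ * progressively smaller allocations so that a truncated dump can still be
+ * produced under memory pressure.
+ */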
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+                             struct drm_i915_private *i915,
+                             size_t count, loff_t pos)
+{
+       memset(ebuf, 0, sizeof(*ebuf));
+       ebuf->i915 = i915;
+
+       /* We need to have enough room to store any i915_error_state printf
+        * so that we can move it to the start position.
+        */
+       ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+       ebuf->buf = kmalloc(ebuf->size,
+                               GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+       if (ebuf->buf == NULL) {
+               ebuf->size = PAGE_SIZE;
+               ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+       }
+
+       if (ebuf->buf == NULL) {
+               ebuf->size = 128;
+               ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+       }
+
+       if (ebuf->buf == NULL)
+               return -ENOMEM;
+
+       ebuf->start = pos;
+
+       return 0;
+}
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+       int page;
+
+       if (obj == NULL)
+               return;
+
+       for (page = 0; page < obj->page_count; page++)
+               kfree(obj->pages[page]);
+
+       kfree(obj);
+}
+
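+/* Final kref release: free the per-ring captures, BO lists and the state itself. */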
+static void i915_error_state_free(struct kref *error_ref)
+{
+       struct drm_i915_error_state *error = container_of(error_ref,
+                                                         typeof(*error), ref);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+               i915_error_object_free(error->ring[i].batchbuffer);
+               i915_error_object_free(error->ring[i].wa_batchbuffer);
+               i915_error_object_free(error->ring[i].ringbuffer);
+               i915_error_object_free(error->ring[i].hws_page);
+               i915_error_object_free(error->ring[i].ctx);
+               kfree(error->ring[i].requests);
+       }
+
+       i915_error_object_free(error->semaphore_obj);
+
+       for (i = 0; i < error->vm_count; i++)
+               kfree(error->active_bo[i]);
+
+       kfree(error->active_bo);
+       kfree(error->active_bo_count);
+       kfree(error->pinned_bo);
+       kfree(error->pinned_bo_count);
+       kfree(error->overlay);
+       kfree(error->display);
+       kfree(error);
+}
+
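+/*
+ * Snapshot a GEM object's contents into plain kernel pages, reading either
+ * through the GGTT aperture (required for stolen memory) or via a CPU
+ * mapping with explicit clflushes, so the dump matches what the GPU saw.
+ */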
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_i915_private *dev_priv,
+                        struct drm_i915_gem_object *src,
+                        struct i915_address_space *vm)
+{
+       struct drm_i915_error_object *dst;
+       struct i915_vma *vma = NULL;
+       int num_pages;
+       bool use_ggtt;
+       int i = 0;
+       u64 reloc_offset;
+
+       if (src == NULL || src->pages == NULL)
+               return NULL;
+
+       num_pages = src->base.size >> PAGE_SHIFT;
+
+       dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+       if (dst == NULL)
+               return NULL;
+
+       if (i915_gem_obj_bound(src, vm))
+               dst->gtt_offset = i915_gem_obj_offset(src, vm);
+       else
+               dst->gtt_offset = -1;
+
+       reloc_offset = dst->gtt_offset;
+       if (i915_is_ggtt(vm))
+               vma = i915_gem_obj_to_ggtt(src);
+       use_ggtt = (src->cache_level == I915_CACHE_NONE &&
+                  vma && (vma->bound & GLOBAL_BIND) &&
+                  reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+
+       /* Cannot access stolen address directly, try to use the aperture */
+       if (src->stolen) {
+               use_ggtt = true;
+
+               if (!(vma && vma->bound & GLOBAL_BIND))
+                       goto unwind;
+
+               reloc_offset = i915_gem_obj_ggtt_offset(src);
+               if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+                       goto unwind;
+       }
+
+       /* Cannot access snooped pages through the aperture */
+       if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev))
+               goto unwind;
+
+       dst->page_count = num_pages;
+       while (num_pages--) {
+               unsigned long flags;
+               void *d;
+
+               d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+               if (d == NULL)
+                       goto unwind;
+
+               local_irq_save(flags);
+               if (use_ggtt) {
+                       void __iomem *s;
+
+                       /* Simply ignore tiling or any overlapping fence.
+                        * It's part of the error state, and this hopefully
+                        * captures what the GPU read.
+                        */
+
+                       s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+                                                    reloc_offset);
+                       memcpy_fromio(d, s, PAGE_SIZE);
+                       io_mapping_unmap_atomic(s);
+               } else {
+                       struct page *page;
+                       void *s;
+
+                       page = i915_gem_object_get_page(src, i);
+
+                       drm_clflush_pages(&page, 1);
+
+                       s = kmap_atomic(page);
+                       memcpy(d, s, PAGE_SIZE);
+                       kunmap_atomic(s);
+
+                       drm_clflush_pages(&page, 1);
+               }
+               local_irq_restore(flags);
+
+               dst->pages[i++] = d;
+               reloc_offset += PAGE_SIZE;
+       }
+
+       return dst;
+
+unwind:
+       while (i--)
+               kfree(dst->pages[i]);
+       kfree(dst);
+       return NULL;
+}
+#define i915_error_ggtt_object_create(dev_priv, src) \
+       i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+
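+/* Record the per-buffer details (seqnos, domains, tiling, ...) for one VMA. */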
+static void capture_bo(struct drm_i915_error_buffer *err,
+                      struct i915_vma *vma)
+{
+       struct drm_i915_gem_object *obj = vma->obj;
+       int i;
+
+       err->size = obj->base.size;
+       err->name = obj->base.name;
+       for (i = 0; i < I915_NUM_RINGS; i++)
+               err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
+       err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
+       err->gtt_offset = vma->node.start;
+       err->read_domains = obj->base.read_domains;
+       err->write_domain = obj->base.write_domain;
+       err->fence_reg = obj->fence_reg;
+       err->pinned = 0;
+       if (i915_gem_obj_is_pinned(obj))
+               err->pinned = 1;
+       err->tiling = obj->tiling_mode;
+       err->dirty = obj->dirty;
+       err->purgeable = obj->madv != I915_MADV_WILLNEED;
+       err->userptr = obj->userptr.mm != NULL;
+       err->ring = obj->last_write_req ?
+                       i915_gem_request_get_ring(obj->last_write_req)->id : -1;
+       err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+                            int count, struct list_head *head)
+{
+       struct i915_vma *vma;
+       int i = 0;
+
+       list_for_each_entry(vma, head, vm_link) {
+               capture_bo(err++, vma);
+               if (++i == count)
+                       break;
+       }
+
+       return i;
+}
+
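+/* Walk the global bound list and record only the VMAs pinned in this VM. */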
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+                            int count, struct list_head *head,
+                            struct i915_address_space *vm)
+{
+       struct drm_i915_gem_object *obj;
+       struct drm_i915_error_buffer * const first = err;
+       struct drm_i915_error_buffer * const last = err + count;
+
+       list_for_each_entry(obj, head, global_list) {
+               struct i915_vma *vma;
+
+               if (err == last)
+                       break;
+
+               list_for_each_entry(vma, &obj->vma_list, obj_link)
+                       if (vma->vm == vm && vma->pin_count > 0)
+                               capture_bo(err++, vma);
+       }
+
+       return err - first;
+}
+
+/* Generate a semi-unique error code. The code is not meant to have meaning;
+ * its only purpose is to try to prevent falsely duplicated bug reports by
+ * grossly estimating a GPU error state.
+ *
+ * TODO Ideally, hashing the batchbuffer would be a very nice way to determine
+ * the hang if we could strip the GTT offset information from it.
+ *
+ * In its current form it is only a small step better than a random number.
+ */
+static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
+                                        struct drm_i915_error_state *error,
+                                        int *ring_id)
+{
+       uint32_t error_code = 0;
+       int i;
+
+       /* IPEHR would be an ideal way to detect errors, as it's the gross
+        * measure of "the command that hung." However, it can contain some
+        * very common synchronization commands which almost always appear
+        * when the hang is strictly a client bug. Use instdone to
+        * differentiate those somewhat.
+        */
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
+                       if (ring_id)
+                               *ring_id = i;
+
+                       return error->ring[i].ipehr ^ error->ring[i].instdone;
+               }
+       }
+
+       return error_code;
+}
+
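+/* Snapshot the fence registers; their width and layout depend on the gen. */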
+static void i915_gem_record_fences(struct drm_device *dev,
+                                  struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int i;
+
+       if (IS_GEN3(dev) || IS_GEN2(dev)) {
+               for (i = 0; i < dev_priv->num_fence_regs; i++)
+                       error->fence[i] = I915_READ(FENCE_REG(i));
+       } else if (IS_GEN5(dev) || IS_GEN4(dev)) {
+               for (i = 0; i < dev_priv->num_fence_regs; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               for (i = 0; i < dev_priv->num_fence_regs; i++)
+                       error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
+       }
+}
+
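+/*
+ * On GEN8+ the semaphore mailboxes live in a shared GGTT page; snapshot that
+ * page once and record the values signalled to this engine by its peers.
+ */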
+static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
+                                       struct drm_i915_error_state *error,
+                                       struct intel_engine_cs *ring,
+                                       struct drm_i915_error_ring *ering)
+{
+       struct intel_engine_cs *to;
+       int i;
+
+       if (!i915_semaphore_is_enabled(dev_priv->dev))
+               return;
+
+       if (!error->semaphore_obj)
+               error->semaphore_obj =
+                       i915_error_ggtt_object_create(dev_priv,
+                                                     dev_priv->semaphore_obj);
+
+       for_each_ring(to, dev_priv, i) {
+               int idx;
+               u16 signal_offset;
+               u32 *tmp;
+
+               if (ring == to)
+                       continue;
+
+               signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1))
+                               / 4;
+               tmp = error->semaphore_obj->pages[0];
+               idx = intel_ring_sync_index(ring, to);
+
+               ering->semaphore_mboxes[idx] = tmp[signal_offset];
+               ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx];
+       }
+}
+
+static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
+                                       struct intel_engine_cs *ring,
+                                       struct drm_i915_error_ring *ering)
+{
+       ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base));
+       ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base));
+       ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0];
+       ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1];
+
+       if (HAS_VEBOX(dev_priv->dev)) {
+               ering->semaphore_mboxes[2] =
+                       I915_READ(RING_SYNC_2(ring->mmio_base));
+               ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2];
+       }
+}
+
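+/*
+ * Capture the per-engine MMIO state at the time of the hang: fault and
+ * instruction registers, ring head/tail/ctl, HWSP address and, with PPGTT,
+ * the page-directory registers.
+ */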
+static void i915_record_ring_state(struct drm_device *dev,
+                                  struct drm_i915_error_state *error,
+                                  struct intel_engine_cs *ring,
+                                  struct drm_i915_error_ring *ering)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base));
+               ering->fault_reg = I915_READ(RING_FAULT_REG(ring));
+               if (INTEL_INFO(dev)->gen >= 8)
+                       gen8_record_semaphore_state(dev_priv, error, ring, ering);
+               else
+                       gen6_record_semaphore_state(dev_priv, ring, ering);
+       }
+
+       if (INTEL_INFO(dev)->gen >= 4) {
+               ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base));
+               ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base));
+               ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
+               ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base));
+               ering->instps = I915_READ(RING_INSTPS(ring->mmio_base));
+               ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base));
+               if (INTEL_INFO(dev)->gen >= 8) {
+                       ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32;
+                       ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32;
+               }
+               ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base));
+       } else {
+               ering->faddr = I915_READ(DMA_FADD_I8XX);
+               ering->ipeir = I915_READ(IPEIR);
+               ering->ipehr = I915_READ(IPEHR);
+               ering->instdone = I915_READ(GEN2_INSTDONE);
+       }
+
+       ering->waiting = waitqueue_active(&ring->irq_queue);
+       ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base));
+       ering->seqno = ring->get_seqno(ring, false);
+       ering->acthd = intel_ring_get_active_head(ring);
+       ering->start = I915_READ_START(ring);
+       ering->head = I915_READ_HEAD(ring);
+       ering->tail = I915_READ_TAIL(ring);
+       ering->ctl = I915_READ_CTL(ring);
+
+       if (I915_NEED_GFX_HWS(dev)) {
+               i915_reg_t mmio;
+
+               if (IS_GEN7(dev)) {
+                       switch (ring->id) {
+                       default:
+                       case RCS:
+                               mmio = RENDER_HWS_PGA_GEN7;
+                               break;
+                       case BCS:
+                               mmio = BLT_HWS_PGA_GEN7;
+                               break;
+                       case VCS:
+                               mmio = BSD_HWS_PGA_GEN7;
+                               break;
+                       case VECS:
+                               mmio = VEBOX_HWS_PGA_GEN7;
+                               break;
+                       }
+               } else if (IS_GEN6(ring->dev)) {
+                       mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+               } else {
+                       /* XXX: gen8 returns to sanity */
+                       mmio = RING_HWS_PGA(ring->mmio_base);
+               }
+
+               ering->hws = I915_READ(mmio);
+       }
+
+       ering->hangcheck_score = ring->hangcheck.score;
+       ering->hangcheck_action = ring->hangcheck.action;
+
+       if (USES_PPGTT(dev)) {
+               int i;
+
+               ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
+
+               if (IS_GEN6(dev))
+                       ering->vm_info.pp_dir_base =
+                               I915_READ(RING_PP_DIR_BASE_READ(ring));
+               else if (IS_GEN7(dev))
+                       ering->vm_info.pp_dir_base =
+                               I915_READ(RING_PP_DIR_BASE(ring));
+               else if (INTEL_INFO(dev)->gen >= 8)
+                       for (i = 0; i < 4; i++) {
+                               ering->vm_info.pdp[i] =
+                                       I915_READ(GEN8_RING_PDP_UDW(ring, i));
+                               ering->vm_info.pdp[i] <<= 32;
+                               ering->vm_info.pdp[i] |=
+                                       I915_READ(GEN8_RING_PDP_LDW(ring, i));
+                       }
+       }
+}
+
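+/* Find and snapshot the HW context object matching the logged CCID. */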
+static void i915_gem_record_active_context(struct intel_engine_cs *ring,
+                                          struct drm_i915_error_state *error,
+                                          struct drm_i915_error_ring *ering)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct drm_i915_gem_object *obj;
+
+       /* Currently render ring is the only HW context user */
+       if (ring->id != RCS || !error->ccid)
+               return;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               if (!i915_gem_obj_ggtt_bound(obj))
+                       continue;
+
+               if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+                       ering->ctx = i915_error_ggtt_object_create(dev_priv, obj);
+                       break;
+               }
+       }
+}
+
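+/*
+ * For each engine: record its register state, copy the active batch, the
+ * ring buffer, status page and context image, and list pending requests.
+ */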
+static void i915_gem_record_rings(struct drm_device *dev,
+                                 struct drm_i915_error_state *error)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *request;
+       int i, count;
+
+       for (i = 0; i < I915_NUM_RINGS; i++) {
+               struct intel_engine_cs *ring = &dev_priv->ring[i];
+               struct intel_ringbuffer *rbuf;
+
+               error->ring[i].pid = -1;
+
+               if (ring->dev == NULL)
+                       continue;
+
+               error->ring[i].valid = true;
+
+               i915_record_ring_state(dev, error, ring, &error->ring[i]);
+
+               request = i915_gem_find_active_request(ring);
+               if (request) {
+                       struct i915_address_space *vm;
+
+                       vm = request->ctx && request->ctx->ppgtt ?
+                               &request->ctx->ppgtt->base :
+                               &dev_priv->gtt.base;
+
+                       /* We need to copy these to an anonymous buffer
+                        * as the simplest method to avoid being overwritten
+                        * by userspace.
+                        */
+                       error->ring[i].batchbuffer =
+                               i915_error_object_create(dev_priv,
+                                                        request->batch_obj,
+                                                        vm);
+
+                       if (HAS_BROKEN_CS_TLB(dev_priv->dev))
+                               error->ring[i].wa_batchbuffer =
+                                       i915_error_ggtt_object_create(dev_priv,
+                                                            ring->scratch.obj);
+
+                       if (request->pid) {
+                               struct task_struct *task;
+
+                               rcu_read_lock();
+                               task = pid_task(request->pid, PIDTYPE_PID);
+                               if (task) {
+                                       strcpy(error->ring[i].comm, task->comm);
+                                       error->ring[i].pid = task->pid;
+                               }
+                               rcu_read_unlock();
+                       }
+               }
+
+               if (i915.enable_execlists) {
+                       /* TODO: This is only a small fix to keep basic error
+                        * capture working, but we need to add more information
+                        * for it to be useful (e.g. dump the context being
+                        * executed).
+                        */
+                       if (request)
+                               rbuf = request->ctx->engine[ring->id].ringbuf;
+                       else
+                               rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
+               } else
+                       rbuf = ring->buffer;
+
+               error->ring[i].cpu_ring_head = rbuf->head;
+               error->ring[i].cpu_ring_tail = rbuf->tail;
+
+               error->ring[i].ringbuffer =
+                       i915_error_ggtt_object_create(dev_priv, rbuf->obj);
+
+               error->ring[i].hws_page =
+                       i915_error_ggtt_object_create(dev_priv, ring->status_page.obj);
+
+               i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list)
+                       count++;
+
+               error->ring[i].num_requests = count;
+               error->ring[i].requests =
+                       kcalloc(count, sizeof(*error->ring[i].requests),
+                               GFP_ATOMIC);
+               if (error->ring[i].requests == NULL) {
+                       error->ring[i].num_requests = 0;
+                       continue;
+               }
+
+               count = 0;
+               list_for_each_entry(request, &ring->request_list, list) {
+                       struct drm_i915_error_request *erq;
+
+                       if (count >= error->ring[i].num_requests) {
+                               /*
+                                * If the ring request list was changed in
+                                * between the point where the error request
+                                * list was created and dimensioned and this
+                                * point then just exit early to avoid crashes.
+                                *
+                                * We don't need to communicate that the
+                                * request list changed state during error
+                                * state capture and that the error state is
+                                * slightly incorrect as a consequence since we
+                                * are typically only interested in the request
+                                * list state at the point of error state
+                                * capture, not in any changes happening during
+                                * the capture.
+                                */
+                               break;
+                       }
+
+                       erq = &error->ring[i].requests[count++];
+                       erq->seqno = request->seqno;
+                       erq->jiffies = request->emitted_jiffies;
+                       erq->tail = request->postfix;
+               }
+       }
+}
+
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+                               struct drm_i915_error_state *error,
+                               struct i915_address_space *vm,
+                               const int ndx)
+{
+       struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int i;
+
+       i = 0;
+       list_for_each_entry(vma, &vm->active_list, vm_link)
+               i++;
+       error->active_bo_count[ndx] = i;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               list_for_each_entry(vma, &obj->vma_list, obj_link)
+                       if (vma->vm == vm && vma->pin_count > 0)
+                               i++;
+       }
+       error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
+
+       if (i) {
+               active_bo = kcalloc(i, sizeof(*active_bo), GFP_ATOMIC);
+               if (active_bo)
+                       pinned_bo = active_bo + error->active_bo_count[ndx];
+       }
+
+       if (active_bo)
+               error->active_bo_count[ndx] =
+                       capture_active_bo(active_bo,
+                                         error->active_bo_count[ndx],
+                                         &vm->active_list);
+
+       if (pinned_bo)
+               error->pinned_bo_count[ndx] =
+                       capture_pinned_bo(pinned_bo,
+                                         error->pinned_bo_count[ndx],
+                                         &dev_priv->mm.bound_list, vm);
+       error->active_bo[ndx] = active_bo;
+       error->pinned_bo[ndx] = pinned_bo;
+}
+
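+/* Allocate the per-VM BO arrays and capture every VM's active/pinned buffers. */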
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+                                    struct drm_i915_error_state *error)
+{
+       struct i915_address_space *vm;
+       int cnt = 0, i = 0;
+
+       list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+               cnt++;
+
+       error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+       error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+       error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+                                        GFP_ATOMIC);
+       error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+                                        GFP_ATOMIC);
+
+       if (error->active_bo == NULL ||
+           error->pinned_bo == NULL ||
+           error->active_bo_count == NULL ||
+           error->pinned_bo_count == NULL) {
+               kfree(error->active_bo);
+               kfree(error->active_bo_count);
+               kfree(error->pinned_bo);
+               kfree(error->pinned_bo_count);
+
+               error->active_bo = NULL;
+               error->active_bo_count = NULL;
+               error->pinned_bo = NULL;
+               error->pinned_bo_count = NULL;
+       } else {
+               list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+                       i915_gem_capture_vm(dev_priv, error, vm, i++);
+
+               error->vm_count = cnt;
+       }
+}
+
+/* Capture all registers which don't fit into another category. */
+static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
+                                  struct drm_i915_error_state *error)
+{
+       struct drm_device *dev = dev_priv->dev;
+       int i;
+
+       /* General organization
+        * 1. Registers specific to a single generation
+        * 2. Registers which belong to multiple generations
+        * 3. Feature specific registers.
+        * 4. Everything else
+        * Please try to follow the order.
+        */
+
+       /* 1: Registers specific to a single generation */
+       if (IS_VALLEYVIEW(dev)) {
+               error->gtier[0] = I915_READ(GTIER);
+               error->ier = I915_READ(VLV_IER);
+               error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
+       }
+
+       if (IS_GEN7(dev))
+               error->err_int = I915_READ(GEN7_ERR_INT);
+
+       if (INTEL_INFO(dev)->gen >= 8) {
+               error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
+               error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
+       }
+
+       if (IS_GEN6(dev)) {
+               error->forcewake = I915_READ_FW(FORCEWAKE);
+               error->gab_ctl = I915_READ(GAB_CTL);
+               error->gfx_mode = I915_READ(GFX_MODE);
+       }
+
+       /* 2: Registers which belong to multiple generations */
+       if (INTEL_INFO(dev)->gen >= 7)
+               error->forcewake = I915_READ_FW(FORCEWAKE_MT);
+
+       if (INTEL_INFO(dev)->gen >= 6) {
+               error->derrmr = I915_READ(DERRMR);
+               error->error = I915_READ(ERROR_GEN6);
+               error->done_reg = I915_READ(DONE_REG);
+       }
+
+       /* 3: Feature specific registers */
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+               error->gam_ecochk = I915_READ(GAM_ECOCHK);
+               error->gac_eco = I915_READ(GAC_ECO_BITS);
+       }
+
+       /* 4: Everything else */
+       if (HAS_HW_CONTEXTS(dev))
+               error->ccid = I915_READ(CCID);
+
+       if (INTEL_INFO(dev)->gen >= 8) {
+               error->ier = I915_READ(GEN8_DE_MISC_IER);
+               for (i = 0; i < 4; i++)
+                       error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+       } else if (HAS_PCH_SPLIT(dev)) {
+               error->ier = I915_READ(DEIER);
+               error->gtier[0] = I915_READ(GTIER);
+       } else if (IS_GEN2(dev)) {
+               error->ier = I915_READ16(IER);
+       } else if (!IS_VALLEYVIEW(dev)) {
+               error->ier = I915_READ(IER);
+       }
+       error->eir = I915_READ(EIR);
+       error->pgtbl_er = I915_READ(PGTBL_ER);
+
+       i915_get_extra_instdone(dev, error->extra_instdone);
+}
+
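+/* Compose the one-line "GPU HANG" summary that is logged with the capture. */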
+static void i915_error_capture_msg(struct drm_device *dev,
+                                  struct drm_i915_error_state *error,
+                                  bool wedged,
+                                  const char *error_msg)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 ecode;
+       int ring_id = -1, len;
+
+       ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+
+       len = scnprintf(error->error_msg, sizeof(error->error_msg),
+                       "GPU HANG: ecode %d:%d:0x%08x",
+                       INTEL_INFO(dev)->gen, ring_id, ecode);
+
+       if (ring_id != -1 && error->ring[ring_id].pid != -1)
+               len += scnprintf(error->error_msg + len,
+                                sizeof(error->error_msg) - len,
+                                ", in %s [%d]",
+                                error->ring[ring_id].comm,
+                                error->ring[ring_id].pid);
+
+       scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
+                 ", reason: %s, action: %s",
+                 error_msg,
+                 wedged ? "reset" : "continue");
+}
+
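+/* Record driver-wide bookkeeping: IOMMU usage, reset and suspend counts. */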
+static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
+                                  struct drm_i915_error_state *error)
+{
+       error->iommu = -1;
+#ifdef CONFIG_INTEL_IOMMU
+       error->iommu = intel_iommu_gfx_mapped;
+#endif
+       error->reset_count = i915_reset_count(&dev_priv->gpu_error);
+       error->suspend_count = dev_priv->suspend_count;
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error.  Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev, bool wedged,
+                             const char *error_msg)
+{
+       static bool warned;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+       unsigned long flags;
+
+       /* Account for pipe specific data like PIPE*STAT */
+       error = kzalloc(sizeof(*error), GFP_ATOMIC);
+       if (!error) {
+               DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               return;
+       }
+
+       kref_init(&error->ref);
+
+       i915_capture_gen_state(dev_priv, error);
+       i915_capture_reg_state(dev_priv, error);
+       i915_gem_capture_buffers(dev_priv, error);
+       i915_gem_record_fences(dev, error);
+       i915_gem_record_rings(dev, error);
+
+       do_gettimeofday(&error->time);
+
+       error->overlay = intel_overlay_capture_error_state(dev);
+       error->display = intel_display_capture_error_state(dev);
+
+       i915_error_capture_msg(dev, error, wedged, error_msg);
+       DRM_INFO("%s\n", error->error_msg);
+
+       spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+       if (dev_priv->gpu_error.first_error == NULL) {
+               dev_priv->gpu_error.first_error = error;
+               error = NULL;
+       }
+       spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+       if (error) {
+               i915_error_state_free(&error->ref);
+               return;
+       }
+
+       if (!warned) {
+               DRM_INFO("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
+               DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+               DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
+               DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
+               DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index);
+               warned = true;
+       }
+}
+
+void i915_error_state_get(struct drm_device *dev,
+                         struct i915_error_state_file_priv *error_priv)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       spin_lock_irq(&dev_priv->gpu_error.lock);
+       error_priv->error = dev_priv->gpu_error.first_error;
+       if (error_priv->error)
+               kref_get(&error_priv->error->ref);
+       spin_unlock_irq(&dev_priv->gpu_error.lock);
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+       if (error_priv->error)
+               kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_error_state *error;
+
+       spin_lock_irq(&dev_priv->gpu_error.lock);
+       error = dev_priv->gpu_error.first_error;
+       dev_priv->gpu_error.first_error = NULL;
+       spin_unlock_irq(&dev_priv->gpu_error.lock);
+
+       if (error)
+               kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
+{
+       switch (type) {
+       case I915_CACHE_NONE: return " uncached";
+       case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
+       case I915_CACHE_L3_LLC: return " L3+LLC";
+       case I915_CACHE_WT: return " WT";
+       default: return "";
+       }
+}
+#endif
+
+/* NB: please notice the memset */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+       if (IS_GEN2(dev) || IS_GEN3(dev))
+               instdone[0] = I915_READ(GEN2_INSTDONE);
+       else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
+               instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
+               instdone[1] = I915_READ(GEN4_INSTDONE1);
+       } else if (INTEL_INFO(dev)->gen >= 7) {
+               instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
+               instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+               instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+               instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+       }
+}
index 685c799..e4ba582 100644 (file)
@@ -40,6 +40,7 @@
 #define   GS_MIA_CORE_STATE              (1 << GS_MIA_SHIFT)
 
 #define SOFT_SCRATCH(n)                        _MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT             16
 
 #define UOS_RSA_SCRATCH(i)             _MMIO(0xc200 + (i) * 4)
 #define   UOS_RSA_SCRATCH_MAX_COUNT      64
index 5935d67..4028b16 100644 (file)
@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 
        data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6(dev_priv->dev) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
-           (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
-           (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+       if (!intel_enable_rc6(dev) ||
+           NEEDS_WaRsDisableCoarsePowerGating(dev))
                data[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                        db_exc.cookie = 1;
        }
 
+       /* Finally, update the cached copy of the GuC's WQ head */
+       gc->wq_head = desc->head;
+
        kunmap_atomic(base);
        return ret;
 }
@@ -375,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct intel_engine_cs *ring;