drm/i915: Update to Linux 4.1
author François Tigeot <ftigeot@wolfpond.org>
Sat, 23 Jan 2016 17:26:28 +0000 (18:26 +0100)
committer François Tigeot <ftigeot@wolfpond.org>
Sat, 23 Jan 2016 17:33:58 +0000 (18:33 +0100)
* Valleyview support has been vastly improved and is no longer considered
  preliminary.

* Skylake support improvements: runtime power management, turbo and sleep
  states should now be fully operational.
  Many workarounds have been added for Skylake-specific issues.

* Preliminary changes to prepare for Broxton (future Atom SoCs) support.

* Distinguish hardware minimum and user minimum frequencies. Set the GPU
  frequency to the hardware minimum on idle in order to reduce power usage
  (see the sketch below).
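
  A minimal sketch of the frequency-limit split, using hypothetical field
  and function names rather than the exact i915 structures:

      /* The user-visible minimum can be raised above the hardware floor,
       * but idling always drops to the hardware minimum: no work is
       * queued, so the lowest frequency is safe and saves the most
       * power. */
      struct rps_limits {
              unsigned int min_freq;           /* hardware minimum */
              unsigned int max_freq;           /* hardware maximum */
              unsigned int min_freq_softlimit; /* user-requested minimum */
              unsigned int cur_freq;
      };

      static void rps_set_idle(struct rps_limits *rps)
      {
              rps->cur_freq = rps->min_freq;  /* ignore the user softlimit */
      }

      static void rps_set_busy(struct rps_limits *rps, unsigned int freq)
      {
              /* Normal operation clamps to the user-visible limits. */
              if (freq < rps->min_freq_softlimit)
                      freq = rps->min_freq_softlimit;
              if (freq > rps->max_freq)
                      freq = rps->max_freq;
              rps->cur_freq = freq;
      }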

* DRRS (dynamic refresh rate switching) is now enabled where supported.
  The idea is to reduce the refresh rate of the panel to save power when
  nothing changes on the screen (sketched below).
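
  The DRRS logic can be pictured like this; a conceptual sketch with
  hypothetical names, keyed off the driver's frontbuffer tracking:

      #include <stdbool.h>

      enum refresh_rate { DRRS_HIGH_RR, DRRS_LOW_RR };

      struct drrs_state {
              enum refresh_rate rate;
              bool has_downclock_mode;  /* panel offers a low-RR mode */
              unsigned long busy_frontbuffer_bits;
      };

      /* Screen content is about to change: restore the full rate. */
      static void drrs_invalidate(struct drrs_state *drrs, unsigned long bits)
      {
              drrs->busy_frontbuffer_bits |= bits;
              drrs->rate = DRRS_HIGH_RR;
      }

      /* Rendering has flushed out: once every tracked frontbuffer is
       * idle, drop to the panel's downclocked mode. */
      static void drrs_flush(struct drrs_state *drrs, unsigned long bits)
      {
              drrs->busy_frontbuffer_bits &= ~bits;
              if (drrs->has_downclock_mode && !drrs->busy_frontbuffer_bits)
                      drrs->rate = DRRS_LOW_RR;
      }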

* DP deadlock bugfixes and improved link rate computation, with
  intermediate link rate support for eDP 1.4 (see the sketch below).
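
  Link rate selection can be sketched as follows (illustrative only, not
  the exact intel_dp.c code): compute the bandwidth the mode needs and
  pick the cheapest configuration that covers it. On eDP 1.4 the
  candidate list may include sink-reported intermediate rates instead of
  just the standard 1.62/2.7/5.4 GHz ones.

      #include <stdbool.h>

      /* Rates are in kHz of symbol clock; with 8b/10b coding each lane
       * carries one data byte per link symbol. */
      static bool link_fits(int link_rate_khz, int lanes,
                            int pixel_clock_khz, int bpp)
      {
              long long available = (long long)link_rate_khz * lanes;
              long long required = (long long)pixel_clock_khz * bpp / 8;
              return available >= required;
      }

      /* rates_khz[] is sorted ascending; return the lowest rate that
       * fits, preferring fewer lanes at a given rate to save power. */
      static int pick_link_config(const int *rates_khz, int num_rates,
                                  int max_lanes, int pixel_clock_khz,
                                  int bpp, int *out_lanes)
      {
              int r, lanes;

              for (r = 0; r < num_rates; r++)
                      for (lanes = 1; lanes <= max_lanes; lanes <<= 1)
                              if (link_fits(rates_khz[r], lanes,
                                            pixel_clock_khz, bpp)) {
                                      *out_lanes = lanes;
                                      return rates_khz[r];
                              }
              return -1;  /* mode does not fit on this link */
      }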

* XenGT client-side support. This is paravirtualization allowing virtual
  machines to tap into the render engines (a detection sketch follows
  below).
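
  Guest-side detection amounts to probing for a host-provided signature
  early during driver load; roughly like this, with hypothetical names
  (the real code lives in the new i915_vgpu.c):

      #include <stdbool.h>
      #include <stdint.h>

      /* Placeholder value: the host writes a known magic into a shared
       * page mapped in MMIO space. */
      #define VGPU_MAGIC 0x4776544776544776ULL

      struct vgpu_shared_page {
              uint64_t magic;    /* written by the host */
              uint16_t version;
      };

      /* When the magic checks out, the driver knows it runs inside a
       * virtual machine and enables the paravirtualized paths (for
       * instance a "ballooned" GGTT, where the host reserves ranges of
       * the global GTT for other guests). */
      static bool intel_vgpu_detect(const struct vgpu_shared_page *sp)
      {
              return sp->magic == VGPU_MAGIC;
      }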

* Plenty of internal work to prepare for atomic mode setting (the new
  state-iteration pattern is sketched below).
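
  Much of the churn in the diff below replaces open-coded walks over
  state->crtcs[] / state->crtc_states[] (and their plane and connector
  equivalents) with iteration macros. Their shape at the time was
  roughly:

      #define for_each_crtc_in_state(state, crtc, crtc_state, i)       \
              for ((i) = 0;                                            \
                   (i) < (state)->dev->mode_config.num_crtc &&         \
                   ((crtc) = (state)->crtcs[i],                        \
                    (crtc_state) = (state)->crtc_states[i], 1);        \
                   (i)++)                                              \
                      if (crtc)

  The trailing "if (crtc)" is what lets the converted loops drop their
  explicit "if (!crtc) continue;" checks.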

* Lots of other smaller work all over, such as added documentation, dead
  UMS code removal, vblank interrupt cleanups, etc.

87 files changed:
sys/conf/files
sys/dev/drm/drm_atomic.c
sys/dev/drm/drm_atomic_helper.c
sys/dev/drm/drm_cache.c
sys/dev/drm/drm_crtc.c
sys/dev/drm/drm_crtc_helper.c
sys/dev/drm/drm_dp_helper.c
sys/dev/drm/drm_dp_mst_topology.c
sys/dev/drm/drm_fb_helper.c
sys/dev/drm/drm_irq.c
sys/dev/drm/drm_modes.c
sys/dev/drm/drm_pci.c
sys/dev/drm/drm_plane_helper.c
sys/dev/drm/drm_probe_helper.c
sys/dev/drm/i915/Makefile
sys/dev/drm/i915/i915_cmd_parser.c
sys/dev/drm/i915/i915_dma.c
sys/dev/drm/i915/i915_drv.c
sys/dev/drm/i915/i915_drv.h
sys/dev/drm/i915/i915_gem.c
sys/dev/drm/i915/i915_gem_context.c
sys/dev/drm/i915/i915_gem_evict.c
sys/dev/drm/i915/i915_gem_execbuffer.c
sys/dev/drm/i915/i915_gem_gtt.c
sys/dev/drm/i915/i915_gem_gtt.h
sys/dev/drm/i915/i915_gem_shrinker.c [new file with mode: 0644]
sys/dev/drm/i915/i915_gem_stolen.c
sys/dev/drm/i915/i915_irq.c
sys/dev/drm/i915/i915_params.c
sys/dev/drm/i915/i915_reg.h
sys/dev/drm/i915/i915_suspend.c
sys/dev/drm/i915/i915_sysfs.c [new file with mode: 0644]
sys/dev/drm/i915/i915_trace.h
sys/dev/drm/i915/i915_ums.c [deleted file]
sys/dev/drm/i915/i915_vgpu.c [new file with mode: 0644]
sys/dev/drm/i915/i915_vgpu.h [new file with mode: 0644]
sys/dev/drm/i915/intel_atomic.c
sys/dev/drm/i915/intel_atomic_plane.c
sys/dev/drm/i915/intel_bios.c
sys/dev/drm/i915/intel_bios.h
sys/dev/drm/i915/intel_crt.c
sys/dev/drm/i915/intel_ddi.c
sys/dev/drm/i915/intel_display.c
sys/dev/drm/i915/intel_dp.c
sys/dev/drm/i915/intel_dp_mst.c
sys/dev/drm/i915/intel_drv.h
sys/dev/drm/i915/intel_dsi.c
sys/dev/drm/i915/intel_dsi_cmd.h
sys/dev/drm/i915/intel_dvo.c
sys/dev/drm/i915/intel_fbc.c
sys/dev/drm/i915/intel_fbdev.c
sys/dev/drm/i915/intel_frontbuffer.c
sys/dev/drm/i915/intel_hdmi.c
sys/dev/drm/i915/intel_i2c.c
sys/dev/drm/i915/intel_lrc.c
sys/dev/drm/i915/intel_lrc.h
sys/dev/drm/i915/intel_lvds.c
sys/dev/drm/i915/intel_opregion.c
sys/dev/drm/i915/intel_overlay.c
sys/dev/drm/i915/intel_panel.c
sys/dev/drm/i915/intel_pm.c
sys/dev/drm/i915/intel_psr.c
sys/dev/drm/i915/intel_ringbuffer.c
sys/dev/drm/i915/intel_ringbuffer.h
sys/dev/drm/i915/intel_runtime_pm.c
sys/dev/drm/i915/intel_sdvo.c
sys/dev/drm/i915/intel_sprite.c
sys/dev/drm/i915/intel_tv.c
sys/dev/drm/i915/intel_uncore.c
sys/dev/drm/include/drm/drmP.h
sys/dev/drm/include/drm/drm_atomic.h
sys/dev/drm/include/drm/drm_atomic_helper.h
sys/dev/drm/include/drm/drm_crtc.h
sys/dev/drm/include/drm/drm_crtc_helper.h
sys/dev/drm/include/drm/drm_dp_helper.h
sys/dev/drm/include/drm/drm_dp_mst_helper.h
sys/dev/drm/include/drm/drm_edid.h
sys/dev/drm/include/drm/drm_fb_helper.h
sys/dev/drm/include/drm/drm_modes.h
sys/dev/drm/include/drm/drm_panel.h
sys/dev/drm/include/drm/drm_plane_helper.h
sys/dev/drm/include/drm/i915_pciids.h
sys/dev/drm/include/linux/kernel.h
sys/dev/drm/include/uapi_drm/drm.h
sys/dev/drm/include/uapi_drm/drm_fourcc.h
sys/dev/drm/include/uapi_drm/drm_mode.h
sys/dev/drm/include/uapi_drm/i915_drm.h

index d937c8e..33b6a44 100644
@@ -2045,12 +2045,14 @@ dev/drm/i915/i915_gem_evict.c           optional i915 drm
 dev/drm/i915/i915_gem_gtt.c            optional i915 drm
 dev/drm/i915/i915_gem_stolen.c         optional i915 drm
 dev/drm/i915/i915_gem_render_state.c   optional i915 drm
+dev/drm/i915/i915_gem_shrinker.c       optional i915 drm
 dev/drm/i915/i915_gem_tiling.c         optional i915 drm
 dev/drm/i915/i915_gem_userptr.c                optional i915 drm
 dev/drm/i915/i915_irq.c                        optional i915 drm
 dev/drm/i915/i915_params.c             optional i915 drm
 dev/drm/i915/i915_suspend.c            optional i915 drm
-dev/drm/i915/i915_ums.c                        optional i915 drm
+dev/drm/i915/i915_sysfs.c              optional i915 drm
+dev/drm/i915/i915_vgpu.c               optional i915 drm
 dev/drm/i915/intel_acpi.c              optional i915 drm
 dev/drm/i915/intel_atomic.c            optional i915 drm
 dev/drm/i915/intel_atomic_plane.c      optional i915 drm
index aba362e..387bafd 100644
@@ -94,7 +94,7 @@ drm_atomic_state_alloc(struct drm_device *dev)
 
        state->dev = dev;
 
-       DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
+       DRM_DEBUG_ATOMIC("Allocate atomic state %p\n", state);
 
        return state;
 fail:
@@ -124,7 +124,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
        struct drm_mode_config *config = &dev->mode_config;
        int i;
 
-       DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
+       DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
 
        for (i = 0; i < state->num_connector; i++) {
                struct drm_connector *connector = state->connectors[i];
@@ -136,6 +136,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 
                connector->funcs->atomic_destroy_state(connector,
                                                       state->connector_states[i]);
+               state->connectors[i] = NULL;
                state->connector_states[i] = NULL;
        }
 
@@ -147,6 +148,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 
                crtc->funcs->atomic_destroy_state(crtc,
                                                  state->crtc_states[i]);
+               state->crtcs[i] = NULL;
                state->crtc_states[i] = NULL;
        }
 
@@ -158,6 +160,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
 
                plane->funcs->atomic_destroy_state(plane,
                                                   state->plane_states[i]);
+               state->planes[i] = NULL;
                state->plane_states[i] = NULL;
        }
 }
@@ -172,9 +175,12 @@ EXPORT_SYMBOL(drm_atomic_state_clear);
  */
 void drm_atomic_state_free(struct drm_atomic_state *state)
 {
+       if (!state)
+               return;
+
        drm_atomic_state_clear(state);
 
-       DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
+       DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state);
 
        kfree_state(state);
 }
@@ -219,8 +225,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
        state->crtcs[index] = crtc;
        crtc_state->state = state;
 
-       DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
-                     crtc->base.id, crtc_state, state);
+       DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n",
+                        crtc->base.id, crtc_state, state);
 
        return crtc_state;
 }
@@ -250,11 +256,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
        struct drm_mode_config *config = &dev->mode_config;
 
        /* FIXME: Mode prop is missing, which also controls ->enable. */
-       if (property == config->prop_active) {
+       if (property == config->prop_active)
                state->active = val;
-       else if (crtc->funcs->atomic_set_property)
+       else if (crtc->funcs->atomic_set_property)
                return crtc->funcs->atomic_set_property(crtc, state, property, val);
-       return -EINVAL;
+       else
+               return -EINVAL;
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_atomic_crtc_set_property);
 
@@ -268,9 +277,17 @@ static int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
                const struct drm_crtc_state *state,
                struct drm_property *property, uint64_t *val)
 {
-       if (crtc->funcs->atomic_get_property)
+       struct drm_device *dev = crtc->dev;
+       struct drm_mode_config *config = &dev->mode_config;
+
+       if (property == config->prop_active)
+               *val = state->active;
+       else if (crtc->funcs->atomic_get_property)
                return crtc->funcs->atomic_get_property(crtc, state, property, val);
-       return -EINVAL;
+       else
+               return -EINVAL;
+
+       return 0;
 }
 
 /**
@@ -295,8 +312,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
         */
 
        if (state->active && !state->enable) {
-               DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n",
-                             crtc->base.id);
+               DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n",
+                                crtc->base.id);
                return -EINVAL;
        }
 
@@ -342,8 +359,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
        state->planes[index] = plane;
        plane_state->state = state;
 
-       DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
-                     plane->base.id, plane_state, state);
+       DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n",
+                        plane->base.id, plane_state, state);
 
        if (plane_state->crtc) {
                struct drm_crtc_state *crtc_state;
@@ -452,6 +469,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
                *val = state->src_w;
        } else if (property == config->prop_src_h) {
                *val = state->src_h;
+       } else if (property == config->rotation_property) {
+               *val = state->rotation;
        } else if (plane->funcs->atomic_get_property) {
                return plane->funcs->atomic_get_property(plane, state, property, val);
        } else {
@@ -475,14 +494,14 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
                struct drm_plane_state *state)
 {
        unsigned int fb_width, fb_height;
-       unsigned int i;
+       int ret;
 
        /* either *both* CRTC and FB must be set, or neither */
        if (WARN_ON(state->crtc && !state->fb)) {
-               DRM_DEBUG_KMS("CRTC set but no FB\n");
+               DRM_DEBUG_ATOMIC("CRTC set but no FB\n");
                return -EINVAL;
        } else if (WARN_ON(state->fb && !state->crtc)) {
-               DRM_DEBUG_KMS("FB set but no CRTC\n");
+               DRM_DEBUG_ATOMIC("FB set but no CRTC\n");
                return -EINVAL;
        }
 
@@ -492,18 +511,16 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
 
        /* Check whether this plane is usable on this CRTC */
        if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
-               DRM_DEBUG_KMS("Invalid crtc for plane\n");
+               DRM_DEBUG_ATOMIC("Invalid crtc for plane\n");
                return -EINVAL;
        }
 
        /* Check whether this plane supports the fb pixel format. */
-       for (i = 0; i < plane->format_count; i++)
-               if (state->fb->pixel_format == plane->format_types[i])
-                       break;
-       if (i == plane->format_count) {
-               DRM_DEBUG_KMS("Invalid pixel format %s\n",
-                             drm_get_format_name(state->fb->pixel_format));
-               return -EINVAL;
+       ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format);
+       if (ret) {
+               DRM_DEBUG_ATOMIC("Invalid pixel format %s\n",
+                                drm_get_format_name(state->fb->pixel_format));
+               return ret;
        }
 
        /* Give drivers some help against integer overflows */
@@ -511,9 +528,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
            state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
            state->crtc_h > INT_MAX ||
            state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
-               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                             state->crtc_w, state->crtc_h,
-                             state->crtc_x, state->crtc_y);
+               DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                                state->crtc_w, state->crtc_h,
+                                state->crtc_x, state->crtc_y);
                return -ERANGE;
        }
 
@@ -525,12 +542,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane,
            state->src_x > fb_width - state->src_w ||
            state->src_h > fb_height ||
            state->src_y > fb_height - state->src_h) {
-               DRM_DEBUG_KMS("Invalid source coordinates "
-                             "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
-                             state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
-                             state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
-                             state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
-                             state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
+               DRM_DEBUG_ATOMIC("Invalid source coordinates "
+                                "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+                                state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
+                                state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
+                                state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
+                                state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
                return -ENOSPC;
        }
 
@@ -577,7 +594,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
         * at most the array is a bit too large.
         */
        if (index >= state->num_connector) {
-               DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
+               DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
                return ERR_PTR(-EAGAIN);
        }
 
@@ -592,8 +609,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
        state->connectors[index] = connector;
        connector_state->state = state;
 
-       DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
-                     connector->base.id, connector_state, state);
+       DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
+                        connector->base.id, connector_state, state);
 
        if (connector_state->crtc) {
                struct drm_crtc_state *crtc_state;
@@ -754,17 +771,18 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
        }
 
        if (crtc)
-               DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
-                             plane_state, crtc->base.id);
+               DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n",
+                                plane_state, crtc->base.id);
        else
-               DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
+               DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n",
+                                plane_state);
 
        return 0;
 }
 EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
 
 /**
- * drm_atomic_set_fb_for_plane - set crtc for plane
+ * drm_atomic_set_fb_for_plane - set framebuffer for plane
  * @plane_state: atomic state object for the plane
  * @fb: fb to use for the plane
  *
@@ -784,10 +802,11 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
        plane_state->fb = fb;
 
        if (fb)
-               DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
-                             fb->base.id, plane_state);
+               DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n",
+                                fb->base.id, plane_state);
        else
-               DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
+               DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n",
+                                plane_state);
 }
 EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
 
@@ -820,11 +839,11 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
        conn_state->crtc = crtc;
 
        if (crtc)
-               DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
-                             conn_state, crtc->base.id);
+               DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n",
+                                conn_state, crtc->base.id);
        else
-               DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
-                             conn_state);
+               DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n",
+                                conn_state);
 
        return 0;
 }
@@ -860,8 +879,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
        if (ret)
                return ret;
 
-       DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
-                     crtc->base.id, state);
+       DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n",
+                        crtc->base.id, state);
 
        /*
         * Changed connectors are already in @state, so only need to look at the
@@ -892,19 +911,18 @@ int
 drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
                               struct drm_crtc *crtc)
 {
-       int i, num_connected_connectors = 0;
-
-       for (i = 0; i < state->num_connector; i++) {
-               struct drm_connector_state *conn_state;
+       struct drm_connector *connector;
+       struct drm_connector_state *conn_state;
 
-               conn_state = state->connector_states[i];
+       int i, num_connected_connectors = 0;
 
-               if (conn_state && conn_state->crtc == crtc)
+       for_each_connector_in_state(state, connector, conn_state, i) {
+               if (conn_state->crtc == crtc)
                        num_connected_connectors++;
        }
 
-       DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
-                     state, num_connected_connectors, crtc->base.id);
+       DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n",
+                        state, num_connected_connectors, crtc->base.id);
 
        return num_connected_connectors;
 }
@@ -916,7 +934,7 @@ EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
  *
  * This function should be used by legacy entry points which don't understand
  * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
- *  the slowpath completed.
+ * the slowpath completed.
  */
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
 {
@@ -951,36 +969,28 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 {
        struct drm_device *dev = state->dev;
        struct drm_mode_config *config = &dev->mode_config;
-       int nplanes = config->num_total_plane;
-       int ncrtcs = config->num_crtc;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
        int i, ret = 0;
 
-       DRM_DEBUG_KMS("checking %p\n", state);
-
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane *plane = state->planes[i];
+       DRM_DEBUG_ATOMIC("checking %p\n", state);
 
-               if (!plane)
-                       continue;
-
-               ret = drm_atomic_plane_check(plane, state->plane_states[i]);
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               ret = drm_atomic_plane_check(plane, plane_state);
                if (ret) {
-                       DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n",
-                                     plane->base.id);
+                       DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n",
+                                        plane->base.id);
                        return ret;
                }
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-
-               if (!crtc)
-                       continue;
-
-               ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               ret = drm_atomic_crtc_check(crtc, crtc_state);
                if (ret) {
-                       DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n",
+                                        crtc->base.id);
                        return ret;
                }
        }
@@ -989,17 +999,11 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
                ret = config->funcs->atomic_check(state->dev, state);
 
        if (!state->allow_modeset) {
-               for (i = 0; i < ncrtcs; i++) {
-                       struct drm_crtc *crtc = state->crtcs[i];
-                       struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
-                       if (!crtc)
-                               continue;
-
+               for_each_crtc_in_state(state, crtc, crtc_state, i) {
                        if (crtc_state->mode_changed ||
                            crtc_state->active_changed) {
-                               DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n",
-                                             crtc->base.id);
+                               DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
+                                                crtc->base.id);
                                return -EINVAL;
                        }
                }
@@ -1034,7 +1038,7 @@ int drm_atomic_commit(struct drm_atomic_state *state)
        if (ret)
                return ret;
 
-       DRM_DEBUG_KMS("commiting %p\n", state);
+       DRM_DEBUG_ATOMIC("commiting %p\n", state);
 
        return config->funcs->atomic_commit(state->dev, state, false);
 }
@@ -1065,7 +1069,7 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
        if (ret)
                return ret;
 
-       DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
+       DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state);
 
        return config->funcs->atomic_commit(state->dev, state, true);
 }
@@ -1191,6 +1195,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
        struct drm_plane *plane;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
        unsigned plane_mask = 0;
        int ret = 0;
        unsigned int i, j;
@@ -1294,15 +1300,9 @@ retry:
        }
 
        if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               int ncrtcs = dev->mode_config.num_crtc;
-
-               for (i = 0; i < ncrtcs; i++) {
-                       struct drm_crtc_state *crtc_state = state->crtc_states[i];
+               for_each_crtc_in_state(state, crtc, crtc_state, i) {
                        struct drm_pending_vblank_event *e;
 
-                       if (!crtc_state)
-                               continue;
-
                        e = create_vblank_event(dev, file_priv, arg->user_data);
                        if (!e) {
                                ret = -ENOMEM;
@@ -1354,14 +1354,7 @@ fail:
                goto backoff;
 
        if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               int ncrtcs = dev->mode_config.num_crtc;
-
-               for (i = 0; i < ncrtcs; i++) {
-                       struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
-                       if (!crtc_state)
-                               continue;
-
+               for_each_crtc_in_state(state, crtc, crtc_state, i) {
                        destroy_vblank_event(dev, file_priv, crtc_state->event);
                        crtc_state->event = NULL;
                }
index 52b104f..7646c54 100644
@@ -116,9 +116,9 @@ steal_encoder(struct drm_atomic_state *state,
         */
        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
-       DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
-                     encoder->base.id, encoder->name,
-                     encoder_crtc->base.id);
+       DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
+                        encoder->base.id, encoder->name,
+                        encoder_crtc->base.id);
 
        crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
        if (IS_ERR(crtc_state))
@@ -130,9 +130,9 @@ steal_encoder(struct drm_atomic_state *state,
                if (connector->state->best_encoder != encoder)
                        continue;
 
-               DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
-                             connector->base.id,
-                             connector->name);
+               DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n",
+                                connector->base.id,
+                                connector->name);
 
                connector_state = drm_atomic_get_connector_state(state,
                                                                 connector);
@@ -151,7 +151,7 @@ steal_encoder(struct drm_atomic_state *state,
 static int
 update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 {
-       struct drm_connector_helper_funcs *funcs;
+       const struct drm_connector_helper_funcs *funcs;
        struct drm_encoder *new_encoder;
        struct drm_crtc *encoder_crtc;
        struct drm_connector *connector;
@@ -165,9 +165,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        if (!connector)
                return 0;
 
-       DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
-                       connector->base.id,
-                       connector->name);
+       DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n",
+                        connector->base.id,
+                        connector->name);
 
        if (connector->state->crtc != connector_state->crtc) {
                if (connector->state->crtc) {
@@ -186,7 +186,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        }
 
        if (!connector_state->crtc) {
-               DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
+               DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n",
                                connector->base.id,
                                connector->name);
 
@@ -199,19 +199,19 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        new_encoder = funcs->best_encoder(connector);
 
        if (!new_encoder) {
-               DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
-                             connector->base.id,
-                             connector->name);
+               DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
+                                connector->base.id,
+                                connector->name);
                return -EINVAL;
        }
 
        if (new_encoder == connector_state->best_encoder) {
-               DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
-                             connector->base.id,
-                             connector->name,
-                             new_encoder->base.id,
-                             new_encoder->name,
-                             connector_state->crtc->base.id);
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
+                                connector->base.id,
+                                connector->name,
+                                new_encoder->base.id,
+                                new_encoder->name,
+                                connector_state->crtc->base.id);
 
                return 0;
        }
@@ -222,9 +222,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        if (encoder_crtc) {
                ret = steal_encoder(state, new_encoder, encoder_crtc);
                if (ret) {
-                       DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
-                                     connector->base.id,
-                                     connector->name);
+                       DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
+                                        connector->base.id,
+                                        connector->name);
                        return ret;
                }
        }
@@ -235,12 +235,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        crtc_state = state->crtc_states[idx];
        crtc_state->mode_changed = true;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
-                     connector->base.id,
-                     connector->name,
-                     new_encoder->base.id,
-                     new_encoder->name,
-                     connector_state->crtc->base.id);
+       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
+                        connector->base.id,
+                        connector->name,
+                        new_encoder->base.id,
+                        new_encoder->name,
+                        connector_state->crtc->base.id);
 
        return 0;
 }
@@ -248,30 +248,24 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
 static int
 mode_fixup(struct drm_atomic_state *state)
 {
-       int ncrtcs = state->dev->mode_config.num_crtc;
+       struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
+       struct drm_connector *connector;
        struct drm_connector_state *conn_state;
        int i;
        bool ret;
 
-       for (i = 0; i < ncrtcs; i++) {
-               crtc_state = state->crtc_states[i];
-
-               if (!crtc_state || !crtc_state->mode_changed)
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (!crtc_state->mode_changed)
                        continue;
 
                drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
        }
 
-       for (i = 0; i < state->num_connector; i++) {
-               struct drm_encoder_helper_funcs *funcs;
+       for_each_connector_in_state(state, connector, conn_state, i) {
+               const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
 
-               conn_state = state->connector_states[i];
-
-               if (!conn_state)
-                       continue;
-
                WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
 
                if (!conn_state->crtc || !conn_state->best_encoder)
@@ -292,7 +286,7 @@ mode_fixup(struct drm_atomic_state *state)
                                        encoder->bridge, &crtc_state->mode,
                                        &crtc_state->adjusted_mode);
                        if (!ret) {
-                               DRM_DEBUG_KMS("Bridge fixup failed\n");
+                               DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
                                return -EINVAL;
                        }
                }
@@ -301,37 +295,33 @@ mode_fixup(struct drm_atomic_state *state)
                        ret = funcs->atomic_check(encoder, crtc_state,
                                                  conn_state);
                        if (ret) {
-                               DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n",
-                                             encoder->base.id, encoder->name);
+                               DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n",
+                                                encoder->base.id, encoder->name);
                                return ret;
                        }
                } else {
                        ret = funcs->mode_fixup(encoder, &crtc_state->mode,
                                                &crtc_state->adjusted_mode);
                        if (!ret) {
-                               DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
-                                             encoder->base.id, encoder->name);
+                               DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n",
+                                                encoder->base.id, encoder->name);
                                return -EINVAL;
                        }
                }
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
-               crtc_state = state->crtc_states[i];
-               crtc = state->crtcs[i];
-
-               if (!crtc_state || !crtc_state->mode_changed)
+               if (!crtc_state->mode_changed)
                        continue;
 
                funcs = crtc->helper_private;
                ret = funcs->mode_fixup(crtc, &crtc_state->mode,
                                        &crtc_state->adjusted_mode);
                if (!ret) {
-                       DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n",
+                                        crtc->base.id);
                        return -EINVAL;
                }
        }
@@ -346,7 +336,7 @@ needs_modeset(struct drm_crtc_state *state)
 }
 
 /**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_modeset - validate state object for modeset changes
  * @dev: DRM device
  * @state: the driver state object
  *
@@ -371,32 +361,27 @@ int
 drm_atomic_helper_check_modeset(struct drm_device *dev,
                                struct drm_atomic_state *state)
 {
-       int ncrtcs = dev->mode_config.num_crtc;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
+       struct drm_connector *connector;
+       struct drm_connector_state *connector_state;
        int i, ret;
 
-       for (i = 0; i < ncrtcs; i++) {
-               crtc = state->crtcs[i];
-               crtc_state = state->crtc_states[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
                if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
-                       DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n",
+                                        crtc->base.id);
                        crtc_state->mode_changed = true;
                }
 
                if (crtc->state->enable != crtc_state->enable) {
-                       DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n",
+                                        crtc->base.id);
                        crtc_state->mode_changed = true;
                }
        }
 
-       for (i = 0; i < state->num_connector; i++) {
+       for_each_connector_in_state(state, connector, connector_state, i) {
                /*
                 * This only sets crtc->mode_changed for routing changes,
                 * drivers must set crtc->mode_changed themselves when connector
@@ -413,32 +398,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
         * configuration. This must be done before calling mode_fixup in case a
         * crtc only changed its mode but has the same set of connectors.
         */
-       for (i = 0; i < ncrtcs; i++) {
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
                int num_connectors;
 
-               crtc = state->crtcs[i];
-               crtc_state = state->crtc_states[i];
-
-               if (!crtc)
-                       continue;
-
                /*
                 * We must set ->active_changed after walking connectors for
                 * otherwise an update that only changes active would result in
                 * a full modeset because update_connector_routing force that.
                 */
                if (crtc->state->active != crtc_state->active) {
-                       DRM_DEBUG_KMS("[CRTC:%d] active changed\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n",
+                                        crtc->base.id);
                        crtc_state->active_changed = true;
                }
 
                if (!needs_modeset(crtc_state))
                        continue;
 
-               DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
-                             crtc->base.id,
-                             crtc_state->enable ? 'y' : 'n',
+               DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
+                                crtc->base.id,
+                                crtc_state->enable ? 'y' : 'n',
                              crtc_state->active ? 'y' : 'n');
 
                ret = drm_atomic_add_affected_connectors(state, crtc);
@@ -449,8 +428,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                                                                crtc);
 
                if (crtc_state->enable != !!num_connectors) {
-                       DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n",
+                                        crtc->base.id);
 
                        return -EINVAL;
                }
@@ -461,7 +440,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
 EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
 
 /**
- * drm_atomic_helper_check - validate state object for modeset changes
+ * drm_atomic_helper_check_planes - validate state object for planes changes
  * @dev: DRM device
  * @state: the driver state object
  *
@@ -476,17 +455,14 @@ int
 drm_atomic_helper_check_planes(struct drm_device *dev,
                               struct drm_atomic_state *state)
 {
-       int nplanes = dev->mode_config.num_total_plane;
-       int ncrtcs = dev->mode_config.num_crtc;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
        int i, ret = 0;
 
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane_helper_funcs *funcs;
-               struct drm_plane *plane = state->planes[i];
-               struct drm_plane_state *plane_state = state->plane_states[i];
-
-               if (!plane)
-                       continue;
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               const struct drm_plane_helper_funcs *funcs;
 
                funcs = plane->helper_private;
 
@@ -497,18 +473,14 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 
                ret = funcs->atomic_check(plane, plane_state);
                if (ret) {
-                       DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n",
-                                     plane->base.id);
+                       DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n",
+                                        plane->base.id);
                        return ret;
                }
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc = state->crtcs[i];
-
-               if (!crtc)
-                       continue;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
                funcs = crtc->helper_private;
 
@@ -517,8 +489,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 
                ret = funcs->atomic_check(crtc, state->crtc_states[i]);
                if (ret) {
-                       DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n",
+                                        crtc->base.id);
                        return ret;
                }
        }
@@ -567,27 +539,26 @@ EXPORT_SYMBOL(drm_atomic_helper_check);
 static void
 disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
-       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       struct drm_connector *connector;
+       struct drm_connector_state *old_conn_state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
        int i;
 
-       for (i = 0; i < old_state->num_connector; i++) {
-               struct drm_connector_state *old_conn_state;
-               struct drm_connector *connector;
-               struct drm_encoder_helper_funcs *funcs;
+       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+               const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
                struct drm_crtc_state *old_crtc_state;
 
-               old_conn_state = old_state->connector_states[i];
-               connector = old_state->connectors[i];
-
                /* Shut down everything that's in the changeset and currently
                 * still on. So need to check the old, saved state. */
-               if (!old_conn_state || !old_conn_state->crtc)
+               if (!old_conn_state->crtc)
                        continue;
 
                old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
 
-               if (!old_crtc_state->active)
+               if (!old_crtc_state->active ||
+                   !needs_modeset(old_conn_state->crtc->state))
                        continue;
 
                encoder = old_conn_state->best_encoder;
@@ -600,12 +571,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
                funcs = encoder->helper_private;
 
-               DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n",
-                             encoder->base.id, encoder->name);
+               DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+                                encoder->base.id, encoder->name);
 
                /*
                 * Each encoder has at most one connector (since we always steal
-                * it away), so we won't call call disable hooks twice.
+                * it away), so we won't call disable hooks twice.
                 */
                if (encoder->bridge)
                        encoder->bridge->funcs->disable(encoder->bridge);
@@ -622,16 +593,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
                        encoder->bridge->funcs->post_disable(encoder->bridge);
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc;
-               struct drm_crtc_state *old_crtc_state;
-
-               crtc = old_state->crtcs[i];
-               old_crtc_state = old_state->crtc_states[i];
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
                /* Shut down everything that needs a full modeset. */
-               if (!crtc || !needs_modeset(crtc->state))
+               if (!needs_modeset(crtc->state))
                        continue;
 
                if (!old_crtc_state->active)
@@ -639,8 +605,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 
                funcs = crtc->helper_private;
 
-               DRM_DEBUG_KMS("disabling [CRTC:%d]\n",
-                             crtc->base.id);
+               DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+                                crtc->base.id);
 
 
                /* Right function depends upon target state. */
@@ -656,16 +622,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
 static void
 set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
-       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       struct drm_connector *connector;
+       struct drm_connector_state *old_conn_state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
        int i;
 
        /* clear out existing links */
-       for (i = 0; i < old_state->num_connector; i++) {
-               struct drm_connector *connector;
-
-               connector = old_state->connectors[i];
-
-               if (!connector || !connector->encoder)
+       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+               if (!connector->encoder)
                        continue;
 
                WARN_ON(!connector->encoder->crtc);
@@ -675,12 +640,8 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
        }
 
        /* set new links */
-       for (i = 0; i < old_state->num_connector; i++) {
-               struct drm_connector *connector;
-
-               connector = old_state->connectors[i];
-
-               if (!connector || !connector->state->crtc)
+       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+               if (!connector->state->crtc)
                        continue;
 
                if (WARN_ON(!connector->state->best_encoder))
@@ -691,14 +652,7 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
        }
 
        /* set legacy state in the crtc structure */
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc;
-
-               crtc = old_state->crtcs[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                crtc->mode = crtc->state->mode;
                crtc->enabled = crtc->state->enable;
                crtc->x = crtc->primary->state->src_x >> 16;
@@ -709,38 +663,35 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
 static void
 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
-       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       struct drm_connector *connector;
+       struct drm_connector_state *old_conn_state;
        int i;
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc;
-
-               crtc = old_state->crtcs[i];
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
-               if (!crtc || !crtc->state->mode_changed)
+               if (!crtc->state->mode_changed)
                        continue;
 
                funcs = crtc->helper_private;
 
-               if (crtc->state->enable) {
-                       DRM_DEBUG_KMS("modeset on [CRTC:%d]\n",
-                                     crtc->base.id);
+               if (crtc->state->enable && funcs->mode_set_nofb) {
+                       DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+                                        crtc->base.id);
 
                        funcs->mode_set_nofb(crtc);
                }
        }
 
-       for (i = 0; i < old_state->num_connector; i++) {
-               struct drm_connector *connector;
+       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+               const struct drm_encoder_helper_funcs *funcs;
                struct drm_crtc_state *new_crtc_state;
-               struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
                struct drm_display_mode *mode, *adjusted_mode;
 
-               connector = old_state->connectors[i];
-
-               if (!connector || !connector->state->best_encoder)
+               if (!connector->state->best_encoder)
                        continue;
 
                encoder = connector->state->best_encoder;
@@ -752,14 +703,15 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
                if (!new_crtc_state->mode_changed)
                        continue;
 
-               DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n",
-                             encoder->base.id, encoder->name);
+               DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+                                encoder->base.id, encoder->name);
 
                /*
                 * Each encoder has at most one connector (since we always steal
-                * it away), so we won't call call mode_set hooks twice.
+                * it away), so we won't call mode_set hooks twice.
                 */
-               funcs->mode_set(encoder, mode, adjusted_mode);
+               if (funcs->mode_set)
+                       funcs->mode_set(encoder, mode, adjusted_mode);
 
                if (encoder->bridge && encoder->bridge->funcs->mode_set)
                        encoder->bridge->funcs->mode_set(encoder->bridge,
@@ -768,46 +720,56 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
 }
 
 /**
- * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
+ * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
  * @dev: DRM device
- * @state: atomic state
+ * @old_state: atomic state object with old state structures
  *
- * This function commits the modeset changes that need to be committed before
- * updating planes. It shuts down all the outputs that need to be shut down and
+ * This function shuts down all the outputs that need to be shut down and
  * prepares them (if required) with the new mode.
+ *
+ * For compatability with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
  */
-void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
-                                        struct drm_atomic_state *state)
+void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+                                              struct drm_atomic_state *old_state)
 {
-       disable_outputs(dev, state);
-       set_routing_links(dev, state);
-       crtc_set_mode(dev, state);
+       disable_outputs(dev, old_state);
+       set_routing_links(dev, old_state);
+       crtc_set_mode(dev, old_state);
 }
-EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
+EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables);
 
 /**
- * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
  * @dev: DRM device
  * @old_state: atomic state object with old state structures
  *
- * This function commits the modeset changes that need to be committed after
- * updating planes: It enables all the outputs with the new configuration which
- * had to be turned off for the update.
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatability with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
  */
-void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
-                                         struct drm_atomic_state *old_state)
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+                                             struct drm_atomic_state *old_state)
 {
-       int ncrtcs = old_state->dev->mode_config.num_crtc;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       struct drm_connector *connector;
+       struct drm_connector_state *old_conn_state;
        int i;
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc;
-
-               crtc = old_state->crtcs[i];
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
                /* Need to filter out CRTCs where only planes change. */
-               if (!crtc || !needs_modeset(crtc->state))
+               if (!needs_modeset(crtc->state))
                        continue;
 
                if (!crtc->state->active)
@@ -816,8 +778,8 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
                funcs = crtc->helper_private;
 
                if (crtc->state->enable) {
-                       DRM_DEBUG_KMS("enabling [CRTC:%d]\n",
-                                     crtc->base.id);
+                       DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+                                        crtc->base.id);
 
                        if (funcs->enable)
                                funcs->enable(crtc);
@@ -826,28 +788,26 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
                }
        }
 
-       for (i = 0; i < old_state->num_connector; i++) {
-               struct drm_connector *connector;
-               struct drm_encoder_helper_funcs *funcs;
+       for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+               const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
 
-               connector = old_state->connectors[i];
-
-               if (!connector || !connector->state->best_encoder)
+               if (!connector->state->best_encoder)
                        continue;
 
-               if (!connector->state->crtc->state->active)
+               if (!connector->state->crtc->state->active ||
+                   !needs_modeset(connector->state->crtc->state))
                        continue;
 
                encoder = connector->state->best_encoder;
                funcs = encoder->helper_private;
 
-               DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n",
-                             encoder->base.id, encoder->name);
+               DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+                                encoder->base.id, encoder->name);
 
                /*
                 * Each encoder has at most one connector (since we always steal
-                * it away), so we won't call call enable hooks twice.
+                * it away), so we won't call enable hooks twice.
                 */
                if (encoder->bridge)
                        encoder->bridge->funcs->pre_enable(encoder->bridge);
@@ -861,18 +821,17 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
                        encoder->bridge->funcs->enable(encoder->bridge);
        }
 }
-EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
+EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
 
 static void wait_for_fences(struct drm_device *dev,
                            struct drm_atomic_state *state)
 {
-       int nplanes = dev->mode_config.num_total_plane;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
        int i;
 
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane *plane = state->planes[i];
-
-               if (!plane || !plane->state->fence)
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               if (!plane->state->fence)
                        continue;
 
                WARN_ON(!plane->state->fb);
@@ -891,16 +850,9 @@ static bool framebuffer_changed(struct drm_device *dev,
 {
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state;
-       int nplanes = old_state->dev->mode_config.num_total_plane;
        int i;
 
-       for (i = 0; i < nplanes; i++) {
-               plane = old_state->planes[i];
-               old_plane_state = old_state->plane_states[i];
-
-               if (!plane)
-                       continue;
-
+       for_each_plane_in_state(old_state, plane, old_plane_state, i) {
                if (plane->state->crtc != crtc &&
                    old_plane_state->crtc != crtc)
                        continue;
@@ -929,16 +881,9 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state;
-       int ncrtcs = old_state->dev->mode_config.num_crtc;
        int i, ret;
 
-       for (i = 0; i < ncrtcs; i++) {
-               crtc = old_state->crtcs[i];
-               old_crtc_state = old_state->crtc_states[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
                /* No one cares about the old state, so abuse it for tracking
                 * and store whether we hold a vblank reference (and should do a
                 * vblank wait) in the ->enable boolean. */
@@ -963,11 +908,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               crtc = old_state->crtcs[i];
-               old_crtc_state = old_state->crtc_states[i];
-
-               if (!crtc || !old_crtc_state->enable)
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               if (!old_crtc_state->enable)
                        continue;
 
                ret = wait_event_timeout(dev->vblank[i].queue,
@@ -1016,7 +958,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
        /*
         * Everything below can be run asynchronously without the need to grab
-        * any modeset locks at all under one conditions: It must be guaranteed
+        * any modeset locks at all under one condition: It must be guaranteed
         * that the asynchronous work has either been cancelled (if the driver
         * supports it, which at least requires that the framebuffers get
         * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
@@ -1032,11 +974,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
        wait_for_fences(dev, state);
 
-       drm_atomic_helper_commit_pre_planes(dev, state);
+       drm_atomic_helper_commit_modeset_disables(dev, state);
 
        drm_atomic_helper_commit_planes(dev, state);
 
-       drm_atomic_helper_commit_post_planes(dev, state);
+       drm_atomic_helper_commit_modeset_enables(dev, state);
 
        drm_atomic_helper_wait_for_vblanks(dev, state);
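The renamed helpers make the two modeset phases explicit: disables run before the plane update, enables after it. A driver that supplies its own ->atomic_commit instead of using drm_atomic_helper_commit() is expected to chain them in the same order; a minimal synchronous sketch (hypothetical foo driver, error unwinding elided):

        static int foo_atomic_commit(struct drm_device *dev,
                                     struct drm_atomic_state *state,
                                     bool async)
        {
                int ret;

                if (async)
                        return -EBUSY;  /* this sketch is synchronous only */

                ret = drm_atomic_helper_prepare_planes(dev, state);
                if (ret)
                        return ret;

                drm_atomic_helper_swap_state(dev, state);

                /* same phase ordering as drm_atomic_helper_commit() above */
                drm_atomic_helper_commit_modeset_disables(dev, state);
                drm_atomic_helper_commit_planes(dev, state);
                drm_atomic_helper_commit_modeset_enables(dev, state);

                drm_atomic_helper_wait_for_vblanks(dev, state);
                drm_atomic_helper_cleanup_planes(dev, state);
                drm_atomic_state_free(state);

                return 0;
        }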
 
@@ -1087,9 +1029,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
  */
 
 /**
- * drm_atomic_helper_prepare_planes - prepare plane resources after commit
+ * drm_atomic_helper_prepare_planes - prepare plane resources before commit
  * @dev: DRM device
- * @state: atomic state object with old state structures
+ * @state: atomic state object with new state structures
  *
  * This function prepares plane state, specifically framebuffers, for the new
  * configuration. If any failure is encountered this function will call
@@ -1105,8 +1047,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
        int ret, i;
 
        for (i = 0; i < nplanes; i++) {
-               struct drm_plane_helper_funcs *funcs;
+               const struct drm_plane_helper_funcs *funcs;
                struct drm_plane *plane = state->planes[i];
+               struct drm_plane_state *plane_state = state->plane_states[i];
                struct drm_framebuffer *fb;
 
                if (!plane)
@@ -1114,10 +1057,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
                funcs = plane->helper_private;
 
-               fb = state->plane_states[i]->fb;
+               fb = plane_state->fb;
 
                if (fb && funcs->prepare_fb) {
-                       ret = funcs->prepare_fb(plane, fb);
+                       ret = funcs->prepare_fb(plane, fb, plane_state);
                        if (ret)
                                goto fail;
                }
@@ -1127,8 +1070,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
 
 fail:
        for (i--; i >= 0; i--) {
-               struct drm_plane_helper_funcs *funcs;
+               const struct drm_plane_helper_funcs *funcs;
                struct drm_plane *plane = state->planes[i];
+               struct drm_plane_state *plane_state = state->plane_states[i];
                struct drm_framebuffer *fb;
 
                if (!plane)
@@ -1139,7 +1083,7 @@ fail:
                fb = state->plane_states[i]->fb;
 
                if (fb && funcs->cleanup_fb)
-                       funcs->cleanup_fb(plane, fb);
+                       funcs->cleanup_fb(plane, fb, plane_state);
 
        }
 
@@ -1163,16 +1107,14 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
 void drm_atomic_helper_commit_planes(struct drm_device *dev,
                                     struct drm_atomic_state *old_state)
 {
-       int nplanes = dev->mode_config.num_total_plane;
-       int ncrtcs = dev->mode_config.num_crtc;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *old_crtc_state;
+       struct drm_plane *plane;
+       struct drm_plane_state *old_plane_state;
        int i;
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc = old_state->crtcs[i];
-
-               if (!crtc)
-                       continue;
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
                funcs = crtc->helper_private;
 
@@ -1182,13 +1124,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                funcs->atomic_begin(crtc);
        }
 
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane_helper_funcs *funcs;
-               struct drm_plane *plane = old_state->planes[i];
-               struct drm_plane_state *old_plane_state;
-
-               if (!plane)
-                       continue;
+       for_each_plane_in_state(old_state, plane, old_plane_state, i) {
+               const struct drm_plane_helper_funcs *funcs;
 
                funcs = plane->helper_private;
 
@@ -1207,12 +1144,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
                        funcs->atomic_update(plane, old_plane_state);
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc_helper_funcs *funcs;
-               struct drm_crtc *crtc = old_state->crtcs[i];
-
-               if (!crtc)
-                       continue;
+       for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+               const struct drm_crtc_helper_funcs *funcs;
 
                funcs = crtc->helper_private;
 
@@ -1239,23 +1172,20 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
 void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
                                      struct drm_atomic_state *old_state)
 {
-       int nplanes = dev->mode_config.num_total_plane;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
        int i;
 
-       for (i = 0; i < nplanes; i++) {
-               struct drm_plane_helper_funcs *funcs;
-               struct drm_plane *plane = old_state->planes[i];
+       for_each_plane_in_state(old_state, plane, plane_state, i) {
+               const struct drm_plane_helper_funcs *funcs;
                struct drm_framebuffer *old_fb;
 
-               if (!plane)
-                       continue;
-
                funcs = plane->helper_private;
 
-               old_fb = old_state->plane_states[i]->fb;
+               old_fb = plane_state->fb;
 
                if (old_fb && funcs->cleanup_fb)
-                       funcs->cleanup_fb(plane, old_fb);
+                       funcs->cleanup_fb(plane, old_fb, plane_state);
        }
 }
 EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
@@ -1498,8 +1428,10 @@ static int update_output_state(struct drm_atomic_state *state,
                               struct drm_mode_set *set)
 {
        struct drm_device *dev = set->crtc->dev;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_connector *connector;
        struct drm_connector_state *conn_state;
-       int ncrtcs = state->dev->mode_config.num_crtc;
        int ret, i, j;
 
        ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
@@ -1515,27 +1447,14 @@ static int update_output_state(struct drm_atomic_state *state,
                        return PTR_ERR(conn_state);
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
                ret = drm_atomic_add_affected_connectors(state, crtc);
                if (ret)
                        return ret;
        }
 
        /* Then recompute connector->crtc links and crtc enabling state. */
-       for (i = 0; i < state->num_connector; i++) {
-               struct drm_connector *connector;
-
-               connector = state->connectors[i];
-               conn_state = state->connector_states[i];
-
-               if (!connector)
-                       continue;
-
+       for_each_connector_in_state(state, connector, conn_state, i) {
                if (conn_state->crtc == set->crtc) {
                        ret = drm_atomic_set_crtc_for_connector(conn_state,
                                                                NULL);
@@ -1554,13 +1473,7 @@ static int update_output_state(struct drm_atomic_state *state,
                }
        }
 
-       for (i = 0; i < ncrtcs; i++) {
-               struct drm_crtc *crtc = state->crtcs[i];
-               struct drm_crtc_state *crtc_state = state->crtc_states[i];
-
-               if (!crtc)
-                       continue;
-
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
                /* Don't update ->enable for the CRTC in the set_config request,
                 * since a mismatch would indicate a bug in the upper layers.
                 * The actual modeset code later on will catch any
@@ -1680,12 +1593,13 @@ backoff:
 EXPORT_SYMBOL(drm_atomic_helper_set_config);
 
 /**
- * drm_atomic_helper_crtc_set_property - helper for crtc prorties
+ * drm_atomic_helper_crtc_set_property - helper for crtc properties
  * @crtc: DRM crtc
  * @property: DRM property
  * @val: value of property
  *
- * Provides a default plane disablle handler using the atomic driver interface.
+ * Provides a default crtc set_property handler using the atomic driver
+ * interface.
  *
  * RETURNS:
  * Zero on success, error code on failure
@@ -1739,12 +1653,13 @@ backoff:
 EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
 
 /**
- * drm_atomic_helper_plane_set_property - helper for plane prorties
+ * drm_atomic_helper_plane_set_property - helper for plane properties
  * @plane: DRM plane
  * @property: DRM property
  * @val: value of property
  *
- * Provides a default plane disable handler using the atomic driver interface.
+ * Provides a default plane set_property handler using the atomic driver
+ * interface.
  *
  * RETURNS:
  * Zero on success, error code on failure
@@ -1798,12 +1713,13 @@ backoff:
 EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
 
 /**
- * drm_atomic_helper_connector_set_property - helper for connector prorties
+ * drm_atomic_helper_connector_set_property - helper for connector properties
  * @connector: DRM connector
  * @property: DRM property
  * @val: value of property
  *
- * Provides a default plane disablle handler using the atomic driver interface.
+ * Provides a default connector set_property handler using the atomic driver
+ * interface.
  *
  * RETURNS:
  * Zero on success, error code on failure
@@ -1986,10 +1902,10 @@ retry:
        WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
 
        list_for_each_entry(tmp_connector, &config->connector_list, head) {
-               if (connector->state->crtc != crtc)
+               if (tmp_connector->state->crtc != crtc)
                        continue;
 
-               if (connector->dpms == DRM_MODE_DPMS_ON) {
+               if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
                        active = true;
                        break;
                }
@@ -2051,6 +1967,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
 
+/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+                                             struct drm_crtc_state *state)
+{
+       memcpy(state, crtc->state, sizeof(*state));
+
+       state->mode_changed = false;
+       state->active_changed = false;
+       state->planes_changed = false;
+       state->event = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
 /**
  * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
  * @crtc: drm CRTC
@@ -2066,19 +2002,34 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
        if (WARN_ON(!crtc->state))
                return NULL;
 
-       state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
-
-       if (state) {
-               state->mode_changed = false;
-               state->active_changed = false;
-               state->planes_changed = false;
-               state->event = NULL;
-       }
+       state = kmalloc(sizeof(*state), M_DRM, M_WAITOK);
+       if (state)
+               __drm_atomic_helper_crtc_duplicate_state(crtc, state);
 
        return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 
+/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @crtc: CRTC object
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+                                           struct drm_crtc_state *state)
+{
+       /*
+        * This is currently a placeholder so that drivers that subclass the
+        * state will automatically do the right thing if code is ever added
+        * to this function.
+        */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
 /**
  * drm_atomic_helper_crtc_destroy_state - default state destroy hook
  * @crtc: drm CRTC
@@ -2090,6 +2041,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
 void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
                                          struct drm_crtc_state *state)
 {
+       __drm_atomic_helper_crtc_destroy_state(crtc, state);
        kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
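The new double-underscore variants split the copy and release logic out of the default hooks so that a driver which embeds drm_crtc_state in a larger structure can reuse them. A sketch of the intended pattern, with a hypothetical foo_crtc_state carrying one private member:

        struct foo_crtc_state {
                struct drm_crtc_state base;
                u32 dither;                     /* driver-private */
        };

        #define to_foo_crtc_state(s) \
                container_of(s, struct foo_crtc_state, base)

        static struct drm_crtc_state *
        foo_crtc_duplicate_state(struct drm_crtc *crtc)
        {
                struct foo_crtc_state *state;

                state = kmalloc(sizeof(*state), M_DRM, M_WAITOK);
                if (!state)
                        return NULL;

                /* copies the base state, resets mode_changed/event/... */
                __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
                state->dither = to_foo_crtc_state(crtc->state)->dither;

                return &state->base;
        }

        static void foo_crtc_destroy_state(struct drm_crtc *crtc,
                                           struct drm_crtc_state *state)
        {
                __drm_atomic_helper_crtc_destroy_state(crtc, state);
                kfree(to_foo_crtc_state(state));
        }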
@@ -2114,6 +2066,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
+/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+                                              struct drm_plane_state *state)
+{
+       memcpy(state, plane->state, sizeof(*state));
+
+       if (state->fb)
+               drm_framebuffer_reference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
 /**
  * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
  * @plane: drm plane
@@ -2129,15 +2099,31 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
        if (WARN_ON(!plane->state))
                return NULL;
 
-       state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
-
-       if (state && state->fb)
-               drm_framebuffer_reference(state->fb);
+       state = kmalloc(sizeof(*state), M_DRM, M_WAITOK);
+       if (state)
+               __drm_atomic_helper_plane_duplicate_state(plane, state);
 
        return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 
+/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @plane: plane object
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+                                            struct drm_plane_state *state)
+{
+       if (state->fb)
+               drm_framebuffer_unreference(state->fb);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
 /**
  * drm_atomic_helper_plane_destroy_state - default state destroy hook
  * @plane: drm plane
@@ -2149,9 +2135,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
 void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
                                           struct drm_plane_state *state)
 {
-       if (state->fb)
-               drm_framebuffer_unreference(state->fb);
-
+       __drm_atomic_helper_plane_destroy_state(plane, state);
        kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
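The plane variants of the same split do real work: they take and drop the framebuffer reference, so a subclassing driver never touches fb refcounting itself. Sketch, assuming a foo_plane_state wrapper analogous to the CRTC example above:

        static struct drm_plane_state *
        foo_plane_duplicate_state(struct drm_plane *plane)
        {
                struct foo_plane_state *state;

                state = kmalloc(sizeof(*state), M_DRM, M_WAITOK);
                if (!state)
                        return NULL;

                /* takes a reference on state->base.fb, if one is attached */
                __drm_atomic_helper_plane_duplicate_state(plane, &state->base);

                return &state->base;
        }

        static void foo_plane_destroy_state(struct drm_plane *plane,
                                            struct drm_plane_state *state)
        {
                /* drops the fb reference taken by the duplicate helper */
                __drm_atomic_helper_plane_destroy_state(plane, state);
                kfree(state);
        }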
@@ -2174,6 +2158,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 
+/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+                                           struct drm_connector_state *state)
+{
+       memcpy(state, connector->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
 /**
  * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
  * @connector: drm connector
@@ -2184,13 +2184,40 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
 struct drm_connector_state *
 drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
 {
+       struct drm_connector_state *state;
+
        if (WARN_ON(!connector->state))
                return NULL;
 
-       return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
+       state = kmalloc(sizeof(*state), M_DRM, M_WAITOK);
+       if (state)
+               __drm_atomic_helper_connector_duplicate_state(connector, state);
+
+       return state;
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 
+/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @connector: connector object
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+                                           struct drm_connector_state *state)
+{
+       /*
+        * This is currently a placeholder so that drivers that subclass the
+        * state will automatically do the right thing if code is ever added
+        * to this function.
+        */
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
 /**
  * drm_atomic_helper_connector_destroy_state - default state destroy hook
  * @connector: drm connector
@@ -2202,6 +2229,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
 void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
                                          struct drm_connector_state *state)
 {
+       __drm_atomic_helper_connector_destroy_state(connector, state);
        kfree(state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
index c05f8f1..ef29ed8 100644
 
 #include <linux/export.h>
 #include <drm/drmP.h>
+#include <asm/cpufeature.h>
 
 #include <machine/md_var.h>
 #include <machine/cpufunc.h>
 
-#define cpu_has_clflush        1
-
 void
 drm_clflush_virt_range(void *in_addr, unsigned long length)
 {
index 088e3d7..ac23818 100644
@@ -664,6 +664,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
        struct drm_mode_config *config = &dev->mode_config;
        int ret;
 
+       WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
+       WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);
+
        crtc->dev = dev;
        crtc->funcs = funcs;
        crtc->invert_dimensions = false;
@@ -1995,21 +1998,32 @@ int drm_mode_getcrtc(struct drm_device *dev,
                return -ENOENT;
 
        drm_modeset_lock_crtc(crtc, crtc->primary);
-       crtc_resp->x = crtc->x;
-       crtc_resp->y = crtc->y;
        crtc_resp->gamma_size = crtc->gamma_size;
        if (crtc->primary->fb)
                crtc_resp->fb_id = crtc->primary->fb->base.id;
        else
                crtc_resp->fb_id = 0;
 
-       if (crtc->enabled) {
-
-               drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
-               crtc_resp->mode_valid = 1;
+       if (crtc->state) {
+               crtc_resp->x = crtc->primary->state->src_x >> 16;
+               crtc_resp->y = crtc->primary->state->src_y >> 16;
+               if (crtc->state->enable) {
+                       drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->state->mode);
+                       crtc_resp->mode_valid = 1;
 
+               } else {
+                       crtc_resp->mode_valid = 0;
+               }
        } else {
-               crtc_resp->mode_valid = 0;
+               crtc_resp->x = crtc->x;
+               crtc_resp->y = crtc->y;
+               if (crtc->enabled) {
+                       drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode);
+                       crtc_resp->mode_valid = 1;
+
+               } else {
+                       crtc_resp->mode_valid = 0;
+               }
        }
        drm_modeset_unlock_crtc(crtc);
 
@@ -2262,8 +2276,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
        crtc = drm_encoder_get_crtc(encoder);
        if (crtc)
                enc_resp->crtc_id = crtc->base.id;
-       else if (encoder->crtc)
-               enc_resp->crtc_id = encoder->crtc->base.id;
        else
                enc_resp->crtc_id = 0;
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2398,6 +2410,27 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
        return 0;
 }
 
+/**
+ * drm_plane_check_pixel_format - Check if the plane supports the pixel format
+ * @plane: plane to check for format support
+ * @format: the pixel format
+ *
+ * Returns:
+ * Zero if @plane has @format in its list of supported pixel formats, -EINVAL
+ * otherwise.
+ */
+int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format)
+{
+       unsigned int i;
+
+       for (i = 0; i < plane->format_count; i++) {
+               if (format == plane->format_types[i])
+                       return 0;
+       }
+
+       return -EINVAL;
+}
+
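Besides the two core call sites converted below, the new helper is convenient from driver code, for instance in an atomic_check hook that wants to reject unsupported scanout formats early. A minimal sketch (hypothetical driver):

        static int foo_plane_atomic_check(struct drm_plane *plane,
                                          struct drm_plane_state *state)
        {
                if (!state->fb)
                        return 0;       /* plane is being disabled */

                /* reject framebuffers the plane cannot scan out */
                return drm_plane_check_pixel_format(plane,
                                                    state->fb->pixel_format);
        }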
 /*
  * setplane_internal - setplane handler for internal callers
  *
@@ -2418,7 +2451,6 @@ static int __setplane_internal(struct drm_plane *plane,
 {
        int ret = 0;
        unsigned int fb_width, fb_height;
-       unsigned int i;
 
        /* No fb means shut it down */
        if (!fb) {
@@ -2441,16 +2473,24 @@ static int __setplane_internal(struct drm_plane *plane,
        }
 
        /* Check whether this plane supports the fb pixel format. */
-       for (i = 0; i < plane->format_count; i++)
-               if (fb->pixel_format == plane->format_types[i])
-                       break;
-       if (i == plane->format_count) {
+       ret = drm_plane_check_pixel_format(plane, fb->pixel_format);
+       if (ret) {
                DRM_DEBUG_KMS("Invalid pixel format %s\n",
                              drm_get_format_name(fb->pixel_format));
-               ret = -EINVAL;
                goto out;
        }
 
+       /* Give drivers some help against integer overflows */
+       if (crtc_w > INT_MAX ||
+           crtc_x > INT_MAX - (int32_t) crtc_w ||
+           crtc_h > INT_MAX ||
+           crtc_y > INT_MAX - (int32_t) crtc_h) {
+               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+                             crtc_w, crtc_h, crtc_x, crtc_y);
+               return -ERANGE;
+       }
+
+
        fb_width = fb->width << 16;
        fb_height = fb->height << 16;
 
@@ -2535,17 +2575,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;
 
-       /* Give drivers some help against integer overflows */
-       if (plane_req->crtc_w > INT_MAX ||
-           plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
-           plane_req->crtc_h > INT_MAX ||
-           plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
-               DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
-                             plane_req->crtc_w, plane_req->crtc_h,
-                             plane_req->crtc_x, plane_req->crtc_y);
-               return -ERANGE;
-       }
-
        /*
         * First, find the plane, crtc, and fb objects.  If not available,
         * we don't bother to call the driver.
@@ -2771,6 +2800,23 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 
                drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
 
+               /*
+                * Check whether the primary plane supports the fb pixel format.
+                * Drivers not implementing the universal planes API use a
+                * default formats list provided by the DRM core which doesn't
+                * match real hardware capabilities. Skip the check in that
+                * case.
+                */
+               if (!crtc->primary->format_default) {
+                       ret = drm_plane_check_pixel_format(crtc->primary,
+                                                          fb->pixel_format);
+                       if (ret) {
+                               DRM_DEBUG_KMS("Invalid pixel format %s\n",
+                                       drm_get_format_name(fb->pixel_format));
+                               goto out;
+                       }
+               }
+
                ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y,
                                              mode, fb);
                if (ret)
@@ -3248,6 +3294,12 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
                        DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
                        return -EINVAL;
                }
+
+               if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) {
+                       DRM_DEBUG_KMS("bad fb modifier %lu for plane %d\n",
+                                     r->modifier[i], i);
+                       return -EINVAL;
+               }
        }
 
        return 0;
@@ -3262,7 +3314,7 @@ internal_framebuffer_create(struct drm_device *dev,
        struct drm_framebuffer *fb;
        int ret;
 
-       if (r->flags & ~DRM_MODE_FB_INTERLACED) {
+       if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
                DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
                return ERR_PTR(-EINVAL);
        }
@@ -3278,6 +3330,12 @@ internal_framebuffer_create(struct drm_device *dev,
                return ERR_PTR(-EINVAL);
        }
 
+       if (r->flags & DRM_MODE_FB_MODIFIERS &&
+           !dev->mode_config.allow_fb_modifiers) {
+               DRM_DEBUG_KMS("driver does not support fb modifiers\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        ret = framebuffer_check(r);
        if (ret)
                return ERR_PTR(ret);
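DRM_MODE_FB_MODIFIERS only makes it past these checks once the driver has set mode_config.allow_fb_modifiers at init time; the values then reach the driver's framebuffer constructor via mode_cmd->modifier[]. A sketch, assuming a hypothetical foo driver that only scans out linear buffers (foo_framebuffer_init() stands in for the driver's real constructor):

        static struct drm_framebuffer *
        foo_fb_create(struct drm_device *dev, struct drm_file *file,
                      struct drm_mode_fb_cmd2 *mode_cmd)
        {
                /* mode_cmd->modifier[] can only be non-zero here if init
                 * did:  dev->mode_config.allow_fb_modifiers = true;
                 */
                if (mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE)
                        return ERR_PTR(-EINVAL);        /* linear only */

                return foo_framebuffer_init(dev, file, mode_cmd);
        }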
@@ -5542,6 +5600,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
 #endif
        return NULL;
 }
+EXPORT_SYMBOL(drm_mode_get_tile_group);
 
 /**
  * drm_mode_create_tile_group - create a tile group from a displayid description
@@ -5580,3 +5639,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
        mutex_unlock(&dev->mode_config.idr_mutex);
        return tg;
 }
+EXPORT_SYMBOL(drm_mode_create_tile_group);
index 690fee5..2c32d52 100644
@@ -145,7 +145,7 @@ EXPORT_SYMBOL(drm_helper_crtc_in_use);
 static void
 drm_encoder_disable(struct drm_encoder *encoder)
 {
-       struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+       const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
 
        if (encoder->bridge)
                encoder->bridge->funcs->disable(encoder->bridge);
@@ -175,7 +175,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
        }
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+               const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
                crtc->enabled = drm_helper_crtc_in_use(crtc);
                if (!crtc->enabled) {
                        if (crtc_funcs->disable)
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 static void
 drm_crtc_prepare_encoders(struct drm_device *dev)
 {
-       struct drm_encoder_helper_funcs *encoder_funcs;
+       const struct drm_encoder_helper_funcs *encoder_funcs;
        struct drm_encoder *encoder;
 
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -254,9 +254,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                              struct drm_framebuffer *old_fb)
 {
        struct drm_device *dev = crtc->dev;
-       struct drm_display_mode *adjusted_mode, saved_mode;
-       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-       struct drm_encoder_helper_funcs *encoder_funcs;
+       struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
+       const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       const struct drm_encoder_helper_funcs *encoder_funcs;
        int saved_x, saved_y;
        bool saved_enabled;
        struct drm_encoder *encoder;
@@ -276,6 +276,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        }
 
        saved_mode = crtc->mode;
+       saved_hwmode = crtc->hwmode;
        saved_x = crtc->x;
        saved_y = crtc->y;
 
@@ -318,6 +319,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
        }
        DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
 
+       crtc->hwmode = *adjusted_mode;
+
        /* Prepare the encoders and CRTCs before setting the mode. */
        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
@@ -380,9 +383,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                        encoder->bridge->funcs->enable(encoder->bridge);
        }
 
-       /* Store real post-adjustment hardware mode. */
-       crtc->hwmode = *adjusted_mode;
-
        /* Calculate and store various constants which
         * are later needed by vblank and swap-completion
         * timestamping. They are derived from true hwmode.
@@ -395,6 +395,7 @@ done:
        if (!ret) {
                crtc->enabled = saved_enabled;
                crtc->mode = saved_mode;
+               crtc->hwmode = saved_hwmode;
                crtc->x = saved_x;
                crtc->y = saved_y;
        }
@@ -456,7 +457,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        bool fb_changed = false; /* if true and !mode_changed just do a flip */
        struct drm_connector *save_connectors, *connector;
        int count = 0, ro, fail = 0;
-       struct drm_crtc_helper_funcs *crtc_funcs;
+       const struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_mode_set save_set;
        int ret;
        int i;
@@ -556,7 +557,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        /* a) traverse passed in connector list and get encoders for them */
        count = 0;
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               struct drm_connector_helper_funcs *connector_funcs =
+               const struct drm_connector_helper_funcs *connector_funcs =
                        connector->helper_private;
                new_encoder = connector->encoder;
                for (ro = 0; ro < set->num_connectors; ro++) {
@@ -716,7 +717,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
 static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
 {
        struct drm_bridge *bridge = encoder->bridge;
-       struct drm_encoder_helper_funcs *encoder_funcs;
+       const struct drm_encoder_helper_funcs *encoder_funcs;
 
        if (bridge) {
                if (mode == DRM_MODE_DPMS_ON)
@@ -778,7 +779,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
        /* from off to on, do crtc then encoder */
        if (mode < old_dpms) {
                if (crtc) {
-                       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+                       const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
                        if (crtc_funcs->dpms)
                                (*crtc_funcs->dpms) (crtc,
                                                     drm_helper_choose_crtc_dpms(crtc));
@@ -792,7 +793,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
                if (encoder)
                        drm_helper_encoder_dpms(encoder, encoder_dpms);
                if (crtc) {
-                       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+                       const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
                        if (crtc_funcs->dpms)
                                (*crtc_funcs->dpms) (crtc,
                                                     drm_helper_choose_crtc_dpms(crtc));
@@ -821,6 +822,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
        for (i = 0; i < 4; i++) {
                fb->pitches[i] = mode_cmd->pitches[i];
                fb->offsets[i] = mode_cmd->offsets[i];
+               fb->modifier[i] = mode_cmd->modifier[i];
        }
        drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
                                    &fb->bits_per_pixel);
@@ -853,7 +855,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 {
        struct drm_crtc *crtc;
        struct drm_encoder *encoder;
-       struct drm_crtc_helper_funcs *crtc_funcs;
+       const struct drm_crtc_helper_funcs *crtc_funcs;
        int encoder_dpms;
        bool ret;
 
@@ -918,7 +920,7 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
                             struct drm_framebuffer *old_fb)
 {
        struct drm_crtc_state *crtc_state;
-       struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+       const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
        int ret;
 
        if (crtc->funcs->atomic_duplicate_state)
index 1941b6c..e2008cb 100644
  * OF THIS SOFTWARE.
  */
 
-#include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/i2c.h>
 #include <drm/drm_dp_helper.h>
+#include <drm/drmP.h>
 
+/**
+ * DOC: dp helpers
+ *
+ * These functions contain some common logic and helpers at various abstraction
+ * levels to deal with Display Port sink devices and related things like DP aux
+ * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
+ * blocks, ...
+ */
 
 /* Helpers for DP link training */
 static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
@@ -179,7 +192,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
        for (retry = 0; retry < 32; retry++) {
 
                mutex_lock(&aux->hw_mutex);
-               KKASSERT(aux->transfer != NULL);
                err = aux->transfer(aux, &msg);
                mutex_unlock(&aux->hw_mutex);
                if (err < 0) {
@@ -253,6 +265,383 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
 }
 EXPORT_SYMBOL(drm_dp_dpcd_write);
 
+/**
+ * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207)
+ * @aux: DisplayPort AUX channel
+ * @status: buffer to store the link status in (must be at least 6 bytes)
+ *
+ * Returns the number of bytes transferred on success or a negative error
+ * code on failure.
+ */
+int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
+                                u8 status[DP_LINK_STATUS_SIZE])
+{
+       return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status,
+                               DP_LINK_STATUS_SIZE);
+}
+EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
+
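Combined with the long-standing drm_dp_clock_recovery_ok()/drm_dp_channel_eq_ok() decoders, this gives a compact "is the link still trained?" test. Sketch (hypothetical helper):

        static bool foo_dp_link_ok(struct drm_dp_aux *aux, int lane_count)
        {
                u8 link_status[DP_LINK_STATUS_SIZE];

                if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
                        return false;

                return drm_dp_clock_recovery_ok(link_status, lane_count) &&
                       drm_dp_channel_eq_ok(link_status, lane_count);
        }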
+/**
+ * drm_dp_link_probe() - probe a DisplayPort link for capabilities
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to structure in which to return link capabilities
+ *
+ * The structure filled in by this function can usually be passed directly
+ * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and
+ * configure the link based on the link's capabilities.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+       u8 values[3];
+       int err;
+
+       memset(link, 0, sizeof(*link));
+
+       err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
+       if (err < 0)
+               return err;
+
+       link->revision = values[0];
+       link->rate = drm_dp_bw_code_to_link_rate(values[1]);
+       link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
+
+       if (values[2] & DP_ENHANCED_FRAME_CAP)
+               link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_probe);
+
+/**
+ * drm_dp_link_power_up() - power up a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+       u8 value;
+       int err;
+
+       /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D0;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       /*
+        * According to the DP 1.1 specification, a "Sink Device must exit the
+        * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
+        * Control Field", register 0x600).
+        */
+       usleep_range(1000, 2000);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_up);
+
+/**
+ * drm_dp_link_power_down() - power down a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+       u8 value;
+       int err;
+
+       /* DP_SET_POWER register is only available on DPCD v1.1 and later */
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D3;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_power_down);
+
+/**
+ * drm_dp_link_configure() - configure a DisplayPort link
+ * @aux: DisplayPort AUX channel
+ * @link: pointer to a structure containing the link configuration
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
+{
+       u8 values[2];
+       int err;
+
+       values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+       values[1] = link->num_lanes;
+
+       if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+               values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+       err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_link_configure);
+
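As the drm_dp_link_probe() comment says, the drm_dp_link it fills in can be fed straight to the power-up and configure helpers, so a basic sink bring-up is three calls. Sketch:

        static int foo_dp_bringup(struct drm_dp_aux *aux)
        {
                struct drm_dp_link link;
                int err;

                err = drm_dp_link_probe(aux, &link);    /* rev, rate, lanes */
                if (err < 0)
                        return err;

                err = drm_dp_link_power_up(aux, &link); /* D3 -> D0 */
                if (err < 0)
                        return err;

                /* writes DP_LINK_BW_SET and DP_LANE_COUNT_SET */
                return drm_dp_link_configure(aux, &link);
        }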
+/*
+ * I2C-over-AUX implementation
+ */
+
+#if 0
+static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+              I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+              I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+              I2C_FUNC_10BIT_ADDR;
+}
+
+/*
+ * Transfer a single I2C-over-AUX message and handle various error conditions,
+ * retrying the transaction as appropriate.  It is assumed that the
+ * aux->transfer function does not modify anything in the msg other than the
+ * reply field.
+ *
+ * Returns bytes transferred on success, or a negative error code on failure.
+ */
+static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+{
+       unsigned int retry;
+       int ret;
+
+       /*
+        * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device
+        * is required to retry at least seven times upon receiving AUX_DEFER
+        * before giving up the AUX transaction.
+        */
+       for (retry = 0; retry < 7; retry++) {
+               mutex_lock(&aux->hw_mutex);
+               ret = aux->transfer(aux, msg);
+               mutex_unlock(&aux->hw_mutex);
+               if (ret < 0) {
+                       if (ret == -EBUSY)
+                               continue;
+
+                       DRM_DEBUG_KMS("transaction failed: %d\n", ret);
+                       return ret;
+               }
+
+
+               switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
+               case DP_AUX_NATIVE_REPLY_ACK:
+                       /*
+                        * For I2C-over-AUX transactions this isn't enough, we
+                        * need to check for the I2C ACK reply.
+                        */
+                       break;
+
+               case DP_AUX_NATIVE_REPLY_NACK:
+                       DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size);
+                       return -EREMOTEIO;
+
+               case DP_AUX_NATIVE_REPLY_DEFER:
+                       DRM_DEBUG_KMS("native defer\n");
+                       /*
+                        * We could check for I2C bit rate capabilities and if
+                        * available adjust this interval. We could also be
+                        * more careful with DP-to-legacy adapters where a
+                        * long legacy cable may force very low I2C bit rates.
+                        *
+                        * For now just defer for long enough to hopefully be
+                        * safe for all use-cases.
+                        */
+                       usleep_range(500, 600);
+                       continue;
+
+               default:
+                       DRM_ERROR("invalid native reply %#04x\n", msg->reply);
+                       return -EREMOTEIO;
+               }
+
+               switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
+               case DP_AUX_I2C_REPLY_ACK:
+                       /*
+                        * Both native ACK and I2C ACK replies received. We
+                        * can assume the transfer was successful.
+                        */
+                       return ret;
+
+               case DP_AUX_I2C_REPLY_NACK:
+                       DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n", ret, msg->size);
+                       aux->i2c_nack_count++;
+                       return -EREMOTEIO;
+
+               case DP_AUX_I2C_REPLY_DEFER:
+                       DRM_DEBUG_KMS("I2C defer\n");
+                       aux->i2c_defer_count++;
+                       usleep_range(400, 500);
+                       continue;
+
+               default:
+                       DRM_ERROR("invalid I2C reply %#04x\n", msg->reply);
+                       return -EREMOTEIO;
+               }
+       }
+
+       DRM_DEBUG_KMS("too many retries, giving up\n");
+       return -EREMOTEIO;
+}
+
+/*
+ * Keep retrying drm_dp_i2c_do_msg until all data has been transferred.
+ *
+ * Returns an error code on failure, or a recommended transfer size on success.
+ */
+static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg)
+{
+       int err, ret = orig_msg->size;
+       struct drm_dp_aux_msg msg = *orig_msg;
+
+       while (msg.size > 0) {
+               err = drm_dp_i2c_do_msg(aux, &msg);
+               if (err <= 0)
+                       return err == 0 ? -EPROTO : err;
+
+               if (err < msg.size && err < ret) {
+                       DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n",
+                                     msg.size, err);
+                       ret = err;
+               }
+
+               msg.size -= err;
+               msg.buffer += err;
+       }
+
+       return ret;
+}
+
+/*
+ * Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX
+ * packets to be as large as possible. If not, the I2C transactions never
+ * succeed. Hence the default is maximum.
+ */
+static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES;
+module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644);
+MODULE_PARM_DESC(dp_aux_i2c_transfer_size,
+                "Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)");
+
+static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
+                          int num)
+{
+       struct drm_dp_aux *aux = adapter->algo_data;
+       unsigned int i, j;
+       unsigned transfer_size;
+       struct drm_dp_aux_msg msg;
+       int err = 0;
+
+       dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES);
+
+       memset(&msg, 0, sizeof(msg));
+
+       for (i = 0; i < num; i++) {
+               msg.address = msgs[i].addr;
+               msg.request = (msgs[i].flags & I2C_M_RD) ?
+                       DP_AUX_I2C_READ :
+                       DP_AUX_I2C_WRITE;
+               msg.request |= DP_AUX_I2C_MOT;
+               /* Send a bare address packet to start the transaction.
+                * Zero sized messages specify an address only (bare
+                * address) transaction.
+                */
+               msg.buffer = NULL;
+               msg.size = 0;
+               err = drm_dp_i2c_do_msg(aux, &msg);
+               if (err < 0)
+                       break;
+               /* We want each transaction to be as large as possible, but
+                * we'll go to smaller sizes if the hardware gives us a
+                * short reply.
+                */
+               transfer_size = dp_aux_i2c_transfer_size;
+               for (j = 0; j < msgs[i].len; j += msg.size) {
+                       msg.buffer = msgs[i].buf + j;
+                       msg.size = min(transfer_size, msgs[i].len - j);
+
+                       err = drm_dp_i2c_drain_msg(aux, &msg);
+                       if (err < 0)
+                               break;
+                       transfer_size = err;
+               }
+               if (err < 0)
+                       break;
+       }
+       if (err >= 0)
+               err = num;
+       /* Send a bare address packet to close out the transaction.
+        * Zero sized messages specify an address only (bare
+        * address) transaction.
+        */
+       msg.request &= ~DP_AUX_I2C_MOT;
+       msg.buffer = NULL;
+       msg.size = 0;
+       (void)drm_dp_i2c_do_msg(aux, &msg);
+
+       return err;
+}
+
+static const struct i2c_algorithm drm_dp_i2c_algo = {
+       .functionality = drm_dp_i2c_functionality,
+       .master_xfer = drm_dp_i2c_xfer,
+};
+
+/**
+ * drm_dp_aux_register() - initialise and register aux channel
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_dp_aux_register(struct drm_dp_aux *aux)
+{
+       lockinit(&aux->hw_mutex, "ahm", 0, LK_CANRECURSE);
+
+       aux->ddc.algo = &drm_dp_i2c_algo;
+       aux->ddc.algo_data = aux;
+       aux->ddc.retries = 3;
+
+       aux->ddc.class = I2C_CLASS_DDC;
+       aux->ddc.owner = THIS_MODULE;
+       aux->ddc.dev.parent = aux->dev;
+       aux->ddc.dev.of_node = aux->dev->of_node;
+
+       strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+               sizeof(aux->ddc.name));
+
+       return i2c_add_adapter(&aux->ddc);
+}
+EXPORT_SYMBOL(drm_dp_aux_register);
+#endif
+
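The whole I2C-over-AUX layer is imported under #if 0 until the matching i2c adapter infrastructure is in place. For reference, on the Linux side a driver only has to fill in the hardware transfer hook (and optionally a name) before registering; a sketch with hypothetical foo_* names:

        static int foo_aux_init(struct foo_device *foo, struct device *parent)
        {
                foo->aux.name = "foo DP-AUX";
                foo->aux.dev = parent;
                foo->aux.transfer = foo_aux_transfer;   /* hw access routine */

                return drm_dp_aux_register(&foo->aux);
        }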
 /**
  * drm_dp_aux_unregister() - unregister an AUX adapter
  * @aux: DisplayPort AUX channel
index 41eec17..674a7a6 100644
@@ -2281,6 +2281,19 @@ out:
 }
 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
 
+int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+{
+       int slots = 0;
+       port = drm_dp_get_validated_port_ref(mgr, port);
+       if (!port)
+               return slots;
+
+       slots = port->vcpi.num_slots;
+       drm_dp_put_port(port);
+       return slots;
+}
+EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
+
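The new export lets an MST encoder read back the slot count that a successful drm_dp_mst_allocate_vcpi() computed, e.g. when programming transcoder timeslots. Sketch (foo_set_mst_timeslots() is a hypothetical hardware hook):

        int slots = drm_dp_mst_get_vcpi_slots(mgr, port);

        if (slots > 0)
                foo_set_mst_timeslots(encoder, slots);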
 /**
  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
  * @mgr: manager for this port
index ddd09ae..6c0991b 100644
@@ -206,7 +206,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
 int drm_fb_helper_debug_enter(struct fb_info *info)
 {
        struct drm_fb_helper *helper = info->par;
-       struct drm_crtc_helper_funcs *funcs;
+       const struct drm_crtc_helper_funcs *funcs;
        int i;
 
        list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
@@ -253,7 +253,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 {
        struct drm_fb_helper *helper = info->par;
        struct drm_crtc *crtc;
-       struct drm_crtc_helper_funcs *funcs;
+       const struct drm_crtc_helper_funcs *funcs;
        struct drm_framebuffer *fb;
        int i;
 
@@ -743,7 +743,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
 {
        struct drm_fb_helper *fb_helper = info->par;
        struct drm_device *dev = fb_helper->dev;
-       struct drm_crtc_helper_funcs *crtc_funcs;
+       const struct drm_crtc_helper_funcs *crtc_funcs;
        u16 *red, *green, *blue, *transp;
        struct drm_crtc *crtc;
        int i, j, rc = 0;
@@ -1043,23 +1043,45 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
        crtc_count = 0;
        for (i = 0; i < fb_helper->crtc_count; i++) {
                struct drm_display_mode *desired_mode;
-               int x, y;
+               struct drm_mode_set *mode_set;
+               int x, y, j;
+               /* In case of a tile group, are we the last tile vertically or
+                * horizontally? Without a tile group you are always the last
+                * one, both vertically and horizontally.
+                */
+               bool lastv = true, lasth = true;
+
                desired_mode = fb_helper->crtc_info[i].desired_mode;
+               mode_set = &fb_helper->crtc_info[i].mode_set;
+
+               if (!desired_mode)
+                       continue;
+
+               crtc_count++;
+
                x = fb_helper->crtc_info[i].x;
                y = fb_helper->crtc_info[i].y;
-               if (desired_mode) {
-                       if (gamma_size == 0)
-                               gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
-                       if (desired_mode->hdisplay + x < sizes.fb_width)
-                               sizes.fb_width = desired_mode->hdisplay + x;
-                       if (desired_mode->vdisplay + y < sizes.fb_height)
-                               sizes.fb_height = desired_mode->vdisplay + y;
-                       if (desired_mode->hdisplay + x > sizes.surface_width)
-                               sizes.surface_width = desired_mode->hdisplay + x;
-                       if (desired_mode->vdisplay + y > sizes.surface_height)
-                               sizes.surface_height = desired_mode->vdisplay + y;
-                       crtc_count++;
+
+               if (gamma_size == 0)
+                       gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
+
+               sizes.surface_width  = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
+               sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+
+               for (j = 0; j < mode_set->num_connectors; j++) {
+                       struct drm_connector *connector = mode_set->connectors[j];
+                       if (connector->has_tile) {
+                               lasth = (connector->tile_h_loc == (connector->num_h_tile - 1));
+                               lastv = (connector->tile_v_loc == (connector->num_v_tile - 1));
+                               /* cloning to multiple tiles is just crazy-talk, so: */
+                               break;
+                       }
                }
+
+               if (lasth)
+                       sizes.fb_width  = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+               if (lastv)
+                       sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
        }
 
        if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
@@ -1285,12 +1307,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
                                                      int width, int height)
 {
        struct drm_cmdline_mode *cmdline_mode;
-       struct drm_display_mode *mode = NULL;
+       struct drm_display_mode *mode;
        bool prefer_non_interlace;
 
        cmdline_mode = &fb_helper_conn->connector->cmdline_mode;
        if (cmdline_mode->specified == false)
-               return mode;
+               return NULL;
 
        /* attempt to find a matching mode in the list of modes
         *  we have gotten so far, if not add a CVT mode that conforms
@@ -1299,7 +1321,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f
                goto create_mode;
 
        prefer_non_interlace = !cmdline_mode->interlace;
- again:
+again:
        list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
                /* check width/height */
                if (mode->hdisplay != cmdline_mode->xres ||
@@ -1553,7 +1575,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        int c, o;
        struct drm_device *dev = fb_helper->dev;
        struct drm_connector *connector;
-       struct drm_connector_helper_funcs *connector_funcs;
+       const struct drm_connector_helper_funcs *connector_funcs;
        struct drm_encoder *encoder;
        int my_score, best_score, score;
        struct drm_fb_helper_crtc **crtcs, *crtc;
index 259bc2e..89ac22e 100644 (file)
@@ -1209,6 +1209,37 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
 }
 EXPORT_SYMBOL(drm_crtc_vblank_off);
 
+/**
+ * drm_crtc_vblank_reset - reset vblank state to off on a CRTC
+ * @drm_crtc: CRTC in question
+ *
+ * Drivers can use this function to reset the vblank state to off at load time.
+ * Drivers should use this together with the drm_crtc_vblank_off() and
+ * drm_crtc_vblank_on() functions. The difference compared to
+ * drm_crtc_vblank_off() is that this function doesn't save the vblank counter
+ * and hence doesn't need to call any driver hooks.
+ */
+void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc)
+{
+       struct drm_device *dev = drm_crtc->dev;
+       int crtc = drm_crtc_index(drm_crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+       lockmgr(&dev->vbl_lock, LK_EXCLUSIVE);
+       /*
+        * Prevent subsequent drm_vblank_get() from enabling the vblank
+        * interrupt by bumping the refcount.
+        */
+       if (!vblank->inmodeset) {
+               atomic_inc(&vblank->refcount);
+               vblank->inmodeset = 1;
+       }
+       lockmgr(&dev->vbl_lock, LK_RELEASE);
+
+       WARN_ON(!list_empty(&dev->vblank_event_list));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_reset);
+
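
A usage illustration (a minimal sketch, not part of this patch): a KMS driver
whose hardware comes up with vblank interrupts disabled on every pipe would
reset the vblank bookkeeping once at load time, then enable it from its CRTC
enable hook:

	struct drm_crtc *crtc;
	int ret;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret)
		return ret;

	/* firmware left all pipes off; tell the vblank core so */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		drm_crtc_vblank_reset(crtc);

	/* ... and later, when a pipe is actually lit up ... */
	drm_crtc_vblank_on(crtc);
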
 /**
  * drm_vblank_on - enable vblank events on a CRTC
  * @dev: DRM device
index 4a9d193..b0af519 100644 (file)
@@ -277,7 +277,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
                hblank = drm_mode->hdisplay * hblank_percentage /
                         (100 * HV_FACTOR - hblank_percentage);
                hblank -= hblank % (2 * CVT_H_GRANULARITY);
-               /* 14. find the total pixes per line */
+               /* 14. find the total pixels per line */
                drm_mode->htotal = drm_mode->hdisplay + hblank;
                drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2;
                drm_mode->hsync_start = drm_mode->hsync_end -
@@ -899,6 +899,12 @@ EXPORT_SYMBOL(drm_mode_duplicate);
  */
 bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
 {
+       if (!mode1 && !mode2)
+               return true;
+
+       if (!mode1 || !mode2)
+               return false;
+
        /* do clock check convert to PICOS so fb modes get matched
         * the same */
        if (mode1->clock && mode2->clock) {
@@ -1087,7 +1093,7 @@ EXPORT_SYMBOL(drm_mode_sort);
 /**
  * drm_mode_connector_list_update - update the mode list for the connector
  * @connector: the connector to update
- * @merge_type_bits: whether to merge or overright type bits.
+ * @merge_type_bits: whether to merge or overwrite type bits
  *
  * This moves the modes from the @connector probed_modes list
  * to the actual mode list. It compares the probed mode against the current
@@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update);
  *     <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
  *
  * The intermediate drm_cmdline_mode structure is required to store additional
- * options from the command line modline like the force-enabel/disable flag.
+ * options from the command line modeline like the force-enable/disable flag.
  *
  * Returns:
  * True if a valid modeline has been parsed, false otherwise.
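
Illustrative strings accepted by this grammar (examples, not from the patch;
letter meanings as documented for the kernel's video= parameter: M selects
CVT-computed timings, R reduced blanking, i interlace, m margins, and e/D/d
force the connector on, on-digital or off):

	video=1024x768             1024x768 at the default bpp and refresh
	video=1920x1080M@60        1920x1080, CVT timings, 60 Hz
	video=1680x1050MR          CVT with reduced blanking
	video=1280x1024-24e        24 bpp, connector forced enabled
	video=800x600d             connector forced off
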
index d1e2341..0027cf4 100644 (file)
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/export.h>
 #include <drm/drmP.h>
-#include "drm_legacy.h"
 #include "drm_internal.h"
+#include "drm_legacy.h"
 
 /**********************************************************************/
 /** \name PCI memory */
index 074b77e..e4590e6 100644 (file)
@@ -344,20 +344,7 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
 };
 EXPORT_SYMBOL(drm_primary_helper_funcs);
 
-/**
- * drm_primary_helper_create_plane() - Create a generic primary plane
- * @dev: drm device
- * @formats: pixel formats supported, or NULL for a default safe list
- * @num_formats: size of @formats; ignored if @formats is NULL
- *
- * Allocates and initializes a primary plane that can be used with the primary
- * plane helpers.  Drivers that wish to use driver-specific plane structures or
- * provide custom handler functions may perform their own allocation and
- * initialization rather than calling this function.
- */
-struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
-                                                 const uint32_t *formats,
-                                                 int num_formats)
+static struct drm_plane *create_primary_plane(struct drm_device *dev)
 {
        struct drm_plane *primary;
        int ret;
@@ -368,15 +355,17 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
                return NULL;
        }
 
-       if (formats == NULL) {
-               formats = safe_modeset_formats;
-               num_formats = ARRAY_SIZE(safe_modeset_formats);
-       }
+       /*
+        * Remove the format_default field from drm_plane when dropping
+        * this helper.
+        */
+       primary->format_default = true;
 
        /* possible_crtc's will be filled in later by crtc_init */
        ret = drm_universal_plane_init(dev, primary, 0,
                                       &drm_primary_helper_funcs,
-                                      formats, num_formats,
+                                      safe_modeset_formats,
+                                      ARRAY_SIZE(safe_modeset_formats),
                                       DRM_PLANE_TYPE_PRIMARY);
        if (ret) {
                kfree(primary);
@@ -385,7 +374,6 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
 
        return primary;
 }
-EXPORT_SYMBOL(drm_primary_helper_create_plane);
 
 /**
  * drm_crtc_init - Legacy CRTC initialization function
@@ -404,7 +392,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
 {
        struct drm_plane *primary;
 
-       primary = drm_primary_helper_create_plane(dev, NULL, 0);
+       primary = create_primary_plane(dev);
        return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
 }
 EXPORT_SYMBOL(drm_crtc_init);
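
For callers nothing changes; a sketch of a legacy driver path (the foo_*
names are hypothetical) that now picks up the default primary plane
implicitly:

	struct foo_crtc *fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return -ENOMEM;

	/* drm_crtc_init() now creates and attaches the standard primary
	 * plane itself; drivers no longer allocate one explicitly. */
	ret = drm_crtc_init(dev, &fc->base, &foo_crtc_funcs);
	if (ret)
		kfree(fc);
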
@@ -413,9 +401,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                            struct drm_plane_state *plane_state,
                            struct drm_framebuffer *old_fb)
 {
-       struct drm_plane_helper_funcs *plane_funcs;
+       const struct drm_plane_helper_funcs *plane_funcs;
        struct drm_crtc *crtc[2];
-       struct drm_crtc_helper_funcs *crtc_funcs[2];
+       const struct drm_crtc_helper_funcs *crtc_funcs[2];
        int i, ret = 0;
 
        plane_funcs = plane->helper_private;
@@ -435,8 +423,10 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                        goto out;
        }
 
-       if (plane_funcs->prepare_fb && plane_state->fb) {
-               ret = plane_funcs->prepare_fb(plane, plane_state->fb);
+       if (plane_funcs->prepare_fb && plane_state->fb &&
+           plane_state->fb != old_fb) {
+               ret = plane_funcs->prepare_fb(plane, plane_state->fb,
+                                             plane_state);
                if (ret)
                        goto out;
        }
@@ -449,34 +439,47 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                        crtc_funcs[i]->atomic_begin(crtc[i]);
        }
 
-       plane_funcs->atomic_update(plane, plane_state);
+       /*
+        * Drivers may optionally implement the ->atomic_disable callback, so
+        * special-case that here.
+        */
+       if (drm_atomic_plane_disabling(plane, plane_state) &&
+           plane_funcs->atomic_disable)
+               plane_funcs->atomic_disable(plane, plane_state);
+       else
+               plane_funcs->atomic_update(plane, plane_state);
 
        for (i = 0; i < 2; i++) {
                if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
                        crtc_funcs[i]->atomic_flush(crtc[i]);
        }
 
+       /*
+        * If we only moved the plane and didn't change fb's, there's no need to
+        * wait for vblank.
+        */
+       if (plane->state->fb == old_fb)
+               goto out;
+
        for (i = 0; i < 2; i++) {
                if (!crtc[i])
                        continue;
 
-               if (plane != crtc[i]->cursor) {
-                       /*
-                        * There's no other way to figure out whether the
-                        * crtc is running.
-                        */
-                       ret = drm_crtc_vblank_get(crtc[i]);
-                       if (ret == 0) {
-                               drm_crtc_wait_one_vblank(crtc[i]);
-                               drm_crtc_vblank_put(crtc[i]);
-                       }
+               if (crtc[i]->cursor == plane)
+                       continue;
+
+               /* There's no other way to figure out whether the crtc is running. */
+               ret = drm_crtc_vblank_get(crtc[i]);
+               if (ret == 0) {
+                       drm_crtc_wait_one_vblank(crtc[i]);
+                       drm_crtc_vblank_put(crtc[i]);
                }
 
                ret = 0;
        }
 
        if (plane_funcs->cleanup_fb && old_fb)
-               plane_funcs->cleanup_fb(plane, old_fb);
+               plane_funcs->cleanup_fb(plane, old_fb, plane_state);
 out:
        if (plane_state) {
                if (plane->funcs->atomic_destroy_state)
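
Driver hooks gain the plane state as context; a minimal sketch of the
matching prototypes, assuming the Linux 4.1 helper signatures (foo_* names
are hypothetical):

	static int foo_prepare_fb(struct drm_plane *plane,
				  struct drm_framebuffer *fb,
				  const struct drm_plane_state *new_state)
	{
		/* pin backing storage so the scanout address stays valid */
		return foo_pin_framebuffer(fb);
	}

	static void foo_cleanup_fb(struct drm_plane *plane,
				   struct drm_framebuffer *fb,
				   const struct drm_plane_state *old_state)
	{
		foo_unpin_framebuffer(fb);
	}
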
index ff9d59c..71f10a3 100644 (file)
@@ -98,7 +98,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
 {
        struct drm_device *dev = connector->dev;
        struct drm_display_mode *mode;
-       struct drm_connector_helper_funcs *connector_funcs =
+       const struct drm_connector_helper_funcs *connector_funcs =
                connector->helper_private;
        int count = 0;
        int mode_flags = 0;
index d9774b4..3fc365e 100644 (file)
@@ -4,6 +4,7 @@ KMOD    = i915
 SRCS = i915_drv.c \
        i915_params.c \
        i915_suspend.c \
+       i915_sysfs.c \
        intel_pm.c \
        intel_runtime_pm.c
 
@@ -16,6 +17,7 @@ SRCS +=       i915_cmd_parser.c \
        i915_gem_execbuffer.c \
        i915_gem_gtt.c \
        i915_gem.c \
+       i915_gem_shrinker.c \
        i915_gem_stolen.c \
        i915_gem_tiling.c \
        i915_gem_userptr.c \
@@ -72,10 +74,11 @@ SRCS += \
        intel_sdvo.c \
        intel_tv.c
 
+# virtual gpu code
+SRCS += i915_vgpu.c
+
 # legacy horrors
-SRCS += \
-       i915_dma.c \
-       i915_ums.c
+SRCS += i915_dma.c
 
 SRCS   += acpi_if.h device_if.h bus_if.h pci_if.h iicbus_if.h iicbb_if.h \
          opt_acpi.h opt_drm.h opt_ktr.h
index 9cb1d0b..26cd3f2 100644 (file)
@@ -826,21 +826,25 @@ static bool valid_reg(const u32 *table, int count, u32 addr)
        return false;
 }
 
-static u32 *vmap_batch(struct drm_i915_gem_object *obj)
+static u32 *vmap_batch(struct drm_i915_gem_object *obj,
+                      unsigned start, unsigned len)
 {
        int i;
        void *addr = NULL;
+       int first_page = start >> PAGE_SHIFT;
+       int last_page = (len + start + 4095) >> PAGE_SHIFT;
+       int npages = last_page - first_page;
        struct vm_page **pages;
 
-       pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
+       pages = drm_malloc_ab(npages, sizeof(*pages));
        if (pages == NULL) {
                DRM_DEBUG_DRIVER("Failed to get space for pages\n");
                goto finish;
        }
 
        i = 0;
-       while (i < obj->base.size >> PAGE_SHIFT) {
-               pages[i] = obj->pages[i];
+       while (i < npages) {
+               pages[i] = obj->pages[first_page + i];
                i++;
        }
 
@@ -864,61 +868,61 @@ static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
                       u32 batch_start_offset,
                       u32 batch_len)
 {
-       int ret = 0;
        int needs_clflush = 0;
-       u32 *src_base, *dest_base = NULL;
-       u32 *src_addr, *dest_addr;
-       u32 offset = batch_start_offset / sizeof(*dest_addr);
-       u32 end = batch_start_offset + batch_len;
+       char *src_base, *src;
+       void *dst = NULL;
+       int ret;
 
-       if (end > dest_obj->base.size || end > src_obj->base.size)
+       if (batch_len > dest_obj->base.size ||
+           batch_len + batch_start_offset > src_obj->base.size)
                return ERR_PTR(-E2BIG);
 
        ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
        if (ret) {
-               DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+               DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n");
                return ERR_PTR(ret);
        }
 
-       src_base = vmap_batch(src_obj);
+       src_base = (char *)vmap_batch(src_obj, batch_start_offset, batch_len);
        if (!src_base) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
                ret = -ENOMEM;
                goto unpin_src;
        }
 
-       src_addr = src_base + offset;
-
-       if (needs_clflush)
-               drm_clflush_virt_range((char *)src_addr, batch_len);
+       ret = i915_gem_object_get_pages(dest_obj);
+       if (ret) {
+               DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n");
+               goto unmap_src;
+       }
+       i915_gem_object_pin_pages(dest_obj);
 
        ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
        if (ret) {
-               DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+               DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n");
                goto unmap_src;
        }
 
-       dest_base = vmap_batch(dest_obj);
-       if (!dest_base) {
+       dst = vmap_batch(dest_obj, 0, batch_len);
+       if (!dst) {
                DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+               i915_gem_object_unpin_pages(dest_obj);
                ret = -ENOMEM;
                goto unmap_src;
        }
 
-       dest_addr = dest_base + offset;
-
-       if (batch_start_offset != 0)
-               memset((u8 *)dest_base, 0, batch_start_offset);
+       src = src_base + offset_in_page(batch_start_offset);
+       if (needs_clflush)
+               drm_clflush_virt_range(src, batch_len);
 
-       memcpy(dest_addr, src_addr, batch_len);
-       memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+       memcpy(dst, src, batch_len);
 
 unmap_src:
        vunmap(src_base);
 unpin_src:
        i915_gem_object_unpin_pages(src_obj);
 
-       return ret ? ERR_PTR(ret) : dest_base;
+       return ret ? ERR_PTR(ret) : dst;
 }
 
 /**
@@ -1055,34 +1059,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
                    u32 batch_len,
                    bool is_master)
 {
-       int ret = 0;
        u32 *cmd, *batch_base, *batch_end;
        struct drm_i915_cmd_descriptor default_desc = { 0 };
        bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
-
-       ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
-       if (ret) {
-               DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
-               return -1;
-       }
+       int ret = 0;
 
        batch_base = copy_batch(shadow_batch_obj, batch_obj,
                                batch_start_offset, batch_len);
        if (IS_ERR(batch_base)) {
                DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
-               i915_gem_object_ggtt_unpin(shadow_batch_obj);
                return PTR_ERR(batch_base);
        }
 
-       cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-
        /*
         * We use the batch length as size because the shadow object is as
         * large or larger and copy_batch() will write MI_NOPs to the extra
         * space. Parsing should be faster in some cases this way.
         */
-       batch_end = cmd + (batch_len / sizeof(*batch_end));
+       batch_end = batch_base + (batch_len / sizeof(*batch_end));
 
+       cmd = batch_base;
        while (cmd < batch_end) {
                const struct drm_i915_cmd_descriptor *desc;
                u32 length;
@@ -1141,7 +1137,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
        }
 
        vunmap(batch_base);
-       i915_gem_object_ggtt_unpin(shadow_batch_obj);
+       i915_gem_object_unpin_pages(shadow_batch_obj);
 
        return ret;
 }
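
The indexing consequence (a sketch, not code from the patch): because
copy_batch() now copies from batch_start_offset in the source to offset 0 in
the shadow, the parser walks the shadow from its very first dword:

	u32 *cmd = batch_base;	/* was: batch_base + batch_start_offset / 4 */
	u32 *end = batch_base + batch_len / sizeof(u32);

	while (cmd < end)
		cmd += cmd_length(cmd);	/* cmd_length() is hypothetical */
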
index 632f530..60dcd18 100644 (file)
 
 #include <linux/async.h>
 #include <drm/drmP.h>
+#include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include <drm/drm_legacy.h>
 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_vgpu.h"
 #include "intel_ringbuffer.h"
 #include <linux/workqueue.h>
 
@@ -52,6 +53,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_CHIPSET_ID:
                value = dev->pdev->device;
                break;
+       case I915_PARAM_REVISION:
+               value = dev->pdev->revision;
+               break;
        case I915_PARAM_HAS_GEM:
                value = 1;
                break;
@@ -125,6 +129,16 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_COHERENT_PHYS_GTT:
                value = 1;
                break;
+       case I915_PARAM_SUBSLICE_TOTAL:
+               value = INTEL_INFO(dev)->subslice_total;
+               if (!value)
+                       return -ENODEV;
+               break;
+       case I915_PARAM_EU_TOTAL:
+               value = INTEL_INFO(dev)->eu_total;
+               if (!value)
+                       return -ENODEV;
+               break;
        default:
                DRM_DEBUG("Unknown parameter %d\n", param->param);
                return -EINVAL;
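
A userspace sketch (assumed libdrm usage, not part of the patch) querying the
new topology parameters through the existing GETPARAM ioctl, with fd an
already-open render or card node:

	#include <stdio.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	int eu_total = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_EU_TOTAL,
		.value = &eu_total,
	};

	/* fails with ENODEV when the kernel could not read the fuses */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("EUs: %d\n", eu_total);
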
@@ -598,16 +612,128 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
                }
        }
 
+       /* Initialize slice/subslice/EU info */
        if (IS_CHERRYVIEW(dev)) {
-               u32 fuse, mask_eu;
+               u32 fuse, eu_dis;
 
                fuse = I915_READ(CHV_FUSE_GT);
-               mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
-                                 CHV_FGT_EU_DIS_SS0_R1_MASK |
-                                 CHV_FGT_EU_DIS_SS1_R0_MASK |
-                                 CHV_FGT_EU_DIS_SS1_R1_MASK);
-               info->eu_total = 16 - hweight32(mask_eu);
+
+               info->slice_total = 1;
+
+               if (!(fuse & CHV_FGT_DISABLE_SS0)) {
+                       info->subslice_per_slice++;
+                       eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
+                                        CHV_FGT_EU_DIS_SS0_R1_MASK);
+                       info->eu_total += 8 - hweight32(eu_dis);
+               }
+
+               if (!(fuse & CHV_FGT_DISABLE_SS1)) {
+                       info->subslice_per_slice++;
+                       eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
+                                       CHV_FGT_EU_DIS_SS1_R1_MASK);
+                       info->eu_total += 8 - hweight32(eu_dis);
+               }
+
+               info->subslice_total = info->subslice_per_slice;
+               /*
+                * CHV is expected to always have a uniform distribution
+                * of EUs across subslices.
+                */
+               info->eu_per_subslice = info->subslice_total ?
+                                       info->eu_total / info->subslice_total :
+                                       0;
+               /*
+                * CHV supports subslice power gating on devices with more than
+                * one subslice, and supports EU power gating on devices with
+                * more than one EU pair per subslice.
+                */
+               info->has_slice_pg = 0;
+               info->has_subslice_pg = (info->subslice_total > 1);
+               info->has_eu_pg = (info->eu_per_subslice > 2);
+       } else if (IS_SKYLAKE(dev)) {
+               const int s_max = 3, ss_max = 4, eu_max = 8;
+               int s, ss;
+               u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
+
+               fuse2 = I915_READ(GEN8_FUSE2);
+               s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
+                          GEN8_F2_S_ENA_SHIFT;
+               ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
+                            GEN9_F2_SS_DIS_SHIFT;
+
+               eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
+               eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
+               eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);
+
+               info->slice_total = hweight32(s_enable);
+               /*
+                * The subslice disable field is global, i.e. it applies
+                * to each of the enabled slices.
+                */
+               info->subslice_per_slice = ss_max - hweight32(ss_disable);
+               info->subslice_total = info->slice_total *
+                                      info->subslice_per_slice;
+
+               /*
+                * Iterate through enabled slices and subslices to
+                * count the total number of enabled EUs.
+                */
+               for (s = 0; s < s_max; s++) {
+                       if (!(s_enable & (0x1 << s)))
+                               /* skip disabled slice */
+                               continue;
+
+                       for (ss = 0; ss < ss_max; ss++) {
+                               u32 n_disabled;
+
+                               if (ss_disable & (0x1 << ss))
+                                       /* skip disabled subslice */
+                                       continue;
+
+                               n_disabled = hweight8(eu_disable[s] >>
+                                                     (ss * eu_max));
+
+                               /*
+                                * Record which subslice(s) have 7 EUs; we can
+                                * tune the hash used to spread work among
+                                * subslices if they are unbalanced.
+                                */
+                               if (eu_max - n_disabled == 7)
+                                       info->subslice_7eu[s] |= 1 << ss;
+
+                               info->eu_total += eu_max - n_disabled;
+                       }
+               }
+
+               /*
+                * SKL is expected to always have a uniform distribution
+                * of EUs across subslices, with the exception that any
+                * one EU in any one subslice may be fused off for die
+                * recovery.
+                */
+               info->eu_per_subslice = info->subslice_total ?
+                                       DIV_ROUND_UP(info->eu_total,
+                                                    info->subslice_total) : 0;
+               /*
+                * SKL supports slice power gating on devices with more than
+                * one slice, and supports EU power gating on devices with
+                * more than one EU pair per subslice.
+                */
+               info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
+               info->has_subslice_pg = 0;
+               info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
        }
+       DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
+       DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
+       DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
+       DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
+       DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
+       DRM_DEBUG_DRIVER("has slice power gating: %s\n",
+                        info->has_slice_pg ? "y" : "n");
+       DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
+                        info->has_subslice_pg ? "y" : "n");
+       DRM_DEBUG_DRIVER("has EU power gating: %s\n",
+                        info->has_eu_pg ? "y" : "n");
 }
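
A worked example with hypothetical fuse values: on a Skylake part with
s_enable = 0b001, ss_disable = 0b1000 and eu_disable[0] = 0, the code above
computes slice_total = 1, subslice_per_slice = 4 - 1 = 3, subslice_total = 3,
eu_total = 3 * 8 = 24 and eu_per_subslice = 8; with a single slice and more
than one EU pair per subslice, has_slice_pg ends up 0 and has_eu_pg ends up 1.
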
 
 /**
@@ -632,13 +758,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        /* XXX: struct pci_dev */
        info = i915_get_device_id(dev->pdev->device);
 
-       /* Refuse to load on gen6+ without kms enabled. */
-       if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
-               DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
-               DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
-               return -ENODEV;
-       }
-
        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;
@@ -692,14 +811,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        else
                mmio_size = 2*1024*1024;
 
-#if 0
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }
-#else
+#ifdef __DragonFly__
        base = drm_get_resource_start(dev, mmio_bar);
        size = drm_get_resource_len(dev, mmio_bar);
 
@@ -716,20 +834,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto out_regs;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               /* WARNING: Apparently we must kick fbdev drivers before vgacon,
-                * otherwise the vga fbdev driver falls over. */
-               ret = i915_kick_out_firmware_fb(dev_priv);
-               if (ret) {
-                       DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
-                       goto out_gtt;
-               }
+       /* WARNING: Apparently we must kick fbdev drivers before vgacon,
+        * otherwise the vga fbdev driver falls over. */
+       ret = i915_kick_out_firmware_fb(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
+               goto out_gtt;
+       }
 
-               ret = i915_kick_out_vgacon(dev_priv);
-               if (ret) {
-                       DRM_ERROR("failed to remove conflicting VGA console\n");
-                       goto out_gtt;
-               }
+       ret = i915_kick_out_vgacon(dev_priv);
+       if (ret) {
+               DRM_ERROR("failed to remove conflicting VGA console\n");
+               goto out_gtt;
        }
 
 #if 0
@@ -837,17 +953,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        intel_power_domains_init(dev_priv);
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               ret = i915_load_modeset_init(dev);
-               if (ret < 0) {
-                       DRM_ERROR("failed to init modeset\n");
-                       goto out_power_well;
-               }
+       ret = i915_load_modeset_init(dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to init modeset\n");
+               goto out_power_well;
        }
 
-#if 0
+       /*
+        * Notify the host of a valid display surface after modesetting,
+        * when running inside a VM.
+        */
+       if (intel_vgpu_active(dev))
+               I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+
        i915_setup_sysfs(dev);
-#endif
 
        if (INTEL_INFO(dev)->num_pipes) {
                /* Must be done after probing outputs */
@@ -888,6 +1007,11 @@ out_gtt:
        i915_global_gtt_cleanup(dev);
 out_regs:
        intel_uncore_fini(dev);
+#if 0
+       pci_iounmap(dev->pdev, dev_priv->regs);
+#endif
+put_bridge:
+       pci_dev_put(dev_priv->bridge_dev);
 free_priv:
        kfree(dev_priv);
        return ret;
@@ -910,9 +1034,9 @@ int i915_driver_unload(struct drm_device *dev)
 
        intel_gpu_ips_teardown();
 
-#if 0
        i915_teardown_sysfs(dev);
 
+#if 0
        WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
        unregister_shrinker(&dev_priv->mm.shrinker);
 
@@ -924,54 +1048,53 @@ int i915_driver_unload(struct drm_device *dev)
        acpi_video_unregister();
 #endif
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               intel_fbdev_fini(dev);
+       intel_fbdev_fini(dev);
 
        drm_vblank_cleanup(dev);
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               intel_modeset_cleanup(dev);
-
-               /*
-                * free the memory space allocated for the child device
-                * config parsed from VBT
-                */
-               if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-                       kfree(dev_priv->vbt.child_dev);
-                       dev_priv->vbt.child_dev = NULL;
-                       dev_priv->vbt.child_dev_num = 0;
-               }
+       intel_modeset_cleanup(dev);
 
+       /*
+        * free the memory space allocated for the child device
+        * config parsed from VBT
+        */
+       if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
+               kfree(dev_priv->vbt.child_dev);
+               dev_priv->vbt.child_dev = NULL;
+               dev_priv->vbt.child_dev_num = 0;
        }
 
+#if 0
+       vga_switcheroo_unregister_client(dev->pdev);
+       vga_client_register(dev->pdev, NULL, NULL, NULL);
+#endif
+
        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 #if 0
        i915_destroy_error_state(dev);
+
+       if (dev->pdev->msi_enabled)
+               pci_disable_msi(dev->pdev);
 #endif
 
        intel_opregion_fini(dev);
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               /* Flush any outstanding unpin_work. */
-               flush_workqueue(dev_priv->wq);
+       /* Flush any outstanding unpin_work. */
+       flush_workqueue(dev_priv->wq);
 
-               mutex_lock(&dev->struct_mutex);
-               i915_gem_cleanup_ringbuffer(dev);
-               i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
-               i915_gem_context_fini(dev);
-               mutex_unlock(&dev->struct_mutex);
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_cleanup_ringbuffer(dev);
+       i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
+       i915_gem_context_fini(dev);
+       mutex_unlock(&dev->struct_mutex);
 #if 0
-               i915_gem_cleanup_stolen(dev);
+       i915_gem_cleanup_stolen(dev);
 #endif
-       }
 
        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
 
-       bus_generic_detach(dev->dev);
-       drm_legacy_rmmap(dev, dev_priv->mmio_map);
-
        destroy_workqueue(dev_priv->dp_wq);
        destroy_workqueue(dev_priv->wq);
        destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
@@ -983,6 +1106,9 @@ int i915_driver_unload(struct drm_device *dev)
 #if 0
        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);
+
+       if (dev_priv->slab)
+               kmem_cache_destroy(dev_priv->slab);
 #endif
 
        pci_dev_put(dev_priv->bridge_dev);
@@ -1029,8 +1155,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
-               intel_modeset_preclose(dev, file);
+       intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
@@ -1093,7 +1218,7 @@ const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
-       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+       DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
index 5833090..1e2721a 100644 (file)
@@ -342,7 +342,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = {
 };
 
 static const struct intel_device_info intel_cherryview_info = {
-       .is_preliminary = 1,
        .gen = 8, .num_pipes = 3,
        .need_gfx_hws = 1, .has_hotplug = 1,
        .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
@@ -365,6 +364,19 @@ static const struct intel_device_info intel_skylake_info = {
        IVB_CURSOR_OFFSETS,
 };
 
+static const struct intel_device_info intel_skylake_gt3_info = {
+       .is_preliminary = 1,
+       .is_skylake = 1,
+       .gen = 9, .num_pipes = 3,
+       .need_gfx_hws = 1, .has_hotplug = 1,
+       .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+       .has_llc = 1,
+       .has_ddi = 1,
+       .has_fbc = 1,
+       GEN_DEFAULT_PIPEOFFSETS,
+       IVB_CURSOR_OFFSETS,
+};
+
 /*
  * Make sure any device matches here are from most specific to most
  * general.  For example, since the Quanta match is based on the subsystem
@@ -401,7 +413,9 @@ static const struct intel_device_info intel_skylake_info = {
        INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
        INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
        INTEL_CHV_IDS(&intel_cherryview_info),  \
-       INTEL_SKL_IDS(&intel_skylake_info)
+       INTEL_SKL_GT1_IDS(&intel_skylake_info), \
+       INTEL_SKL_GT2_IDS(&intel_skylake_info), \
+       INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info)      \
 
 static const struct pci_device_id pciidlist[] = {              /* aka */
        INTEL_PCI_IDS,
@@ -556,6 +570,7 @@ static int i915_drm_suspend(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc;
        pci_power_t opregion_target_state;
+       int error;
 
        /* ignore lid events during suspend */
        mutex_lock(&dev_priv->modeset_restore_lock);
@@ -572,39 +587,34 @@ static int i915_drm_suspend(struct drm_device *dev)
        pci_save_state(dev->pdev);
 #endif
 
-       /* If KMS is active, we do the leavevt stuff here */
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               int error;
-
-               error = i915_gem_suspend(dev);
-               if (error) {
-                       dev_err(dev->pdev->dev,
-                               "GEM idle failed, resume might fail\n");
-                       return error;
-               }
+       error = i915_gem_suspend(dev);
+       if (error) {
+               dev_err(dev->pdev->dev,
+                       "GEM idle failed, resume might fail\n");
+               return error;
+       }
 
-               intel_suspend_gt_powersave(dev);
+       intel_suspend_gt_powersave(dev);
 
-               /*
-                * Disable CRTCs directly since we want to preserve sw state
-                * for _thaw. Also, power gate the CRTC power wells.
-                */
-               drm_modeset_lock_all(dev);
-               for_each_crtc(dev, crtc)
-                       intel_crtc_control(crtc, false);
-               drm_modeset_unlock_all(dev);
+       /*
+        * Disable CRTCs directly since we want to preserve sw state
+        * for _thaw. Also, power gate the CRTC power wells.
+        */
+       drm_modeset_lock_all(dev);
+       for_each_crtc(dev, crtc)
+               intel_crtc_control(crtc, false);
+       drm_modeset_unlock_all(dev);
 
 #if 0
-               intel_dp_mst_suspend(dev);
+       intel_dp_mst_suspend(dev);
 #endif
 
-               intel_runtime_pm_disable_interrupts(dev_priv);
-               intel_hpd_cancel_work(dev_priv);
+       intel_runtime_pm_disable_interrupts(dev_priv);
+       intel_hpd_cancel_work(dev_priv);
 
-               intel_suspend_encoders(dev_priv);
+       intel_suspend_encoders(dev_priv);
 
-               intel_suspend_hw(dev);
-       }
+       intel_suspend_hw(dev);
 
        i915_gem_suspend_gtt_mappings(dev);
 
@@ -617,13 +627,11 @@ static int i915_drm_suspend(struct drm_device *dev)
 #endif
        intel_opregion_notify_adapter(dev, opregion_target_state);
 
-#if 0
        intel_uncore_forcewake_reset(dev, false);
-#endif
        intel_opregion_fini(dev);
 
 #if 0
-       intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
+       intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
 #endif
 
        dev_priv->suspend_count++;
@@ -695,53 +703,55 @@ static int i915_drm_resume(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               mutex_lock(&dev->struct_mutex);
-               i915_gem_restore_gtt_mappings(dev);
-               mutex_unlock(&dev->struct_mutex);
-       }
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_restore_gtt_mappings(dev);
+       mutex_unlock(&dev->struct_mutex);
 
        i915_restore_state(dev);
        intel_opregion_setup(dev);
 
-       /* KMS EnterVT equivalent */
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               intel_init_pch_refclk(dev);
-               drm_mode_config_reset(dev);
+       intel_init_pch_refclk(dev);
+       drm_mode_config_reset(dev);
 
-               mutex_lock(&dev->struct_mutex);
-               if (i915_gem_init_hw(dev)) {
-                       DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-                       atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
-               }
-               mutex_unlock(&dev->struct_mutex);
+       /*
+        * Interrupts have to be enabled before any batches are run. If not the
+        * GPU will hang. i915_gem_init_hw() will initiate batches to
+        * update/restore the context.
+        *
+        * Modeset enabling in intel_modeset_init_hw() also needs working
+        * interrupts.
+        */
+       intel_runtime_pm_enable_interrupts(dev_priv);
 
-               /* We need working interrupts for modeset enabling ... */
-               intel_runtime_pm_enable_interrupts(dev_priv);
+       mutex_lock(&dev->struct_mutex);
+       if (i915_gem_init_hw(dev)) {
+               DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+               atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+       }
+       mutex_unlock(&dev->struct_mutex);
 
-               intel_modeset_init_hw(dev);
+       intel_modeset_init_hw(dev);
 
-               lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
-               if (dev_priv->display.hpd_irq_setup)
-                       dev_priv->display.hpd_irq_setup(dev);
-               lockmgr(&dev_priv->irq_lock, LK_RELEASE);
+       lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
+       if (dev_priv->display.hpd_irq_setup)
+               dev_priv->display.hpd_irq_setup(dev);
+       lockmgr(&dev_priv->irq_lock, LK_RELEASE);
 
-               drm_modeset_lock_all(dev);
-               intel_modeset_setup_hw_state(dev, true);
-               drm_modeset_unlock_all(dev);
+       drm_modeset_lock_all(dev);
+       intel_modeset_setup_hw_state(dev, true);
+       drm_modeset_unlock_all(dev);
 
-               intel_dp_mst_resume(dev);
+       intel_dp_mst_resume(dev);
 
-               /*
-                * ... but also need to make sure that hotplug processing
-                * doesn't cause havoc. Like in the driver load code we don't
-                * bother with the tiny race here where we might loose hotplug
-                * notifications.
-                * */
-               intel_hpd_init(dev_priv);
-               /* Config may have changed between suspend and resume */
-               drm_helper_hpd_irq_event(dev);
-       }
+       /*
+        * ... but also need to make sure that hotplug processing
+        * doesn't cause havoc. Like in the driver load code we don't
+        * bother with the tiny race here where we might lose hotplug
+        * notifications.
+        */
+       intel_hpd_init(dev_priv);
+       /* Config may have changed between suspend and resume */
+       drm_helper_hpd_irq_event(dev);
 
        intel_opregion_init(dev);
 
@@ -903,38 +913,29 @@ int i915_reset(struct drm_device *dev)
         * was running at the time of the reset (i.e. we weren't VT
         * switched away).
         */
-       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-               /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
-               dev_priv->gpu_error.reload_in_reset = true;
 
-               ret = i915_gem_init_hw(dev);
+       /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
+       dev_priv->gpu_error.reload_in_reset = true;
 
-               dev_priv->gpu_error.reload_in_reset = false;
+       ret = i915_gem_init_hw(dev);
 
-               mutex_unlock(&dev->struct_mutex);
-               if (ret) {
-                       DRM_ERROR("Failed hw init on reset %d\n", ret);
-                       return ret;
-               }
-
-               /*
-                * FIXME: This races pretty badly against concurrent holders of
-                * ring interrupts. This is possible since we've started to drop
-                * dev->struct_mutex in select places when waiting for the gpu.
-                */
+       dev_priv->gpu_error.reload_in_reset = false;
 
-               /*
-                * rps/rc6 re-init is necessary to restore state lost after the
-                * reset and the re-install of gt irqs. Skip for ironlake per
-                * previous concerns that it doesn't respond well to some forms
-                * of re-init after reset.
-                */
-               if (INTEL_INFO(dev)->gen > 5)
-                       intel_enable_gt_powersave(dev);
-       } else {
-               mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret) {
+               DRM_ERROR("Failed hw init on reset %d\n", ret);
+               return ret;
        }
 
+       /*
+        * rps/rc6 re-init is necessary to restore state lost after the
+        * reset and the re-install of gt irqs. Skip for ironlake per
+        * previous concerns that it doesn't respond well to some forms
+        * of re-init after reset.
+        */
+       if (INTEL_INFO(dev)->gen > 5)
+               intel_enable_gt_powersave(dev);
+
        return 0;
 }
 
@@ -1086,7 +1087,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
                s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
 
        s->media_max_req_count  = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
-       s->gfx_max_req_count    = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+       s->gfx_max_req_count    = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
 
        s->render_hwsp          = I915_READ(RENDER_HWS_PGA_GEN7);
        s->ecochk               = I915_READ(GAM_ECOCHK);
@@ -1168,7 +1169,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
                I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
 
        I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
-       I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
+       I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
 
        I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
        I915_WRITE(GAM_ECOCHK,          s->ecochk);
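
Both hunks fix the same copy-and-paste slip; after the change the S0ix save
and restore paths are symmetric per register (sketch of the intended pairing):

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count   = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
	/* ... */
	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
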
@@ -1717,11 +1718,9 @@ static int __init i915_init(void)
 
        if (!(driver.driver_features & DRIVER_MODESET)) {
                driver.get_vblank_timestamp = NULL;
-#ifndef CONFIG_DRM_I915_UMS
                /* Silently fail loading to not upset userspace. */
                DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
                return 0;
-#endif
        }
 
        /*
@@ -1739,6 +1738,16 @@ static int __init i915_init(void)
 #endif
 }
 
+#if 0
+static void __exit i915_exit(void)
+{
+       if (!(driver.driver_features & DRIVER_MODESET))
+               return; /* Never loaded a driver. */
+
+       drm_pci_exit(&driver, &i915_pci_driver);
+}
+#endif
+
 DRIVER_MODULE_ORDERED(i915, vgapci, i915_driver, drm_devclass, NULL, NULL, SI_ORDER_ANY);
 MODULE_DEPEND(i915, drm, 1, 1, 1);
 MODULE_DEPEND(i915, iicbus, 1, 1, 1);
index e65243c..65009e8 100644 (file)
@@ -31,6 +31,7 @@
 #define _I915_DRV_H_
 
 #include <uapi_drm/i915_drm.h>
+#include <uapi_drm/drm_fourcc.h>
 
 #include "i915_reg.h"
 #include "intel_bios.h"
@@ -48,7 +49,6 @@
 #include <linux/kref.h>
 #include <linux/kconfig.h>
 #include <linux/pm_qos.h>
-#include <linux/seq_file.h>
 #include <linux/delay.h>
 
 #define CONFIG_DRM_I915_FBDEV                  1
@@ -62,7 +62,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20150130"
+#define DRIVER_DATE            "20150327"
 
 #undef WARN_ON
 /* Many gcc versions seem to not see through this and fall over :( */
@@ -76,6 +76,9 @@
 #define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
 #endif
 
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")
+
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
                             (long) (x), __func__);
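
A usage sketch (the switch body is hypothetical but mirrors how i915 uses
these macros): MISSING_CASE() flags a switch that should have been
exhaustive, while the redefined WARN_ON_ONCE() keeps one-shot semantics and
prints the stringified condition:

	switch (port) {
	case PORT_A:
		return DDI_BUF_CTL(PORT_A);
	default:
		MISSING_CASE(port);	/* logs the value and function name */
		return DDI_BUF_CTL(PORT_A);
	}

	WARN_ON_ONCE(pipe >= INTEL_INFO(dev)->num_pipes);
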
 
@@ -229,9 +232,14 @@ enum hpd_pin {
 
 #define for_each_pipe(__dev_priv, __p) \
        for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_plane(pipe, p) \
-       for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++)
-#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++)
+#define for_each_plane(__dev_priv, __pipe, __p)                                \
+       for ((__p) = 0;                                                 \
+            (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+            (__p)++)
+#define for_each_sprite(__dev_priv, __p, __s)                          \
+       for ((__s) = 0;                                                 \
+            (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];        \
+            (__s)++)
 
 #define for_each_crtc(dev, crtc) \
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
@@ -244,6 +252,12 @@ enum hpd_pin {
                            &(dev)->mode_config.encoder_list,   \
                            base.head)
 
+#define for_each_intel_connector(dev, intel_connector)         \
+       list_for_each_entry(intel_connector,                    \
+                           &dev->mode_config.connector_list,   \
+                           base.head)
+
+
 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
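
Call sites change accordingly; a short sketch of the new forms, assuming
dev_priv is in scope (pipe_name() is the existing i915 helper):

	enum pipe pipe;
	int plane, sprite;

	for_each_pipe(dev_priv, pipe) {
		for_each_plane(dev_priv, pipe, plane)
			DRM_DEBUG("pipe %c plane %d\n", pipe_name(pipe), plane);
		for_each_sprite(dev_priv, pipe, sprite)
			DRM_DEBUG("pipe %c sprite %d\n", pipe_name(pipe), sprite);
	}
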
@@ -257,6 +271,7 @@ enum hpd_pin {
                if ((1 << (domain)) & (mask))
 
 struct drm_i915_private;
+struct i915_mm_struct;
 struct i915_mmu_object;
 
 enum intel_dpll_id {
@@ -378,10 +393,6 @@ struct intel_opregion {
 struct intel_overlay;
 struct intel_overlay_error_state;
 
-struct drm_i915_master_private {
-       struct drm_local_map *sarea;
-       struct _drm_i915_sarea *sarea_priv;
-};
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 32
 /* 32 fences + sign bit for FENCE_REG_NONE */
@@ -422,6 +433,8 @@ struct drm_i915_error_state {
        u32 forcewake;
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
+       u32 fault_data0; /* gen8, gen9 */
+       u32 fault_data1; /* gen8, gen9 */
        u32 done_reg;
        u32 gac_eco;
        u32 gam_ecochk;
@@ -539,7 +552,7 @@ struct drm_i915_display_funcs {
         * Returns true on success, false on failure.
         */
        bool (*find_dpll)(const struct intel_limit *limit,
-                         struct intel_crtc *crtc,
+                         struct intel_crtc_state *crtc_state,
                          int target, int refclk,
                          struct dpll *match_clock,
                          struct dpll *best_clock);
@@ -548,7 +561,7 @@ struct drm_i915_display_funcs {
                                 struct drm_crtc *crtc,
                                 uint32_t sprite_width, uint32_t sprite_height,
                                 int pixel_size, bool enable, bool scaled);
-       void (*modeset_global_resources)(struct drm_device *dev);
+       void (*modeset_global_resources)(struct drm_atomic_state *state);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
@@ -702,7 +715,18 @@ struct intel_device_info {
        int trans_offsets[I915_MAX_TRANSCODERS];
        int palette_offsets[I915_MAX_PIPES];
        int cursor_offsets[I915_MAX_PIPES];
-       unsigned int eu_total;
+
+       /* Slice/subslice/EU info */
+       u8 slice_total;
+       u8 subslice_total;
+       u8 subslice_per_slice;
+       u8 eu_total;
+       u8 eu_per_subslice;
+       /* For each slice, which subslice(s) have 7 EUs (bitfield)? */
+       u8 subslice_7eu[3];
+       u8 has_slice_pg:1;
+       u8 has_subslice_pg:1;
+       u8 has_eu_pg:1;
 };
 
 #undef DEFINE_FLAG
@@ -781,11 +805,20 @@ struct intel_context {
        struct list_head link;
 };
 
+enum fb_op_origin {
+       ORIGIN_GTT,
+       ORIGIN_CPU,
+       ORIGIN_CS,
+       ORIGIN_FLIP,
+};
+
 struct i915_fbc {
-       unsigned long size;
+       unsigned long uncompressed_size;
        unsigned threshold;
        unsigned int fb_id;
-       enum plane plane;
+       unsigned int possible_framebuffer_bits;
+       unsigned int busy_bits;
+       struct intel_crtc *crtc;
        int y;
 
        struct drm_mm_node compressed_fb;
@@ -797,14 +830,6 @@ struct i915_fbc {
         * possible. */
        bool enabled;
 
-       /* On gen8 some rings cannont perform fbc clean operation so for now
-        * we are doing this on SW with mmio.
-        * This variable works in the opposite information direction
-        * of ring->fbc_dirty telling software on frontbuffer tracking
-        * to perform the cache clean on sw side.
-        */
-       bool need_sw_cache_clean;
-
        struct intel_fbc_work {
                struct delayed_work work;
                struct drm_crtc *crtc;
@@ -842,7 +867,7 @@ enum drrs_support_type {
        STATIC_DRRS_SUPPORT = 1,
        SEAMLESS_DRRS_SUPPORT = 2
 };
+
 struct intel_dp;
 struct i915_drrs {
        struct lock mutex;
@@ -889,6 +914,7 @@ struct intel_fbdev;
 struct intel_fbc_work;
 
 struct intel_gmbus {
+       struct i2c_adapter adapter;
        u32 force_bit;
        u32 reg0;
        u32 gpio_reg;
@@ -905,150 +931,21 @@ struct intel_iic_softc {
 };
 
 struct i915_suspend_saved_registers {
-       u8 saveLBB;
-       u32 saveDSPACNTR;
-       u32 saveDSPBCNTR;
        u32 saveDSPARB;
-       u32 savePIPEACONF;
-       u32 savePIPEBCONF;
-       u32 savePIPEASRC;
-       u32 savePIPEBSRC;
-       u32 saveFPA0;
-       u32 saveFPA1;
-       u32 saveDPLL_A;
-       u32 saveDPLL_A_MD;
-       u32 saveHTOTAL_A;
-       u32 saveHBLANK_A;
-       u32 saveHSYNC_A;
-       u32 saveVTOTAL_A;
-       u32 saveVBLANK_A;
-       u32 saveVSYNC_A;
-       u32 saveBCLRPAT_A;
-       u32 saveTRANSACONF;
-       u32 saveTRANS_HTOTAL_A;
-       u32 saveTRANS_HBLANK_A;
-       u32 saveTRANS_HSYNC_A;
-       u32 saveTRANS_VTOTAL_A;
-       u32 saveTRANS_VBLANK_A;
-       u32 saveTRANS_VSYNC_A;
-       u32 savePIPEASTAT;
-       u32 saveDSPASTRIDE;
-       u32 saveDSPASIZE;
-       u32 saveDSPAPOS;
-       u32 saveDSPAADDR;
-       u32 saveDSPASURF;
-       u32 saveDSPATILEOFF;
-       u32 savePFIT_PGM_RATIOS;
-       u32 saveBLC_HIST_CTL;
-       u32 saveBLC_PWM_CTL;
-       u32 saveBLC_PWM_CTL2;
-       u32 saveBLC_CPU_PWM_CTL;
-       u32 saveBLC_CPU_PWM_CTL2;
-       u32 saveFPB0;
-       u32 saveFPB1;
-       u32 saveDPLL_B;
-       u32 saveDPLL_B_MD;
-       u32 saveHTOTAL_B;
-       u32 saveHBLANK_B;
-       u32 saveHSYNC_B;
-       u32 saveVTOTAL_B;
-       u32 saveVBLANK_B;
-       u32 saveVSYNC_B;
-       u32 saveBCLRPAT_B;
-       u32 saveTRANSBCONF;
-       u32 saveTRANS_HTOTAL_B;
-       u32 saveTRANS_HBLANK_B;
-       u32 saveTRANS_HSYNC_B;
-       u32 saveTRANS_VTOTAL_B;
-       u32 saveTRANS_VBLANK_B;
-       u32 saveTRANS_VSYNC_B;
-       u32 savePIPEBSTAT;
-       u32 saveDSPBSTRIDE;
-       u32 saveDSPBSIZE;
-       u32 saveDSPBPOS;
-       u32 saveDSPBADDR;
-       u32 saveDSPBSURF;
-       u32 saveDSPBTILEOFF;
-       u32 saveVGA0;
-       u32 saveVGA1;
-       u32 saveVGA_PD;
-       u32 saveVGACNTRL;
-       u32 saveADPA;
        u32 saveLVDS;
        u32 savePP_ON_DELAYS;
        u32 savePP_OFF_DELAYS;
-       u32 saveDVOA;
-       u32 saveDVOB;
-       u32 saveDVOC;
        u32 savePP_ON;
        u32 savePP_OFF;
        u32 savePP_CONTROL;
        u32 savePP_DIVISOR;
-       u32 savePFIT_CONTROL;
-       u32 save_palette_a[256];
-       u32 save_palette_b[256];
        u32 saveFBC_CONTROL;
-       u32 saveIER;
-       u32 saveIIR;
-       u32 saveIMR;
-       u32 saveDEIER;
-       u32 saveDEIMR;
-       u32 saveGTIER;
-       u32 saveGTIMR;
-       u32 saveFDI_RXA_IMR;
-       u32 saveFDI_RXB_IMR;
        u32 saveCACHE_MODE_0;
        u32 saveMI_ARB_STATE;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF2[3];
-       u8 saveMSR;
-       u8 saveSR[8];
-       u8 saveGR[25];
-       u8 saveAR_INDEX;
-       u8 saveAR[21];
-       u8 saveDACMASK;
-       u8 saveCR[37];
        uint64_t saveFENCE[I915_MAX_NUM_FENCES];
-       u32 saveCURACNTR;
-       u32 saveCURAPOS;
-       u32 saveCURABASE;
-       u32 saveCURBCNTR;
-       u32 saveCURBPOS;
-       u32 saveCURBBASE;
-       u32 saveCURSIZE;
-       u32 saveDP_B;
-       u32 saveDP_C;
-       u32 saveDP_D;
-       u32 savePIPEA_GMCH_DATA_M;
-       u32 savePIPEB_GMCH_DATA_M;
-       u32 savePIPEA_GMCH_DATA_N;
-       u32 savePIPEB_GMCH_DATA_N;
-       u32 savePIPEA_DP_LINK_M;
-       u32 savePIPEB_DP_LINK_M;
-       u32 savePIPEA_DP_LINK_N;
-       u32 savePIPEB_DP_LINK_N;
-       u32 saveFDI_RXA_CTL;
-       u32 saveFDI_TXA_CTL;
-       u32 saveFDI_RXB_CTL;
-       u32 saveFDI_TXB_CTL;
-       u32 savePFA_CTL_1;
-       u32 savePFB_CTL_1;
-       u32 savePFA_WIN_SZ;
-       u32 savePFB_WIN_SZ;
-       u32 savePFA_WIN_POS;
-       u32 savePFB_WIN_POS;
-       u32 savePCH_DREF_CONTROL;
-       u32 saveDISP_ARB_CTL;
-       u32 savePIPEA_DATA_M1;
-       u32 savePIPEA_DATA_N1;
-       u32 savePIPEA_LINK_M1;
-       u32 savePIPEA_LINK_N1;
-       u32 savePIPEB_DATA_M1;
-       u32 savePIPEB_DATA_N1;
-       u32 savePIPEB_LINK_M1;
-       u32 savePIPEB_LINK_N1;
-       u32 saveMCHBAR_RENDER_STANDBY;
        u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
 };
@@ -1145,13 +1042,12 @@ struct intel_gen6_power_mgmt {
        u8 max_freq_softlimit;  /* Max frequency permitted by the driver */
        u8 max_freq;            /* Maximum frequency, RP0 if not overclocking */
        u8 min_freq;            /* AKA RPn. Minimum frequency */
+       u8 idle_freq;           /* Frequency to request when we are idle */
        u8 efficient_freq;      /* AKA RPe. Pre-determined balanced frequency */
        u8 rp1_freq;            /* "less than" RP0 power/freqency */
        u8 rp0_freq;            /* Non-overclocked max frequency. */
        u32 cz_freq;
 
-       u32 ei_interrupt_count;
-
        int last_adj;
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
@@ -1188,9 +1084,6 @@ struct intel_ilk_power_mgmt {
 
        int c_m;
        int r_t;
-
-       struct drm_i915_gem_object *pwrctx;
-       struct drm_i915_gem_object *renderctx;
 };
 
 struct drm_i915_private;
@@ -1288,7 +1181,10 @@ struct i915_gem_mm {
        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;
 
-       eventhandler_tag inactive_shrinker;
+       struct notifier_block oom_notifier;
+#if 0
+       struct shrinker shrinker;
+#endif
        bool shrinker_no_lock_stealing;
 
        /** LRU list of objects with fence regs on them. */
@@ -1471,6 +1367,7 @@ struct intel_vbt_data {
        bool edp_initialized;
        bool edp_support;
        int edp_bpp;
+       bool edp_low_vswing;
        struct edp_power_seq edp_pps;
 
        struct {
@@ -1531,6 +1428,25 @@ struct ilk_wm_values {
        enum intel_ddb_partitioning partitioning;
 };
 
+struct vlv_wm_values {
+       struct {
+               uint16_t primary;
+               uint16_t sprite[2];
+               uint8_t cursor;
+       } pipe[3];
+
+       struct {
+               uint16_t plane;
+               uint8_t cursor;
+       } sr;
+
+       struct {
+               uint8_t cursor;
+               uint8_t sprite[2];
+               uint8_t primary;
+       } ddl[3];
+};
+
 struct skl_ddb_entry {
        uint16_t start, end;    /* in number of blocks, 'end' is exclusive */
 };
@@ -1657,6 +1573,10 @@ struct i915_workarounds {
        u32 count;
 };
 
+struct i915_virtual_gpu {
+       bool active;
+};
+
 struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
@@ -1675,6 +1595,8 @@ struct drm_i915_private {
 
        struct intel_uncore uncore;
 
+       struct i915_virtual_gpu vgpu;
+
        device_t *gmbus;
 
 
@@ -1698,7 +1620,7 @@ struct drm_i915_private {
        struct drm_i915_gem_object *semaphore_obj;
        uint32_t last_seqno, next_seqno;
 
-       drm_dma_handle_t *status_page_dmah;
+       struct drm_dma_handle *status_page_dmah;
        struct resource *mch_res;
        int mch_res_rid;
 
@@ -1791,9 +1713,8 @@ struct drm_i915_private {
        struct i915_gtt gtt; /* VM representing the global address space */
 
        struct i915_gem_mm mm;
-#if defined(CONFIG_MMU_NOTIFIER)
-       DECLARE_HASHTABLE(mmu_notifiers, 7);
-#endif
+       DECLARE_HASHTABLE(mm_structs, 7);
+       struct lock mm_lock;
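
The mmu-notifier hashtable gives way to a table of i915_mm_struct entries keyed by mm pointer and guarded by mm_lock; a hedged lookup sketch in the style of the userptr code (the i915_mm_struct layout is an assumption):

	/* Hedged sketch: find the tracking entry for a given mm_struct.
	 * Assumes i915_mm_struct carries 'struct mm_struct *mm' and a
	 * 'struct hlist_node node' linking it into dev_priv->mm_structs. */
	static struct i915_mm_struct *
	__i915_mm_struct_find(struct drm_i915_private *dev_priv,
			      struct mm_struct *real)
	{
		struct i915_mm_struct *mm;

		/* The mm pointer itself is the hash key. */
		hash_for_each_possible(dev_priv->mm_structs, mm, node,
				       (unsigned long)real)
			if (mm->mm == real)
				return mm;

		return NULL;
	}
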
 
        /* Kernel Modesetting */
 
@@ -1896,6 +1817,7 @@ struct drm_i915_private {
                union {
                        struct ilk_wm_values hw;
                        struct skl_wm_values skl_hw;
+                       struct vlv_wm_values vlv;
                };
        } wm;
 
@@ -2088,7 +2010,7 @@ struct drm_i915_gem_object {
 
        unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
-       vm_page_t *pages;
+       struct vm_page **pages;
        int pages_pin_count;
 
        /* prime dma-buf support */
@@ -2120,8 +2042,8 @@ struct drm_i915_gem_object {
                        unsigned workers :4;
 #define I915_GEM_USERPTR_MAX_WORKERS 15
 
-                       struct mm_struct *mm;
-                       struct i915_mmu_object *mn;
+                       struct i915_mm_struct *mm;
+                       struct i915_mmu_object *mmu_object;
                        struct work_struct *work;
                } userptr;
        };
@@ -2169,7 +2091,7 @@ struct drm_i915_gem_request {
        u32 tail;
 
        /**
-        * Context related to this request
+        * Context and ring buffer related to this request
         * Contexts are refcounted, so when this request is associated with a
         * context, we must increment the context's refcount, to guarantee that
         * it persists while any request is linked to it. Requests themselves
@@ -2179,6 +2101,7 @@ struct drm_i915_gem_request {
         * context.
         */
        struct intel_context *ctx;
+       struct intel_ringbuffer *ringbuf;
 
        /** Batch buffer related to this request if any */
        struct drm_i915_gem_object *batch_obj;
@@ -2193,6 +2116,9 @@ struct drm_i915_gem_request {
        /** file_priv list entry for this request */
        struct list_head client_list;
 
+       /** identifier of the process that submitted this request */
+       pid_t pid;
+
        uint32_t uniq;
 
        /**
@@ -2239,6 +2165,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
 static inline void
 i915_gem_request_unreference(struct drm_i915_gem_request *req)
 {
+       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
        kref_put(&req->ref, i915_gem_request_free);
 }
 
@@ -2374,9 +2301,9 @@ struct drm_i915_cmd_table {
                __p = to_i915((const struct drm_device *)p); \
        __p; \
 })
-
 #define INTEL_INFO(p)  (&__I915__(p)->info)
 #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
+#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
 
 #define IS_I830(dev)           (INTEL_DEVID(dev) == 0x3577)
 #define IS_845G(dev)           (INTEL_DEVID(dev) == 0x2562)
@@ -2399,9 +2326,6 @@ struct drm_i915_cmd_table {
 #define IS_IVB_GT1(dev)                (INTEL_DEVID(dev) == 0x0156 || \
                                 INTEL_DEVID(dev) == 0x0152 || \
                                 INTEL_DEVID(dev) == 0x015a)
-#define IS_SNB_GT1(dev)                (INTEL_DEVID(dev) == 0x0102 || \
-                                INTEL_DEVID(dev) == 0x0106 || \
-                                INTEL_DEVID(dev) == 0x010A)
 #define IS_VALLEYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview)
 #define IS_CHERRYVIEW(dev)     (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
 #define IS_HASWELL(dev)        (INTEL_INFO(dev)->is_haswell)
@@ -2425,6 +2349,12 @@ struct drm_i915_cmd_table {
                                 INTEL_DEVID(dev) == 0x0A1E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
+#define SKL_REVID_A0           (0x0)
+#define SKL_REVID_B0           (0x1)
+#define SKL_REVID_C0           (0x2)
+#define SKL_REVID_D0           (0x3)
+#define SKL_REVID_E0           (0x4)
+
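
These stepping values pair with the new INTEL_REVID() macro above to gate stepping-specific workarounds; a hedged sketch (IS_SKYLAKE is assumed from the device-info table, and the workaround body is a placeholder):

	/* Hedged sketch: apply a fixup only on early Skylake steppings. */
	static void skl_apply_early_wa(struct drm_device *dev)
	{
		if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
			/* WaPlaceholder:skl - limited to A0/B0 steppings */
		}
	}
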
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
@@ -2524,6 +2454,7 @@ struct drm_i915_cmd_table {
 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
 
 #define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
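
On gen9 the RPS unit shrinks from 50 MHz to 50/3 MHz steps, so conversions divide by GEN9_FREQ_SCALER; a hedged sketch of the implied conversion (the helper name is hypothetical):

	/* Hedged sketch: RPS register value -> MHz. */
	static int gpu_freq_mhz(struct drm_i915_private *dev_priv, int val)
	{
		if (IS_GEN9(dev_priv->dev))
			return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
						 GEN9_FREQ_SCALER);
		return val * GT_FREQUENCY_MULTIPLIER;
	}
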
 
 #include "i915_trace.h"
 
@@ -2532,14 +2463,11 @@ extern int i915_max_ioctl;
 
 extern int i915_suspend_legacy(device_t kdev);
 extern int i915_resume_legacy(struct drm_device *dev);
-extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
-extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
 
 /* i915_params.c */
 struct i915_params {
        int modeset;
        int panel_ignore_lid;
-       unsigned int powersave;
        int semaphores;
        unsigned int lvds_downclock;
        int lvds_channel_mode;
@@ -2559,11 +2487,12 @@ struct i915_params {
        bool enable_hangcheck;
        bool fastboot;
        bool prefault_disable;
-       int reset;
+       bool load_detect_test;
+       int  reset;
        bool disable_display;
        bool disable_vtd_wa;
        int use_mmio_flip;
-       bool mmio_debug;
+       int mmio_debug;
        bool verbose_state_checks;
        bool nuclear_pageflip;
 };
@@ -2616,6 +2545,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
 void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
                                enum forcewake_domains domains);
 void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
+static inline bool intel_vgpu_active(struct drm_device *dev)
+{
+       return to_i915(dev)->vgpu.active;
+}
 
 void
 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
@@ -2694,12 +2627,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
-unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
-                             long target,
-                             unsigned flags);
-#define I915_SHRINK_PURGEABLE 0x1
-#define I915_SHRINK_UNBOUND 0x2
-#define I915_SHRINK_BOUND 0x4
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -2716,20 +2643,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
 #define PIN_GLOBAL 0x4
 #define PIN_OFFSET_BIAS 0x8
 #define PIN_OFFSET_MASK (~4095)
-int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-                                         struct i915_address_space *vm,
-                                         uint32_t alignment,
-                                         uint64_t flags,
-                                         const struct i915_ggtt_view *view);
-static inline
-int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm,
-                                    uint32_t alignment,
-                                    uint64_t flags)
-{
-       return i915_gem_object_pin_view(obj, vm, alignment, flags,
-                                               &i915_ggtt_view_normal);
-}
+int __must_check
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
+                   uint32_t alignment,
+                   uint64_t flags);
+int __must_check
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+                        const struct i915_ggtt_view *view,
+                        uint32_t alignment,
+                        uint64_t flags);
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                  u32 flags);
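
The old pin_view wrapper pair is replaced by two explicit entry points: i915_gem_object_pin() for address spaces without views, and i915_gem_object_ggtt_pin() which requires a view and adds PIN_GLOBAL internally. A hedged usage sketch (object setup elided):

	/* Hedged sketch: the two pinning entry points after the split. */
	static int pin_both_ways(struct drm_i915_gem_object *obj,
				 struct i915_hw_ppgtt *ppgtt)
	{
		int ret;

		/* PPGTT (view-less) pinning. */
		ret = i915_gem_object_pin(obj, &ppgtt->base, 4096, 0);
		if (ret)
			return ret;

		/* GGTT pinning always names a view; PIN_GLOBAL is implied. */
		return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
						4096, PIN_MAPPABLE);
	}
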
@@ -2864,8 +2787,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
-                                    struct intel_engine_cs *pipelined);
-void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
+                                    struct intel_engine_cs *pipelined,
+                                    const struct i915_ggtt_view *view);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+                                             const struct i915_ggtt_view *view);
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
                                int align);
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
@@ -2890,60 +2815,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 
 void i915_gem_restore_fences(struct drm_device *dev);
 
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
-                                      struct i915_address_space *vm,
-                                      enum i915_ggtt_view_type view);
-static inline
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
-                                 struct i915_address_space *vm)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+                             const struct i915_ggtt_view *view);
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                   struct i915_address_space *vm);
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 {
-       return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL);
+       return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
 }
+
 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
-                            struct i915_address_space *vm,
-                            enum i915_ggtt_view_type view);
-static inline
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+                                 const struct i915_ggtt_view *view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
-                       struct i915_address_space *vm)
-{
-       return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL);
-}
+                       struct i915_address_space *vm);
 
 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
                                struct i915_address_space *vm);
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-                                         struct i915_address_space *vm,
-                                         const struct i915_ggtt_view *view);
-static inline
-struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                                    struct i915_address_space *vm)
-{
-       return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal);
-}
-
 struct i915_vma *
-i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
-                                      struct i915_address_space *vm,
-                                      const struct i915_ggtt_view *view);
+i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+                         const struct i915_ggtt_view *view);
 
-static inline
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm)
-{
-       return i915_gem_obj_lookup_or_create_vma_view(obj, vm,
-                                               &i915_ggtt_view_normal);
-}
+                                 struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
+                                      const struct i915_ggtt_view *view);
 
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
-static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
-       struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->pin_count > 0)
-                       return true;
-       return false;
+static inline struct i915_vma *
+i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
+{
+       return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
 }
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
 
 /* Some GGTT VM helpers */
 #define i915_obj_to_ggtt(obj) \
@@ -2966,13 +2877,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj));
-}
-
-static inline unsigned long
-i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
-{
-       return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj));
+       return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
 }
 
 static inline unsigned long
@@ -2996,7 +2901,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
        return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 }
 
-void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+                                    const struct i915_ggtt_view *view);
+static inline void
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+{
+       i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
+}
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
@@ -3068,6 +2979,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 gtt_offset,
                                               u32 size);
 
+/* i915_gem_shrinker.c */
+unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
+                             long target,
+                             unsigned flags);
+#define I915_SHRINK_PURGEABLE 0x1
+#define I915_SHRINK_UNBOUND 0x2
+#define I915_SHRINK_BOUND 0x4
+unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+
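
The shrinker entry points now live in i915_gem_shrinker.c; callers select what may be reaped through the flag mask, mirroring the two-pass scan the removed inline code performed (see the deleted i915_gem_shrinker_scan() below):

	/* Hedged sketch: prefer purgeable pages, then fall back to anything
	 * unpinned, bound or unbound. */
	static unsigned long shrink_pass(struct drm_i915_private *dev_priv,
					 long nr_to_scan)
	{
		unsigned long freed;

		freed = i915_gem_shrink(dev_priv, nr_to_scan,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
		if (freed < nr_to_scan)
			freed += i915_gem_shrink(dev_priv, nr_to_scan - freed,
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND);
		return freed;
	}
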
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
@@ -3143,10 +3065,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
-/* i915_ums.c */
-void i915_save_display_reg(struct drm_device *dev);
-void i915_restore_display_reg(struct drm_device *dev);
-
 /* i915_sysfs.c */
 void i915_setup_sysfs(struct drm_device *dev_priv);
 void i915_teardown_sysfs(struct drm_device *dev_priv);
@@ -3159,11 +3077,11 @@ static inline bool intel_gmbus_is_port_valid(unsigned port)
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
 }
 
-extern struct device *intel_gmbus_get_adapter(
+extern struct i2c_adapter *intel_gmbus_get_adapter(
                struct drm_i915_private *dev_priv, unsigned port);
-extern void intel_gmbus_set_speed(struct device *adapter, int speed);
-extern void intel_gmbus_force_bit(struct device *adapter, bool force_bit);
-static inline bool intel_gmbus_is_forced_bit(struct device *adapter)
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 {
        struct intel_iic_softc *sc;
        sc = device_get_softc(device_get_parent(adapter));
@@ -3219,11 +3137,9 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
-extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
@@ -3238,8 +3154,6 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
 
 struct intel_device_info *i915_get_device_id(int device);
 
-void intel_notify_mmio_flip(struct intel_engine_cs *ring);
-
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@ -3376,7 +3290,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 
        if (time_after(target_jiffies, tmp_jiffies)) {
                remaining_jiffies = target_jiffies - tmp_jiffies;
-
 #if 0
                while (remaining_jiffies)
                        remaining_jiffies =
index 508aabf..8cb8fd9 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright © 2008 Intel Corporation
+ * Copyright © 2008-2015 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -58,6 +58,7 @@
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/shmem_fs.h>
@@ -79,8 +80,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
                                         struct drm_i915_fence_reg *fence,
                                         bool enable);
 
-static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
-
 static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
 {
@@ -376,7 +375,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        struct drm_device *dev = obj->base.dev;
        void *vaddr = (char *)obj->phys_handle->vaddr + args->offset;
        char __user *user_data = to_user_ptr(args->data_ptr);
-       int ret;
+       int ret = 0;
 
        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
@@ -385,6 +384,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
+       intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
 
@@ -395,13 +395,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                mutex_unlock(&dev->struct_mutex);
                unwritten = copy_from_user(vaddr, user_data, args->size);
                mutex_lock(&dev->struct_mutex);
-               if (unwritten)
-                       return -EFAULT;
+               if (unwritten) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        }
 
        drm_clflush_virt_range(vaddr, args->size);
        i915_gem_chipset_flush(dev);
-       return 0;
+
+out:
+       intel_fb_obj_flush(obj, false);
+       return ret;
 }
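
All three pwrite paths in this file now wrap the actual copy in the same frontbuffer-tracking bracket, so PSR/FBC/DRRS can react to CPU and GTT writes; schematically:

	/* Hedged sketch of the common pattern added above. */
	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);  /* or ORIGIN_GTT */
	/* ... copy user data into the object ... */
	intel_fb_obj_flush(obj, false);
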
 
 void *i915_gem_object_alloc(struct drm_device *dev)
@@ -822,6 +827,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 
        offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
 
+       intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
+
        while (remain > 0) {
                /* Operation in this page
                 *
@@ -842,7 +849,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                if (fast_user_write(dev_priv->gtt.mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
-                       goto out_unpin;
+                       goto out_flush;
                }
 
                remain -= page_length;
@@ -850,6 +857,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
                offset += page_length;
        }
 
+out_flush:
+       intel_fb_obj_flush(obj, false);
 out_unpin:
        i915_gem_object_ggtt_unpin(obj);
 out:
@@ -964,6 +973,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
        if (ret)
                return ret;
 
+       intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
+
        i915_gem_object_pin_pages(obj);
 
        offset = args->offset;
@@ -1048,6 +1059,7 @@ out:
        if (needs_clflush_after)
                i915_gem_chipset_flush(dev);
 
+       intel_fb_obj_flush(obj, false);
        return ret;
 }
 
@@ -2030,12 +2042,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
-{
-       return obj->madv == I915_MADV_DONTNEED;
-}
-
 /* Immediately discard the backing storage */
 static void
 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
@@ -2146,85 +2152,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-unsigned long
-i915_gem_shrink(struct drm_i915_private *dev_priv,
-               long target, unsigned flags)
-{
-       const struct {
-               struct list_head *list;
-               unsigned int bit;
-       } phases[] = {
-               { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
-               { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
-               { NULL, 0 },
-       }, *phase;
-       unsigned long count = 0;
-
-       /*
-        * As we may completely rewrite the (un)bound list whilst unbinding
-        * (due to retiring requests) we have to strictly process only
-        * one element of the list at the time, and recheck the list
-        * on every iteration.
-        *
-        * In particular, we must hold a reference whilst removing the
-        * object as we may end up waiting for and/or retiring the objects.
-        * This might release the final reference (held by the active list)
-        * and result in the object being freed from under us. This is
-        * similar to the precautions the eviction code must take whilst
-        * removing objects.
-        *
-        * Also note that although these lists do not hold a reference to
-        * the object we can safely grab one here: The final object
-        * unreferencing and the bound_list are both protected by the
-        * dev->struct_mutex and so we won't ever be able to observe an
-        * object on the bound_list with a reference count equals 0.
-        */
-       for (phase = phases; phase->list; phase++) {
-               struct list_head still_in_list;
-
-               if ((flags & phase->bit) == 0)
-                       continue;
-
-               INIT_LIST_HEAD(&still_in_list);
-               while (count < target && !list_empty(phase->list)) {
-                       struct drm_i915_gem_object *obj;
-                       struct i915_vma *vma, *v;
-
-                       obj = list_first_entry(phase->list,
-                                              typeof(*obj), global_list);
-                       list_move_tail(&obj->global_list, &still_in_list);
-
-                       if (flags & I915_SHRINK_PURGEABLE &&
-                           !i915_gem_object_is_purgeable(obj))
-                               continue;
-
-                       drm_gem_object_reference(&obj->base);
-
-                       /* For the unbound phase, this should be a no-op! */
-                       list_for_each_entry_safe(vma, v,
-                                                &obj->vma_list, vma_link)
-                               if (i915_vma_unbind(vma))
-                                       break;
-
-                       if (i915_gem_object_put_pages(obj) == 0)
-                               count += obj->base.size >> PAGE_SHIFT;
-
-                       drm_gem_object_unreference(&obj->base);
-               }
-               list_splice(&still_in_list, phase->list);
-       }
-
-       return count;
-}
-
-static unsigned long
-i915_gem_shrink_all(struct drm_i915_private *dev_priv)
-{
-       i915_gem_evict_everything(dev_priv->dev);
-       return i915_gem_shrink(dev_priv, LONG_MAX,
-                              I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
-}
-
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
@@ -2537,10 +2464,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
                ret = ring->add_request(ring);
                if (ret)
                        return ret;
+
+               request->tail = intel_ring_get_tail(ringbuf);
        }
 
        request->head = request_start;
-       request->tail = intel_ring_get_tail(ringbuf);
 
        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
@@ -2571,6 +2499,8 @@ int __i915_add_request(struct intel_engine_cs *ring,
                list_add_tail(&request->client_list,
                              &file_priv->mm.request_list);
                spin_unlock(&file_priv->mm.lock);
+
+               request->pid = curproc->p_pid;
        }
 
        trace_i915_gem_request_add(request);
@@ -2651,6 +2581,10 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
        list_del(&request->list);
        i915_gem_request_remove_from_client(request);
 
+#if 0
+       put_pid(request->pid);
+#endif
+
        i915_gem_request_unreference(request);
 }
 
@@ -2823,7 +2757,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
         */
        while (!list_empty(&ring->request_list)) {
                struct drm_i915_gem_request *request;
-               struct intel_ringbuffer *ringbuf;
 
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
@@ -2834,23 +2767,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
                trace_i915_gem_request_retire(request);
 
-               /* This is one of the few common intersection points
-                * between legacy ringbuffer submission and execlists:
-                * we need to tell them apart in order to find the correct
-                * ringbuffer to which the request belongs to.
-                */
-               if (i915.enable_execlists) {
-                       struct intel_context *ctx = request->ctx;
-                       ringbuf = ctx->engine[ring->id].ringbuf;
-               } else
-                       ringbuf = ring->buffer;
-
                /* We know the GPU must have read the request to have
                 * sent us the seqno + interrupt, so use the position
                 * of tail of the request to update the last known position
                 * of the GPU head.
                 */
-               ringbuf->last_retired_head = request->postfix;
+               request->ringbuf->last_retired_head = request->postfix;
 
                i915_gem_free_request(request);
        }
@@ -3168,8 +3090,8 @@ int i915_vma_unbind(struct i915_vma *vma)
                        obj->map_and_fenceable = false;
                } else if (vma->ggtt_view.pages) {
                        kfree(vma->ggtt_view.pages);
-                       vma->ggtt_view.pages = NULL;
                }
+               vma->ggtt_view.pages = NULL;
        }
 
        drm_mm_remove_node(&vma->node);
@@ -3586,9 +3508,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
                           struct i915_address_space *vm,
+                          const struct i915_ggtt_view *ggtt_view,
                           unsigned alignment,
-                          uint64_t flags,
-                          const struct i915_ggtt_view *view)
+                          uint64_t flags)
 {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3600,6 +3522,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
        struct i915_vma *vma;
        int ret;
 
+       if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+               return ERR_PTR(-EINVAL);
+
        fence_size = i915_gem_get_gtt_size(dev,
                                           obj->base.size,
                                           obj->tiling_mode);
@@ -3638,7 +3563,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
        i915_gem_object_pin_pages(obj);
 
-       vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view);
+       vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+                         i915_gem_obj_lookup_or_create_vma(obj, vm);
+
        if (IS_ERR(vma))
                goto err_unpin;
 
@@ -3668,6 +3595,17 @@ search_free:
        if (ret)
                goto err_remove_node;
 
+       /* allocate before insert / bind */
+       if (vma->vm->allocate_va_range) {
+               trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size,
+                               VM_TO_TRACE_NAME(vma->vm));
+               ret = vma->vm->allocate_va_range(vma->vm,
+                                               vma->node.start,
+                                               vma->node.size);
+               if (ret)
+                       goto err_remove_node;
+       }
+
        trace_i915_vma_bind(vma, flags);
        ret = i915_vma_bind(vma, obj->cache_level,
                            flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
@@ -3838,7 +3776,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        }
 
        if (write)
-               intel_fb_obj_invalidate(obj, NULL);
+               intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -4020,7 +3958,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
-                                    struct intel_engine_cs *pipelined)
+                                    struct intel_engine_cs *pipelined,
+                                    const struct i915_ggtt_view *view)
 {
        u32 old_read_domains, old_write_domain;
        bool was_pin_display;
@@ -4056,7 +3995,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         * (e.g. libkms for the bootup splash), we have to ensure that we
         * always use map_and_fenceable for all scanout buffers.
         */
-       ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
+       ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+                                      view->type == I915_GGTT_VIEW_NORMAL ?
+                                      PIN_MAPPABLE : 0);
        if (ret)
                goto err_unpin_display;
 
@@ -4084,9 +4025,11 @@ err_unpin_display:
 }
 
 void
-i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
+                                        const struct i915_ggtt_view *view)
 {
-       i915_gem_object_ggtt_unpin(obj);
+       i915_gem_object_ggtt_unpin_view(obj, view);
+
        obj->pin_display = is_pin_display(obj);
 }
 
@@ -4153,7 +4096,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        }
 
        if (write)
-               intel_fb_obj_invalidate(obj, NULL);
+               intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
@@ -4235,12 +4178,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
        return false;
 }
 
-int
-i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
-                        struct i915_address_space *vm,
-                        uint32_t alignment,
-                        uint64_t flags,
-                        const struct i915_ggtt_view *view)
+static int
+i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
+                      struct i915_address_space *vm,
+                      const struct i915_ggtt_view *ggtt_view,
+                      uint32_t alignment,
+                      uint64_t flags)
 {
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        struct i915_vma *vma;
@@ -4256,17 +4199,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
        if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
                return -EINVAL;
 
-       vma = i915_gem_obj_to_vma_view(obj, vm, view);
+       if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+               return -EINVAL;
+
+       vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
+                         i915_gem_obj_to_vma(obj, vm);
+
+       if (IS_ERR(vma))
+               return PTR_ERR(vma);
+
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
 
                if (i915_vma_misplaced(vma, alignment, flags)) {
+                       unsigned long offset;
+                       offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) :
+                                            i915_gem_obj_offset(obj, vm);
                        WARN(vma->pin_count,
-                            "bo is already pinned with incorrect alignment:"
+                            "bo is already pinned in %s with incorrect alignment:"
                             " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            i915_gem_obj_offset_view(obj, vm, view->type),
+                            ggtt_view ? "ggtt" : "ppgtt",
+                            offset,
                             alignment,
                             !!(flags & PIN_MAPPABLE),
                             obj->map_and_fenceable);
@@ -4280,8 +4235,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
 
        bound = vma ? vma->bound : 0;
        if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-               vma = i915_gem_object_bind_to_vm(obj, vm, alignment,
-                                                flags, view);
+               /* In true PPGTT, bind has possibly changed PDEs, which
+                * means we must do a context switch before the GPU can
+                * accurately read some of the VMAs.
+                */
+               vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
+                                                flags);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);
        }
@@ -4307,7 +4266,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
                fenceable = (vma->node.size == fence_size &&
                             (vma->node.start & (fence_alignment - 1)) == 0);
 
-               mappable = (vma->node.start + obj->base.size <=
+               mappable = (vma->node.start + fence_size <=
                            dev_priv->gtt.mappable_end);
 
                obj->map_and_fenceable = mappable && fenceable;
@@ -4322,16 +4281,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj,
        return 0;
 }
 
+int
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   struct i915_address_space *vm,
+                   uint32_t alignment,
+                   uint64_t flags)
+{
+       return i915_gem_object_do_pin(obj, vm,
+                                     i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
+                                     alignment, flags);
+}
+
+int
+i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
+                        const struct i915_ggtt_view *view,
+                        uint32_t alignment,
+                        uint64_t flags)
+{
+       if (WARN_ONCE(!view, "no view specified"))
+               return -EINVAL;
+
+       return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+                                     alignment, flags | PIN_GLOBAL);
+}
+
 void
-i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
+                               const struct i915_ggtt_view *view)
 {
-       struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
+       struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
        BUG_ON(!vma);
-       BUG_ON(vma->pin_count == 0);
-       BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+       WARN_ON(vma->pin_count == 0);
+       WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
-       if (--vma->pin_count == 0)
+       if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL)
                obj->pin_mappable = false;
 }
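
Since every view is a distinct vma, unpinning must name the very view that was pinned; a hedged pairing sketch:

	/* Hedged sketch: pin and unpin must agree on the GGTT view. */
	static int use_view(struct drm_i915_gem_object *obj,
			    const struct i915_ggtt_view *view)
	{
		int ret = i915_gem_object_ggtt_pin(obj, view, 0, 0);
		if (ret)
			return ret;
		/* ... access the mapping backing this view ... */
		i915_gem_object_ggtt_unpin_view(obj, view);
		return 0;
	}
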
 
@@ -4452,7 +4436,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                obj->madv = args->madv;
 
        /* if the object is no longer attached, discard its backing storage */
-       if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
+       if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
                i915_gem_object_truncate(obj);
 
        args->retained = obj->madv != __I915_MADV_PURGED;
@@ -4637,15 +4621,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        intel_runtime_pm_put(dev_priv);
 }
 
-struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj,
-                                         struct i915_address_space *vm,
-                                         const struct i915_ggtt_view *view)
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+                                    struct i915_address_space *vm)
 {
        struct i915_vma *vma;
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->vm == vm && vma->ggtt_view.type == view->type)
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (i915_is_ggtt(vma->vm) &&
+                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+                       continue;
+               if (vma->vm == vm)
                        return vma;
+       }
+       return NULL;
+}
+
+struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
+                                          const struct i915_ggtt_view *view)
+{
+       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+       struct i915_vma *vma;
 
+       if (WARN_ONCE(!view, "no view specified"))
+               return ERR_PTR(-EINVAL);
+
+       list_for_each_entry(vma, &obj->vma_list, vma_link)
+               if (vma->vm == ggtt &&
+                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+                       return vma;
        return NULL;
 }
 
@@ -4692,10 +4694,6 @@ i915_gem_suspend(struct drm_device *dev)
 
        i915_gem_retire_requests(dev);
 
-       /* Under UMS, be paranoid and evict. */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               i915_gem_evict_everything(dev);
-
        i915_gem_stop_ringbuffers(dev);
        mutex_unlock(&dev->struct_mutex);
 
@@ -5064,18 +5062,8 @@ i915_gem_load(struct drm_device *dev)
                          i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
-       /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
-               I915_WRITE(MI_ARB_STATE,
-                          _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
-       }
-
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       /* Old X drivers will take 0-2 for front, back, depth buffers */
-       if (!drm_core_check_feature(dev, DRIVER_MODESET))
-               dev_priv->fence_reg_start = 3;
-
        if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
                dev_priv->num_fence_regs = 32;
        else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
@@ -5083,6 +5071,10 @@ i915_gem_load(struct drm_device *dev)
        else
                dev_priv->num_fence_regs = 8;
 
+       if (intel_vgpu_active(dev))
+               dev_priv->num_fence_regs =
+                               I915_READ(vgtif_reg(avail_rs.fence_num));
+
        /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        i915_gem_restore_fences(dev);
@@ -5092,15 +5084,7 @@ i915_gem_load(struct drm_device *dev)
 
        dev_priv->mm.interruptible = true;
 
-#if 0
-       dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
-       dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
-       dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-       register_shrinker(&dev_priv->mm.shrinker);
-
-       dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-       register_oom_notifier(&dev_priv->mm.oom_notifier);
-#endif
+       i915_gem_shrinker_init(dev_priv);
 
        i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool);
 
@@ -5216,110 +5200,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
        }
 }
 
-#if 0
-static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
-{
-       if (!mutex_is_locked(mutex))
-               return false;
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-       return mutex->owner == task;
-#else
-       /* Since UP may be pre-empted, we cannot assume that we own the lock */
-       return false;
-#endif
-}
-#endif
-
-#if 0
-static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
+/* All the new VM stuff */
+unsigned long
+i915_gem_obj_offset(struct drm_i915_gem_object *o,
+                   struct i915_address_space *vm)
 {
-       if (!mutex_trylock(&dev->struct_mutex)) {
-               if (!mutex_is_locked_by(&dev->struct_mutex, current))
-                       return false;
+       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct i915_vma *vma;
 
-               if (to_i915(dev)->mm.shrinker_no_lock_stealing)
-                       return false;
+       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
-               *unlock = false;
-       } else
-               *unlock = true;
+       list_for_each_entry(vma, &o->vma_list, vma_link) {
+               if (i915_is_ggtt(vma->vm) &&
+                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+                       continue;
+               if (vma->vm == vm)
+                       return vma->node.start;
+       }
 
-       return true;
+       WARN(1, "%s vma for this object not found.\n",
+            i915_is_ggtt(vm) ? "global" : "ppgtt");
+       return -1;
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+unsigned long
+i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
+                             const struct i915_ggtt_view *view)
 {
+       struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
-       int count = 0;
-
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (drm_mm_node_allocated(&vma->node))
-                       count++;
-
-       return count;
-}
-
-static unsigned long
-i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(shrinker, struct drm_i915_private, mm.shrinker);
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_i915_gem_object *obj;
-       unsigned long count;
-       bool unlock;
-
-       if (!i915_gem_shrinker_lock(dev, &unlock))
-               return 0;
-
-       count = 0;
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
-               if (obj->pages_pin_count == 0)
-                       count += obj->base.size >> PAGE_SHIFT;
-
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!i915_gem_obj_is_pinned(obj) &&
-                   obj->pages_pin_count == num_vma_bound(obj))
-                       count += obj->base.size >> PAGE_SHIFT;
-       }
 
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
+       list_for_each_entry(vma, &o->vma_list, vma_link)
+               if (vma->vm == ggtt &&
+                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+                       return vma->node.start;
 
-       return count;
+       WARN(1, "global vma for this object not found.\n");
+       return -1;
 }
-#endif
 
-/* All the new VM stuff */
-unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o,
-                                      struct i915_address_space *vm,
-                                      enum i915_ggtt_view_type view)
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+                       struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
        list_for_each_entry(vma, &o->vma_list, vma_link) {
-               if (vma->vm == vm && vma->ggtt_view.type == view)
-                       return vma->node.start;
-
+               if (i915_is_ggtt(vma->vm) &&
+                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+                       continue;
+               if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+                       return true;
        }
-       WARN(1, "%s vma for this object not found.\n",
-            i915_is_ggtt(vm) ? "global" : "ppgtt");
-       return -1;
+
+       return false;
 }
 
-bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o,
-                            struct i915_address_space *vm,
-                            enum i915_ggtt_view_type view)
+bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
+                                 const struct i915_ggtt_view *view)
 {
+       struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, vma_link)
-               if (vma->vm == vm &&
-                   vma->ggtt_view.type == view &&
+               if (vma->vm == ggtt &&
+                   i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
 
@@ -5347,120 +5291,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
 
        BUG_ON(list_empty(&o->vma_list));
 
-       list_for_each_entry(vma, &o->vma_list, vma_link)
+       list_for_each_entry(vma, &o->vma_list, vma_link) {
+               if (i915_is_ggtt(vma->vm) &&
+                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
+                       continue;
                if (vma->vm == vm)
                        return vma->node.size;
-
+       }
        return 0;
 }
 
-#if 0
-static unsigned long
-i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv =
-               container_of(shrinker, struct drm_i915_private, mm.shrinker);
-       struct drm_device *dev = dev_priv->dev;
-       unsigned long freed;
-       bool unlock;
-
-       if (!i915_gem_shrinker_lock(dev, &unlock))
-               return SHRINK_STOP;
-
-       freed = i915_gem_shrink(dev_priv,
-                               sc->nr_to_scan,
-                               I915_SHRINK_BOUND |
-                               I915_SHRINK_UNBOUND |
-                               I915_SHRINK_PURGEABLE);
-       if (freed < sc->nr_to_scan)
-               freed += i915_gem_shrink(dev_priv,
-                                        sc->nr_to_scan - freed,
-                                        I915_SHRINK_BOUND |
-                                        I915_SHRINK_UNBOUND);
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
-
-       return freed;
-}
-
-static int
-i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(nb, struct drm_i915_private, mm.oom_notifier);
-       struct drm_device *dev = dev_priv->dev;
-       struct drm_i915_gem_object *obj;
-       unsigned long timeout = msecs_to_jiffies(5000) + 1;
-       unsigned long pinned, bound, unbound, freed_pages;
-       bool was_interruptible;
-       bool unlock;
-
-       while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
-               schedule_timeout_killable(1);
-               if (fatal_signal_pending(current))
-                       return NOTIFY_DONE;
-       }
-       if (timeout == 0) {
-               pr_err("Unable to purge GPU memory due lock contention.\n");
-               return NOTIFY_DONE;
-       }
-
-       was_interruptible = dev_priv->mm.interruptible;
-       dev_priv->mm.interruptible = false;
-
-       freed_pages = i915_gem_shrink_all(dev_priv);
-
-       dev_priv->mm.interruptible = was_interruptible;
-
-       /* Because we may be allocating inside our own driver, we cannot
-        * assert that there are no objects with pinned pages that are not
-        * being pointed to by hardware.
-        */
-       unbound = bound = pinned = 0;
-       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-               if (!obj->base.filp) /* not backed by a freeable object */
-                       continue;
-
-               if (obj->pages_pin_count)
-                       pinned += obj->base.size;
-               else
-                       unbound += obj->base.size;
-       }
-       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!obj->base.filp)
+       struct i915_vma *vma;
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (i915_is_ggtt(vma->vm) &&
+                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
                        continue;
-
-               if (obj->pages_pin_count)
-                       pinned += obj->base.size;
-               else
-                       bound += obj->base.size;
+               if (vma->pin_count > 0)
+                       return true;
        }
-
-       if (unlock)
-               mutex_unlock(&dev->struct_mutex);
-
-       if (freed_pages || unbound || bound)
-               pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-                       freed_pages << PAGE_SHIFT, pinned);
-       if (unbound || bound)
-               pr_err("%lu and %lu bytes still available in the "
-                      "bound and unbound GPU page lists.\n",
-                      bound, unbound);
-
-       *(unsigned long *)ptr += freed_pages;
-       return NOTIFY_DONE;
+       return false;
 }
-#endif
-
-struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
-{
-       struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
-       struct i915_vma *vma;
 
-       list_for_each_entry(vma, &obj->vma_list, vma_link)
-               if (vma->vm == ggtt &&
-                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
-                       return vma;
-
-       return NULL;
-}
index 8603bf4..f3e84c4 100644
@@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
 
-       /* In execlists mode we will unreference the context when the execlist
-        * queue is cleared and the requests destroyed.
-        */
-       if (i915.enable_execlists)
+       if (i915.enable_execlists) {
+               struct intel_context *ctx;
+
+               list_for_each_entry(ctx, &dev_priv->context_list, link) {
+                       intel_lr_context_reset(dev, ctx);
+               }
+
                return;
+       }
 
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
@@ -565,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring,
        return ret;
 }
 
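+/* A context switch can be skipped when we stay on the same context and no
+ * page directory update is pending for this ring: pending L3 remaps always
+ * force the switch, and a set pd_dirty_rings bit means the PDEs changed
+ * behind the GPU's back since this ring last loaded them. */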
+static inline bool should_skip_switch(struct intel_engine_cs *ring,
+                                     struct intel_context *from,
+                                     struct intel_context *to)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       if (to->remap_slice)
+               return false;
+
+       if (to->ppgtt) {
+               if (from == to && !test_bit(ring->id,
+                               &to->ppgtt->pd_dirty_rings))
+                       return true;
+       } else if (dev_priv->mm.aliasing_ppgtt) {
+               if (from == to && !test_bit(ring->id,
+                               &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings))
+                       return true;
+       }
+
+       return false;
+}
+
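+/* Legacy submission needs PP_DCLV/PP_DIR_BASE loaded up front: always on
+ * gens before 8, and on gen8 for every ring but the render ring. */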
+static bool
+needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       if (!to->ppgtt)
+               return false;
+
+       if (INTEL_INFO(ring->dev)->gen < 8)
+               return true;
+
+       if (ring != &dev_priv->ring[RCS])
+               return true;
+
+       return false;
+}
+
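+/* On gen8 the render ring restores the PDPs from the context image itself,
+ * so an explicit reload afterwards is only needed when the restore was
+ * inhibited. */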
+static bool
+needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to,
+               u32 hw_flags)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       if (!to->ppgtt)
+               return false;
+
+       if (!IS_GEN8(ring->dev))
+               return false;
+
+       if (ring != &dev_priv->ring[RCS])
+               return false;
+
+       if (hw_flags & MI_RESTORE_INHIBIT)
+               return true;
+
+       return false;
+}
+
 static int do_switch(struct intel_engine_cs *ring,
                     struct intel_context *to)
 {
@@ -580,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring,
                BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
        }
 
-       if (from == to && !to->remap_slice)
+       if (should_skip_switch(ring, from, to))
                return 0;
 
        /* Trying to pin first makes error handling easier. */
@@ -598,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring,
         */
        from = ring->last_context;
 
-       if (to->ppgtt) {
+       if (needs_pd_load_pre(ring, to)) {
+               /* Older GENs and non-render rings still want the load first,
+                * "PP_DCLV followed by PP_DIR_BASE register through Load
+                * Register Immediate commands in Ring Buffer before submitting
+                * a context." */
                trace_switch_mm(ring, to);
                ret = to->ppgtt->switch_mm(to->ppgtt, ring);
                if (ret)
                        goto unpin_out;
+
+               /* Doing a PD load always reloads the page dirs */
+               clear_bit(ring->id, &to->ppgtt->pd_dirty_rings);
        }
 
        if (ring != &dev_priv->ring[RCS]) {
@@ -633,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring,
                        goto unpin_out;
        }
 
-       if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
+       if (!to->legacy_hw_ctx.initialized) {
                hw_flags |= MI_RESTORE_INHIBIT;
+               /* NB: If we inhibit the restore, the context is not allowed to
+                * die because future work may end up depending on a valid
+                * address space. This means we must enforce that a page table
+                * load occurs whenever the restore is inhibited. */
+       } else if (to->ppgtt &&
+                       test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings))
+               hw_flags |= MI_FORCE_RESTORE;
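+       /* A set dirty bit means the page directories changed since this
+        * context last ran on this ring; forcing the restore guarantees
+        * the hardware reloads them. */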
+
+       /* We should never emit switch_mm more than once */
+       WARN_ON(needs_pd_load_pre(ring, to) &&
+                       needs_pd_load_post(ring, to, hw_flags));
 
        ret = mi_set_context(ring, to, hw_flags);
        if (ret)
                goto unpin_out;
 
+       /* GEN8 does *not* require an explicit reload if the PDPs have been
+        * set up, and we do not wish to move them.
+        */
+       if (needs_pd_load_post(ring, to, hw_flags)) {
+               trace_switch_mm(ring, to);
+               ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+               /* The hardware context switch is emitted, but we haven't
+                * actually changed the state - so it's probably safe to bail
+                * here. Still, let the user know something dangerous has
+                * happened.
+                */
+               if (ret) {
+                       DRM_ERROR("Failed to change address space on context switch\n");
+                       goto unpin_out;
+               }
+       }
+
        for (i = 0; i < MAX_L3_SLICES; i++) {
                if (!(to->remap_slice & (1<<i)))
                        continue;
@@ -677,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring,
                i915_gem_context_unreference(from);
        }
 
-       uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+       uninitialized = !to->legacy_hw_ctx.initialized;
        to->legacy_hw_ctx.initialized = true;
 
 done:
index e3a49d9..d09e35e 100644 (file)
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  *
  * This function is used by the object/vma binding code.
  *
+ * Since this function is only used to free up virtual address space, it
+ * skips only pinned vmas, not objects whose backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
index 0601614..141d68c 100644 (file)
@@ -32,6 +32,7 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/pagemap.h>
+#include <asm/cpufeature.h>
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -251,7 +252,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
        return (HAS_LLC(obj->base.dev) ||
                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
-               !obj->map_and_fenceable ||
                obj->cache_level != I915_CACHE_NONE);
 }
 
@@ -337,6 +337,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
        return 0;
 }
 
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+       /* This is not a fast path, so KISS. */
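+       /* Flush before the store so no stale cacheline masks it, and
+        * flush after so the value reaches memory, where a GPU that does
+        * not snoop the CPU caches will read it. */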
+       drm_clflush_virt_range(addr, sizeof(uint32_t));
+       *(uint32_t *)addr = value;
+       drm_clflush_virt_range(addr, sizeof(uint32_t));
+}
+
+static int
+relocate_entry_clflush(struct drm_i915_gem_object *obj,
+                      struct drm_i915_gem_relocation_entry *reloc,
+                      uint64_t target_offset)
+{
+       struct drm_device *dev = obj->base.dev;
+       uint32_t page_offset = offset_in_page(reloc->offset);
+       uint64_t delta = (int)reloc->delta + target_offset;
+       char *vaddr;
+       int ret;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, true);
+       if (ret)
+               return ret;
+
+       vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+                               reloc->offset >> PAGE_SHIFT));
+       clflush_write32(vaddr + page_offset, lower_32_bits(delta));
+
+       if (INTEL_INFO(dev)->gen >= 8) {
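+               /* A 64-bit relocation is written as two dwords and may
+                * straddle a page boundary; remap if the upper dword
+                * starts on a new page. */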
+               page_offset = offset_in_page(page_offset + sizeof(uint32_t));
+
+               if (page_offset == 0) {
+                       kunmap_atomic(vaddr);
+                       vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+                           (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
+               }
+
+               clflush_write32(vaddr + page_offset, upper_32_bits(delta));
+       }
+
+       kunmap_atomic(vaddr);
+
+       return 0;
+}
+
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
@@ -426,8 +471,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 
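+       /* Pick the cheapest relocation path: coherent CPU writes, else a
+        * write through the mappable GTT aperture, else a clflushed CPU
+        * write as the fallback. */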
        if (use_cpu_reloc(obj))
                ret = relocate_entry_cpu(obj, reloc, target_offset);
-       else
+       else if (obj->map_and_fenceable)
                ret = relocate_entry_gtt(obj, reloc, target_offset);
+       else if (cpu_has_clflush)
+               ret = relocate_entry_clflush(obj, reloc, target_offset);
+       else {
+               WARN_ONCE(1, "Impossible case in relocation handling\n");
+               ret = -ENODEV;
+       }
 
        if (ret)
                return ret;
@@ -525,6 +576,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb)
        return ret;
 }
 
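+/* True when a mappable placement was requested only so that relocations
+ * can be written through the GTT (__EXEC_OBJECT_NEEDS_MAP without
+ * EXEC_OBJECT_NEEDS_FENCE); such a placement is merely a preference. */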
+static bool only_mappable_for_reloc(unsigned int flags)
+{
+       return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
+               __EXEC_OBJECT_NEEDS_MAP;
+}
+
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_engine_cs *ring,
@@ -536,14 +593,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
        int ret;
 
        flags = 0;
-       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
-               flags |= PIN_GLOBAL | PIN_MAPPABLE;
-       if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
-               flags |= PIN_GLOBAL;
-       if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
-               flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+       if (!drm_mm_node_allocated(&vma->node)) {
+               if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
+                       flags |= PIN_GLOBAL | PIN_MAPPABLE;
+               if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+                       flags |= PIN_GLOBAL;
+               if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
+                       flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+       }
 
        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
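+       /* If the mappable half of the GTT was full but mappable was only
+        * wanted for relocations, retry without that constraint. */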
+       if ((ret == -ENOSPC || ret == -E2BIG) &&
+           only_mappable_for_reloc(entry->flags))
+               ret = i915_gem_object_pin(obj, vma->vm,
+                                         entry->alignment,
+                                         flags & ~(PIN_GLOBAL | PIN_MAPPABLE));
        if (ret)
                return ret;
 
@@ -605,13 +669,14 @@ eb_vma_misplaced(struct i915_vma *vma)
            vma->node.start & (entry->alignment - 1))
                return true;
 
-       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
-               return true;
-
        if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
            vma->node.start < BATCH_OFFSET_BIAS)
                return true;
 
+       /* avoid costly ping-pong once a batch bo ended up non-mappable */
+       if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
+               return !only_mappable_for_reloc(entry->flags);
+
        return false;
 }
 
@@ -973,7 +1038,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                        obj->dirty = 1;
                        i915_gem_request_assign(&obj->last_write_req, req);
 
-                       intel_fb_obj_invalidate(obj, ring);
+                       intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
 
                        /* update for the implicit flush after a batch */
                        obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -1078,16 +1143,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
                          struct drm_i915_gem_object *batch_obj,
                          u32 batch_start_offset,
                          u32 batch_len,
-                         bool is_master,
-                         u32 *flags)
+                         bool is_master)
 {
        struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev);
        struct drm_i915_gem_object *shadow_batch_obj;
-       bool need_reloc = false;
+       struct i915_vma *vma;
        int ret;
 
        shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool,
-                                                  batch_obj->base.size);
+                                                  PAGE_ALIGN(batch_len));
        if (IS_ERR(shadow_batch_obj))
                return shadow_batch_obj;
 
@@ -1097,40 +1161,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
                              batch_start_offset,
                              batch_len,
                              is_master);
-       if (ret) {
-               if (ret == -EACCES)
-                       return batch_obj;
-       } else {
-               struct i915_vma *vma;
+       if (ret)
+               goto err;
 
-               memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+       ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+       if (ret)
+               goto err;
 
-               vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
-               vma->exec_entry = shadow_exec_entry;
-               vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE;
-               drm_gem_object_reference(&shadow_batch_obj->base);
-               i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc);
-               list_add_tail(&vma->exec_list, &eb->vmas);
+       memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
 
-               shadow_batch_obj->base.pending_read_domains =
-                       batch_obj->base.pending_read_domains;
+       vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+       vma->exec_entry = shadow_exec_entry;
+       vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN;
+       drm_gem_object_reference(&shadow_batch_obj->base);
+       list_add_tail(&vma->exec_list, &eb->vmas);
 
-               /*
-                * Set the DISPATCH_SECURE bit to remove the NON_SECURE
-                * bit from MI_BATCH_BUFFER_START commands issued in the
-                * dispatch_execbuffer implementations. We specifically
-                * don't want that set when the command parser is
-                * enabled.
-                *
-                * FIXME: with aliasing ppgtt, buffers that should only
-                * be in ggtt still end up in the aliasing ppgtt. remove
-                * this check when that is fixed.
-                */
-               if (USES_FULL_PPGTT(dev))
-                       *flags |= I915_DISPATCH_SECURE;
-       }
+       shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+
+       return shadow_batch_obj;
 
-       return ret ? ERR_PTR(ret) : shadow_batch_obj;
+err:
+       if (ret == -EACCES) /* unhandled chained batch */
+               return batch_obj;
+       else
+               return ERR_PTR(ret);
 }
 
 int
@@ -1140,7 +1194,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas,
                               struct drm_i915_gem_object *batch_obj,
-                              u64 exec_start, u32 flags)
+                              u64 exec_start, u32 dispatch_flags)
 {
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1200,6 +1254,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
        if (ret)
                goto error;
 
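+       /* The context switch above must have cleared this ring's bit in
+        * pd_dirty_rings; a set bit here means a PD reload was missed. */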
+       if (ctx->ppgtt)
+               WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+                       "%s didn't clear reload\n", ring->name);
+       else if (dev_priv->mm.aliasing_ppgtt)
+               WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
+                       (1<<ring->id), "%s didn't clear reload\n", ring->name);
+
        instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        instp_mask = I915_EXEC_CONSTANTS_MASK;
        switch (instp_mode) {
@@ -1268,19 +1329,19 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 
                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len,
-                                                       flags);
+                                                       dispatch_flags);
                        if (ret)
                                goto error;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring,
                                                exec_start, exec_len,
-                                               flags);
+                                               dispatch_flags);
                if (ret)
                        return ret;
        }
 
-       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
 
        i915_gem_execbuffer_move_to_active(vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1355,7 +1416,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u64 exec_start = args->batch_start_offset;
-       u32 flags;
+       u32 dispatch_flags;
        int ret;
        bool need_relocs;
 
@@ -1366,12 +1427,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       flags = 0;
+       dispatch_flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
-               flags |= I915_DISPATCH_SECURE;
+               dispatch_flags |= I915_DISPATCH_SECURE;
        }
        if (args->flags & I915_EXEC_IS_PINNED)
-               flags |= I915_DISPATCH_PINNED;
+               dispatch_flags |= I915_DISPATCH_PINNED;
 
        if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
                DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1493,12 +1554,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                                                      batch_obj,
                                                      args->batch_start_offset,
                                                      args->batch_len,
-                                                     file->is_master,
-                                                     &flags);
+                                                     file->is_master);
                if (IS_ERR(batch_obj)) {
                        ret = PTR_ERR(batch_obj);
                        goto err;
                }
+
+               /*
+                * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+                * bit from MI_BATCH_BUFFER_START commands issued in the
+                * dispatch_execbuffer implementations. We specifically
+                * don't want that set when the command parser is
+                * enabled.
+                *
+                * FIXME: with aliasing ppgtt, buffers that should only
+                * be in ggtt still end up in the aliasing ppgtt. remove
+                * this check when that is fixed.
+                */
+               if (USES_FULL_PPGTT(dev))
+                       dispatch_flags |= I915_DISPATCH_SECURE;
+
+               exec_start = 0;
        }
 
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1506,14 +1582,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
-       if (flags & I915_DISPATCH_SECURE) {
+       if (dispatch_flags & I915_DISPATCH_SECURE) {
                /*
                 * So on first glance it looks freaky that we pin the batch here
                 * outside of the reservation loop. But:
                 * - The batch is already pinned into the relevant ppgtt, so we
                 *   already have the backing storage fully allocated.
                 * - No other BO uses the global gtt (well contexts, but meh),
-                *   so we don't really have issues with mutliple objects not
+                *   so we don't really have issues with multiple objects not
                 *   fitting due to fragmentation.
                 * So this is actually safe.
                 */
@@ -1526,7 +1602,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                exec_start += i915_gem_obj_offset(batch_obj, vm);
 
        ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
-                                     &eb->vmas, batch_obj, exec_start, flags);
+                                     &eb->vmas, batch_obj, exec_start,
+                                     dispatch_flags);
 
        /*
         * FIXME: We crucially rely upon the active tracking for the (ppgtt)
@@ -1534,7 +1611,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         * needs to be adjusted to also track the ggtt batch vma properly as
         * active.
         */
-       if (flags & I915_DISPATCH_SECURE)
+       if (dispatch_flags & I915_DISPATCH_SECURE)
                i915_gem_object_ggtt_unpin(batch_obj);
 err:
        /* the request owns the ref now */
index 36a3c63..c928268 100644 (file)
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_vgpu.h"
+#include "i915_trace.h"
 #include "intel_drv.h"
 
+#include <linux/bitmap.h>
 #include <linux/highmem.h>
 
 /**
@@ -67,8 +70,9 @@
  * i915_ggtt_view_type and struct i915_ggtt_view.
  *
  * A new flavour of core GEM functions which work with GGTT bound objects were
- * added with the _view suffix. They take the struct i915_ggtt_view parameter
- * encapsulating all metadata required to implement a view.
+ * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
+ * renaming in large amounts of code. They take the struct i915_ggtt_view
+ * parameter encapsulating all metadata required to implement a view.
  *
  * As a helper for callers which are only interested in the normal view,
  * globally const i915_ggtt_view_normal singleton instance exists. All old core
@@ -92,6 +96,9 @@
  */
 
 const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_rotated = {
+        .type = I915_GGTT_VIEW_ROTATED
+};
 
 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
@@ -104,6 +111,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
        has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
        has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
 
+       if (intel_vgpu_active(dev))
+               has_full_ppgtt = false; /* emulation is too hard */
+
        /*
         * We don't allow disabling PPGTT for gen9+ as it's a requirement for
         * execlists, the sole mechanism available to submit work.
@@ -144,11 +154,11 @@ static void ppgtt_bind_vma(struct i915_vma *vma,
                           u32 flags);
 static void ppgtt_unbind_vma(struct i915_vma *vma);
 
-static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
-                                            enum i915_cache_level level,
-                                            bool valid)
+static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+                                        enum i915_cache_level level,
+                                        bool valid)
 {
-       gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+       gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
        pte |= addr;
 
        switch (level) {
@@ -166,11 +176,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
-                                            dma_addr_t addr,
-                                            enum i915_cache_level level)
+static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
+                                         dma_addr_t addr,
+                                         enum i915_cache_level level)
 {
-       gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
+       gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
        pde |= addr;
        if (level != I915_CACHE_NONE)
                pde |= PPAT_CACHED_PDE_INDEX;
@@ -179,11 +189,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
        return pde;
 }
 
-static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level,
-                                    bool valid, u32 unused)
+static gen6_pte_t snb_pte_encode(dma_addr_t addr,
+                                enum i915_cache_level level,
+                                bool valid, u32 unused)
 {
-       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+       gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -201,11 +211,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level,
-                                    bool valid, u32 unused)
+static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
+                                enum i915_cache_level level,
+                                bool valid, u32 unused)
 {
-       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+       gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -225,11 +235,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level,
-                                    bool valid, u32 flags)
+static gen6_pte_t byt_pte_encode(dma_addr_t addr,
+                                enum i915_cache_level level,
+                                bool valid, u32 flags)
 {
-       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+       gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
        if (!(flags & PTE_READ_ONLY))
@@ -241,11 +251,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
-                                    enum i915_cache_level level,
-                                    bool valid, u32 unused)
+static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
+                                enum i915_cache_level level,
+                                bool valid, u32 unused)
 {
-       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+       gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        if (level != I915_CACHE_NONE)
@@ -254,11 +264,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
        return pte;
 }
 
-static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
-                                     enum i915_cache_level level,
-                                     bool valid, u32 unused)
+static gen6_pte_t iris_pte_encode(dma_addr_t addr,
+                                 enum i915_cache_level level,
+                                 bool valid, u32 unused)
 {
-       gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
+       gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
        pte |= HSW_PTE_ADDR_ENCODE(addr);
 
        switch (level) {
@@ -275,6 +285,164 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
        return pte;
 }
 
+#define i915_dma_unmap_single(px, dev) \
+       __i915_dma_unmap_single((px)->daddr, dev)
+
+static inline void __i915_dma_unmap_single(dma_addr_t daddr,
+                                       struct drm_device *dev)
+{
+#if 0
+       struct device *device = &dev->pdev->dev;
+
+       dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+#endif
+}
+
+/**
+ * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
+ * @px:        Page table/dir/etc to get a DMA map for
+ * @dev:       drm device
+ *
+ * Page table allocations are unified across all gens. They always require a
+ * single 4k allocation, as well as a DMA mapping. If we keep the structs
+ * symmetric here, the simple macro covers us for every page table type.
+ *
+ * Return: 0 on success.
+ */
+#define i915_dma_map_single(px, dev) \
+       i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)
+
+static inline int i915_dma_map_page_single(struct vm_page *page,
+                                          struct drm_device *dev,
+                                          dma_addr_t *daddr)
+{
+       struct device *device = dev->pdev->dev;
+
+       *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(device, *daddr))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void unmap_and_free_pt(struct i915_page_table_entry *pt,
+                              struct drm_device *dev)
+{
+       if (WARN_ON(!pt->page))
+               return;
+
+       i915_dma_unmap_single(pt, dev);
+       __free_page(pt->page);
+       kfree(pt->used_ptes);
+       kfree(pt);
+}
+
+static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
+{
+       struct i915_page_table_entry *pt;
+       const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+               GEN8_PTES : GEN6_PTES;
+       int ret = -ENOMEM;
+
+       pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+       if (!pt)
+               return ERR_PTR(-ENOMEM);
+
+       pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+                               GFP_KERNEL);
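+       /* One bit per PTE in the table: 512 eight-byte PTEs per 4K page
+        * on GEN8+, 1024 four-byte PTEs before that. */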
+
+       if (!pt->used_ptes)
+               goto fail_bitmap;
+
+       pt->page = alloc_page(GFP_KERNEL);
+       if (!pt->page)
+               goto fail_page;
+
+       ret = i915_dma_map_single(pt, dev);
+       if (ret)
+               goto fail_dma;
+
+       return pt;
+
+fail_dma:
+       __free_page(pt->page);
+fail_page:
+       kfree(pt->used_ptes);
+fail_bitmap:
+       kfree(pt);
+
+       return ERR_PTR(ret);
+}
+
+/**
+ * alloc_pt_range() - Allocate multiple page tables
+ * @pd:                The page directory which will have at least @count entries
+ *             available to point to the allocated page tables.
+ * @pde:       First page directory entry for which we are allocating.
+ * @count:     Number of pages to allocate.
+ * @dev:       DRM device.
+ *
+ * Allocates multiple page table pages and sets the appropriate entries in the
+ * page table structure within the page directory. Function cleans up after
+ * itself on any failures.
+ *
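+ * For example, filling an entire directory up front would be
+ * alloc_pt_range(pd, 0, I915_PDES, dev).
+ *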
+ * Return: 0 if allocation succeeded.
+ */
+static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
+                         struct drm_device *dev)
+{
+       int i, ret;
+
+       /* 512 is the maximum number of page tables per page directory
+        * on any platform. */
+       if (WARN_ON(pde + count > I915_PDES))
+               return -EINVAL;
+
+       for (i = pde; i < pde + count; i++) {
+               struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+               if (IS_ERR(pt)) {
+                       ret = PTR_ERR(pt);
+                       goto err_out;
+               }
+               WARN(pd->page_table[i],
+                    "Leaking page directory entry %d (%p)\n",
+                    i, pd->page_table[i]);
+               pd->page_table[i] = pt;
+       }
+
+       return 0;
+
+err_out:
+       while (i-- > pde)
+               unmap_and_free_pt(pd->page_table[i], dev);
+       return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+       if (pd->page) {
+               __free_page(pd->page);
+               kfree(pd);
+       }
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+       struct i915_page_directory_entry *pd;
+
+       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+       if (!pd)
+               return ERR_PTR(-ENOMEM);
+
+       pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+       if (!pd->page) {
+               kfree(pd);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       return pd;
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
                           uint64_t val)
@@ -304,10 +472,10 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
        int i, ret;
 
        /* bit of a hack to find the actual last used pd */
-       int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+       int used_pd = ppgtt->num_pd_entries / I915_PDES;
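+       /* Each page directory holds I915_PDES entries, so this yields
+        * the number of page directories actually in use. */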
 
        for (i = used_pd - 1; i >= 0; i--) {
-               dma_addr_t addr = ppgtt->pd_dma_addr[i];
+               dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
                ret = gen8_write_pdp(ring, i, addr);
                if (ret)
                        return ret;
@@ -323,7 +491,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
-       gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+       gen8_pte_t *pt_vaddr, scratch_pte;
        unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
        unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
        unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
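+       /* A legacy (32-bit) GEN8 PPGTT address splits into 2 bits of
+        * PDPE, 9 bits of PDE and 9 bits of PTE above the 4K page offset. */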
@@ -334,11 +502,28 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                                      I915_CACHE_LLC, use_scratch);
 
        while (num_entries) {
-               struct vm_page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+               struct i915_page_directory_entry *pd;
+               struct i915_page_table_entry *pt;
+               struct vm_page *page_table;
+
+               if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+                       continue;
+
+               pd = ppgtt->pdp.page_directory[pdpe];
+
+               if (WARN_ON(!pd->page_table[pde]))
+                       continue;
+
+               pt = pd->page_table[pde];
+
+               if (WARN_ON(!pt->page))
+                       continue;
+
+               page_table = pt->page;
 
                last_pte = pte + num_entries;
-               if (last_pte > GEN8_PTES_PER_PAGE)
-                       last_pte = GEN8_PTES_PER_PAGE;
+               if (last_pte > GEN8_PTES)
+                       last_pte = GEN8_PTES;
 
                pt_vaddr = kmap_atomic(page_table);
 
@@ -352,7 +537,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
                kunmap_atomic(pt_vaddr);
 
                pte = 0;
-               if (++pde == GEN8_PDES_PER_PAGE) {
+               if (++pde == I915_PDES) {
                        pdpe++;
                        pde = 0;
                }
@@ -367,7 +552,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
        struct i915_hw_ppgtt *ppgtt =
                container_of(vm, struct i915_hw_ppgtt, base);
-       gen8_gtt_pte_t *pt_vaddr;
+       gen8_pte_t *pt_vaddr;
        unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
        unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
        unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -376,21 +561,26 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
        pt_vaddr = NULL;
 
        for (i = 0; i < num_entries; i++) {
-               if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+               if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
                        break;
 
-               if (pt_vaddr == NULL)
-                       pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+               if (pt_vaddr == NULL) {
+                       struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
+                       struct i915_page_table_entry *pt = pd->page_table[pde];
+                       struct vm_page *page_table = pt->page;
+
+                       pt_vaddr = kmap_atomic(page_table);
+               }
 
                pt_vaddr[pte] =
                        gen8_pte_encode(VM_PAGE_TO_PHYS(pages[i]),
                                        cache_level, true);
-               if (++pte == GEN8_PTES_PER_PAGE) {
+               if (++pte == GEN8_PTES) {
                        if (!HAS_LLC(ppgtt->base.dev))
                                drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
                        kunmap_atomic(pt_vaddr);
                        pt_vaddr = NULL;
-                       if (++pde == GEN8_PDES_PER_PAGE) {
+                       if (++pde == I915_PDES) {
                                pdpe++;
                                pde = 0;
                        }
@@ -404,29 +594,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
        }
 }
 
-static void gen8_free_page_tables(struct vm_page **pt_pages)
+static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
 {
        int i;
 
-       if (pt_pages == NULL)
+       if (!pd->page)
                return;
 
-       for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
-               if (pt_pages[i])
-                       __free_pages(pt_pages[i], 0);
+       for (i = 0; i < I915_PDES; i++) {
+               if (WARN_ON(!pd->page_table[i]))
+                       continue;
+
+               unmap_and_free_pt(pd->page_table[i], dev);
+               pd->page_table[i] = NULL;
+       }
 }
 
-static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
+static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
        int i;
 
        for (i = 0; i < ppgtt->num_pd_pages; i++) {
-               gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
-               kfree(ppgtt->gen8_pt_pages[i]);
-               kfree(ppgtt->gen8_pt_dma_addr[i]);
-       }
+               if (WARN_ON(!ppgtt->pdp.page_directory[i]))
+                       continue;
 
-       __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
+               gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
+               unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+       }
 }
 
 static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
@@ -437,14 +631,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
        for (i = 0; i < ppgtt->num_pd_pages; i++) {
                /* TODO: In the future we'll support sparse mappings, so this
                 * will have to change. */
-               if (!ppgtt->pd_dma_addr[i])
+               if (!ppgtt->pdp.page_directory[i]->daddr)
                        continue;
 
-               pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
+               pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
 
-               for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
-                       dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
+               for (j = 0; j < I915_PDES; j++) {
+                       struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
+                       struct i915_page_table_entry *pt;
+                       dma_addr_t addr;
+
+                       if (WARN_ON(!pd->page_table[j]))
+                               continue;
+
+                       pt = pd->page_table[j];
+                       addr = pt->daddr;
+
                        if (addr)
                                pci_unmap_page(hwdev, addr, PAGE_SIZE,
                                               PCI_DMA_BIDIRECTIONAL);
@@ -461,86 +664,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
        gen8_ppgtt_free(ppgtt);
 }
 
-static struct vm_page **__gen8_alloc_page_tables(void)
+static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 {
-       struct vm_page **pt_pages;
-       int i;
-
-       pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct vm_page *), GFP_KERNEL);