From 477eb7f9b36d138314225b3b077de1c692e5e1a8 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Fran=C3=A7ois=20Tigeot?=
Date: Sat, 23 Jan 2016 18:26:28 +0100
Subject: [PATCH] drm/i915: Update to Linux 4.1

* Valleyview support has been vastly improved and is no longer
  considered preliminary

* Skylake support improvements: runtime power management, turbo and
  sleep states should now be fully operational. Many workarounds have
  been added for Skylake-specific issues

* Preliminary changes to prepare for Broxton (future Atom SoCs) support

* Distinguish hardware minimum and user minimum frequencies. Set the
  GPU frequency to the hardware minimum on idle in order to reduce
  power usage

* DRRS (dynamic refresh rate switching) is now enabled where supported.
  The idea is to reduce the refresh rate of the panel to save power
  when nothing changes on the screen

* DP deadlock bugfixes and improved link rate computation.
  Intermediate link rate support for eDP 1.4

* XenGT client-side support. This is paravirtualization allowing
  virtual machines to tap into the render engines

* Plenty of internal work to prepare for atomic mode setting

* Lots of other smaller work all over, such as added documentation,
  dead UMS code removal, vblank interrupt cleanups, etc.
---
 sys/conf/files | 4 +-
 sys/dev/drm/drm_atomic.c | 205 +--
 sys/dev/drm/drm_atomic_helper.c | 660 +++----
 sys/dev/drm/drm_cache.c | 3 +-
 sys/dev/drm/drm_crtc.c | 114 +-
 sys/dev/drm/drm_crtc_helper.c | 34 +-
 sys/dev/drm/drm_dp_helper.c | 395 +++-
 sys/dev/drm/drm_dp_mst_topology.c | 13 +
 sys/dev/drm/drm_fb_helper.c | 62 +-
 sys/dev/drm/drm_irq.c | 31 +
 sys/dev/drm/drm_modes.c | 12 +-
 sys/dev/drm/drm_pci.c | 3 +-
 sys/dev/drm/drm_plane_helper.c | 77 +-
 sys/dev/drm/drm_probe_helper.c | 2 +-
 sys/dev/drm/i915/Makefile | 9 +-
 sys/dev/drm/i915/i915_cmd_parser.c | 72 +-
 sys/dev/drm/i915/i915_dma.c | 261 ++-
 sys/dev/drm/i915/i915_drv.c | 201 +-
 sys/dev/drm/i915/i915_drv.h | 433 ++---
 sys/dev/drm/i915/i915_gem.c | 552 ++----
 sys/dev/drm/i915/i915_gem_context.c | 115 +-
 sys/dev/drm/i915/i915_gem_evict.c | 4 +
 sys/dev/drm/i915/i915_gem_execbuffer.c | 195 +-
 sys/dev/drm/i915/i915_gem_gtt.c | 1081 +++++++----
 sys/dev/drm/i915/i915_gem_gtt.h | 162 +-
 sys/dev/drm/i915/i915_gem_shrinker.c | 338 ++++
 sys/dev/drm/i915/i915_gem_stolen.c | 8 +-
 sys/dev/drm/i915/i915_irq.c | 300 +--
 sys/dev/drm/i915/i915_params.c | 20 +-
 sys/dev/drm/i915/i915_reg.h | 202 ++-
 sys/dev/drm/i915/i915_suspend.c | 215 +--
 sys/dev/drm/i915/i915_sysfs.c | 682 +++++++
 sys/dev/drm/i915/i915_trace.h | 5 +
 sys/dev/drm/i915/i915_ums.c | 552 ------
 sys/dev/drm/i915/i915_vgpu.c | 264 +++
 sys/dev/drm/i915/i915_vgpu.h | 91 +
 sys/dev/drm/i915/intel_atomic.c | 16 +-
 sys/dev/drm/i915/intel_atomic_plane.c | 24 +-
 sys/dev/drm/i915/intel_bios.c | 7 +
 sys/dev/drm/i915/intel_bios.h | 1 +
 sys/dev/drm/i915/intel_crt.c | 11 +-
 sys/dev/drm/i915/intel_ddi.c | 111 +-
 sys/dev/drm/i915/intel_display.c | 1818 ++++++++++++-------
 sys/dev/drm/i915/intel_dp.c | 580 ++++--
 sys/dev/drm/i915/intel_dp_mst.c | 38 +-
 sys/dev/drm/i915/intel_drv.h | 129 +-
 sys/dev/drm/i915/intel_dsi.c | 5 +-
 sys/dev/drm/i915/intel_dsi_cmd.h | 39 -
 sys/dev/drm/i915/intel_dvo.c | 3 +-
 sys/dev/drm/i915/intel_fbc.c | 184 +-
 sys/dev/drm/i915/intel_fbdev.c | 32 +-
 sys/dev/drm/i915/intel_frontbuffer.c | 21 +-
 sys/dev/drm/i915/intel_hdmi.c | 29 +-
 sys/dev/drm/i915/intel_i2c.c | 87 +-
 sys/dev/drm/i915/intel_lrc.c | 249 ++-
 sys/dev/drm/i915/intel_lrc.h | 12 +-
 sys/dev/drm/i915/intel_lvds.c | 37 +-
 sys/dev/drm/i915/intel_opregion.c | 6 +-
 sys/dev/drm/i915/intel_overlay.c | 5 +-
 sys/dev/drm/i915/intel_panel.c | 1 +
 sys/dev/drm/i915/intel_pm.c | 1280 +++++++------
 sys/dev/drm/i915/intel_psr.c | 2 -
 sys/dev/drm/i915/intel_ringbuffer.c | 351 ++--
 sys/dev/drm/i915/intel_ringbuffer.h | 13 +-
 sys/dev/drm/i915/intel_runtime_pm.c | 268 ++-
 sys/dev/drm/i915/intel_sdvo.c | 27 +-
 sys/dev/drm/i915/intel_sprite.c | 476 ++---
 sys/dev/drm/i915/intel_tv.c | 5 +-
 sys/dev/drm/i915/intel_uncore.c | 77 +-
 sys/dev/drm/include/drm/drmP.h | 124 +-
 sys/dev/drm/include/drm/drm_atomic.h | 24 +
 sys/dev/drm/include/drm/drm_atomic_helper.h | 20 +-
 sys/dev/drm/include/drm/drm_crtc.h | 13 +-
 sys/dev/drm/include/drm/drm_crtc_helper.h | 3 +-
 sys/dev/drm/include/drm/drm_dp_helper.h | 176 +-
 sys/dev/drm/include/drm/drm_dp_mst_helper.h | 2 +
 sys/dev/drm/include/drm/drm_edid.h | 2 +
 sys/dev/drm/include/drm/drm_fb_helper.h | 19 +
 sys/dev/drm/include/drm/drm_modes.h | 2 +-
 sys/dev/drm/include/drm/drm_panel.h | 5 +
 sys/dev/drm/include/drm/drm_plane_helper.h | 10 +-
 sys/dev/drm/include/drm/i915_pciids.h | 99 +-
 sys/dev/drm/include/linux/kernel.h | 5 +-
 sys/dev/drm/include/uapi_drm/drm.h | 2 +
 sys/dev/drm/include/uapi_drm/drm_fourcc.h | 78 +
 sys/dev/drm/include/uapi_drm/drm_mode.h | 9 +
 sys/dev/drm/include/uapi_drm/i915_drm.h | 3 +
 87 files changed, 8839 insertions(+), 5078 deletions(-)
 create mode 100644 sys/dev/drm/i915/i915_gem_shrinker.c
 create mode 100644 sys/dev/drm/i915/i915_sysfs.c
 delete mode 100644 sys/dev/drm/i915/i915_ums.c
 create mode 100644 sys/dev/drm/i915/i915_vgpu.c
 create mode 100644 sys/dev/drm/i915/i915_vgpu.h

diff --git a/sys/conf/files b/sys/conf/files index d937c8ed9e..33b6a4414f 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -2045,12 +2045,14 @@ dev/drm/i915/i915_gem_evict.c optional i915 drm dev/drm/i915/i915_gem_gtt.c optional i915 drm dev/drm/i915/i915_gem_stolen.c optional i915 drm dev/drm/i915/i915_gem_render_state.c optional i915 drm +dev/drm/i915/i915_gem_shrinker.c optional i915 drm dev/drm/i915/i915_gem_tiling.c optional i915 drm dev/drm/i915/i915_gem_userptr.c optional i915 drm dev/drm/i915/i915_irq.c optional i915 drm dev/drm/i915/i915_params.c optional i915 drm dev/drm/i915/i915_suspend.c optional i915 drm -dev/drm/i915/i915_ums.c optional i915 drm +dev/drm/i915/i915_sysfs.c optional i915 drm +dev/drm/i915/i915_vgpu.c optional i915 drm dev/drm/i915/intel_acpi.c optional i915 drm dev/drm/i915/intel_atomic.c optional i915 drm dev/drm/i915/intel_atomic_plane.c optional i915 drm
diff --git a/sys/dev/drm/drm_atomic.c b/sys/dev/drm/drm_atomic.c index aba362e9a9..387bafd883 100644 --- a/sys/dev/drm/drm_atomic.c +++ b/sys/dev/drm/drm_atomic.c @@ -94,7 +94,7 @@ drm_atomic_state_alloc(struct drm_device *dev) state->dev = dev; - DRM_DEBUG_KMS("Allocate atomic state %p\n", state); + DRM_DEBUG_ATOMIC("Allocate atomic state %p\n", state); return state; fail: @@ -124,7 +124,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) struct drm_mode_config *config = &dev->mode_config; int i; - DRM_DEBUG_KMS("Clearing atomic state %p\n", state); + DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); for (i = 0; i < state->num_connector; i++) { struct drm_connector *connector = state->connectors[i]; @@ -136,6 +136,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) connector->funcs->atomic_destroy_state(connector, state->connector_states[i]); + state->connectors[i] = NULL; state->connector_states[i] = NULL; } @@ -147,6 +148,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) crtc->funcs->atomic_destroy_state(crtc, state->crtc_states[i]); +
state->crtcs[i] = NULL; state->crtc_states[i] = NULL; } @@ -158,6 +160,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state) plane->funcs->atomic_destroy_state(plane, state->plane_states[i]); + state->planes[i] = NULL; state->plane_states[i] = NULL; } } @@ -172,9 +175,12 @@ EXPORT_SYMBOL(drm_atomic_state_clear); */ void drm_atomic_state_free(struct drm_atomic_state *state) { + if (!state) + return; + drm_atomic_state_clear(state); - DRM_DEBUG_KMS("Freeing atomic state %p\n", state); + DRM_DEBUG_ATOMIC("Freeing atomic state %p\n", state); kfree_state(state); } @@ -219,8 +225,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state, state->crtcs[index] = crtc; crtc_state->state = state; - DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n", - crtc->base.id, crtc_state, state); + DRM_DEBUG_ATOMIC("Added [CRTC:%d] %p state to %p\n", + crtc->base.id, crtc_state, state); return crtc_state; } @@ -250,11 +256,14 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_mode_config *config = &dev->mode_config; /* FIXME: Mode prop is missing, which also controls ->enable. */ - if (property == config->prop_active) { + if (property == config->prop_active) state->active = val; - } else if (crtc->funcs->atomic_set_property) + else if (crtc->funcs->atomic_set_property) return crtc->funcs->atomic_set_property(crtc, state, property, val); - return -EINVAL; + else + return -EINVAL; + + return 0; } EXPORT_SYMBOL(drm_atomic_crtc_set_property); @@ -268,9 +277,17 @@ static int drm_atomic_crtc_get_property(struct drm_crtc *crtc, const struct drm_crtc_state *state, struct drm_property *property, uint64_t *val) { - if (crtc->funcs->atomic_get_property) + struct drm_device *dev = crtc->dev; + struct drm_mode_config *config = &dev->mode_config; + + if (property == config->prop_active) + *val = state->active; + else if (crtc->funcs->atomic_get_property) return crtc->funcs->atomic_get_property(crtc, state, property, val); - return -EINVAL; + else + return -EINVAL; + + return 0; } /** @@ -295,8 +312,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc, */ if (state->active && !state->enable) { - DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] active without enabled\n", + crtc->base.id); return -EINVAL; } @@ -342,8 +359,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state, state->planes[index] = plane; plane_state->state = state; - DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n", - plane->base.id, plane_state, state); + DRM_DEBUG_ATOMIC("Added [PLANE:%d] %p state to %p\n", + plane->base.id, plane_state, state); if (plane_state->crtc) { struct drm_crtc_state *crtc_state; @@ -452,6 +469,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane, *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; + } else if (property == config->rotation_property) { + *val = state->rotation; } else if (plane->funcs->atomic_get_property) { return plane->funcs->atomic_get_property(plane, state, property, val); } else { @@ -475,14 +494,14 @@ static int drm_atomic_plane_check(struct drm_plane *plane, struct drm_plane_state *state) { unsigned int fb_width, fb_height; - unsigned int i; + int ret; /* either *both* CRTC and FB must be set, or neither */ if (WARN_ON(state->crtc && !state->fb)) { - DRM_DEBUG_KMS("CRTC set but no FB\n"); + DRM_DEBUG_ATOMIC("CRTC set but no FB\n"); return -EINVAL; } else if (WARN_ON(state->fb && !state->crtc)) { - DRM_DEBUG_KMS("FB set but no CRTC\n"); + DRM_DEBUG_ATOMIC("FB set but no 
CRTC\n"); return -EINVAL; } @@ -492,18 +511,16 @@ static int drm_atomic_plane_check(struct drm_plane *plane, /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) { - DRM_DEBUG_KMS("Invalid crtc for plane\n"); + DRM_DEBUG_ATOMIC("Invalid crtc for plane\n"); return -EINVAL; } /* Check whether this plane supports the fb pixel format. */ - for (i = 0; i < plane->format_count; i++) - if (state->fb->pixel_format == plane->format_types[i]) - break; - if (i == plane->format_count) { - DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(state->fb->pixel_format)); - return -EINVAL; + ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format); + if (ret) { + DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", + drm_get_format_name(state->fb->pixel_format)); + return ret; } /* Give drivers some help against integer overflows */ @@ -511,9 +528,9 @@ static int drm_atomic_plane_check(struct drm_plane *plane, state->crtc_x > INT_MAX - (int32_t) state->crtc_w || state->crtc_h > INT_MAX || state->crtc_y > INT_MAX - (int32_t) state->crtc_h) { - DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", - state->crtc_w, state->crtc_h, - state->crtc_x, state->crtc_y); + DRM_DEBUG_ATOMIC("Invalid CRTC coordinates %ux%u+%d+%d\n", + state->crtc_w, state->crtc_h, + state->crtc_x, state->crtc_y); return -ERANGE; } @@ -525,12 +542,12 @@ static int drm_atomic_plane_check(struct drm_plane *plane, state->src_x > fb_width - state->src_w || state->src_h > fb_height || state->src_y > fb_height - state->src_h) { - DRM_DEBUG_KMS("Invalid source coordinates " - "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", - state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, - state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, - state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, - state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10); + DRM_DEBUG_ATOMIC("Invalid source coordinates " + "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", + state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10, + state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10, + state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10, + state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10); return -ENOSPC; } @@ -577,7 +594,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, * at most the array is a bit too large. 
*/ if (index >= state->num_connector) { - DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n"); + DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); return ERR_PTR(-EAGAIN); } @@ -592,8 +609,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, state->connectors[index] = connector; connector_state->state = state; - DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n", - connector->base.id, connector_state, state); + DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", + connector->base.id, connector_state, state); if (connector_state->crtc) { struct drm_crtc_state *crtc_state; @@ -754,17 +771,18 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, } if (crtc) - DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n", - plane_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link plane state %p to [CRTC:%d]\n", + plane_state, crtc->base.id); else - DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state); + DRM_DEBUG_ATOMIC("Link plane state %p to [NOCRTC]\n", + plane_state); return 0; } EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); /** - * drm_atomic_set_fb_for_plane - set crtc for plane + * drm_atomic_set_fb_for_plane - set framebuffer for plane * @plane_state: atomic state object for the plane * @fb: fb to use for the plane * @@ -784,10 +802,11 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, plane_state->fb = fb; if (fb) - DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n", - fb->base.id, plane_state); + DRM_DEBUG_ATOMIC("Set [FB:%d] for plane state %p\n", + fb->base.id, plane_state); else - DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state); + DRM_DEBUG_ATOMIC("Set [NOFB] for plane state %p\n", + plane_state); } EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); @@ -820,11 +839,11 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, conn_state->crtc = crtc; if (crtc) - DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n", - conn_state, crtc->base.id); + DRM_DEBUG_ATOMIC("Link connector state %p to [CRTC:%d]\n", + conn_state, crtc->base.id); else - DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n", - conn_state); + DRM_DEBUG_ATOMIC("Link connector state %p to [NOCRTC]\n", + conn_state); return 0; } @@ -860,8 +879,8 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, if (ret) return ret; - DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n", - crtc->base.id, state); + DRM_DEBUG_ATOMIC("Adding all current connectors for [CRTC:%d] to %p\n", + crtc->base.id, state); /* * Changed connectors are already in @state, so only need to look at the @@ -892,19 +911,18 @@ int drm_atomic_connectors_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc) { - int i, num_connected_connectors = 0; - - for (i = 0; i < state->num_connector; i++) { - struct drm_connector_state *conn_state; + struct drm_connector *connector; + struct drm_connector_state *conn_state; - conn_state = state->connector_states[i]; + int i, num_connected_connectors = 0; - if (conn_state && conn_state->crtc == crtc) + for_each_connector_in_state(state, connector, conn_state, i) { + if (conn_state->crtc == crtc) num_connected_connectors++; } - DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n", - state, num_connected_connectors, crtc->base.id); + DRM_DEBUG_ATOMIC("State %p has %i connectors for [CRTC:%d]\n", + state, num_connected_connectors, crtc->base.id); return num_connected_connectors; } @@ -916,7 +934,7 @@ 
EXPORT_SYMBOL(drm_atomic_connectors_for_crtc); * * This function should be used by legacy entry points which don't understand * -EDEADLK semantics. For simplicity this one will grab all modeset locks after - * the slowpath completed. + * the slowpath completed. */ void drm_atomic_legacy_backoff(struct drm_atomic_state *state) { @@ -951,36 +969,28 @@ int drm_atomic_check_only(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct drm_mode_config *config = &dev->mode_config; - int nplanes = config->num_total_plane; - int ncrtcs = config->num_crtc; + struct drm_plane *plane; + struct drm_plane_state *plane_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; int i, ret = 0; - DRM_DEBUG_KMS("checking %p\n", state); - - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; + DRM_DEBUG_ATOMIC("checking %p\n", state); - if (!plane) - continue; - - ret = drm_atomic_plane_check(plane, state->plane_states[i]); + for_each_plane_in_state(state, plane, plane_state, i) { + ret = drm_atomic_plane_check(plane, plane_state); if (ret) { - DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d] atomic core check failed\n", + plane->base.id); return ret; } } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - - if (!crtc) - continue; - - ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]); + for_each_crtc_in_state(state, crtc, crtc_state, i) { + ret = drm_atomic_crtc_check(crtc, crtc_state); if (ret) { - DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] atomic core check failed\n", + crtc->base.id); return ret; } } @@ -989,17 +999,11 @@ int drm_atomic_check_only(struct drm_atomic_state *state) ret = config->funcs->atomic_check(state->dev, state); if (!state->allow_modeset) { - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - struct drm_crtc_state *crtc_state = state->crtc_states[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { if (crtc_state->mode_changed || crtc_state->active_changed) { - DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n", + crtc->base.id); return -EINVAL; } } @@ -1034,7 +1038,7 @@ int drm_atomic_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_KMS("commiting %p\n", state); + DRM_DEBUG_ATOMIC("commiting %p\n", state); return config->funcs->atomic_commit(state->dev, state, false); } @@ -1065,7 +1069,7 @@ int drm_atomic_async_commit(struct drm_atomic_state *state) if (ret) return ret; - DRM_DEBUG_KMS("commiting %p asynchronously\n", state); + DRM_DEBUG_ATOMIC("commiting %p asynchronously\n", state); return config->funcs->atomic_commit(state->dev, state, true); } @@ -1191,6 +1195,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev, struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; unsigned plane_mask = 0; int ret = 0; unsigned int i, j; @@ -1294,15 +1300,9 @@ retry: } if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { - int ncrtcs = dev->mode_config.num_crtc; - - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_state *crtc_state = state->crtc_states[i]; + for_each_crtc_in_state(state, crtc, crtc_state, i) { struct drm_pending_vblank_event *e; - if (!crtc_state) - continue; - e = create_vblank_event(dev, file_priv, arg->user_data); if (!e) { ret 
= -ENOMEM; @@ -1354,14 +1354,7 @@ fail: goto backoff; if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { - int ncrtcs = dev->mode_config.num_crtc; - - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_state *crtc_state = state->crtc_states[i]; - - if (!crtc_state) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { destroy_vblank_event(dev, file_priv, crtc_state->event); crtc_state->event = NULL; } diff --git a/sys/dev/drm/drm_atomic_helper.c b/sys/dev/drm/drm_atomic_helper.c index 52b104fe29..7646c54762 100644 --- a/sys/dev/drm/drm_atomic_helper.c +++ b/sys/dev/drm/drm_atomic_helper.c @@ -116,9 +116,9 @@ steal_encoder(struct drm_atomic_state *state, */ WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", - encoder->base.id, encoder->name, - encoder_crtc->base.id); + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n", + encoder->base.id, encoder->name, + encoder_crtc->base.id); crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc); if (IS_ERR(crtc_state)) @@ -130,9 +130,9 @@ steal_encoder(struct drm_atomic_state *state, if (connector->state->best_encoder != encoder) continue; - DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("Stealing encoder from [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); connector_state = drm_atomic_get_connector_state(state, connector); @@ -151,7 +151,7 @@ steal_encoder(struct drm_atomic_state *state, static int update_connector_routing(struct drm_atomic_state *state, int conn_idx) { - struct drm_connector_helper_funcs *funcs; + const struct drm_connector_helper_funcs *funcs; struct drm_encoder *new_encoder; struct drm_crtc *encoder_crtc; struct drm_connector *connector; @@ -165,9 +165,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) if (!connector) return 0; - DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("Updating routing for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); if (connector->state->crtc != connector_state->crtc) { if (connector->state->crtc) { @@ -186,7 +186,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) } if (!connector_state->crtc) { - DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n", + DRM_DEBUG_ATOMIC("Disabling [CONNECTOR:%d:%s]\n", connector->base.id, connector->name); @@ -199,19 +199,19 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) new_encoder = funcs->best_encoder(connector); if (!new_encoder) { - DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); return -EINVAL; } if (new_encoder == connector_state->best_encoder) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - connector_state->crtc->base.id); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); return 0; } @@ -222,9 +222,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) if (encoder_crtc) { ret = steal_encoder(state, new_encoder, encoder_crtc); if (ret) { - 
DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n", - connector->base.id, - connector->name); + DRM_DEBUG_ATOMIC("Encoder stealing failed for [CONNECTOR:%d:%s]\n", + connector->base.id, + connector->name); return ret; } } @@ -235,12 +235,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) crtc_state = state->crtc_states[idx]; crtc_state->mode_changed = true; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", - connector->base.id, - connector->name, - new_encoder->base.id, - new_encoder->name, - connector_state->crtc->base.id); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n", + connector->base.id, + connector->name, + new_encoder->base.id, + new_encoder->name, + connector_state->crtc->base.id); return 0; } @@ -248,30 +248,24 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx) static int mode_fixup(struct drm_atomic_state *state) { - int ncrtcs = state->dev->mode_config.num_crtc; + struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; + struct drm_connector *connector; struct drm_connector_state *conn_state; int i; bool ret; - for (i = 0; i < ncrtcs; i++) { - crtc_state = state->crtc_states[i]; - - if (!crtc_state || !crtc_state->mode_changed) + for_each_crtc_in_state(state, crtc, crtc_state, i) { + if (!crtc_state->mode_changed) continue; drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode); } - for (i = 0; i < state->num_connector; i++) { - struct drm_encoder_helper_funcs *funcs; + for_each_connector_in_state(state, connector, conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; - conn_state = state->connector_states[i]; - - if (!conn_state) - continue; - WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc); if (!conn_state->crtc || !conn_state->best_encoder) @@ -292,7 +286,7 @@ mode_fixup(struct drm_atomic_state *state) encoder->bridge, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_KMS("Bridge fixup failed\n"); + DRM_DEBUG_ATOMIC("Bridge fixup failed\n"); return -EINVAL; } } @@ -301,37 +295,33 @@ mode_fixup(struct drm_atomic_state *state) ret = funcs->atomic_check(encoder, crtc_state, conn_state); if (ret) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n", - encoder->base.id, encoder->name); + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] check failed\n", + encoder->base.id, encoder->name); return ret; } } else { ret = funcs->mode_fixup(encoder, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n", - encoder->base.id, encoder->name); + DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] fixup failed\n", + encoder->base.id, encoder->name); return -EINVAL; } } } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc; + for_each_crtc_in_state(state, crtc, crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; - crtc_state = state->crtc_states[i]; - crtc = state->crtcs[i]; - - if (!crtc_state || !crtc_state->mode_changed) + if (!crtc_state->mode_changed) continue; funcs = crtc->helper_private; ret = funcs->mode_fixup(crtc, &crtc_state->mode, &crtc_state->adjusted_mode); if (!ret) { - DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] fixup failed\n", + crtc->base.id); return -EINVAL; } } @@ -346,7 +336,7 @@ needs_modeset(struct drm_crtc_state *state) } /** - * drm_atomic_helper_check - validate state object for modeset changes + * drm_atomic_helper_check_modeset - validate state object for 
modeset changes * @dev: DRM device * @state: the driver state object * @@ -371,32 +361,27 @@ int drm_atomic_helper_check_modeset(struct drm_device *dev, struct drm_atomic_state *state) { - int ncrtcs = dev->mode_config.num_crtc; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; + struct drm_connector *connector; + struct drm_connector_state *connector_state; int i, ret; - for (i = 0; i < ncrtcs; i++) { - crtc = state->crtcs[i]; - crtc_state = state->crtc_states[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) { - DRM_DEBUG_KMS("[CRTC:%d] mode changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] mode changed\n", + crtc->base.id); crtc_state->mode_changed = true; } if (crtc->state->enable != crtc_state->enable) { - DRM_DEBUG_KMS("[CRTC:%d] enable changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] enable changed\n", + crtc->base.id); crtc_state->mode_changed = true; } } - for (i = 0; i < state->num_connector; i++) { + for_each_connector_in_state(state, connector, connector_state, i) { /* * This only sets crtc->mode_changed for routing changes, * drivers must set crtc->mode_changed themselves when connector @@ -413,32 +398,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, * configuration. This must be done before calling mode_fixup in case a * crtc only changed its mode but has the same set of connectors. */ - for (i = 0; i < ncrtcs; i++) { + for_each_crtc_in_state(state, crtc, crtc_state, i) { int num_connectors; - crtc = state->crtcs[i]; - crtc_state = state->crtc_states[i]; - - if (!crtc) - continue; - /* * We must set ->active_changed after walking connectors for * otherwise an update that only changes active would result in * a full modeset because update_connector_routing force that. */ if (crtc->state->active != crtc_state->active) { - DRM_DEBUG_KMS("[CRTC:%d] active changed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] active changed\n", + crtc->base.id); crtc_state->active_changed = true; } if (!needs_modeset(crtc_state)) continue; - DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", - crtc->base.id, - crtc_state->enable ? 'y' : 'n', + DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", + crtc->base.id, + crtc_state->enable ? 'y' : 'n', crtc_state->active ? 
'y' : 'n'); ret = drm_atomic_add_affected_connectors(state, crtc); @@ -449,8 +428,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, crtc); if (crtc_state->enable != !!num_connectors) { - DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] enabled/connectors mismatch\n", + crtc->base.id); return -EINVAL; } @@ -461,7 +440,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_check_modeset); /** - * drm_atomic_helper_check - validate state object for modeset changes + * drm_atomic_helper_check_planes - validate state object for planes changes * @dev: DRM device * @state: the driver state object * @@ -476,17 +455,14 @@ int drm_atomic_helper_check_planes(struct drm_device *dev, struct drm_atomic_state *state) { - int nplanes = dev->mode_config.num_total_plane; - int ncrtcs = dev->mode_config.num_crtc; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_plane *plane; + struct drm_plane_state *plane_state; int i, ret = 0; - for (i = 0; i < nplanes; i++) { - struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = state->planes[i]; - struct drm_plane_state *plane_state = state->plane_states[i]; - - if (!plane) - continue; + for_each_plane_in_state(state, plane, plane_state, i) { + const struct drm_plane_helper_funcs *funcs; funcs = plane->helper_private; @@ -497,18 +473,14 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(plane, plane_state); if (ret) { - DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n", - plane->base.id); + DRM_DEBUG_ATOMIC("[PLANE:%d] atomic driver check failed\n", + plane->base.id); return ret; } } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc = state->crtcs[i]; - - if (!crtc) - continue; + for_each_crtc_in_state(state, crtc, crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; funcs = crtc->helper_private; @@ -517,8 +489,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev, ret = funcs->atomic_check(crtc, state->crtc_states[i]); if (ret) { - DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("[CRTC:%d] atomic driver check failed\n", + crtc->base.id); return ret; } } @@ -567,27 +539,26 @@ EXPORT_SYMBOL(drm_atomic_helper_check); static void disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) { - int ncrtcs = old_state->dev->mode_config.num_crtc; + struct drm_connector *connector; + struct drm_connector_state *old_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; int i; - for (i = 0; i < old_state->num_connector; i++) { - struct drm_connector_state *old_conn_state; - struct drm_connector *connector; - struct drm_encoder_helper_funcs *funcs; + for_each_connector_in_state(old_state, connector, old_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; struct drm_crtc_state *old_crtc_state; - old_conn_state = old_state->connector_states[i]; - connector = old_state->connectors[i]; - /* Shut down everything that's in the changeset and currently * still on. So need to check the old, saved state. 
*/ - if (!old_conn_state || !old_conn_state->crtc) + if (!old_conn_state->crtc) continue; old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; - if (!old_crtc_state->active) + if (!old_crtc_state->active || + !needs_modeset(old_conn_state->crtc->state)) continue; encoder = old_conn_state->best_encoder; @@ -600,12 +571,12 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = encoder->helper_private; - DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal - * it away), so we won't call call disable hooks twice. + * it away), so we won't call disable hooks twice. */ if (encoder->bridge) encoder->bridge->funcs->disable(encoder->bridge); @@ -622,16 +593,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) encoder->bridge->funcs->post_disable(encoder->bridge); } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc; - struct drm_crtc_state *old_crtc_state; - - crtc = old_state->crtcs[i]; - old_crtc_state = old_state->crtc_states[i]; + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; /* Shut down everything that needs a full modeset. */ - if (!crtc || !needs_modeset(crtc->state)) + if (!needs_modeset(crtc->state)) continue; if (!old_crtc_state->active) @@ -639,8 +605,8 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) funcs = crtc->helper_private; - DRM_DEBUG_KMS("disabling [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n", + crtc->base.id); /* Right function depends upon target state. 
*/ @@ -656,16 +622,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state) static void set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state) { - int ncrtcs = old_state->dev->mode_config.num_crtc; + struct drm_connector *connector; + struct drm_connector_state *old_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; int i; /* clear out existing links */ - for (i = 0; i < old_state->num_connector; i++) { - struct drm_connector *connector; - - connector = old_state->connectors[i]; - - if (!connector || !connector->encoder) + for_each_connector_in_state(old_state, connector, old_conn_state, i) { + if (!connector->encoder) continue; WARN_ON(!connector->encoder->crtc); @@ -675,12 +640,8 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state) } /* set new links */ - for (i = 0; i < old_state->num_connector; i++) { - struct drm_connector *connector; - - connector = old_state->connectors[i]; - - if (!connector || !connector->state->crtc) + for_each_connector_in_state(old_state, connector, old_conn_state, i) { + if (!connector->state->crtc) continue; if (WARN_ON(!connector->state->best_encoder)) @@ -691,14 +652,7 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state) } /* set legacy state in the crtc structure */ - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc; - - crtc = old_state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { crtc->mode = crtc->state->mode; crtc->enabled = crtc->state->enable; crtc->x = crtc->primary->state->src_x >> 16; @@ -709,38 +663,35 @@ set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state) static void crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) { - int ncrtcs = old_state->dev->mode_config.num_crtc; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *old_conn_state; int i; - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc; - - crtc = old_state->crtcs[i]; + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; - if (!crtc || !crtc->state->mode_changed) + if (!crtc->state->mode_changed) continue; funcs = crtc->helper_private; - if (crtc->state->enable) { - DRM_DEBUG_KMS("modeset on [CRTC:%d]\n", - crtc->base.id); + if (crtc->state->enable && funcs->mode_set_nofb) { + DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n", + crtc->base.id); funcs->mode_set_nofb(crtc); } } - for (i = 0; i < old_state->num_connector; i++) { - struct drm_connector *connector; + for_each_connector_in_state(old_state, connector, old_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; struct drm_crtc_state *new_crtc_state; - struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; struct drm_display_mode *mode, *adjusted_mode; - connector = old_state->connectors[i]; - - if (!connector || !connector->state->best_encoder) + if (!connector->state->best_encoder) continue; encoder = connector->state->best_encoder; @@ -752,14 +703,15 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) if (!new_crtc_state->mode_changed) continue; - DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always 
steal - * it away), so we won't call call mode_set hooks twice. + * it away), so we won't call mode_set hooks twice. */ - funcs->mode_set(encoder, mode, adjusted_mode); + if (funcs->mode_set) + funcs->mode_set(encoder, mode, adjusted_mode); if (encoder->bridge && encoder->bridge->funcs->mode_set) encoder->bridge->funcs->mode_set(encoder->bridge, @@ -768,46 +720,56 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) } /** - * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates + * drm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs * @dev: DRM device - * @state: atomic state + * @old_state: atomic state object with old state structures * - * This function commits the modeset changes that need to be committed before - * updating planes. It shuts down all the outputs that need to be shut down and + * This function shuts down all the outputs that need to be shut down and * prepares them (if required) with the new mode. + * + * For compatability with legacy crtc helpers this should be called before + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. */ -void drm_atomic_helper_commit_pre_planes(struct drm_device *dev, - struct drm_atomic_state *state) +void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev, + struct drm_atomic_state *old_state) { - disable_outputs(dev, state); - set_routing_links(dev, state); - crtc_set_mode(dev, state); + disable_outputs(dev, old_state); + set_routing_links(dev, old_state); + crtc_set_mode(dev, old_state); } -EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes); +EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_disables); /** - * drm_atomic_helper_commit_post_planes - modeset commit after plane updates + * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs * @dev: DRM device * @old_state: atomic state object with old state structures * - * This function commits the modeset changes that need to be committed after - * updating planes: It enables all the outputs with the new configuration which - * had to be turned off for the update. + * This function enables all the outputs with the new configuration which had to + * be turned off for the update. + * + * For compatability with legacy crtc helpers this should be called after + * drm_atomic_helper_commit_planes(), which is what the default commit function + * does. But drivers with different needs can group the modeset commits together + * and do the plane commits at the end. This is useful for drivers doing runtime + * PM since planes updates then only happen when the CRTC is actually enabled. 
*/ -void drm_atomic_helper_commit_post_planes(struct drm_device *dev, - struct drm_atomic_state *old_state) +void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, + struct drm_atomic_state *old_state) { - int ncrtcs = old_state->dev->mode_config.num_crtc; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_connector *connector; + struct drm_connector_state *old_conn_state; int i; - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc; - - crtc = old_state->crtcs[i]; + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; /* Need to filter out CRTCs where only planes change. */ - if (!crtc || !needs_modeset(crtc->state)) + if (!needs_modeset(crtc->state)) continue; if (!crtc->state->active) @@ -816,8 +778,8 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, funcs = crtc->helper_private; if (crtc->state->enable) { - DRM_DEBUG_KMS("enabling [CRTC:%d]\n", - crtc->base.id); + DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n", + crtc->base.id); if (funcs->enable) funcs->enable(crtc); @@ -826,28 +788,26 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, } } - for (i = 0; i < old_state->num_connector; i++) { - struct drm_connector *connector; - struct drm_encoder_helper_funcs *funcs; + for_each_connector_in_state(old_state, connector, old_conn_state, i) { + const struct drm_encoder_helper_funcs *funcs; struct drm_encoder *encoder; - connector = old_state->connectors[i]; - - if (!connector || !connector->state->best_encoder) + if (!connector->state->best_encoder) continue; - if (!connector->state->crtc->state->active) + if (!connector->state->crtc->state->active || + !needs_modeset(connector->state->crtc->state)) continue; encoder = connector->state->best_encoder; funcs = encoder->helper_private; - DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n", - encoder->base.id, encoder->name); + DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n", + encoder->base.id, encoder->name); /* * Each encoder has at most one connector (since we always steal - * it away), so we won't call call enable hooks twice. + * it away), so we won't call enable hooks twice. 
*/ if (encoder->bridge) encoder->bridge->funcs->pre_enable(encoder->bridge); @@ -861,18 +821,17 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev, encoder->bridge->funcs->enable(encoder->bridge); } } -EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes); +EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); static void wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state) { - int nplanes = dev->mode_config.num_total_plane; + struct drm_plane *plane; + struct drm_plane_state *plane_state; int i; - for (i = 0; i < nplanes; i++) { - struct drm_plane *plane = state->planes[i]; - - if (!plane || !plane->state->fence) + for_each_plane_in_state(state, plane, plane_state, i) { + if (!plane->state->fence) continue; WARN_ON(!plane->state->fb); @@ -891,16 +850,9 @@ static bool framebuffer_changed(struct drm_device *dev, { struct drm_plane *plane; struct drm_plane_state *old_plane_state; - int nplanes = old_state->dev->mode_config.num_total_plane; int i; - for (i = 0; i < nplanes; i++) { - plane = old_state->planes[i]; - old_plane_state = old_state->plane_states[i]; - - if (!plane) - continue; - + for_each_plane_in_state(old_state, plane, old_plane_state, i) { if (plane->state->crtc != crtc && old_plane_state->crtc != crtc) continue; @@ -929,16 +881,9 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; - int ncrtcs = old_state->dev->mode_config.num_crtc; int i, ret; - for (i = 0; i < ncrtcs; i++) { - crtc = old_state->crtcs[i]; - old_crtc_state = old_state->crtc_states[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { /* No one cares about the old state, so abuse it for tracking * and store whether we hold a vblank reference (and should do a * vblank wait) in the ->enable boolean. 
*/ @@ -963,11 +908,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, old_crtc_state->last_vblank_count = drm_vblank_count(dev, i); } - for (i = 0; i < ncrtcs; i++) { - crtc = old_state->crtcs[i]; - old_crtc_state = old_state->crtc_states[i]; - - if (!crtc || !old_crtc_state->enable) + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + if (!old_crtc_state->enable) continue; ret = wait_event_timeout(dev->vblank[i].queue, @@ -1016,7 +958,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, /* * Everything below can be run asynchronously without the need to grab - * any modeset locks at all under one conditions: It must be guaranteed + * any modeset locks at all under one condition: It must be guaranteed * that the asynchronous work has either been cancelled (if the driver * supports it, which at least requires that the framebuffers get * cleaned up with drm_atomic_helper_cleanup_planes()) or completed @@ -1032,11 +974,11 @@ int drm_atomic_helper_commit(struct drm_device *dev, wait_for_fences(dev, state); - drm_atomic_helper_commit_pre_planes(dev, state); + drm_atomic_helper_commit_modeset_disables(dev, state); drm_atomic_helper_commit_planes(dev, state); - drm_atomic_helper_commit_post_planes(dev, state); + drm_atomic_helper_commit_modeset_enables(dev, state); drm_atomic_helper_wait_for_vblanks(dev, state); @@ -1087,9 +1029,9 @@ EXPORT_SYMBOL(drm_atomic_helper_commit); */ /** - * drm_atomic_helper_prepare_planes - prepare plane resources after commit + * drm_atomic_helper_prepare_planes - prepare plane resources before commit * @dev: DRM device - * @state: atomic state object with old state structures + * @state: atomic state object with new state structures * * This function prepares plane state, specifically framebuffers, for the new * configuration. 
If any failure is encountered this function will call @@ -1105,8 +1047,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, int ret, i; for (i = 0; i < nplanes; i++) { - struct drm_plane_helper_funcs *funcs; + const struct drm_plane_helper_funcs *funcs; struct drm_plane *plane = state->planes[i]; + struct drm_plane_state *plane_state = state->plane_states[i]; struct drm_framebuffer *fb; if (!plane) @@ -1114,10 +1057,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, funcs = plane->helper_private; - fb = state->plane_states[i]->fb; + fb = plane_state->fb; if (fb && funcs->prepare_fb) { - ret = funcs->prepare_fb(plane, fb); + ret = funcs->prepare_fb(plane, fb, plane_state); if (ret) goto fail; } @@ -1127,8 +1070,9 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, fail: for (i--; i >= 0; i--) { - struct drm_plane_helper_funcs *funcs; + const struct drm_plane_helper_funcs *funcs; struct drm_plane *plane = state->planes[i]; + struct drm_plane_state *plane_state = state->plane_states[i]; struct drm_framebuffer *fb; if (!plane) @@ -1139,7 +1083,7 @@ fail: fb = state->plane_states[i]->fb; if (fb && funcs->cleanup_fb) - funcs->cleanup_fb(plane, fb); + funcs->cleanup_fb(plane, fb, plane_state); } @@ -1163,16 +1107,14 @@ EXPORT_SYMBOL(drm_atomic_helper_prepare_planes); void drm_atomic_helper_commit_planes(struct drm_device *dev, struct drm_atomic_state *old_state) { - int nplanes = dev->mode_config.num_total_plane; - int ncrtcs = dev->mode_config.num_crtc; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state; int i; - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc = old_state->crtcs[i]; - - if (!crtc) - continue; + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; funcs = crtc->helper_private; @@ -1182,13 +1124,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, funcs->atomic_begin(crtc); } - for (i = 0; i < nplanes; i++) { - struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = old_state->planes[i]; - struct drm_plane_state *old_plane_state; - - if (!plane) - continue; + for_each_plane_in_state(old_state, plane, old_plane_state, i) { + const struct drm_plane_helper_funcs *funcs; funcs = plane->helper_private; @@ -1207,12 +1144,8 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev, funcs->atomic_update(plane, old_plane_state); } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc_helper_funcs *funcs; - struct drm_crtc *crtc = old_state->crtcs[i]; - - if (!crtc) - continue; + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + const struct drm_crtc_helper_funcs *funcs; funcs = crtc->helper_private; @@ -1239,23 +1172,20 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_planes); void drm_atomic_helper_cleanup_planes(struct drm_device *dev, struct drm_atomic_state *old_state) { - int nplanes = dev->mode_config.num_total_plane; + struct drm_plane *plane; + struct drm_plane_state *plane_state; int i; - for (i = 0; i < nplanes; i++) { - struct drm_plane_helper_funcs *funcs; - struct drm_plane *plane = old_state->planes[i]; + for_each_plane_in_state(old_state, plane, plane_state, i) { + const struct drm_plane_helper_funcs *funcs; struct drm_framebuffer *old_fb; - if (!plane) - continue; - funcs = plane->helper_private; - old_fb = old_state->plane_states[i]->fb; + old_fb = plane_state->fb; if (old_fb && funcs->cleanup_fb) - 
funcs->cleanup_fb(plane, old_fb); + funcs->cleanup_fb(plane, old_fb, plane_state); } } EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes); @@ -1498,8 +1428,10 @@ static int update_output_state(struct drm_atomic_state *state, struct drm_mode_set *set) { struct drm_device *dev = set->crtc->dev; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + struct drm_connector *connector; struct drm_connector_state *conn_state; - int ncrtcs = state->dev->mode_config.num_crtc; int ret, i, j; ret = drm_modeset_lock(&dev->mode_config.connection_mutex, @@ -1515,27 +1447,14 @@ static int update_output_state(struct drm_atomic_state *state, return PTR_ERR(conn_state); } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) return ret; } /* Then recompute connector->crtc links and crtc enabling state. */ - for (i = 0; i < state->num_connector; i++) { - struct drm_connector *connector; - - connector = state->connectors[i]; - conn_state = state->connector_states[i]; - - if (!connector) - continue; - + for_each_connector_in_state(state, connector, conn_state, i) { if (conn_state->crtc == set->crtc) { ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); @@ -1554,13 +1473,7 @@ static int update_output_state(struct drm_atomic_state *state, } } - for (i = 0; i < ncrtcs; i++) { - struct drm_crtc *crtc = state->crtcs[i]; - struct drm_crtc_state *crtc_state = state->crtc_states[i]; - - if (!crtc) - continue; - + for_each_crtc_in_state(state, crtc, crtc_state, i) { /* Don't update ->enable for the CRTC in the set_config request, * since a mismatch would indicate a bug in the upper layers. * The actual modeset code later on will catch any @@ -1680,12 +1593,13 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_set_config); /** - * drm_atomic_helper_crtc_set_property - helper for crtc prorties + * drm_atomic_helper_crtc_set_property - helper for crtc properties * @crtc: DRM crtc * @property: DRM property * @val: value of property * - * Provides a default plane disablle handler using the atomic driver interface. + * Provides a default crtc set_property handler using the atomic driver + * interface. * * RETURNS: * Zero on success, error code on failure @@ -1739,12 +1653,13 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property); /** - * drm_atomic_helper_plane_set_property - helper for plane prorties + * drm_atomic_helper_plane_set_property - helper for plane properties * @plane: DRM plane * @property: DRM property * @val: value of property * - * Provides a default plane disable handler using the atomic driver interface. + * Provides a default plane set_property handler using the atomic driver + * interface. * * RETURNS: * Zero on success, error code on failure @@ -1798,12 +1713,13 @@ backoff: EXPORT_SYMBOL(drm_atomic_helper_plane_set_property); /** - * drm_atomic_helper_connector_set_property - helper for connector prorties + * drm_atomic_helper_connector_set_property - helper for connector properties * @connector: DRM connector * @property: DRM property * @val: value of property * - * Provides a default plane disablle handler using the atomic driver interface. + * Provides a default connector set_property handler using the atomic driver + * interface. 
* * RETURNS: * Zero on success, error code on failure @@ -1986,10 +1902,10 @@ retry: WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); list_for_each_entry(tmp_connector, &config->connector_list, head) { - if (connector->state->crtc != crtc) + if (tmp_connector->state->crtc != crtc) continue; - if (connector->dpms == DRM_MODE_DPMS_ON) { + if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { active = true; break; } @@ -2051,6 +1967,26 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_atomic_helper_crtc_reset); +/** + * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state + * @crtc: CRTC object + * @state: atomic CRTC state + * + * Copies atomic state from a CRTC's current state and resets inferred values. + * This is useful for drivers that subclass the CRTC state. + */ +void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + memcpy(state, crtc->state, sizeof(*state)); + + state->mode_changed = false; + state->active_changed = false; + state->planes_changed = false; + state->event = NULL; +} +EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state); + /** * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook * @crtc: drm CRTC @@ -2066,19 +2002,34 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc) if (WARN_ON(!crtc->state)) return NULL; - state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL); - - if (state) { - state->mode_changed = false; - state->active_changed = false; - state->planes_changed = false; - state->event = NULL; - } + state = kmalloc(sizeof(*state), M_DRM, M_WAITOK); + if (state) + __drm_atomic_helper_crtc_duplicate_state(crtc, state); return state; } EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state); +/** + * __drm_atomic_helper_crtc_destroy_state - release CRTC state + * @crtc: CRTC object + * @state: CRTC state object to release + * + * Releases all resources stored in the CRTC state without actually freeing + * the memory of the CRTC state. This is useful for drivers that subclass the + * CRTC state. + */ +void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, + struct drm_crtc_state *state) +{ + /* + * This is currently a placeholder so that drivers that subclass the + * state will automatically do the right thing if code is ever added + * to this function. + */ +} +EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state); + /** * drm_atomic_helper_crtc_destroy_state - default state destroy hook * @crtc: drm CRTC @@ -2090,6 +2041,7 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state); void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state) { + __drm_atomic_helper_crtc_destroy_state(crtc, state); kfree(state); } EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state); @@ -2114,6 +2066,24 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane) } EXPORT_SYMBOL(drm_atomic_helper_plane_reset); +/** + * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state + * @plane: plane object + * @state: atomic plane state + * + * Copies atomic state from a plane's current state. This is useful for + * drivers that subclass the plane state. 
+ */ +void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + memcpy(state, plane->state, sizeof(*state)); + + if (state->fb) + drm_framebuffer_reference(state->fb); +} +EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state); + /** * drm_atomic_helper_plane_duplicate_state - default state duplicate hook * @plane: drm plane @@ -2129,15 +2099,31 @@ drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane) if (WARN_ON(!plane->state)) return NULL; - state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL); - - if (state && state->fb) - drm_framebuffer_reference(state->fb); + state = kmalloc(sizeof(*state), M_DRM, M_WAITOK); + if (state) + __drm_atomic_helper_plane_duplicate_state(plane, state); return state; } EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state); +/** + * __drm_atomic_helper_plane_destroy_state - release plane state + * @plane: plane object + * @state: plane state object to release + * + * Releases all resources stored in the plane state without actually freeing + * the memory of the plane state. This is useful for drivers that subclass the + * plane state. + */ +void __drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + if (state->fb) + drm_framebuffer_unreference(state->fb); +} +EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state); + /** * drm_atomic_helper_plane_destroy_state - default state destroy hook * @plane: drm plane @@ -2149,9 +2135,7 @@ EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state); void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) { - if (state->fb) - drm_framebuffer_unreference(state->fb); - + __drm_atomic_helper_plane_destroy_state(plane, state); kfree(state); } EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state); @@ -2174,6 +2158,22 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector) } EXPORT_SYMBOL(drm_atomic_helper_connector_reset); +/** + * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state + * @connector: connector object + * @state: atomic connector state + * + * Copies atomic state from a connector's current state. This is useful for + * drivers that subclass the connector state. + */ +void +__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector, + struct drm_connector_state *state) +{ + memcpy(state, connector->state, sizeof(*state)); +} +EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state); + /** * drm_atomic_helper_connector_duplicate_state - default state duplicate hook * @connector: drm connector @@ -2184,13 +2184,40 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_reset); struct drm_connector_state * drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector) { + struct drm_connector_state *state; + if (WARN_ON(!connector->state)) return NULL; - return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL); + state = kmalloc(sizeof(*state), M_DRM, M_WAITOK); + if (state) + __drm_atomic_helper_connector_duplicate_state(connector, state); + + return state; } EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); +/** + * __drm_atomic_helper_connector_destroy_state - release connector state + * @connector: connector object + * @state: connector state object to release + * + * Releases all resources stored in the connector state without actually + * freeing the memory of the connector state. This is useful for drivers that + * subclass the connector state. 
+ */ +void +__drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, + struct drm_connector_state *state) +{ + /* + * This is currently a placeholder so that drivers that subclass the + * state will automatically do the right thing if code is ever added + * to this function. + */ +} +EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state); + /** * drm_atomic_helper_connector_destroy_state - default state destroy hook * @connector: drm connector @@ -2202,6 +2229,7 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state); void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector, struct drm_connector_state *state) { + __drm_atomic_helper_connector_destroy_state(connector, state); kfree(state); } EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state); diff --git a/sys/dev/drm/drm_cache.c b/sys/dev/drm/drm_cache.c index c05f8f1824..ef29ed8682 100644 --- a/sys/dev/drm/drm_cache.c +++ b/sys/dev/drm/drm_cache.c @@ -30,12 +30,11 @@ #include #include +#include #include #include -#define cpu_has_clflush 1 - void drm_clflush_virt_range(void *in_addr, unsigned long length) { diff --git a/sys/dev/drm/drm_crtc.c b/sys/dev/drm/drm_crtc.c index 088e3d7d20..ac23818cfd 100644 --- a/sys/dev/drm/drm_crtc.c +++ b/sys/dev/drm/drm_crtc.c @@ -664,6 +664,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, struct drm_mode_config *config = &dev->mode_config; int ret; + WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY); + WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR); + crtc->dev = dev; crtc->funcs = funcs; crtc->invert_dimensions = false; @@ -1995,21 +1998,32 @@ int drm_mode_getcrtc(struct drm_device *dev, return -ENOENT; drm_modeset_lock_crtc(crtc, crtc->primary); - crtc_resp->x = crtc->x; - crtc_resp->y = crtc->y; crtc_resp->gamma_size = crtc->gamma_size; if (crtc->primary->fb) crtc_resp->fb_id = crtc->primary->fb->base.id; else crtc_resp->fb_id = 0; - if (crtc->enabled) { - - drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); - crtc_resp->mode_valid = 1; + if (crtc->state) { + crtc_resp->x = crtc->primary->state->src_x >> 16; + crtc_resp->y = crtc->primary->state->src_y >> 16; + if (crtc->state->enable) { + drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->state->mode); + crtc_resp->mode_valid = 1; + } else { + crtc_resp->mode_valid = 0; + } } else { - crtc_resp->mode_valid = 0; + crtc_resp->x = crtc->x; + crtc_resp->y = crtc->y; + if (crtc->enabled) { + drm_crtc_convert_to_umode(&crtc_resp->mode, &crtc->mode); + crtc_resp->mode_valid = 1; + + } else { + crtc_resp->mode_valid = 0; + } } drm_modeset_unlock_crtc(crtc); @@ -2262,8 +2276,6 @@ int drm_mode_getencoder(struct drm_device *dev, void *data, crtc = drm_encoder_get_crtc(encoder); if (crtc) enc_resp->crtc_id = crtc->base.id; - else if (encoder->crtc) - enc_resp->crtc_id = encoder->crtc->base.id; else enc_resp->crtc_id = 0; drm_modeset_unlock(&dev->mode_config.connection_mutex); @@ -2398,6 +2410,27 @@ int drm_mode_getplane(struct drm_device *dev, void *data, return 0; } +/** + * drm_plane_check_pixel_format - Check if the plane supports the pixel format + * @plane: plane to check for format support + * @format: the pixel format + * + * Returns: + * Zero of @plane has @format in its list of supported pixel formats, -EINVAL + * otherwise. 
+ */ +int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format) +{ + unsigned int i; + + for (i = 0; i < plane->format_count; i++) { + if (format == plane->format_types[i]) + return 0; + } + + return -EINVAL; +} + /* * setplane_internal - setplane handler for internal callers * @@ -2418,7 +2451,6 @@ static int __setplane_internal(struct drm_plane *plane, { int ret = 0; unsigned int fb_width, fb_height; - unsigned int i; /* No fb means shut it down */ if (!fb) { @@ -2441,16 +2473,24 @@ static int __setplane_internal(struct drm_plane *plane, } /* Check whether this plane supports the fb pixel format. */ - for (i = 0; i < plane->format_count; i++) - if (fb->pixel_format == plane->format_types[i]) - break; - if (i == plane->format_count) { + ret = drm_plane_check_pixel_format(plane, fb->pixel_format); + if (ret) { DRM_DEBUG_KMS("Invalid pixel format %s\n", drm_get_format_name(fb->pixel_format)); - ret = -EINVAL; goto out; } + /* Give drivers some help against integer overflows */ + if (crtc_w > INT_MAX || + crtc_x > INT_MAX - (int32_t) crtc_w || + crtc_h > INT_MAX || + crtc_y > INT_MAX - (int32_t) crtc_h) { + DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", + crtc_w, crtc_h, crtc_x, crtc_y); + return -ERANGE; + } + + fb_width = fb->width << 16; fb_height = fb->height << 16; @@ -2535,17 +2575,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - /* Give drivers some help against integer overflows */ - if (plane_req->crtc_w > INT_MAX || - plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w || - plane_req->crtc_h > INT_MAX || - plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) { - DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", - plane_req->crtc_w, plane_req->crtc_h, - plane_req->crtc_x, plane_req->crtc_y); - return -ERANGE; - } - /* * First, find the plane, crtc, and fb objects. If not available, * we don't bother to call the driver. @@ -2771,6 +2800,23 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); + /* + * Check whether the primary plane supports the fb pixel format. + * Drivers not implementing the universal planes API use a + * default formats list provided by the DRM core which doesn't + * match real hardware capabilities. Skip the check in that + * case. 
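Note the overflow guard moved from drm_mode_setplane() down into __setplane_internal(), so internal callers such as the universal-cursor path get the same protection. The bound is written as crtc_x > INT_MAX - (int32_t)crtc_w precisely so the comparison itself cannot overflow, whereas computing crtc_x + crtc_w directly could wrap. The same idea as a self-contained userspace snippet (hypothetical helper name):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* true when both x + w and y + h fit in a signed int */
	static bool crtc_rect_fits(int32_t x, int32_t y, uint32_t w, uint32_t h)
	{
		if (w > INT_MAX || x > INT_MAX - (int32_t)w)
			return false;	/* x + w would exceed INT_MAX */
		if (h > INT_MAX || y > INT_MAX - (int32_t)h)
			return false;	/* y + h would exceed INT_MAX */
		return true;
	}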
+ */ + if (!crtc->primary->format_default) { + ret = drm_plane_check_pixel_format(crtc->primary, + fb->pixel_format); + if (ret) { + DRM_DEBUG_KMS("Invalid pixel format %s\n", + drm_get_format_name(fb->pixel_format)); + goto out; + } + } + ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y, mode, fb); if (ret) @@ -3248,6 +3294,12 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); return -EINVAL; } + + if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) { + DRM_DEBUG_KMS("bad fb modifier %lu for plane %d\n", + r->modifier[i], i); + return -EINVAL; + } } return 0; @@ -3262,7 +3314,7 @@ internal_framebuffer_create(struct drm_device *dev, struct drm_framebuffer *fb; int ret; - if (r->flags & ~DRM_MODE_FB_INTERLACED) { + if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); return ERR_PTR(-EINVAL); } @@ -3278,6 +3330,12 @@ internal_framebuffer_create(struct drm_device *dev, return ERR_PTR(-EINVAL); } + if (r->flags & DRM_MODE_FB_MODIFIERS && + !dev->mode_config.allow_fb_modifiers) { + DRM_DEBUG_KMS("driver does not support fb modifiers\n"); + return ERR_PTR(-EINVAL); + } + ret = framebuffer_check(r); if (ret) return ERR_PTR(ret); @@ -5542,6 +5600,7 @@ struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev, #endif return NULL; } +EXPORT_SYMBOL(drm_mode_get_tile_group); /** * drm_mode_create_tile_group - create a tile group from a displayid description @@ -5580,3 +5639,4 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev, mutex_unlock(&dev->mode_config.idr_mutex); return tg; } +EXPORT_SYMBOL(drm_mode_create_tile_group); diff --git a/sys/dev/drm/drm_crtc_helper.c b/sys/dev/drm/drm_crtc_helper.c index 690fee55c1..2c32d52f80 100644 --- a/sys/dev/drm/drm_crtc_helper.c +++ b/sys/dev/drm/drm_crtc_helper.c @@ -145,7 +145,7 @@ EXPORT_SYMBOL(drm_helper_crtc_in_use); static void drm_encoder_disable(struct drm_encoder *encoder) { - struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; if (encoder->bridge) encoder->bridge->funcs->disable(encoder->bridge); @@ -175,7 +175,7 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) } list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) { if (crtc_funcs->disable) @@ -213,7 +213,7 @@ EXPORT_SYMBOL(drm_helper_disable_unused_functions); static void drm_crtc_prepare_encoders(struct drm_device *dev) { - struct drm_encoder_helper_funcs *encoder_funcs; + const struct drm_encoder_helper_funcs *encoder_funcs; struct drm_encoder *encoder; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -254,9 +254,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; - struct drm_display_mode *adjusted_mode, saved_mode; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - struct drm_encoder_helper_funcs *encoder_funcs; + struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + const struct drm_encoder_helper_funcs *encoder_funcs; int saved_x, saved_y; bool 
saved_enabled; struct drm_encoder *encoder; @@ -276,6 +276,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, } saved_mode = crtc->mode; + saved_hwmode = crtc->hwmode; saved_x = crtc->x; saved_y = crtc->y; @@ -318,6 +319,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, } DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); + crtc->hwmode = *adjusted_mode; + /* Prepare the encoders and CRTCs before setting the mode. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -380,9 +383,6 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, encoder->bridge->funcs->enable(encoder->bridge); } - /* Store real post-adjustment hardware mode. */ - crtc->hwmode = *adjusted_mode; - /* Calculate and store various constants which * are later needed by vblank and swap-completion * timestamping. They are derived from true hwmode. @@ -395,6 +395,7 @@ done: if (!ret) { crtc->enabled = saved_enabled; crtc->mode = saved_mode; + crtc->hwmode = saved_hwmode; crtc->x = saved_x; crtc->y = saved_y; } @@ -456,7 +457,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *save_connectors, *connector; int count = 0, ro, fail = 0; - struct drm_crtc_helper_funcs *crtc_funcs; + const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; int ret; int i; @@ -556,7 +557,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) /* a) traverse passed in connector list and get encoders for them */ count = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - struct drm_connector_helper_funcs *connector_funcs = + const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; new_encoder = connector->encoder; for (ro = 0; ro < set->num_connectors; ro++) { @@ -716,7 +717,7 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode) { struct drm_bridge *bridge = encoder->bridge; - struct drm_encoder_helper_funcs *encoder_funcs; + const struct drm_encoder_helper_funcs *encoder_funcs; if (bridge) { if (mode == DRM_MODE_DPMS_ON) @@ -778,7 +779,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) /* from off to on, do crtc then encoder */ if (mode < old_dpms) { if (crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); @@ -792,7 +793,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode) if (encoder) drm_helper_encoder_dpms(encoder, encoder_dpms); if (crtc) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; if (crtc_funcs->dpms) (*crtc_funcs->dpms) (crtc, drm_helper_choose_crtc_dpms(crtc)); @@ -821,6 +822,7 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, for (i = 0; i < 4; i++) { fb->pitches[i] = mode_cmd->pitches[i]; fb->offsets[i] = mode_cmd->offsets[i]; + fb->modifier[i] = mode_cmd->modifier[i]; } drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth, &fb->bits_per_pixel); @@ -853,7 +855,7 @@ void drm_helper_resume_force_mode(struct drm_device *dev) { struct drm_crtc *crtc; struct drm_encoder *encoder; - struct drm_crtc_helper_funcs *crtc_funcs; + const struct drm_crtc_helper_funcs *crtc_funcs; int encoder_dpms; 
bool ret; @@ -918,7 +920,7 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod struct drm_framebuffer *old_fb) { struct drm_crtc_state *crtc_state; - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; int ret; if (crtc->funcs->atomic_duplicate_state) diff --git a/sys/dev/drm/drm_dp_helper.c b/sys/dev/drm/drm_dp_helper.c index 1941b6c55f..e2008cbff4 100644 --- a/sys/dev/drm/drm_dp_helper.c +++ b/sys/dev/drm/drm_dp_helper.c @@ -20,10 +20,23 @@ * OF THIS SOFTWARE. */ -#include -#include +#include +#include +#include +#include +#include +#include #include +#include +/** + * DOC: dp helpers + * + * These functions contain some common logic and helpers at various abstraction + * levels to deal with Display Port sink devices and related things like DP aux + * channel transfers, EDID reading over DP aux channels, decoding certain DPCD + * blocks, ... + */ /* Helpers for DP link training */ static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) @@ -179,7 +192,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, for (retry = 0; retry < 32; retry++) { mutex_lock(&aux->hw_mutex); - KKASSERT(aux->transfer != NULL); err = aux->transfer(aux, &msg); mutex_unlock(&aux->hw_mutex); if (err < 0) { @@ -253,6 +265,383 @@ ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset, } EXPORT_SYMBOL(drm_dp_dpcd_write); +/** + * drm_dp_dpcd_read_link_status() - read DPCD link status (bytes 0x202-0x207) + * @aux: DisplayPort AUX channel + * @status: buffer to store the link status in (must be at least 6 bytes) + * + * Returns the number of bytes transferred on success or a negative error + * code on failure. + */ +int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, + u8 status[DP_LINK_STATUS_SIZE]) +{ + return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status, + DP_LINK_STATUS_SIZE); +} +EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); + +/** + * drm_dp_link_probe() - probe a DisplayPort link for capabilities + * @aux: DisplayPort AUX channel + * @link: pointer to structure in which to return link capabilities + * + * The structure filled in by this function can usually be passed directly + * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and + * configure the link based on the link's capabilities. + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[3]; + int err; + + memset(link, 0, sizeof(*link)); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + if (err < 0) + return err; + + link->revision = values[0]; + link->rate = drm_dp_bw_code_to_link_rate(values[1]); + link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + + if (values[2] & DP_ENHANCED_FRAME_CAP) + link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_probe); + +/** + * drm_dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
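drm_dp_link_probe() above, combined with the power and configuration helpers that follow it, yields a compact sink bring-up sequence. A hedged sketch, assuming a populated struct drm_dp_aux whose ->transfer hook already works (the foo_ name is hypothetical):

	static int foo_dp_bring_up(struct drm_dp_aux *aux)
	{
		struct drm_dp_link link;
		int ret;

		ret = drm_dp_link_probe(aux, &link);	/* DPCD rev, rate, lanes */
		if (ret < 0)
			return ret;

		ret = drm_dp_link_power_up(aux, &link);	/* D0; ~1 ms settle */
		if (ret < 0)
			return ret;

		/* writes link BW / lane count, enhanced framing if supported */
		return drm_dp_link_configure(aux, &link);
	}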
+ */ +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_up); + +/** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_power_down); + +/** + * drm_dp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[2]; + int err; + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->num_lanes; + + if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} +EXPORT_SYMBOL(drm_dp_link_configure); + +/* + * I2C-over-AUX implementation + */ + +#if 0 +static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | + I2C_FUNC_SMBUS_READ_BLOCK_DATA | + I2C_FUNC_SMBUS_BLOCK_PROC_CALL | + I2C_FUNC_10BIT_ADDR; +} + +/* + * Transfer a single I2C-over-AUX message and handle various error conditions, + * retrying the transaction as appropriate. It is assumed that the + * aux->transfer function does not modify anything in the msg other than the + * reply field. + * + * Returns bytes transferred on success, or a negative error code on failure. + */ +static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + unsigned int retry; + int ret; + + /* + * DP1.2 sections 2.7.7.1.5.6.1 and 2.7.7.1.6.6.1: A DP Source device + * is required to retry at least seven times upon receiving AUX_DEFER + * before giving up the AUX transaction. + */ + for (retry = 0; retry < 7; retry++) { + mutex_lock(&aux->hw_mutex); + ret = aux->transfer(aux, msg); + mutex_unlock(&aux->hw_mutex); + if (ret < 0) { + if (ret == -EBUSY) + continue; + + DRM_DEBUG_KMS("transaction failed: %d\n", ret); + return ret; + } + + + switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) { + case DP_AUX_NATIVE_REPLY_ACK: + /* + * For I2C-over-AUX transactions this isn't enough, we + * need to check for the I2C ACK reply. 
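The pair of switch statements here implements a two-level acknowledgment: the native AUX reply says whether the immediate DP device accepted the transaction at all, while the I2C reply says whether the far-end I2C device behind it did. Summarizing the handling that follows:

	native reply	I2C reply	action
	------------	---------	------
	ACK		ACK		done, return bytes transferred
	ACK		NACK		fail with -EREMOTEIO, bump i2c_nack_count
	ACK		DEFER		sleep 400-500 us, retry
	NACK		(n/a)		fail with -EREMOTEIO
	DEFER		(n/a)		sleep 500-600 us, retry

Only the ACK/ACK row completes the message; everything else either retries, bounded by the seven-iteration loop above, or aborts.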
+ */ + break; + + case DP_AUX_NATIVE_REPLY_NACK: + DRM_DEBUG_KMS("native nack (result=%d, size=%zu)\n", ret, msg->size); + return -EREMOTEIO; + + case DP_AUX_NATIVE_REPLY_DEFER: + DRM_DEBUG_KMS("native defer"); + /* + * We could check for I2C bit rate capabilities and if + * available adjust this interval. We could also be + * more careful with DP-to-legacy adapters where a + * long legacy cable may force very low I2C bit rates. + * + * For now just defer for long enough to hopefully be + * safe for all use-cases. + */ + usleep_range(500, 600); + continue; + + default: + DRM_ERROR("invalid native reply %#04x\n", msg->reply); + return -EREMOTEIO; + } + + switch (msg->reply & DP_AUX_I2C_REPLY_MASK) { + case DP_AUX_I2C_REPLY_ACK: + /* + * Both native ACK and I2C ACK replies received. We + * can assume the transfer was successful. + */ + return ret; + + case DP_AUX_I2C_REPLY_NACK: + DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu\n", ret, msg->size); + aux->i2c_nack_count++; + return -EREMOTEIO; + + case DP_AUX_I2C_REPLY_DEFER: + DRM_DEBUG_KMS("I2C defer\n"); + aux->i2c_defer_count++; + usleep_range(400, 500); + continue; + + default: + DRM_ERROR("invalid I2C reply %#04x\n", msg->reply); + return -EREMOTEIO; + } + } + + DRM_DEBUG_KMS("too many retries, giving up\n"); + return -EREMOTEIO; +} + +/* + * Keep retrying drm_dp_i2c_do_msg until all data has been transferred. + * + * Returns an error code on failure, or a recommended transfer size on success. + */ +static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *orig_msg) +{ + int err, ret = orig_msg->size; + struct drm_dp_aux_msg msg = *orig_msg; + + while (msg.size > 0) { + err = drm_dp_i2c_do_msg(aux, &msg); + if (err <= 0) + return err == 0 ? -EPROTO : err; + + if (err < msg.size && err < ret) { + DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n", + msg.size, err); + ret = err; + } + + msg.size -= err; + msg.buffer += err; + } + + return ret; +} + +/* + * Bizlink designed DP->DVI-D Dual Link adapters require the I2C over AUX + * packets to be as large as possible. If not, the I2C transactions never + * succeed. Hence the default is maximum. + */ +static int dp_aux_i2c_transfer_size __read_mostly = DP_AUX_MAX_PAYLOAD_BYTES; +module_param_unsafe(dp_aux_i2c_transfer_size, int, 0644); +MODULE_PARM_DESC(dp_aux_i2c_transfer_size, + "Number of bytes to transfer in a single I2C over DP AUX CH message, (1-16, default 16)"); + +static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, + int num) +{ + struct drm_dp_aux *aux = adapter->algo_data; + unsigned int i, j; + unsigned transfer_size; + struct drm_dp_aux_msg msg; + int err = 0; + + dp_aux_i2c_transfer_size = clamp(dp_aux_i2c_transfer_size, 1, DP_AUX_MAX_PAYLOAD_BYTES); + + memset(&msg, 0, sizeof(msg)); + + for (i = 0; i < num; i++) { + msg.address = msgs[i].addr; + msg.request = (msgs[i].flags & I2C_M_RD) ? + DP_AUX_I2C_READ : + DP_AUX_I2C_WRITE; + msg.request |= DP_AUX_I2C_MOT; + /* Send a bare address packet to start the transaction. + * Zero sized messages specify an address only (bare + * address) transaction. + */ + msg.buffer = NULL; + msg.size = 0; + err = drm_dp_i2c_do_msg(aux, &msg); + if (err < 0) + break; + /* We want each transaction to be as large as possible, but + * we'll go to smaller sizes if the hardware gives us a + * short reply. 
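The transfer loop that follows starts each message at dp_aux_i2c_transfer_size bytes and then shrinks to whatever drm_dp_i2c_drain_msg() reports back as the recommended size, so a sink that keeps giving short replies is never asked for more than it last delivered. The shape of that loop, reduced to a standalone sketch with a hypothetical do_chunk() standing in for the DRM calls:

	#include <stddef.h>
	#include <stdint.h>

	/* hypothetical transfer: moves n bytes, returns the recommended
	 * next chunk size on success or a negative error */
	int do_chunk(uint8_t *buf, size_t n);

	static int send_all(uint8_t *buf, size_t len)
	{
		size_t off = 0;
		size_t chunk = 16;	/* DP_AUX_MAX_PAYLOAD_BYTES */

		while (off < len) {
			size_t want = chunk < len - off ? chunk : len - off;
			int done = do_chunk(buf + off, want);

			if (done < 0)
				return done;	/* hard failure */
			chunk = done;		/* short reply: shrink */
			off += want;
		}
		return 0;
	}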
+ */ + transfer_size = dp_aux_i2c_transfer_size; + for (j = 0; j < msgs[i].len; j += msg.size) { + msg.buffer = msgs[i].buf + j; + msg.size = min(transfer_size, msgs[i].len - j); + + err = drm_dp_i2c_drain_msg(aux, &msg); + if (err < 0) + break; + transfer_size = err; + } + if (err < 0) + break; + } + if (err >= 0) + err = num; + /* Send a bare address packet to close out the transaction. + * Zero sized messages specify an address only (bare + * address) transaction. + */ + msg.request &= ~DP_AUX_I2C_MOT; + msg.buffer = NULL; + msg.size = 0; + (void)drm_dp_i2c_do_msg(aux, &msg); + + return err; +} + +static const struct i2c_algorithm drm_dp_i2c_algo = { + .functionality = drm_dp_i2c_functionality, + .master_xfer = drm_dp_i2c_xfer, +}; + +/** + * drm_dp_aux_register() - initialise and register aux channel + * @aux: DisplayPort AUX channel + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_aux_register(struct drm_dp_aux *aux) +{ + lockinit(&aux->hw_mutex, "ahm", 0, LK_CANRECURSE); + + aux->ddc.algo = &drm_dp_i2c_algo; + aux->ddc.algo_data = aux; + aux->ddc.retries = 3; + + aux->ddc.class = I2C_CLASS_DDC; + aux->ddc.owner = THIS_MODULE; + aux->ddc.dev.parent = aux->dev; + aux->ddc.dev.of_node = aux->dev->of_node; + + strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev), + sizeof(aux->ddc.name)); + + return i2c_add_adapter(&aux->ddc); +} +EXPORT_SYMBOL(drm_dp_aux_register); +#endif + /** * drm_dp_aux_unregister() - unregister an AUX adapter * @aux: DisplayPort AUX channel diff --git a/sys/dev/drm/drm_dp_mst_topology.c b/sys/dev/drm/drm_dp_mst_topology.c index 41eec17a26..674a7a624f 100644 --- a/sys/dev/drm/drm_dp_mst_topology.c +++ b/sys/dev/drm/drm_dp_mst_topology.c @@ -2281,6 +2281,19 @@ out: } EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); +int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +{ + int slots = 0; + port = drm_dp_get_validated_port_ref(mgr, port); + if (!port) + return slots; + + slots = port->vcpi.num_slots; + drm_dp_put_port(port); + return slots; +} +EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); + /** * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI * @mgr: manager for this port diff --git a/sys/dev/drm/drm_fb_helper.c b/sys/dev/drm/drm_fb_helper.c index ddd09ae351..6c0991b1d5 100644 --- a/sys/dev/drm/drm_fb_helper.c +++ b/sys/dev/drm/drm_fb_helper.c @@ -206,7 +206,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) int drm_fb_helper_debug_enter(struct fb_info *info) { struct drm_fb_helper *helper = info->par; - struct drm_crtc_helper_funcs *funcs; + const struct drm_crtc_helper_funcs *funcs; int i; list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { @@ -253,7 +253,7 @@ int drm_fb_helper_debug_leave(struct fb_info *info) { struct drm_fb_helper *helper = info->par; struct drm_crtc *crtc; - struct drm_crtc_helper_funcs *funcs; + const struct drm_crtc_helper_funcs *funcs; struct drm_framebuffer *fb; int i; @@ -743,7 +743,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_device *dev = fb_helper->dev; - struct drm_crtc_helper_funcs *crtc_funcs; + const struct drm_crtc_helper_funcs *crtc_funcs; u16 *red, *green, *blue, *transp; struct drm_crtc *crtc; int i, j, rc = 0; @@ -1043,23 +1043,45 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, crtc_count = 0; for (i = 0; i < fb_helper->crtc_count; i++) { struct drm_display_mode 
*desired_mode; - int x, y; + struct drm_mode_set *mode_set; + int x, y, j; + /* in case of tile group, are we the last tile vert or horiz? + * If no tile group you are always the last one both vertically + * and horizontally + */ + bool lastv = true, lasth = true; + desired_mode = fb_helper->crtc_info[i].desired_mode; + mode_set = &fb_helper->crtc_info[i].mode_set; + + if (!desired_mode) + continue; + + crtc_count++; + x = fb_helper->crtc_info[i].x; y = fb_helper->crtc_info[i].y; - if (desired_mode) { - if (gamma_size == 0) - gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; - if (desired_mode->hdisplay + x < sizes.fb_width) - sizes.fb_width = desired_mode->hdisplay + x; - if (desired_mode->vdisplay + y < sizes.fb_height) - sizes.fb_height = desired_mode->vdisplay + y; - if (desired_mode->hdisplay + x > sizes.surface_width) - sizes.surface_width = desired_mode->hdisplay + x; - if (desired_mode->vdisplay + y > sizes.surface_height) - sizes.surface_height = desired_mode->vdisplay + y; - crtc_count++; + + if (gamma_size == 0) + gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; + + sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width); + sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height); + + for (j = 0; j < mode_set->num_connectors; j++) { + struct drm_connector *connector = mode_set->connectors[j]; + if (connector->has_tile) { + lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); + lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); + /* cloning to multiple tiles is just crazy-talk, so: */ + break; + } } + + if (lasth) + sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width); + if (lastv) + sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height); } if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { @@ -1285,12 +1307,12 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f int width, int height) { struct drm_cmdline_mode *cmdline_mode; - struct drm_display_mode *mode = NULL; + struct drm_display_mode *mode; bool prefer_non_interlace; cmdline_mode = &fb_helper_conn->connector->cmdline_mode; if (cmdline_mode->specified == false) - return mode; + return NULL; /* attempt to find a matching mode in the list of modes * we have gotten so far, if not add a CVT mode that conforms @@ -1299,7 +1321,7 @@ struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *f goto create_mode; prefer_non_interlace = !cmdline_mode->interlace; - again: +again: list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) { /* check width/height */ if (mode->hdisplay != cmdline_mode->xres || @@ -1553,7 +1575,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, int c, o; struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; - struct drm_connector_helper_funcs *connector_funcs; + const struct drm_connector_helper_funcs *connector_funcs; struct drm_encoder *encoder; int my_score, best_score, score; struct drm_fb_helper_crtc **crtcs, *crtc; diff --git a/sys/dev/drm/drm_irq.c b/sys/dev/drm/drm_irq.c index 259bc2e572..89ac22ef0d 100644 --- a/sys/dev/drm/drm_irq.c +++ b/sys/dev/drm/drm_irq.c @@ -1209,6 +1209,37 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_crtc_vblank_off); +/** + * drm_crtc_vblank_reset - reset vblank state to off on a CRTC + * @crtc: CRTC in question + * + * Drivers can use this function to reset the vblank state to off at load 
time. + * Drivers should use this together with the drm_crtc_vblank_off() and + * drm_crtc_vblank_on() functions. The difference compared to + * drm_crtc_vblank_off() is that this function doesn't save the vblank counter + * and hence doesn't need to call any driver hooks. + */ +void drm_crtc_vblank_reset(struct drm_crtc *drm_crtc) +{ + struct drm_device *dev = drm_crtc->dev; + int crtc = drm_crtc_index(drm_crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; + + lockmgr(&dev->vbl_lock, LK_EXCLUSIVE); + /* + * Prevent subsequent drm_vblank_get() from enabling the vblank + * interrupt by bumping the refcount. + */ + if (!vblank->inmodeset) { + atomic_inc(&vblank->refcount); + vblank->inmodeset = 1; + } + lockmgr(&dev->vbl_lock, LK_RELEASE); + + WARN_ON(!list_empty(&dev->vblank_event_list)); +} +EXPORT_SYMBOL(drm_crtc_vblank_reset); + /** * drm_vblank_on - enable vblank events on a CRTC * @dev: DRM device diff --git a/sys/dev/drm/drm_modes.c b/sys/dev/drm/drm_modes.c index 4a9d193b18..b0af5190cd 100644 --- a/sys/dev/drm/drm_modes.c +++ b/sys/dev/drm/drm_modes.c @@ -277,7 +277,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, hblank = drm_mode->hdisplay * hblank_percentage / (100 * HV_FACTOR - hblank_percentage); hblank -= hblank % (2 * CVT_H_GRANULARITY); - /* 14. find the total pixes per line */ + /* 14. find the total pixels per line */ drm_mode->htotal = drm_mode->hdisplay + hblank; drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2; drm_mode->hsync_start = drm_mode->hsync_end - @@ -899,6 +899,12 @@ EXPORT_SYMBOL(drm_mode_duplicate); */ bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2) { + if (!mode1 && !mode2) + return true; + + if (!mode1 || !mode2) + return false; + /* do clock check convert to PICOS so fb modes get matched * the same */ if (mode1->clock && mode2->clock) { @@ -1087,7 +1093,7 @@ EXPORT_SYMBOL(drm_mode_sort); /** * drm_mode_connector_list_update - update the mode list for the connector * @connector: the connector to update - * @merge_type_bits: whether to merge or overright type bits. + * @merge_type_bits: whether to merge or overwrite type bits * * This moves the modes from the @connector probed_modes list * to the actual mode list. It compares the probed mode against the current @@ -1148,7 +1154,7 @@ EXPORT_SYMBOL(drm_mode_connector_list_update); * x[M][R][-][@][i][m][eDd] * * The intermediate drm_cmdline_mode structure is required to store additional - * options from the command line modline like the force-enabel/disable flag. + * options from the command line modline like the force-enable/disable flag. * * Returns: * True if a valid modeline has been parsed, false otherwise. diff --git a/sys/dev/drm/drm_pci.c b/sys/dev/drm/drm_pci.c index d1e23414a7..0027cf4598 100644 --- a/sys/dev/drm/drm_pci.c +++ b/sys/dev/drm/drm_pci.c @@ -22,9 +22,10 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
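Stepping back to drm_crtc_vblank_reset() above: it is meant for driver load, before the first modeset, so that a stray drm_vblank_get() cannot switch the interrupt on while the hardware counters are still meaningless. A minimal usage sketch (the foo_ name is hypothetical):

	static void foo_load_mark_vblanks_off(struct drm_device *dev)
	{
		struct drm_crtc *crtc;

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			drm_crtc_vblank_reset(crtc);

		/* the modeset path later calls drm_crtc_vblank_on() per CRTC */
	}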
*/ +#include #include -#include "drm_legacy.h" #include "drm_internal.h" +#include "drm_legacy.h" /**********************************************************************/ /** \name PCI memory */ diff --git a/sys/dev/drm/drm_plane_helper.c b/sys/dev/drm/drm_plane_helper.c index 074b77e4c5..e4590e6c26 100644 --- a/sys/dev/drm/drm_plane_helper.c +++ b/sys/dev/drm/drm_plane_helper.c @@ -344,20 +344,7 @@ const struct drm_plane_funcs drm_primary_helper_funcs = { }; EXPORT_SYMBOL(drm_primary_helper_funcs); -/** - * drm_primary_helper_create_plane() - Create a generic primary plane - * @dev: drm device - * @formats: pixel formats supported, or NULL for a default safe list - * @num_formats: size of @formats; ignored if @formats is NULL - * - * Allocates and initializes a primary plane that can be used with the primary - * plane helpers. Drivers that wish to use driver-specific plane structures or - * provide custom handler functions may perform their own allocation and - * initialization rather than calling this function. - */ -struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, - const uint32_t *formats, - int num_formats) +static struct drm_plane *create_primary_plane(struct drm_device *dev) { struct drm_plane *primary; int ret; @@ -368,15 +355,17 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, return NULL; } - if (formats == NULL) { - formats = safe_modeset_formats; - num_formats = ARRAY_SIZE(safe_modeset_formats); - } + /* + * Remove the format_default field from drm_plane when dropping + * this helper. + */ + primary->format_default = true; /* possible_crtc's will be filled in later by crtc_init */ ret = drm_universal_plane_init(dev, primary, 0, &drm_primary_helper_funcs, - formats, num_formats, + safe_modeset_formats, + ARRAY_SIZE(safe_modeset_formats), DRM_PLANE_TYPE_PRIMARY); if (ret) { kfree(primary); @@ -385,7 +374,6 @@ struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev, return primary; } -EXPORT_SYMBOL(drm_primary_helper_create_plane); /** * drm_crtc_init - Legacy CRTC initialization function @@ -404,7 +392,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, { struct drm_plane *primary; - primary = drm_primary_helper_create_plane(dev, NULL, 0); + primary = create_primary_plane(dev); return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); } EXPORT_SYMBOL(drm_crtc_init); @@ -413,9 +401,9 @@ int drm_plane_helper_commit(struct drm_plane *plane, struct drm_plane_state *plane_state, struct drm_framebuffer *old_fb) { - struct drm_plane_helper_funcs *plane_funcs; + const struct drm_plane_helper_funcs *plane_funcs; struct drm_crtc *crtc[2]; - struct drm_crtc_helper_funcs *crtc_funcs[2]; + const struct drm_crtc_helper_funcs *crtc_funcs[2]; int i, ret = 0; plane_funcs = plane->helper_private; @@ -435,8 +423,10 @@ int drm_plane_helper_commit(struct drm_plane *plane, goto out; } - if (plane_funcs->prepare_fb && plane_state->fb) { - ret = plane_funcs->prepare_fb(plane, plane_state->fb); + if (plane_funcs->prepare_fb && plane_state->fb && + plane_state->fb != old_fb) { + ret = plane_funcs->prepare_fb(plane, plane_state->fb, + plane_state); if (ret) goto out; } @@ -449,34 +439,47 @@ int drm_plane_helper_commit(struct drm_plane *plane, crtc_funcs[i]->atomic_begin(crtc[i]); } - plane_funcs->atomic_update(plane, plane_state); + /* + * Drivers may optionally implement the ->atomic_disable callback, so + * special-case that here. 
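Whether a commit is a disable is decided by comparing old and new software state: the plane is going away when the old state still had a CRTC and the new one has none. From memory of the 4.1-era helper header, drm_atomic_plane_disabling() is approximately:

	static inline bool
	drm_atomic_plane_disabling(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
	{
		/*
		 * Transitional helpers may pass a NULL old state; then
		 * nothing is known and the plane must be assumed enabled.
		 */
		if (old_state == NULL)
			return true;

		return old_state->crtc != NULL && plane->state->crtc == NULL;
	}

(plane->state already points at the new state at the call site below, since the commit helper swaps the state pointers before this check; the later plane->state->fb == old_fb test relies on the same fact.)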
+ */ + if (drm_atomic_plane_disabling(plane, plane_state) && + plane_funcs->atomic_disable) + plane_funcs->atomic_disable(plane, plane_state); + else + plane_funcs->atomic_update(plane, plane_state); for (i = 0; i < 2; i++) { if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush) crtc_funcs[i]->atomic_flush(crtc[i]); } + /* + * If we only moved the plane and didn't change fb's, there's no need to + * wait for vblank. + */ + if (plane->state->fb == old_fb) + goto out; + for (i = 0; i < 2; i++) { if (!crtc[i]) continue; - if (plane != crtc[i]->cursor) { - /* - * There's no other way to figure out whether the - * crtc is running. - */ - ret = drm_crtc_vblank_get(crtc[i]); - if (ret == 0) { - drm_crtc_wait_one_vblank(crtc[i]); - drm_crtc_vblank_put(crtc[i]); - } + if (crtc[i]->cursor == plane) + continue; + + /* There's no other way to figure out whether the crtc is running. */ + ret = drm_crtc_vblank_get(crtc[i]); + if (ret == 0) { + drm_crtc_wait_one_vblank(crtc[i]); + drm_crtc_vblank_put(crtc[i]); } ret = 0; } if (plane_funcs->cleanup_fb && old_fb) - plane_funcs->cleanup_fb(plane, old_fb); + plane_funcs->cleanup_fb(plane, old_fb, plane_state); out: if (plane_state) { if (plane->funcs->atomic_destroy_state) diff --git a/sys/dev/drm/drm_probe_helper.c b/sys/dev/drm/drm_probe_helper.c index ff9d59c4ff..71f10a3081 100644 --- a/sys/dev/drm/drm_probe_helper.c +++ b/sys/dev/drm/drm_probe_helper.c @@ -98,7 +98,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect { struct drm_device *dev = connector->dev; struct drm_display_mode *mode; - struct drm_connector_helper_funcs *connector_funcs = + const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; int count = 0; int mode_flags = 0; diff --git a/sys/dev/drm/i915/Makefile b/sys/dev/drm/i915/Makefile index d9774b4048..3fc365eb51 100644 --- a/sys/dev/drm/i915/Makefile +++ b/sys/dev/drm/i915/Makefile @@ -4,6 +4,7 @@ KMOD = i915 SRCS = i915_drv.c \ i915_params.c \ i915_suspend.c \ + i915_sysfs.c \ intel_pm.c \ intel_runtime_pm.c @@ -16,6 +17,7 @@ SRCS += i915_cmd_parser.c \ i915_gem_execbuffer.c \ i915_gem_gtt.c \ i915_gem.c \ + i915_gem_shrinker.c \ i915_gem_stolen.c \ i915_gem_tiling.c \ i915_gem_userptr.c \ @@ -72,10 +74,11 @@ SRCS += \ intel_sdvo.c \ intel_tv.c +# virtual gpu code +SRCS += i915_vgpu.c + # legacy horrors -SRCS += \ - i915_dma.c \ - i915_ums.c +SRCS += i915_dma.c SRCS += acpi_if.h device_if.h bus_if.h pci_if.h iicbus_if.h iicbb_if.h \ opt_acpi.h opt_drm.h opt_ktr.h diff --git a/sys/dev/drm/i915/i915_cmd_parser.c b/sys/dev/drm/i915/i915_cmd_parser.c index 9cb1d0be0d..26cd3f22a3 100644 --- a/sys/dev/drm/i915/i915_cmd_parser.c +++ b/sys/dev/drm/i915/i915_cmd_parser.c @@ -826,21 +826,25 @@ static bool valid_reg(const u32 *table, int count, u32 addr) return false; } -static u32 *vmap_batch(struct drm_i915_gem_object *obj) +static u32 *vmap_batch(struct drm_i915_gem_object *obj, + unsigned start, unsigned len) { int i; void *addr = NULL; + int first_page = start >> PAGE_SHIFT; + int last_page = (len + start + 4095) >> PAGE_SHIFT; + int npages = last_page - first_page; struct vm_page **pages; - pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); + pages = drm_malloc_ab(npages, sizeof(*pages)); if (pages == NULL) { DRM_DEBUG_DRIVER("Failed to get space for pages\n"); goto finish; } i = 0; - while (i < obj->base.size >> PAGE_SHIFT) { - pages[i] = obj->pages[i]; + while (i < npages) { + pages[i] = obj->pages[first_page + i]; i++; } @@ -864,61 +868,61 @@ static u32 
*copy_batch(struct drm_i915_gem_object *dest_obj, u32 batch_start_offset, u32 batch_len) { - int ret = 0; int needs_clflush = 0; - u32 *src_base, *dest_base = NULL; - u32 *src_addr, *dest_addr; - u32 offset = batch_start_offset / sizeof(*dest_addr); - u32 end = batch_start_offset + batch_len; + char *src_base, *src; + void *dst = NULL; + int ret; - if (end > dest_obj->base.size || end > src_obj->base.size) + if (batch_len > dest_obj->base.size || + batch_len + batch_start_offset > src_obj->base.size) return ERR_PTR(-E2BIG); ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush); if (ret) { - DRM_DEBUG_DRIVER("CMD: failed to prep read\n"); + DRM_DEBUG_DRIVER("CMD: failed to prepare shadow batch\n"); return ERR_PTR(ret); } - src_base = vmap_batch(src_obj); + src_base = (char *)vmap_batch(src_obj, batch_start_offset, batch_len); if (!src_base) { DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n"); ret = -ENOMEM; goto unpin_src; } - src_addr = src_base + offset; - - if (needs_clflush) - drm_clflush_virt_range((char *)src_addr, batch_len); + ret = i915_gem_object_get_pages(dest_obj); + if (ret) { + DRM_DEBUG_DRIVER("CMD: Failed to get pages for shadow batch\n"); + goto unmap_src; + } + i915_gem_object_pin_pages(dest_obj); ret = i915_gem_object_set_to_cpu_domain(dest_obj, true); if (ret) { - DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n"); + DRM_DEBUG_DRIVER("CMD: Failed to set shadow batch to CPU\n"); goto unmap_src; } - dest_base = vmap_batch(dest_obj); - if (!dest_base) { + dst = vmap_batch(dest_obj, 0, batch_len); + if (!dst) { DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n"); + i915_gem_object_unpin_pages(dest_obj); ret = -ENOMEM; goto unmap_src; } - dest_addr = dest_base + offset; - - if (batch_start_offset != 0) - memset((u8 *)dest_base, 0, batch_start_offset); + src = src_base + offset_in_page(batch_start_offset); + if (needs_clflush) + drm_clflush_virt_range(src, batch_len); - memcpy(dest_addr, src_addr, batch_len); - memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end); + memcpy(dst, src, batch_len); unmap_src: vunmap(src_base); unpin_src: i915_gem_object_unpin_pages(src_obj); - return ret ? ERR_PTR(ret) : dest_base; + return ret ? ERR_PTR(ret) : dst; } /** @@ -1055,34 +1059,26 @@ int i915_parse_cmds(struct intel_engine_cs *ring, u32 batch_len, bool is_master) { - int ret = 0; u32 *cmd, *batch_base, *batch_end; struct drm_i915_cmd_descriptor default_desc = { 0 }; bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */ - - ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0); - if (ret) { - DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n"); - return -1; - } + int ret = 0; batch_base = copy_batch(shadow_batch_obj, batch_obj, batch_start_offset, batch_len); if (IS_ERR(batch_base)) { DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n"); - i915_gem_object_ggtt_unpin(shadow_batch_obj); return PTR_ERR(batch_base); } - cmd = batch_base + (batch_start_offset / sizeof(*cmd)); - /* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra * space. Parsing should be faster in some cases this way. 
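vmap_batch() now takes a (start, len) window and maps only the pages backing it, rather than the whole object; the 4095 is PAGE_SIZE - 1 for the 4 KiB pages assumed throughout. Worked through with made-up numbers:

	/* start = 0x1800, len = 0x2000, PAGE_SHIFT = 12 (4 KiB pages) */
	int first_page = 0x1800 >> 12;			 /* = 1, inclusive */
	int last_page  = (0x2000 + 0x1800 + 4095) >> 12; /* = 4, exclusive */
	int npages     = last_page - first_page;	 /* = 3 pages mapped */

The byte range [0x1800, 0x3800) indeed touches pages 1, 2 and 3, so only three page pointers are handed to vmap() instead of the object's full page array.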
*/ - batch_end = cmd + (batch_len / sizeof(*batch_end)); + batch_end = batch_base + (batch_len / sizeof(*batch_end)); + cmd = batch_base; while (cmd < batch_end) { const struct drm_i915_cmd_descriptor *desc; u32 length; @@ -1141,7 +1137,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring, } vunmap(batch_base); - i915_gem_object_ggtt_unpin(shadow_batch_obj); + i915_gem_object_unpin_pages(shadow_batch_obj); return ret; } diff --git a/sys/dev/drm/i915/i915_dma.c b/sys/dev/drm/i915/i915_dma.c index 632f5304d2..60dcd186f1 100644 --- a/sys/dev/drm/i915/i915_dma.c +++ b/sys/dev/drm/i915/i915_dma.c @@ -28,10 +28,11 @@ #include #include +#include "intel_drv.h" #include #include #include "i915_drv.h" -#include "intel_drv.h" +#include "i915_vgpu.h" #include "intel_ringbuffer.h" #include @@ -52,6 +53,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_CHIPSET_ID: value = dev->pdev->device; break; + case I915_PARAM_REVISION: + value = dev->pdev->revision; + break; case I915_PARAM_HAS_GEM: value = 1; break; @@ -125,6 +129,16 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_COHERENT_PHYS_GTT: value = 1; break; + case I915_PARAM_SUBSLICE_TOTAL: + value = INTEL_INFO(dev)->subslice_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_EU_TOTAL: + value = INTEL_INFO(dev)->eu_total; + if (!value) + return -ENODEV; + break; default: DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; @@ -598,16 +612,128 @@ static void intel_device_info_runtime_init(struct drm_device *dev) } } + /* Initialize slice/subslice/EU info */ if (IS_CHERRYVIEW(dev)) { - u32 fuse, mask_eu; + u32 fuse, eu_dis; fuse = I915_READ(CHV_FUSE_GT); - mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | - CHV_FGT_EU_DIS_SS0_R1_MASK | - CHV_FGT_EU_DIS_SS1_R0_MASK | - CHV_FGT_EU_DIS_SS1_R1_MASK); - info->eu_total = 16 - hweight32(mask_eu); + + info->slice_total = 1; + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | + CHV_FGT_EU_DIS_SS0_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | + CHV_FGT_EU_DIS_SS1_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + info->subslice_total = info->subslice_per_slice; + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + info->eu_per_subslice = info->subslice_total ? + info->eu_total / info->subslice_total : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. + */ + info->has_slice_pg = 0; + info->has_subslice_pg = (info->subslice_total > 1); + info->has_eu_pg = (info->eu_per_subslice > 2); + } else if (IS_SKYLAKE(dev)) { + const int s_max = 3, ss_max = 4, eu_max = 8; + int s, ss; + u32 fuse2, eu_disable[s_max], s_enable, ss_disable; + + fuse2 = I915_READ(GEN8_FUSE2); + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> + GEN8_F2_S_ENA_SHIFT; + ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> + GEN9_F2_SS_DIS_SHIFT; + + eu_disable[0] = I915_READ(GEN8_EU_DISABLE0); + eu_disable[1] = I915_READ(GEN8_EU_DISABLE1); + eu_disable[2] = I915_READ(GEN8_EU_DISABLE2); + + info->slice_total = hweight32(s_enable); + /* + * The subslice disable field is global, i.e. it applies + * to each of the enabled slices. 
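All of the slice accounting here reduces to population counts over fuse fields: slice_total counts the set bits in s_enable, subslice_per_slice subtracts the disabled bits from ss_max, and the totals multiply out. A standalone check with made-up fuse values (ss_max = 4 as on Skylake):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int ss_max = 4;
		uint32_t s_enable = 0x3;	/* made-up: slices 0 and 1 on */
		uint32_t ss_disable = 0x1;	/* made-up: subslice 0 fused off */

		int slice_total = __builtin_popcount(s_enable);
		int subslice_per_slice = ss_max - __builtin_popcount(ss_disable);

		/* prints: slices=2 subslices/slice=3 subslice_total=6 */
		printf("slices=%d subslices/slice=%d subslice_total=%d\n",
		       slice_total, subslice_per_slice,
		       slice_total * subslice_per_slice);
		return 0;
	}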
+ */ + info->subslice_per_slice = ss_max - hweight32(ss_disable); + info->subslice_total = info->slice_total * + info->subslice_per_slice; + + /* + * Iterate through enabled slices and subslices to + * count the total enabled EU. + */ + for (s = 0; s < s_max; s++) { + if (!(s_enable & (0x1 << s))) + /* skip disabled slice */ + continue; + + for (ss = 0; ss < ss_max; ss++) { + u32 n_disabled; + + if (ss_disable & (0x1 << ss)) + /* skip disabled subslice */ + continue; + + n_disabled = hweight8(eu_disable[s] >> + (ss * eu_max)); + + /* + * Record which subslice(s) has(have) 7 EUs. we + * can tune the hash used to spread work among + * subslices if they are unbalanced. + */ + if (eu_max - n_disabled == 7) + info->subslice_7eu[s] |= 1 << ss; + + info->eu_total += eu_max - n_disabled; + } + } + + /* + * SKL is expected to always have a uniform distribution + * of EU across subslices with the exception that any one + * EU in any one subslice may be fused off for die + * recovery. + */ + info->eu_per_subslice = info->subslice_total ? + DIV_ROUND_UP(info->eu_total, + info->subslice_total) : 0; + /* + * SKL supports slice power gating on devices with more than + * one slice, and supports EU power gating on devices with + * more than one EU pair per subslice. + */ + info->has_slice_pg = (info->slice_total > 1) ? 1 : 0; + info->has_subslice_pg = 0; + info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0; } + DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); + DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); + DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); + DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total); + DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice); + DRM_DEBUG_DRIVER("has slice power gating: %s\n", + info->has_slice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has subslice power gating: %s\n", + info->has_subslice_pg ? "y" : "n"); + DRM_DEBUG_DRIVER("has EU power gating: %s\n", + info->has_eu_pg ? "y" : "n"); } /** @@ -632,13 +758,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* XXX: struct pci_dev */ info = i915_get_device_id(dev->pdev->device); - /* Refuse to load on gen6+ without kms enabled. */ - if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) { - DRM_INFO("Your hardware requires kernel modesetting (KMS)\n"); - DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n"); - return -ENODEV; - } - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; @@ -692,14 +811,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) else mmio_size = 2*1024*1024; -#if 0 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size); if (!dev_priv->regs) { DRM_ERROR("failed to map registers\n"); ret = -EIO; goto put_bridge; } -#else +#ifdef __DragonFly__ base = drm_get_resource_start(dev, mmio_bar); size = drm_get_resource_len(dev, mmio_bar); @@ -716,20 +834,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_regs; - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ - ret = i915_kick_out_firmware_fb(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_gtt; - } + /* WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
*/ + ret = i915_kick_out_firmware_fb(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + goto out_gtt; + } - ret = i915_kick_out_vgacon(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_gtt; - } + ret = i915_kick_out_vgacon(dev_priv); + if (ret) { + DRM_ERROR("failed to remove conflicting VGA console\n"); + goto out_gtt; } #if 0 @@ -837,17 +953,20 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) intel_power_domains_init(dev_priv); - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = i915_load_modeset_init(dev); - if (ret < 0) { - DRM_ERROR("failed to init modeset\n"); - goto out_power_well; - } + ret = i915_load_modeset_init(dev); + if (ret < 0) { + DRM_ERROR("failed to init modeset\n"); + goto out_power_well; } -#if 0 + /* + * Notify a valid surface after modesetting, + * when running inside a VM. + */ + if (intel_vgpu_active(dev)) + I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY); + i915_setup_sysfs(dev); -#endif if (INTEL_INFO(dev)->num_pipes) { /* Must be done after probing outputs */ @@ -888,6 +1007,11 @@ out_gtt: i915_global_gtt_cleanup(dev); out_regs: intel_uncore_fini(dev); +#if 0 + pci_iounmap(dev->pdev, dev_priv->regs); +#endif +put_bridge: + pci_dev_put(dev_priv->bridge_dev); free_priv: kfree(dev_priv); return ret; @@ -910,9 +1034,9 @@ int i915_driver_unload(struct drm_device *dev) intel_gpu_ips_teardown(); -#if 0 i915_teardown_sysfs(dev); +#if 0 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); unregister_shrinker(&dev_priv->mm.shrinker); @@ -924,54 +1048,53 @@ int i915_driver_unload(struct drm_device *dev) acpi_video_unregister(); #endif - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_fbdev_fini(dev); + intel_fbdev_fini(dev); drm_vblank_cleanup(dev); - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - intel_modeset_cleanup(dev); - - /* - * free the memory space allocated for the child device - * config parsed from VBT - */ - if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { - kfree(dev_priv->vbt.child_dev); - dev_priv->vbt.child_dev = NULL; - dev_priv->vbt.child_dev_num = 0; - } + intel_modeset_cleanup(dev); + /* + * free the memory space allocated for the child device + * config parsed from VBT + */ + if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { + kfree(dev_priv->vbt.child_dev); + dev_priv->vbt.child_dev = NULL; + dev_priv->vbt.child_dev_num = 0; } +#if 0 + vga_switcheroo_unregister_client(dev->pdev); + vga_client_register(dev->pdev, NULL, NULL, NULL); +#endif + /* Free error state after interrupts are fully disabled. */ cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); #if 0 i915_destroy_error_state(dev); + + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); #endif intel_opregion_fini(dev); - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Flush any outstanding unpin_work. */ - flush_workqueue(dev_priv->wq); + /* Flush any outstanding unpin_work. 
*/ + flush_workqueue(dev_priv->wq); - mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_ringbuffer(dev); - i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool); - i915_gem_context_fini(dev); - mutex_unlock(&dev->struct_mutex); + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool); + i915_gem_context_fini(dev); + mutex_unlock(&dev->struct_mutex); #if 0 - i915_gem_cleanup_stolen(dev); + i915_gem_cleanup_stolen(dev); #endif - } intel_teardown_gmbus(dev); intel_teardown_mchbar(dev); - bus_generic_detach(dev->dev); - drm_legacy_rmmap(dev, dev_priv->mmio_map); - destroy_workqueue(dev_priv->dp_wq); destroy_workqueue(dev_priv->wq); destroy_workqueue(dev_priv->gpu_error.hangcheck_wq); @@ -983,6 +1106,9 @@ int i915_driver_unload(struct drm_device *dev) #if 0 if (dev_priv->regs != NULL) pci_iounmap(dev->pdev, dev_priv->regs); + + if (dev_priv->slab) + kmem_cache_destroy(dev_priv->slab); #endif pci_dev_put(dev_priv->bridge_dev); @@ -1029,8 +1155,7 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); - if (drm_core_check_feature(dev, DRIVER_MODESET)) - intel_modeset_preclose(dev, file); + intel_modeset_preclose(dev, file); } void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) @@ -1093,7 +1218,7 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), diff --git a/sys/dev/drm/i915/i915_drv.c b/sys/dev/drm/i915/i915_drv.c index 5833090c76..1e2721aeed 100644 --- a/sys/dev/drm/i915/i915_drv.c +++ b/sys/dev/drm/i915/i915_drv.c @@ -342,7 +342,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = { }; static const struct intel_device_info intel_cherryview_info = { - .is_preliminary = 1, .gen = 8, .num_pipes = 3, .need_gfx_hws = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, @@ -365,6 +364,19 @@ static const struct intel_device_info intel_skylake_info = { IVB_CURSOR_OFFSETS, }; +static const struct intel_device_info intel_skylake_gt3_info = { + .is_preliminary = 1, + .is_skylake = 1, + .gen = 9, .num_pipes = 3, + .need_gfx_hws = 1, .has_hotplug = 1, + .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, + .has_llc = 1, + .has_ddi = 1, + .has_fbc = 1, + GEN_DEFAULT_PIPEOFFSETS, + IVB_CURSOR_OFFSETS, +}; + /* * Make sure any device matches here are from most specific to most * general. 
For example, since the Quanta match is based on the subsystem @@ -401,7 +413,9 @@ static const struct intel_device_info intel_skylake_info = { INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ INTEL_CHV_IDS(&intel_cherryview_info), \ - INTEL_SKL_IDS(&intel_skylake_info) + INTEL_SKL_GT1_IDS(&intel_skylake_info), \ + INTEL_SKL_GT2_IDS(&intel_skylake_info), \ + INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info) \ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_PCI_IDS, @@ -556,6 +570,7 @@ static int i915_drm_suspend(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; pci_power_t opregion_target_state; + int error; /* ignore lid events during suspend */ mutex_lock(&dev_priv->modeset_restore_lock); @@ -572,39 +587,34 @@ static int i915_drm_suspend(struct drm_device *dev) pci_save_state(dev->pdev); #endif - /* If KMS is active, we do the leavevt stuff here */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - int error; - - error = i915_gem_suspend(dev); - if (error) { - dev_err(dev->pdev->dev, - "GEM idle failed, resume might fail\n"); - return error; - } + error = i915_gem_suspend(dev); + if (error) { + dev_err(dev->pdev->dev, + "GEM idle failed, resume might fail\n"); + return error; + } - intel_suspend_gt_powersave(dev); + intel_suspend_gt_powersave(dev); - /* - * Disable CRTCs directly since we want to preserve sw state - * for _thaw. Also, power gate the CRTC power wells. - */ - drm_modeset_lock_all(dev); - for_each_crtc(dev, crtc) - intel_crtc_control(crtc, false); - drm_modeset_unlock_all(dev); + /* + * Disable CRTCs directly since we want to preserve sw state + * for _thaw. Also, power gate the CRTC power wells. + */ + drm_modeset_lock_all(dev); + for_each_crtc(dev, crtc) + intel_crtc_control(crtc, false); + drm_modeset_unlock_all(dev); #if 0 - intel_dp_mst_suspend(dev); + intel_dp_mst_suspend(dev); #endif - intel_runtime_pm_disable_interrupts(dev_priv); - intel_hpd_cancel_work(dev_priv); + intel_runtime_pm_disable_interrupts(dev_priv); + intel_hpd_cancel_work(dev_priv); - intel_suspend_encoders(dev_priv); + intel_suspend_encoders(dev_priv); - intel_suspend_hw(dev); - } + intel_suspend_hw(dev); i915_gem_suspend_gtt_mappings(dev); @@ -617,13 +627,11 @@ static int i915_drm_suspend(struct drm_device *dev) #endif intel_opregion_notify_adapter(dev, opregion_target_state); -#if 0 intel_uncore_forcewake_reset(dev, false); -#endif intel_opregion_fini(dev); #if 0 - intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); + intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); #endif dev_priv->suspend_count++; @@ -695,53 +703,55 @@ static int i915_drm_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - mutex_lock(&dev->struct_mutex); - i915_gem_restore_gtt_mappings(dev); - mutex_unlock(&dev->struct_mutex); - } + mutex_lock(&dev->struct_mutex); + i915_gem_restore_gtt_mappings(dev); + mutex_unlock(&dev->struct_mutex); i915_restore_state(dev); intel_opregion_setup(dev); - /* KMS EnterVT equivalent */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - intel_init_pch_refclk(dev); - drm_mode_config_reset(dev); + intel_init_pch_refclk(dev); + drm_mode_config_reset(dev); - mutex_lock(&dev->struct_mutex); - if (i915_gem_init_hw(dev)) { - DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); - atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); - } - 
mutex_unlock(&dev->struct_mutex); + /* + * Interrupts have to be enabled before any batches are run. If not the + * GPU will hang. i915_gem_init_hw() will initiate batches to + * update/restore the context. + * + * Modeset enabling in intel_modeset_init_hw() also needs working + * interrupts. + */ + intel_runtime_pm_enable_interrupts(dev_priv); - /* We need working interrupts for modeset enabling ... */ - intel_runtime_pm_enable_interrupts(dev_priv); + mutex_lock(&dev->struct_mutex); + if (i915_gem_init_hw(dev)) { + DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); + atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter); + } + mutex_unlock(&dev->struct_mutex); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev); - lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); - if (dev_priv->display.hpd_irq_setup) - dev_priv->display.hpd_irq_setup(dev); - lockmgr(&dev_priv->irq_lock, LK_RELEASE); + lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); + if (dev_priv->display.hpd_irq_setup) + dev_priv->display.hpd_irq_setup(dev); + lockmgr(&dev_priv->irq_lock, LK_RELEASE); - drm_modeset_lock_all(dev); - intel_modeset_setup_hw_state(dev, true); - drm_modeset_unlock_all(dev); + drm_modeset_lock_all(dev); + intel_modeset_setup_hw_state(dev, true); + drm_modeset_unlock_all(dev); - intel_dp_mst_resume(dev); + intel_dp_mst_resume(dev); - /* - * ... but also need to make sure that hotplug processing - * doesn't cause havoc. Like in the driver load code we don't - * bother with the tiny race here where we might loose hotplug - * notifications. - * */ - intel_hpd_init(dev_priv); - /* Config may have changed between suspend and resume */ - drm_helper_hpd_irq_event(dev); - } + /* + * ... but also need to make sure that hotplug processing + * doesn't cause havoc. Like in the driver load code we don't + * bother with the tiny race here where we might lose hotplug + * notifications. + */ + intel_hpd_init(dev_priv); + /* Config may have changed between suspend and resume */ + drm_helper_hpd_irq_event(dev); intel_opregion_init(dev); @@ -903,38 +913,29 @@ int i915_reset(struct drm_device *dev) * was running at the time of the reset (i.e. we weren't VT * switched away). */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ - dev_priv->gpu_error.reload_in_reset = true; - ret = i915_gem_init_hw(dev); + /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ + dev_priv->gpu_error.reload_in_reset = true; - dev_priv->gpu_error.reload_in_reset = false; + ret = i915_gem_init_hw(dev); - mutex_unlock(&dev->struct_mutex); - if (ret) { - DRM_ERROR("Failed hw init on reset %d\n", ret); - return ret; - } - - /* - * FIXME: This races pretty badly against concurrent holders of - * ring interrupts. This is possible since we've started to drop - * dev->struct_mutex in select places when waiting for the gpu. - */ + dev_priv->gpu_error.reload_in_reset = false; - /* - * rps/rc6 re-init is necessary to restore state lost after the - * reset and the re-install of gt irqs. Skip for ironlake per - * previous concerns that it doesn't respond well to some forms - * of re-init after reset. - */ - if (INTEL_INFO(dev)->gen > 5) - intel_enable_gt_powersave(dev); - } else { - mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->struct_mutex); + if (ret) { + DRM_ERROR("Failed hw init on reset %d\n", ret); + return ret; } + /* + * rps/rc6 re-init is necessary to restore state lost after the + * reset and the re-install of gt irqs. 
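The comment introduced above pins down a strict ordering for the resume path. Condensed into a skeleton, the rewritten function proceeds as follows (a sketch of the control flow only, not additional driver code):

/* 1. Interrupts first: i915_gem_init_hw() submits context-restore batches
 *    that hang the GPU if interrupts are still off.
 */
intel_runtime_pm_enable_interrupts(dev_priv);

/* 2. Only then reinitialize the GPU, under struct_mutex. */
mutex_lock(&dev->struct_mutex);
if (i915_gem_init_hw(dev))
	atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);

/* 3. Finally bring up modeset, which also needs working interrupts. */
intel_modeset_init_hw(dev);
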
Skip for ironlake per + * previous concerns that it doesn't respond well to some forms + * of re-init after reset. + */ + if (INTEL_INFO(dev)->gen > 5) + intel_enable_gt_powersave(dev); + return 0; } @@ -1086,7 +1087,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); - s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); + s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7); s->ecochk = I915_READ(GAM_ECOCHK); @@ -1168,7 +1169,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count); + I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp); I915_WRITE(GAM_ECOCHK, s->ecochk); @@ -1717,11 +1718,9 @@ static int __init i915_init(void) if (!(driver.driver_features & DRIVER_MODESET)) { driver.get_vblank_timestamp = NULL; -#ifndef CONFIG_DRM_I915_UMS /* Silently fail loading to not upset userspace. */ DRM_DEBUG_DRIVER("KMS and UMS disabled.\n"); return 0; -#endif } /* @@ -1739,6 +1738,16 @@ static int __init i915_init(void) #endif } +#if 0 +static void __exit i915_exit(void) +{ + if (!(driver.driver_features & DRIVER_MODESET)) + return; /* Never loaded a driver. */ + + drm_pci_exit(&driver, &i915_pci_driver); +} +#endif + DRIVER_MODULE_ORDERED(i915, vgapci, i915_driver, drm_devclass, NULL, NULL, SI_ORDER_ANY); MODULE_DEPEND(i915, drm, 1, 1, 1); MODULE_DEPEND(i915, iicbus, 1, 1, 1); diff --git a/sys/dev/drm/i915/i915_drv.h b/sys/dev/drm/i915/i915_drv.h index e65243c3a4..65009e887b 100644 --- a/sys/dev/drm/i915/i915_drv.h +++ b/sys/dev/drm/i915/i915_drv.h @@ -31,6 +31,7 @@ #define _I915_DRV_H_ #include +#include #include "i915_reg.h" #include "intel_bios.h" @@ -48,7 +49,6 @@ #include #include #include -#include #include #define CONFIG_DRM_I915_FBDEV 1 @@ -62,7 +62,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20150130" +#define DRIVER_DATE "20150327" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -76,6 +76,9 @@ #define WARN_ON(x) WARN((x), "WARN_ON(" #x ")") #endif +#undef WARN_ON_ONCE +#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")") + #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ (long) (x), __func__); @@ -229,9 +232,14 @@ enum hpd_pin { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) -#define for_each_plane(pipe, p) \ - for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++) -#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) +#define for_each_plane(__dev_priv, __pipe, __p) \ + for ((__p) = 0; \ + (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ + (__p)++) +#define for_each_sprite(__dev_priv, __p, __s) \ + for ((__s) = 0; \ + (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ + (__s)++) #define for_each_crtc(dev, crtc) \ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) @@ -244,6 +252,12 @@ enum hpd_pin { &(dev)->mode_config.encoder_list, \ base.head) +#define for_each_intel_connector(dev, intel_connector) \ + list_for_each_entry(intel_connector, \ + &dev->mode_config.connector_list, \ 
+ base.head) + + #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) @@ -257,6 +271,7 @@ enum hpd_pin { if ((1 << (domain)) & (mask)) struct drm_i915_private; +struct i915_mm_struct; struct i915_mmu_object; enum intel_dpll_id { @@ -378,10 +393,6 @@ struct intel_opregion { struct intel_overlay; struct intel_overlay_error_state; -struct drm_i915_master_private { - struct drm_local_map *sarea; - struct _drm_i915_sarea *sarea_priv; -}; #define I915_FENCE_REG_NONE -1 #define I915_MAX_NUM_FENCES 32 /* 32 fences + sign bit for FENCE_REG_NONE */ @@ -422,6 +433,8 @@ struct drm_i915_error_state { u32 forcewake; u32 error; /* gen6+ */ u32 err_int; /* gen7 */ + u32 fault_data0; /* gen8, gen9 */ + u32 fault_data1; /* gen8, gen9 */ u32 done_reg; u32 gac_eco; u32 gam_ecochk; @@ -539,7 +552,7 @@ struct drm_i915_display_funcs { * Returns true on success, false on failure. */ bool (*find_dpll)(const struct intel_limit *limit, - struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock); @@ -548,7 +561,7 @@ struct drm_i915_display_funcs { struct drm_crtc *crtc, uint32_t sprite_width, uint32_t sprite_height, int pixel_size, bool enable, bool scaled); - void (*modeset_global_resources)(struct drm_device *dev); + void (*modeset_global_resources)(struct drm_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. */ bool (*get_pipe_config)(struct intel_crtc *, @@ -702,7 +715,18 @@ struct intel_device_info { int trans_offsets[I915_MAX_TRANSCODERS]; int palette_offsets[I915_MAX_PIPES]; int cursor_offsets[I915_MAX_PIPES]; - unsigned int eu_total; + + /* Slice/subslice/EU info */ + u8 slice_total; + u8 subslice_total; + u8 subslice_per_slice; + u8 eu_total; + u8 eu_per_subslice; + /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ + u8 subslice_7eu[3]; + u8 has_slice_pg:1; + u8 has_subslice_pg:1; + u8 has_eu_pg:1; }; #undef DEFINE_FLAG @@ -781,11 +805,20 @@ struct intel_context { struct list_head link; }; +enum fb_op_origin { + ORIGIN_GTT, + ORIGIN_CPU, + ORIGIN_CS, + ORIGIN_FLIP, +}; + struct i915_fbc { - unsigned long size; + unsigned long uncompressed_size; unsigned threshold; unsigned int fb_id; - enum plane plane; + unsigned int possible_framebuffer_bits; + unsigned int busy_bits; + struct intel_crtc *crtc; int y; struct drm_mm_node compressed_fb; @@ -797,14 +830,6 @@ struct i915_fbc { * possible. */ bool enabled; - /* On gen8 some rings cannont perform fbc clean operation so for now - * we are doing this on SW with mmio. - * This variable works in the opposite information direction - * of ring->fbc_dirty telling software on frontbuffer tracking - * to perform the cache clean on sw side. 
- */ - bool need_sw_cache_clean; - struct intel_fbc_work { struct delayed_work work; struct drm_crtc *crtc; @@ -842,7 +867,7 @@ enum drrs_support_type { STATIC_DRRS_SUPPORT = 1, SEAMLESS_DRRS_SUPPORT = 2 }; - + struct intel_dp; struct i915_drrs { struct lock mutex; @@ -889,6 +914,7 @@ struct intel_fbdev; struct intel_fbc_work; struct intel_gmbus { + struct i2c_adapter adapter; u32 force_bit; u32 reg0; u32 gpio_reg; @@ -905,150 +931,21 @@ struct intel_iic_softc { }; struct i915_suspend_saved_registers { - u8 saveLBB; - u32 saveDSPACNTR; - u32 saveDSPBCNTR; u32 saveDSPARB; - u32 savePIPEACONF; - u32 savePIPEBCONF; - u32 savePIPEASRC; - u32 savePIPEBSRC; - u32 saveFPA0; - u32 saveFPA1; - u32 saveDPLL_A; - u32 saveDPLL_A_MD; - u32 saveHTOTAL_A; - u32 saveHBLANK_A; - u32 saveHSYNC_A; - u32 saveVTOTAL_A; - u32 saveVBLANK_A; - u32 saveVSYNC_A; - u32 saveBCLRPAT_A; - u32 saveTRANSACONF; - u32 saveTRANS_HTOTAL_A; - u32 saveTRANS_HBLANK_A; - u32 saveTRANS_HSYNC_A; - u32 saveTRANS_VTOTAL_A; - u32 saveTRANS_VBLANK_A; - u32 saveTRANS_VSYNC_A; - u32 savePIPEASTAT; - u32 saveDSPASTRIDE; - u32 saveDSPASIZE; - u32 saveDSPAPOS; - u32 saveDSPAADDR; - u32 saveDSPASURF; - u32 saveDSPATILEOFF; - u32 savePFIT_PGM_RATIOS; - u32 saveBLC_HIST_CTL; - u32 saveBLC_PWM_CTL; - u32 saveBLC_PWM_CTL2; - u32 saveBLC_CPU_PWM_CTL; - u32 saveBLC_CPU_PWM_CTL2; - u32 saveFPB0; - u32 saveFPB1; - u32 saveDPLL_B; - u32 saveDPLL_B_MD; - u32 saveHTOTAL_B; - u32 saveHBLANK_B; - u32 saveHSYNC_B; - u32 saveVTOTAL_B; - u32 saveVBLANK_B; - u32 saveVSYNC_B; - u32 saveBCLRPAT_B; - u32 saveTRANSBCONF; - u32 saveTRANS_HTOTAL_B; - u32 saveTRANS_HBLANK_B; - u32 saveTRANS_HSYNC_B; - u32 saveTRANS_VTOTAL_B; - u32 saveTRANS_VBLANK_B; - u32 saveTRANS_VSYNC_B; - u32 savePIPEBSTAT; - u32 saveDSPBSTRIDE; - u32 saveDSPBSIZE; - u32 saveDSPBPOS; - u32 saveDSPBADDR; - u32 saveDSPBSURF; - u32 saveDSPBTILEOFF; - u32 saveVGA0; - u32 saveVGA1; - u32 saveVGA_PD; - u32 saveVGACNTRL; - u32 saveADPA; u32 saveLVDS; u32 savePP_ON_DELAYS; u32 savePP_OFF_DELAYS; - u32 saveDVOA; - u32 saveDVOB; - u32 saveDVOC; u32 savePP_ON; u32 savePP_OFF; u32 savePP_CONTROL; u32 savePP_DIVISOR; - u32 savePFIT_CONTROL; - u32 save_palette_a[256]; - u32 save_palette_b[256]; u32 saveFBC_CONTROL; - u32 saveIER; - u32 saveIIR; - u32 saveIMR; - u32 saveDEIER; - u32 saveDEIMR; - u32 saveGTIER; - u32 saveGTIMR; - u32 saveFDI_RXA_IMR; - u32 saveFDI_RXB_IMR; u32 saveCACHE_MODE_0; u32 saveMI_ARB_STATE; u32 saveSWF0[16]; u32 saveSWF1[16]; u32 saveSWF2[3]; - u8 saveMSR; - u8 saveSR[8]; - u8 saveGR[25]; - u8 saveAR_INDEX; - u8 saveAR[21]; - u8 saveDACMASK; - u8 saveCR[37]; uint64_t saveFENCE[I915_MAX_NUM_FENCES]; - u32 saveCURACNTR; - u32 saveCURAPOS; - u32 saveCURABASE; - u32 saveCURBCNTR; - u32 saveCURBPOS; - u32 saveCURBBASE; - u32 saveCURSIZE; - u32 saveDP_B; - u32 saveDP_C; - u32 saveDP_D; - u32 savePIPEA_GMCH_DATA_M; - u32 savePIPEB_GMCH_DATA_M; - u32 savePIPEA_GMCH_DATA_N; - u32 savePIPEB_GMCH_DATA_N; - u32 savePIPEA_DP_LINK_M; - u32 savePIPEB_DP_LINK_M; - u32 savePIPEA_DP_LINK_N; - u32 savePIPEB_DP_LINK_N; - u32 saveFDI_RXA_CTL; - u32 saveFDI_TXA_CTL; - u32 saveFDI_RXB_CTL; - u32 saveFDI_TXB_CTL; - u32 savePFA_CTL_1; - u32 savePFB_CTL_1; - u32 savePFA_WIN_SZ; - u32 savePFB_WIN_SZ; - u32 savePFA_WIN_POS; - u32 savePFB_WIN_POS; - u32 savePCH_DREF_CONTROL; - u32 saveDISP_ARB_CTL; - u32 savePIPEA_DATA_M1; - u32 savePIPEA_DATA_N1; - u32 savePIPEA_LINK_M1; - u32 savePIPEA_LINK_N1; - u32 savePIPEB_DATA_M1; - u32 savePIPEB_DATA_N1; - u32 savePIPEB_LINK_M1; - u32 savePIPEB_LINK_N1; - u32 
saveMCHBAR_RENDER_STANDBY; u32 savePCH_PORT_HOTPLUG; u16 saveGCDGMBUS; }; @@ -1145,13 +1042,12 @@ struct intel_gen6_power_mgmt { u8 max_freq_softlimit; /* Max frequency permitted by the driver */ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 idle_freq; /* Frequency to request when we are idle */ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp0_freq; /* Non-overclocked max frequency. */ u32 cz_freq; - u32 ei_interrupt_count; - int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; @@ -1188,9 +1084,6 @@ struct intel_ilk_power_mgmt { int c_m; int r_t; - - struct drm_i915_gem_object *pwrctx; - struct drm_i915_gem_object *renderctx; }; struct drm_i915_private; @@ -1288,7 +1181,10 @@ struct i915_gem_mm { /** PPGTT used for aliasing the PPGTT with the GTT */ struct i915_hw_ppgtt *aliasing_ppgtt; - eventhandler_tag inactive_shrinker; + struct notifier_block oom_notifier; +#if 0 + struct shrinker shrinker; +#endif bool shrinker_no_lock_stealing; /** LRU list of objects with fence regs on them. */ @@ -1471,6 +1367,7 @@ struct intel_vbt_data { bool edp_initialized; bool edp_support; int edp_bpp; + bool edp_low_vswing; struct edp_power_seq edp_pps; struct { @@ -1531,6 +1428,25 @@ struct ilk_wm_values { enum intel_ddb_partitioning partitioning; }; +struct vlv_wm_values { + struct { + uint16_t primary; + uint16_t sprite[2]; + uint8_t cursor; + } pipe[3]; + + struct { + uint16_t plane; + uint8_t cursor; + } sr; + + struct { + uint8_t cursor; + uint8_t sprite[2]; + uint8_t primary; + } ddl[3]; +}; + struct skl_ddb_entry { uint16_t start, end; /* in number of blocks, 'end' is exclusive */ }; @@ -1657,6 +1573,10 @@ struct i915_workarounds { u32 count; }; +struct i915_virtual_gpu { + bool active; +}; + struct drm_i915_private { struct drm_device *dev; struct kmem_cache *slab; @@ -1675,6 +1595,8 @@ struct drm_i915_private { struct intel_uncore uncore; + struct i915_virtual_gpu vgpu; + device_t *gmbus; @@ -1698,7 +1620,7 @@ struct drm_i915_private { struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; - drm_dma_handle_t *status_page_dmah; + struct drm_dma_handle *status_page_dmah; struct resource *mch_res; int mch_res_rid; @@ -1791,9 +1713,8 @@ struct drm_i915_private { struct i915_gtt gtt; /* VM representing the global address space */ struct i915_gem_mm mm; -#if defined(CONFIG_MMU_NOTIFIER) - DECLARE_HASHTABLE(mmu_notifiers, 7); -#endif + DECLARE_HASHTABLE(mm_structs, 7); + struct lock mm_lock; /* Kernel Modesetting */ @@ -1896,6 +1817,7 @@ struct drm_i915_private { union { struct ilk_wm_values hw; struct skl_wm_values skl_hw; + struct vlv_wm_values vlv; }; } wm; @@ -2088,7 +2010,7 @@ struct drm_i915_gem_object { unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; - vm_page_t *pages; + struct vm_page **pages; int pages_pin_count; /* prime dma-buf support */ @@ -2120,8 +2042,8 @@ struct drm_i915_gem_object { unsigned workers :4; #define I915_GEM_USERPTR_MAX_WORKERS 15 - struct mm_struct *mm; - struct i915_mmu_object *mn; + struct i915_mm_struct *mm; + struct i915_mmu_object *mmu_object; struct work_struct *work; } userptr; }; @@ -2169,7 +2091,7 @@ struct drm_i915_gem_request { u32 tail; /** - * Context related to this request + * Context and ring buffer related to this request * Contexts are refcounted, so when this request is associated with a * context, we must increment the context's refcount, to guarantee that * it persists while any 
request is linked to it. Requests themselves @@ -2179,6 +2101,7 @@ struct drm_i915_gem_request { * context. */ struct intel_context *ctx; + struct intel_ringbuffer *ringbuf; /** Batch buffer related to this request if any */ struct drm_i915_gem_object *batch_obj; @@ -2193,6 +2116,9 @@ struct drm_i915_gem_request { /** file_priv list entry for this request */ struct list_head client_list; + /** process identifier submitting this request */ + pid_t pid; + uint32_t uniq; /** @@ -2239,6 +2165,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req) static inline void i915_gem_request_unreference(struct drm_i915_gem_request *req) { + WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); kref_put(&req->ref, i915_gem_request_free); } @@ -2374,9 +2301,9 @@ struct drm_i915_cmd_table { __p = to_i915((const struct drm_device *)p); \ __p; \ }) - #define INTEL_INFO(p) (&__I915__(p)->info) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) +#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) #define IS_I830(dev) (INTEL_DEVID(dev) == 0x3577) #define IS_845G(dev) (INTEL_DEVID(dev) == 0x2562) @@ -2399,9 +2326,6 @@ struct drm_i915_cmd_table { #define IS_IVB_GT1(dev) (INTEL_DEVID(dev) == 0x0156 || \ INTEL_DEVID(dev) == 0x0152 || \ INTEL_DEVID(dev) == 0x015a) -#define IS_SNB_GT1(dev) (INTEL_DEVID(dev) == 0x0102 || \ - INTEL_DEVID(dev) == 0x0106 || \ - INTEL_DEVID(dev) == 0x010A) #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) #define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) @@ -2425,6 +2349,12 @@ struct drm_i915_cmd_table { INTEL_DEVID(dev) == 0x0A1E) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) +#define SKL_REVID_A0 (0x0) +#define SKL_REVID_B0 (0x1) +#define SKL_REVID_C0 (0x2) +#define SKL_REVID_D0 (0x3) +#define SKL_REVID_E0 (0x4) + /* * The genX designation typically refers to the render engine, so render * capability related checks should use IS_GEN, while display and other checks @@ -2524,6 +2454,7 @@ struct drm_i915_cmd_table { #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 
2 : HAS_L3_DPF(dev)) #define GT_FREQUENCY_MULTIPLIER 50 +#define GEN9_FREQ_SCALER 3 #include "i915_trace.h" @@ -2532,14 +2463,11 @@ extern int i915_max_ioctl; extern int i915_suspend_legacy(device_t kdev); extern int i915_resume_legacy(struct drm_device *dev); -extern int i915_master_create(struct drm_device *dev, struct drm_master *master); -extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); /* i915_params.c */ struct i915_params { int modeset; int panel_ignore_lid; - unsigned int powersave; int semaphores; unsigned int lvds_downclock; int lvds_channel_mode; @@ -2559,11 +2487,12 @@ struct i915_params { bool enable_hangcheck; bool fastboot; bool prefault_disable; - int reset; + bool load_detect_test; + int reset; bool disable_display; bool disable_vtd_wa; int use_mmio_flip; - bool mmio_debug; + int mmio_debug; bool verbose_state_checks; bool nuclear_pageflip; }; @@ -2616,6 +2545,10 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, enum forcewake_domains domains); void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); +static inline bool intel_vgpu_active(struct drm_device *dev) +{ + return to_i915(dev)->vgpu.active; +} void i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, @@ -2694,12 +2627,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); -unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, - long target, - unsigned flags); -#define I915_SHRINK_PURGEABLE 0x1 -#define I915_SHRINK_UNBOUND 0x2 -#define I915_SHRINK_BOUND 0x4 void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, @@ -2716,20 +2643,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma); #define PIN_GLOBAL 0x4 #define PIN_OFFSET_BIAS 0x8 #define PIN_OFFSET_MASK (~4095) -int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags, - const struct i915_ggtt_view *view); -static inline -int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags) -{ - return i915_gem_object_pin_view(obj, vm, alignment, flags, - &i915_ggtt_view_normal); -} +int __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + uint32_t alignment, + uint64_t flags); +int __must_check +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + uint32_t alignment, + uint64_t flags); int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); @@ -2864,8 +2787,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined); -void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); + struct intel_engine_cs *pipelined, + const struct i915_ggtt_view *view); +void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); int i915_gem_open(struct 
drm_device *dev, struct drm_file *file); @@ -2890,60 +2815,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, void i915_gem_restore_fences(struct drm_device *dev); -unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view); -static inline -unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm) +unsigned long +i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view); +unsigned long +i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) { - return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL); + return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); } + bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); -bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view); -static inline +bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view); bool i915_gem_obj_bound(struct drm_i915_gem_object *o, - struct i915_address_space *vm) -{ - return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL); -} + struct i915_address_space *vm); unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, struct i915_address_space *vm); -struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view); -static inline -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal); -} - struct i915_vma * -i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view); +i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); -static inline struct i915_vma * i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - return i915_gem_obj_lookup_or_create_vma_view(obj, vm, - &i915_ggtt_view_normal); -} + struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); -struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); -static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->pin_count > 0) - return true; - return false; +static inline struct i915_vma * +i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); } +bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); /* Some GGTT VM helpers */ #define i915_obj_to_ggtt(obj) \ @@ -2966,13 +2877,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) { - return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj)); -} - -static inline unsigned long -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) -{ - return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj)); + return i915_gem_obj_ggtt_bound_view(obj, 
&i915_ggtt_view_normal); } static inline unsigned long @@ -2996,7 +2901,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); } -void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); +void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); +static inline void +i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +{ + i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); +} /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_device *dev); @@ -3068,6 +2979,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 gtt_offset, u32 size); +/* i915_gem_shrinker.c */ +unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, + long target, + unsigned flags); +#define I915_SHRINK_PURGEABLE 0x1 +#define I915_SHRINK_UNBOUND 0x2 +#define I915_SHRINK_BOUND 0x4 +unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); +void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); + + /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { @@ -3143,10 +3065,6 @@ int i915_parse_cmds(struct intel_engine_cs *ring, extern int i915_save_state(struct drm_device *dev); extern int i915_restore_state(struct drm_device *dev); -/* i915_ums.c */ -void i915_save_display_reg(struct drm_device *dev); -void i915_restore_display_reg(struct drm_device *dev); - /* i915_sysfs.c */ void i915_setup_sysfs(struct drm_device *dev_priv); void i915_teardown_sysfs(struct drm_device *dev_priv); @@ -3159,11 +3077,11 @@ static inline bool intel_gmbus_is_port_valid(unsigned port) return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD); } -extern struct device *intel_gmbus_get_adapter( +extern struct i2c_adapter *intel_gmbus_get_adapter( struct drm_i915_private *dev_priv, unsigned port); -extern void intel_gmbus_set_speed(struct device *adapter, int speed); -extern void intel_gmbus_force_bit(struct device *adapter, bool force_bit); -static inline bool intel_gmbus_is_forced_bit(struct device *adapter) +extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); +extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter) { struct intel_iic_softc *sc; sc = device_get_softc(device_get_parent(adapter)); @@ -3219,11 +3137,9 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev, bool force_restore); extern void i915_redisable_vga(struct drm_device *dev); extern void i915_redisable_vga_power_on(struct drm_device *dev); -extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); -extern void gen6_set_rps(struct drm_device *dev, u8 val); -extern void valleyview_set_rps(struct drm_device *dev, u8 val); +extern void intel_set_rps(struct drm_device *dev, u8 val); extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); extern void intel_detect_pch(struct drm_device *dev); @@ -3238,8 +3154,6 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data, struct intel_device_info *i915_get_device_id(int device); -void intel_notify_mmio_flip(struct intel_engine_cs *ring); - /* overlay */ extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); extern void 
intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, @@ -3376,7 +3290,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) if (time_after(target_jiffies, tmp_jiffies)) { remaining_jiffies = target_jiffies - tmp_jiffies; - #if 0 while (remaining_jiffies) remaining_jiffies = diff --git a/sys/dev/drm/i915/i915_gem.c b/sys/dev/drm/i915/i915_gem.c index 508aabfac0..8cb8fd945c 100644 --- a/sys/dev/drm/i915/i915_gem.c +++ b/sys/dev/drm/i915/i915_gem.c @@ -1,5 +1,5 @@ /* - * Copyright © 2008 Intel Corporation + * Copyright © 2008-2015 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -58,6 +58,7 @@ #include #include #include "i915_drv.h" +#include "i915_vgpu.h" #include "i915_trace.h" #include "intel_drv.h" #include @@ -79,8 +80,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable); -static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); - static bool cpu_cache_is_coherent(struct drm_device *dev, enum i915_cache_level level) { @@ -376,7 +375,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; void *vaddr = (char *)obj->phys_handle->vaddr + args->offset; char __user *user_data = to_user_ptr(args->data_ptr); - int ret; + int ret = 0; /* We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite. @@ -385,6 +384,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, if (ret) return ret; + intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { unsigned long unwritten; @@ -395,13 +395,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, mutex_unlock(&dev->struct_mutex); unwritten = copy_from_user(vaddr, user_data, args->size); mutex_lock(&dev->struct_mutex); - if (unwritten) - return -EFAULT; + if (unwritten) { + ret = -EFAULT; + goto out; + } } drm_clflush_virt_range(vaddr, args->size); i915_gem_chipset_flush(dev); - return 0; + +out: + intel_fb_obj_flush(obj, false); + return ret; } void *i915_gem_object_alloc(struct drm_device *dev) @@ -822,6 +827,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, offset = i915_gem_obj_ggtt_offset(obj) + args->offset; + intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); + while (remain > 0) { /* Operation in this page * @@ -842,7 +849,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, if (fast_user_write(dev_priv->gtt.mappable, page_base, page_offset, user_data, page_length)) { ret = -EFAULT; - goto out_unpin; + goto out_flush; } remain -= page_length; @@ -850,6 +857,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, offset += page_length; } +out_flush: + intel_fb_obj_flush(obj, false); out_unpin: i915_gem_object_ggtt_unpin(obj); out: @@ -964,6 +973,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev, if (ret) return ret; + intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); + i915_gem_object_pin_pages(obj); offset = args->offset; @@ -1048,6 +1059,7 @@ out: if (needs_clflush_after) i915_gem_chipset_flush(dev); + intel_fb_obj_flush(obj, false); return ret; } @@ -2030,12 +2042,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); } -static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) -{ - return obj->madv == I915_MADV_DONTNEED; 
-} - /* Immediately discard the backing storage */ static void i915_gem_object_truncate(struct drm_i915_gem_object *obj) @@ -2146,85 +2152,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) return 0; } -unsigned long -i915_gem_shrink(struct drm_i915_private *dev_priv, - long target, unsigned flags) -{ - const struct { - struct list_head *list; - unsigned int bit; - } phases[] = { - { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, - { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, - { NULL, 0 }, - }, *phase; - unsigned long count = 0; - - /* - * As we may completely rewrite the (un)bound list whilst unbinding - * (due to retiring requests) we have to strictly process only - * one element of the list at the time, and recheck the list - * on every iteration. - * - * In particular, we must hold a reference whilst removing the - * object as we may end up waiting for and/or retiring the objects. - * This might release the final reference (held by the active list) - * and result in the object being freed from under us. This is - * similar to the precautions the eviction code must take whilst - * removing objects. - * - * Also note that although these lists do not hold a reference to - * the object we can safely grab one here: The final object - * unreferencing and the bound_list are both protected by the - * dev->struct_mutex and so we won't ever be able to observe an - * object on the bound_list with a reference count equals 0. - */ - for (phase = phases; phase->list; phase++) { - struct list_head still_in_list; - - if ((flags & phase->bit) == 0) - continue; - - INIT_LIST_HEAD(&still_in_list); - while (count < target && !list_empty(phase->list)) { - struct drm_i915_gem_object *obj; - struct i915_vma *vma, *v; - - obj = list_first_entry(phase->list, - typeof(*obj), global_list); - list_move_tail(&obj->global_list, &still_in_list); - - if (flags & I915_SHRINK_PURGEABLE && - !i915_gem_object_is_purgeable(obj)) - continue; - - drm_gem_object_reference(&obj->base); - - /* For the unbound phase, this should be a no-op! */ - list_for_each_entry_safe(vma, v, - &obj->vma_list, vma_link) - if (i915_vma_unbind(vma)) - break; - - if (i915_gem_object_put_pages(obj) == 0) - count += obj->base.size >> PAGE_SHIFT; - - drm_gem_object_unreference(&obj->base); - } - list_splice(&still_in_list, phase->list); - } - - return count; -} - -static unsigned long -i915_gem_shrink_all(struct drm_i915_private *dev_priv) -{ - i915_gem_evict_everything(dev_priv->dev); - return i915_gem_shrink(dev_priv, LONG_MAX, - I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); -} - static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { @@ -2537,10 +2464,11 @@ int __i915_add_request(struct intel_engine_cs *ring, ret = ring->add_request(ring); if (ret) return ret; + + request->tail = intel_ring_get_tail(ringbuf); } request->head = request_start; - request->tail = intel_ring_get_tail(ringbuf); /* Whilst this request exists, batch_obj will be on the * active_list, and so will hold the active reference. 
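Looping back to the pwrite hunks above: every CPU-visible write path now brackets the copy with frontbuffer-tracking calls so DRRS/FBC/PSR can react, and the flush runs on the error path too. A minimal sketch of the pattern; the wrapper function is hypothetical, while the two calls and the ORIGIN_CPU value come from this patch.

/* Hypothetical helper showing the invalidate/copy/flush bracket. */
static int frontbuffer_write_sketch(struct drm_i915_gem_object *obj,
				    void *vaddr,
				    const char __user *user_data,
				    size_t size)
{
	int ret = 0;

	/* Announce the upcoming CPU write to frontbuffer tracking. */
	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, size))
		ret = -EFAULT;

	/* Always pair with a flush, even when the copy faulted. */
	intel_fb_obj_flush(obj, false);
	return ret;
}
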
Only when this @@ -2571,6 +2499,8 @@ int __i915_add_request(struct intel_engine_cs *ring, list_add_tail(&request->client_list, &file_priv->mm.request_list); spin_unlock(&file_priv->mm.lock); + + request->pid = curproc->p_pid; } trace_i915_gem_request_add(request); @@ -2651,6 +2581,10 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) list_del(&request->list); i915_gem_request_remove_from_client(request); +#if 0 + put_pid(request->pid); +#endif + i915_gem_request_unreference(request); } @@ -2823,7 +2757,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) */ while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; - struct intel_ringbuffer *ringbuf; request = list_first_entry(&ring->request_list, struct drm_i915_gem_request, @@ -2834,23 +2767,12 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) trace_i915_gem_request_retire(request); - /* This is one of the few common intersection points - * between legacy ringbuffer submission and execlists: - * we need to tell them apart in order to find the correct - * ringbuffer to which the request belongs to. - */ - if (i915.enable_execlists) { - struct intel_context *ctx = request->ctx; - ringbuf = ctx->engine[ring->id].ringbuf; - } else - ringbuf = ring->buffer; - /* We know the GPU must have read the request to have * sent us the seqno + interrupt, so use the position * of tail of the request to update the last known position * of the GPU head. */ - ringbuf->last_retired_head = request->postfix; + request->ringbuf->last_retired_head = request->postfix; i915_gem_free_request(request); } @@ -3168,8 +3090,8 @@ int i915_vma_unbind(struct i915_vma *vma) obj->map_and_fenceable = false; } else if (vma->ggtt_view.pages) { kfree(vma->ggtt_view.pages); - vma->ggtt_view.pages = NULL; } + vma->ggtt_view.pages = NULL; } drm_mm_remove_node(&vma->node); @@ -3586,9 +3508,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, + const struct i915_ggtt_view *ggtt_view, unsigned alignment, - uint64_t flags, - const struct i915_ggtt_view *view) + uint64_t flags) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -3600,6 +3522,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; + if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) + return ERR_PTR(-EINVAL); + fence_size = i915_gem_get_gtt_size(dev, obj->base.size, obj->tiling_mode); @@ -3638,7 +3563,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); - vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view); + vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) : + i915_gem_obj_lookup_or_create_vma(obj, vm); + if (IS_ERR(vma)) goto err_unpin; @@ -3668,6 +3595,17 @@ search_free: if (ret) goto err_remove_node; + /* allocate before insert / bind */ + if (vma->vm->allocate_va_range) { + trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size, + VM_TO_TRACE_NAME(vma->vm)); + ret = vma->vm->allocate_va_range(vma->vm, + vma->node.start, + vma->node.size); + if (ret) + goto err_remove_node; + } + trace_i915_vma_bind(vma, flags); ret = i915_vma_bind(vma, obj->cache_level, flags & PIN_GLOBAL ? 
GLOBAL_BIND : 0); @@ -3838,7 +3776,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) } if (write) - intel_fb_obj_invalidate(obj, NULL); + intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -4020,7 +3958,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined) + struct intel_engine_cs *pipelined, + const struct i915_ggtt_view *view) { u32 old_read_domains, old_write_domain; bool was_pin_display; @@ -4056,7 +3995,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE); + ret = i915_gem_object_ggtt_pin(obj, view, alignment, + view->type == I915_GGTT_VIEW_NORMAL ? + PIN_MAPPABLE : 0); if (ret) goto err_unpin_display; @@ -4084,9 +4025,11 @@ err_unpin_display: } void -i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) +i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) { - i915_gem_object_ggtt_unpin(obj); + i915_gem_object_ggtt_unpin_view(obj, view); + obj->pin_display = is_pin_display(obj); } @@ -4153,7 +4096,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) } if (write) - intel_fb_obj_invalidate(obj, NULL); + intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -4235,12 +4178,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) return false; } -int -i915_gem_object_pin_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags, - const struct i915_ggtt_view *view) +static int +i915_gem_object_do_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *ggtt_view, + uint32_t alignment, + uint64_t flags) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct i915_vma *vma; @@ -4256,17 +4199,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) return -EINVAL; - vma = i915_gem_obj_to_vma_view(obj, vm, view); + if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) + return -EINVAL; + + vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) : + i915_gem_obj_to_vma(obj, vm); + + if (IS_ERR(vma)) + return PTR_ERR(vma); + if (vma) { if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; if (i915_vma_misplaced(vma, alignment, flags)) { + unsigned long offset; + offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) : + i915_gem_obj_offset(obj, vm); WARN(vma->pin_count, - "bo is already pinned with incorrect alignment:" + "bo is already pinned in %s with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - i915_gem_obj_offset_view(obj, vm, view->type), + ggtt_view ? "ggtt" : "ppgtt", + offset, alignment, !!(flags & PIN_MAPPABLE), obj->map_and_fenceable); @@ -4280,8 +4235,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, bound = vma ? 
vma->bound : 0; if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { - vma = i915_gem_object_bind_to_vm(obj, vm, alignment, - flags, view); + /* In true PPGTT, bind has possibly changed PDEs, which + * means we must do a context switch before the GPU can + * accurately read some of the VMAs. + */ + vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, + flags); if (IS_ERR(vma)) return PTR_ERR(vma); } @@ -4307,7 +4266,7 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, fenceable = (vma->node.size == fence_size && (vma->node.start & (fence_alignment - 1)) == 0); - mappable = (vma->node.start + obj->base.size <= + mappable = (vma->node.start + fence_size <= dev_priv->gtt.mappable_end); obj->map_and_fenceable = mappable && fenceable; @@ -4322,16 +4281,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, return 0; } +int +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + uint32_t alignment, + uint64_t flags) +{ + return i915_gem_object_do_pin(obj, vm, + i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL, + alignment, flags); +} + +int +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + uint32_t alignment, + uint64_t flags) +{ + if (WARN_ONCE(!view, "no view specified")) + return -EINVAL; + + return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view, + alignment, flags | PIN_GLOBAL); +} + void -i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) { - struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); + struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); BUG_ON(!vma); - BUG_ON(vma->pin_count == 0); - BUG_ON(!i915_gem_obj_ggtt_bound(obj)); + WARN_ON(vma->pin_count == 0); + WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); - if (--vma->pin_count == 0) + if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL) obj->pin_mappable = false; } @@ -4452,7 +4436,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->madv = args->madv; /* if the object is no longer attached, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) + if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL) i915_gem_object_truncate(obj); args->retained = obj->madv != __I915_MADV_PURGED; @@ -4637,15 +4621,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) intel_runtime_pm_put(dev_priv); } -struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) { struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->vm == vm && vma->ggtt_view.type == view->type) + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm) return vma; + } + return NULL; +} + +struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) +{ + struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); + struct i915_vma *vma; + if (WARN_ONCE(!view, "no view specified")) + return ERR_PTR(-EINVAL); + + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view)) + return vma; return NULL; } @@ -4692,10 +4694,6 @@ 
i915_gem_suspend(struct drm_device *dev) i915_gem_retire_requests(dev); - /* Under UMS, be paranoid and evict. */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_gem_evict_everything(dev); - i915_gem_stop_ringbuffers(dev); mutex_unlock(&dev->struct_mutex); @@ -5064,18 +5062,8 @@ i915_gem_load(struct drm_device *dev) i915_gem_idle_work_handler); init_waitqueue_head(&dev_priv->gpu_error.reset_queue); - /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ - if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) { - I915_WRITE(MI_ARB_STATE, - _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); - } - dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - /* Old X drivers will take 0-2 for front, back, depth buffers */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - dev_priv->fence_reg_start = 3; - if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev)) dev_priv->num_fence_regs = 32; else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) @@ -5083,6 +5071,10 @@ i915_gem_load(struct drm_device *dev) else dev_priv->num_fence_regs = 8; + if (intel_vgpu_active(dev)) + dev_priv->num_fence_regs = + I915_READ(vgtif_reg(avail_rs.fence_num)); + /* Initialize fence registers to zero */ INIT_LIST_HEAD(&dev_priv->mm.fence_list); i915_gem_restore_fences(dev); @@ -5092,15 +5084,7 @@ i915_gem_load(struct drm_device *dev) dev_priv->mm.interruptible = true; -#if 0 - dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; - dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; - dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; - register_shrinker(&dev_priv->mm.shrinker); - - dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; - register_oom_notifier(&dev_priv->mm.oom_notifier); -#endif + i915_gem_shrinker_init(dev_priv); i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool); @@ -5216,110 +5200,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, } } -#if 0 -static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) -{ - if (!mutex_is_locked(mutex)) - return false; - -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) - return mutex->owner == task; -#else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ - return false; -#endif -} -#endif - -#if 0 -static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) +/* All the new VM stuff */ +unsigned long +i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm) { - if (!mutex_trylock(&dev->struct_mutex)) { - if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return false; + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_vma *vma; - if (to_i915(dev)->mm.shrinker_no_lock_stealing) - return false; + WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - *unlock = false; - } else - *unlock = true; + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm) + return vma->node.start; + } - return true; + WARN(1, "%s vma for this object not found.\n", + i915_is_ggtt(vm) ? 
"global" : "ppgtt"); + return -1; } -static int num_vma_bound(struct drm_i915_gem_object *obj) +unsigned long +i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; - int count = 0; - - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (drm_mm_node_allocated(&vma->node)) - count++; - - return count; -} - -static unsigned long -i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) -{ - struct drm_i915_private *dev_priv = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; - unsigned long count; - bool unlock; - - if (!i915_gem_shrinker_lock(dev, &unlock)) - return 0; - - count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) - if (obj->pages_pin_count == 0) - count += obj->base.size >> PAGE_SHIFT; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!i915_gem_obj_is_pinned(obj) && - obj->pages_pin_count == num_vma_bound(obj)) - count += obj->base.size >> PAGE_SHIFT; - } - if (unlock) - mutex_unlock(&dev->struct_mutex); + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view)) + return vma->node.start; - return count; + WARN(1, "global vma for this object not found.\n"); + return -1; } -#endif -/* All the new VM stuff */ -unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view) +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - list_for_each_entry(vma, &o->vma_list, vma_link) { - if (vma->vm == vm && vma->ggtt_view.type == view) - return vma->node.start; - + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) + return true; } - WARN(1, "%s vma for this object not found.\n", - i915_is_ggtt(vm) ? 
"global" : "ppgtt"); - return -1; + + return false; } -bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view) +bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, vma_link) - if (vma->vm == vm && - vma->ggtt_view.type == view && + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; @@ -5347,120 +5291,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, BUG_ON(list_empty(&o->vma_list)); - list_for_each_entry(vma, &o->vma_list, vma_link) + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; if (vma->vm == vm) return vma->node.size; - + } return 0; } -#if 0 -static unsigned long -i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; - unsigned long freed; - bool unlock; - - if (!i915_gem_shrinker_lock(dev, &unlock)) - return SHRINK_STOP; - - freed = i915_gem_shrink(dev_priv, - sc->nr_to_scan, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE); - if (freed < sc->nr_to_scan) - freed += i915_gem_shrink(dev_priv, - sc->nr_to_scan - freed, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - if (unlock) - mutex_unlock(&dev->struct_mutex); - - return freed; -} - -static int -i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) -{ - struct drm_i915_private *dev_priv = - container_of(nb, struct drm_i915_private, mm.oom_notifier); - struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; - unsigned long timeout = msecs_to_jiffies(5000) + 1; - unsigned long pinned, bound, unbound, freed_pages; - bool was_interruptible; - bool unlock; - - while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { - schedule_timeout_killable(1); - if (fatal_signal_pending(current)) - return NOTIFY_DONE; - } - if (timeout == 0) { - pr_err("Unable to purge GPU memory due lock contention.\n"); - return NOTIFY_DONE; - } - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - freed_pages = i915_gem_shrink_all(dev_priv); - - dev_priv->mm.interruptible = was_interruptible; - - /* Because we may be allocating inside our own driver, we cannot - * assert that there are no objects with pinned pages that are not - * being pointed to by hardware. 
- */ - unbound = bound = pinned = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { - if (!obj->base.filp) /* not backed by a freeable object */ - continue; - - if (obj->pages_pin_count) - pinned += obj->base.size; - else - unbound += obj->base.size; - } - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!obj->base.filp) + struct i915_vma *vma; + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) continue; - - if (obj->pages_pin_count) - pinned += obj->base.size; - else - bound += obj->base.size; + if (vma->pin_count > 0) + return true; } - - if (unlock) - mutex_unlock(&dev->struct_mutex); - - if (freed_pages || unbound || bound) - pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", - freed_pages << PAGE_SHIFT, pinned); - if (unbound || bound) - pr_err("%lu and %lu bytes still available in the " - "bound and unbound GPU page lists.\n", - bound, unbound); - - *(unsigned long *)ptr += freed_pages; - return NOTIFY_DONE; + return false; } -#endif - -struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) -{ - struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->vm == ggtt && - vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) - return vma; - - return NULL; -} diff --git a/sys/dev/drm/i915/i915_gem_context.c b/sys/dev/drm/i915/i915_gem_context.c index 8603bf48d3..f3e84c44d0 100644 --- a/sys/dev/drm/i915/i915_gem_context.c +++ b/sys/dev/drm/i915/i915_gem_context.c @@ -296,11 +296,15 @@ void i915_gem_context_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - /* In execlists mode we will unreference the context when the execlist - * queue is cleared and the requests destroyed. 
- */ - if (i915.enable_execlists) + if (i915.enable_execlists) { + struct intel_context *ctx; + + list_for_each_entry(ctx, &dev_priv->context_list, link) { + intel_lr_context_reset(dev, ctx); + } + return; + } for (i = 0; i < I915_NUM_RINGS; i++) { struct intel_engine_cs *ring = &dev_priv->ring[i]; @@ -565,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring, return ret; } +static inline bool should_skip_switch(struct intel_engine_cs *ring, + struct intel_context *from, + struct intel_context *to) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (to->remap_slice) + return false; + + if (to->ppgtt) { + if (from == to && !test_bit(ring->id, + &to->ppgtt->pd_dirty_rings)) + return true; + } else if (dev_priv->mm.aliasing_ppgtt) { + if (from == to && !test_bit(ring->id, + &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings)) + return true; + } + + return false; +} + +static bool +needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (!to->ppgtt) + return false; + + if (INTEL_INFO(ring->dev)->gen < 8) + return true; + + if (ring != &dev_priv->ring[RCS]) + return true; + + return false; +} + +static bool +needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to, + u32 hw_flags) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (!to->ppgtt) + return false; + + if (!IS_GEN8(ring->dev)) + return false; + + if (ring != &dev_priv->ring[RCS]) + return false; + + if (hw_flags & MI_RESTORE_INHIBIT) + return true; + + return false; +} + static int do_switch(struct intel_engine_cs *ring, struct intel_context *to) { @@ -580,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring, BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); } - if (from == to && !to->remap_slice) + if (should_skip_switch(ring, from, to)) return 0; /* Trying to pin first makes error handling easier. */ @@ -598,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring, */ from = ring->last_context; - if (to->ppgtt) { + if (needs_pd_load_pre(ring, to)) { + /* Older GENs and non render rings still want the load first, + * "PP_DCLV followed by PP_DIR_BASE register through Load + * Register Immediate commands in Ring Buffer before submitting + * a context."*/ trace_switch_mm(ring, to); ret = to->ppgtt->switch_mm(to->ppgtt, ring); if (ret) goto unpin_out; + + /* Doing a PD load always reloads the page dirs */ + clear_bit(ring->id, &to->ppgtt->pd_dirty_rings); } if (ring != &dev_priv->ring[RCS]) { @@ -633,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring, goto unpin_out; } - if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) + if (!to->legacy_hw_ctx.initialized) { hw_flags |= MI_RESTORE_INHIBIT; + /* NB: If we inhibit the restore, the context is not allowed to + * die because future work may end up depending on valid address + * space. This means we must enforce that a page table load + * occur when this occurs. */ + } else if (to->ppgtt && + test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings)) + hw_flags |= MI_FORCE_RESTORE; + + /* We should never emit switch_mm more than once */ + WARN_ON(needs_pd_load_pre(ring, to) && + needs_pd_load_post(ring, to, hw_flags)); ret = mi_set_context(ring, to, hw_flags); if (ret) goto unpin_out; + /* GEN8 does *not* require an explicit reload if the PDPs have been + * setup, and we do not wish to move them. 
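The predicates added above split the page-directory load around MI_SET_CONTEXT: pre-gen8, and any non-render ring, must load PP_DIR_BASE before the switch, while the gen8 render ring reloads afterwards only when the restore was inhibited. A condensed userspace model of that decision, assuming to->ppgtt is set (both helpers bail out early otherwise) and reducing gen and ring to plain values:

    #include <stdbool.h>
    #include <stdio.h>

    /* rcs = 1 for the render ring, 0 for any other ring */
    static bool load_pre(int gen, int rcs) { return gen < 8 || !rcs; }

    static bool load_post(int gen, int rcs, bool restore_inhibited)
    {
        return gen == 8 && rcs && restore_inhibited;
    }

    int main(void)
    {
        for (int gen = 6; gen <= 8; gen++)
            for (int rcs = 0; rcs <= 1; rcs++)
                printf("gen%d %s: pre=%d post=%d\n", gen,
                       rcs ? "rcs" : "other", load_pre(gen, rcs),
                       load_post(gen, rcs, true));
        return 0;
    }

The two can never both be true for a single switch, which is exactly what the WARN_ON in do_switch checks.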
+	 */
+	if (needs_pd_load_post(ring, to, hw_flags)) {
+		trace_switch_mm(ring, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
+		/* The hardware context switch is emitted, but we haven't
+		 * actually changed the state - so it's probably safe to bail
+		 * here. Still, let the user know something dangerous has
+		 * happened.
+		 */
+		if (ret) {
+			DRM_ERROR("Failed to change address space on context switch\n");
+			goto unpin_out;
+		}
+	}
+
 	for (i = 0; i < MAX_L3_SLICES; i++) {
 		if (!(to->remap_slice & (1<<i)))
 			continue;
 
 		ret = i915_gem_l3_remap(ring, i);
 		/* If it failed, try again next round */
 		if (ret)
 			DRM_DEBUG_DRIVER("L3 remapping failed\n");
 		else
 			to->remap_slice &= ~(1<<i);
 	}
 
-	uninitialized = !to->legacy_hw_ctx.initialized && from == NULL;
+	uninitialized = !to->legacy_hw_ctx.initialized;
 	to->legacy_hw_ctx.initialized = true;
 
 done:
diff --git a/sys/dev/drm/i915/i915_gem_evict.c b/sys/dev/drm/i915/i915_gem_evict.c
index e3a49d94da..d09e35ed9c 100644
--- a/sys/dev/drm/i915/i915_gem_evict.c
+++ b/sys/dev/drm/i915/i915_gem_evict.c
@@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  *
  * This function is used by the object/vma binding code.
  *
+ * Since this function is only used to free up virtual address space it only
+ * ignores pinned vmas, and not objects where the backing storage itself is
+ * pinned. Hence obj->pages_pin_count does not protect against eviction.
+ *
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
diff --git a/sys/dev/drm/i915/i915_gem_execbuffer.c b/sys/dev/drm/i915/i915_gem_execbuffer.c
index 06016145dd..141d68c605 100644
--- a/sys/dev/drm/i915/i915_gem_execbuffer.c
+++ b/sys/dev/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include
+#include
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -251,7 +252,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
 {
 	return (HAS_LLC(obj->base.dev) ||
 		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
-		!obj->map_and_fenceable ||
 		obj->cache_level != I915_CACHE_NONE);
 }
 
@@ -337,6 +337,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+static void
+clflush_write32(void *addr, uint32_t value)
+{
+	/* This is not a fast path, so KISS.
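The helper being added here brackets a CPU store with cache-line flushes so the write reaches memory the GPU will read, without needing a GTT mapping. A userspace x86 sketch of the same flush-write-flush shape, substituting the SSE2 clflush intrinsic for drm_clflush_virt_range:

    #include <stdint.h>
    #include <stdio.h>
    #include <emmintrin.h>  /* _mm_clflush, baseline SSE2 on x86-64 */

    static void clflush_write32(volatile uint32_t *addr, uint32_t value)
    {
        _mm_clflush((const void *)addr);  /* discard any stale cached line */
        *addr = value;
        _mm_clflush((const void *)addr);  /* push the new store out of cache */
    }

    int main(void)
    {
        static uint32_t reloc_slot;
        clflush_write32(&reloc_slot, 0xdeadbeefu);
        printf("0x%08x\n", reloc_slot);
        return 0;
    }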
*/ + drm_clflush_virt_range(addr, sizeof(uint32_t)); + *(uint32_t *)addr = value; + drm_clflush_virt_range(addr, sizeof(uint32_t)); +} + +static int +relocate_entry_clflush(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc, + uint64_t target_offset) +{ + struct drm_device *dev = obj->base.dev; + uint32_t page_offset = offset_in_page(reloc->offset); + uint64_t delta = (int)reloc->delta + target_offset; + char *vaddr; + int ret; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + clflush_write32(vaddr + page_offset, lower_32_bits(delta)); + + if (INTEL_INFO(dev)->gen >= 8) { + page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + + if (page_offset == 0) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + } + + clflush_write32(vaddr + page_offset, upper_32_bits(delta)); + } + + kunmap_atomic(vaddr); + + return 0; +} + static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, @@ -426,8 +471,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (use_cpu_reloc(obj)) ret = relocate_entry_cpu(obj, reloc, target_offset); - else + else if (obj->map_and_fenceable) ret = relocate_entry_gtt(obj, reloc, target_offset); + else if (cpu_has_clflush) + ret = relocate_entry_clflush(obj, reloc, target_offset); + else { + WARN_ONCE(1, "Impossible case in relocation handling\n"); + ret = -ENODEV; + } if (ret) return ret; @@ -525,6 +576,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb) return ret; } +static bool only_mappable_for_reloc(unsigned int flags) +{ + return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) == + __EXEC_OBJECT_NEEDS_MAP; +} + static int i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct intel_engine_cs *ring, @@ -536,14 +593,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, int ret; flags = 0; - if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) - flags |= PIN_GLOBAL | PIN_MAPPABLE; - if (entry->flags & EXEC_OBJECT_NEEDS_GTT) - flags |= PIN_GLOBAL; - if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) - flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + if (!drm_mm_node_allocated(&vma->node)) { + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) + flags |= PIN_GLOBAL | PIN_MAPPABLE; + if (entry->flags & EXEC_OBJECT_NEEDS_GTT) + flags |= PIN_GLOBAL; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) + flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + } ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); + if ((ret == -ENOSPC || ret == -E2BIG) && + only_mappable_for_reloc(entry->flags)) + ret = i915_gem_object_pin(obj, vma->vm, + entry->alignment, + flags & ~(PIN_GLOBAL | PIN_MAPPABLE)); if (ret) return ret; @@ -605,13 +669,14 @@ eb_vma_misplaced(struct i915_vma *vma) vma->node.start & (entry->alignment - 1)) return true; - if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) - return true; - if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && vma->node.start < BATCH_OFFSET_BIAS) return true; + /* avoid costly ping-pong once a batch bo ended up non-mappable */ + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) + return !only_mappable_for_reloc(entry->flags); + return false; } @@ -973,7 +1038,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, obj->dirty = 1; i915_gem_request_assign(&obj->last_write_req, req); - 
intel_fb_obj_invalidate(obj, ring); + intel_fb_obj_invalidate(obj, ring, ORIGIN_CS); /* update for the implicit flush after a batch */ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; @@ -1078,16 +1143,15 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring, struct drm_i915_gem_object *batch_obj, u32 batch_start_offset, u32 batch_len, - bool is_master, - u32 *flags) + bool is_master) { struct drm_i915_private *dev_priv = to_i915(batch_obj->base.dev); struct drm_i915_gem_object *shadow_batch_obj; - bool need_reloc = false; + struct i915_vma *vma; int ret; shadow_batch_obj = i915_gem_batch_pool_get(&dev_priv->mm.batch_pool, - batch_obj->base.size); + PAGE_ALIGN(batch_len)); if (IS_ERR(shadow_batch_obj)) return shadow_batch_obj; @@ -1097,40 +1161,30 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring, batch_start_offset, batch_len, is_master); - if (ret) { - if (ret == -EACCES) - return batch_obj; - } else { - struct i915_vma *vma; + if (ret) + goto err; - memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); + ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0); + if (ret) + goto err; - vma = i915_gem_obj_to_ggtt(shadow_batch_obj); - vma->exec_entry = shadow_exec_entry; - vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE; - drm_gem_object_reference(&shadow_batch_obj->base); - i915_gem_execbuffer_reserve_vma(vma, ring, &need_reloc); - list_add_tail(&vma->exec_list, &eb->vmas); + memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry)); - shadow_batch_obj->base.pending_read_domains = - batch_obj->base.pending_read_domains; + vma = i915_gem_obj_to_ggtt(shadow_batch_obj); + vma->exec_entry = shadow_exec_entry; + vma->exec_entry->flags = __EXEC_OBJECT_PURGEABLE | __EXEC_OBJECT_HAS_PIN; + drm_gem_object_reference(&shadow_batch_obj->base); + list_add_tail(&vma->exec_list, &eb->vmas); - /* - * Set the DISPATCH_SECURE bit to remove the NON_SECURE - * bit from MI_BATCH_BUFFER_START commands issued in the - * dispatch_execbuffer implementations. We specifically - * don't want that set when the command parser is - * enabled. - * - * FIXME: with aliasing ppgtt, buffers that should only - * be in ggtt still end up in the aliasing ppgtt. remove - * this check when that is fixed. - */ - if (USES_FULL_PPGTT(dev)) - *flags |= I915_DISPATCH_SECURE; - } + shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND; + + return shadow_batch_obj; - return ret ? 
ERR_PTR(ret) : shadow_batch_obj;
+err:
+	if (ret == -EACCES) /* unhandled chained batch */
+		return batch_obj;
+	else
+		return ERR_PTR(ret);
 }
 
 int
@@ -1140,7 +1194,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas,
 			       struct drm_i915_gem_object *batch_obj,
-			       u64 exec_start, u32 flags)
+			       u64 exec_start, u32 dispatch_flags)
 {
 	struct drm_clip_rect *cliprects = NULL;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1200,6 +1254,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	if (ret)
 		goto error;
 
+	if (ctx->ppgtt)
+		WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
+		     "%s didn't clear reload\n", ring->name);
+	else if (dev_priv->mm.aliasing_ppgtt)
+		WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings &
+		     (1<<ring->id), "%s didn't clear reload\n", ring->name);
+
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
 	switch (instp_mode) {
@@ -1268,19 +1329,19 @@
 			ret = ring->dispatch_execbuffer(ring,
 							exec_start, exec_len,
-							flags);
+							dispatch_flags);
 			if (ret)
 				goto error;
 		}
 	} else {
 		ret = ring->dispatch_execbuffer(ring,
 						exec_start, exec_len,
-						flags);
+						dispatch_flags);
 		if (ret)
 			return ret;
 	}
 
-	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
+	trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
@@ -1355,7 +1416,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct i915_address_space *vm;
 	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
 	u64 exec_start = args->batch_start_offset;
-	u32 flags;
+	u32 dispatch_flags;
 	int ret;
 	bool need_relocs;
 
@@ -1366,12 +1427,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	flags = 0;
+	dispatch_flags = 0;
 	if (args->flags & I915_EXEC_SECURE) {
-		flags |= I915_DISPATCH_SECURE;
+		dispatch_flags |= I915_DISPATCH_SECURE;
 	}
 	if (args->flags & I915_EXEC_IS_PINNED)
-		flags |= I915_DISPATCH_PINNED;
+		dispatch_flags |= I915_DISPATCH_PINNED;
 
 	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
 		DRM_DEBUG("execbuf with unknown ring: %d\n",
@@ -1493,12 +1554,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 						      batch_obj,
 						      args->batch_start_offset,
 						      args->batch_len,
-						      file->is_master,
-						      &flags);
+						      file->is_master);
 		if (IS_ERR(batch_obj)) {
 			ret = PTR_ERR(batch_obj);
 			goto err;
 		}
+
+		/*
+		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+		 * bit from MI_BATCH_BUFFER_START commands issued in the
+		 * dispatch_execbuffer implementations. We specifically
+		 * don't want that set when the command parser is
+		 * enabled.
+		 *
+		 * FIXME: with aliasing ppgtt, buffers that should only
+		 * be in ggtt still end up in the aliasing ppgtt. remove
+		 * this check when that is fixed.
+		 */
+		if (USES_FULL_PPGTT(dev))
+			dispatch_flags |= I915_DISPATCH_SECURE;
+
+		exec_start = 0;
 	}
 
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
@@ -1506,14 +1582,14 @@
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again.
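Since this series renames flags to dispatch_flags, a compressed model of how those flags are derived may help; the flag values below are invented for the sketch (the real ones live in uapi_drm/i915_drm.h and i915_drv.h):

    #include <stdint.h>
    #include <stdio.h>

    #define EXEC_SECURE     (1u << 0)  /* stand-ins, not the uapi values */
    #define EXEC_IS_PINNED  (1u << 1)
    #define DISP_SECURE     (1u << 0)
    #define DISP_PINNED     (1u << 1)

    static uint32_t dispatch_flags(uint32_t exec_flags, int parsed_shadow_batch)
    {
        uint32_t f = 0;

        if (exec_flags & EXEC_SECURE)
            f |= DISP_SECURE;
        if (exec_flags & EXEC_IS_PINNED)
            f |= DISP_PINNED;
        /* a parsed shadow batch must not carry MI_BATCH_BUFFER_START's
         * NON_SECURE bit, so (under full ppgtt, per the FIXME above)
         * the parser path forces SECURE as well */
        if (parsed_shadow_batch)
            f |= DISP_SECURE;
        return f;
    }

    int main(void)
    {
        printf("%x %x\n", dispatch_flags(EXEC_SECURE, 0), dispatch_flags(0, 1));
        return 0;
    }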
*/ - if (flags & I915_DISPATCH_SECURE) { + if (dispatch_flags & I915_DISPATCH_SECURE) { /* * So on first glance it looks freaky that we pin the batch here * outside of the reservation loop. But: * - The batch is already pinned into the relevant ppgtt, so we * already have the backing storage fully allocated. * - No other BO uses the global gtt (well contexts, but meh), - * so we don't really have issues with mutliple objects not + * so we don't really have issues with multiple objects not * fitting due to fragmentation. * So this is actually safe. */ @@ -1526,7 +1602,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, exec_start += i915_gem_obj_offset(batch_obj, vm); ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args, - &eb->vmas, batch_obj, exec_start, flags); + &eb->vmas, batch_obj, exec_start, + dispatch_flags); /* * FIXME: We crucially rely upon the active tracking for the (ppgtt) @@ -1534,7 +1611,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * needs to be adjusted to also track the ggtt batch vma properly as * active. */ - if (flags & I915_DISPATCH_SECURE) + if (dispatch_flags & I915_DISPATCH_SECURE) i915_gem_object_ggtt_unpin(batch_obj); err: /* the request owns the ref now */ diff --git a/sys/dev/drm/i915/i915_gem_gtt.c b/sys/dev/drm/i915/i915_gem_gtt.c index 36a3c63e71..c9282684eb 100644 --- a/sys/dev/drm/i915/i915_gem_gtt.c +++ b/sys/dev/drm/i915/i915_gem_gtt.c @@ -27,8 +27,11 @@ #include #include #include "i915_drv.h" +#include "i915_vgpu.h" +#include "i915_trace.h" #include "intel_drv.h" +#include #include /** @@ -67,8 +70,9 @@ * i915_ggtt_view_type and struct i915_ggtt_view. * * A new flavour of core GEM functions which work with GGTT bound objects were - * added with the _view suffix. They take the struct i915_ggtt_view parameter - * encapsulating all metadata required to implement a view. + * added with the _ggtt_ infix, and sometimes with _view postfix to avoid + * renaming in large amounts of code. They take the struct i915_ggtt_view + * parameter encapsulating all metadata required to implement a view. * * As a helper for callers which are only interested in the normal view, * globally const i915_ggtt_view_normal singleton instance exists. All old core @@ -92,6 +96,9 @@ */ const struct i915_ggtt_view i915_ggtt_view_normal; +const struct i915_ggtt_view i915_ggtt_view_rotated = { + .type = I915_GGTT_VIEW_ROTATED +}; static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); @@ -104,6 +111,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; + if (intel_vgpu_active(dev)) + has_full_ppgtt = false; /* emulation is too hard */ + /* * We don't allow disabling PPGTT for gen9+ as it's a requirement for * execlists, the sole mechanism available to submit work. @@ -144,11 +154,11 @@ static void ppgtt_bind_vma(struct i915_vma *vma, u32 flags); static void ppgtt_unbind_vma(struct i915_vma *vma); -static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid) +static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid) { - gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; + gen8_pte_t pte = valid ? 
_PAGE_PRESENT | _PAGE_RW : 0; pte |= addr; switch (level) { @@ -166,11 +176,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, return pte; } -static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level) +static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev, + dma_addr_t addr, + enum i915_cache_level level) { - gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; + gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; pde |= addr; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE_INDEX; @@ -179,11 +189,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, return pde; } -static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -201,11 +211,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -225,11 +235,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 flags) +static gen6_pte_t byt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 flags) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); if (!(flags & PTE_READ_ONLY)) @@ -241,11 +251,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t hsw_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) @@ -254,11 +264,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); switch (level) { @@ -275,6 +285,164 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, return pte; } +#define i915_dma_unmap_single(px, dev) \ + __i915_dma_unmap_single((px)->daddr, dev) + +static inline void __i915_dma_unmap_single(dma_addr_t daddr, + struct drm_device *dev) +{ +#if 0 + struct device *device = &dev->pdev->dev; + + dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL); +#endif +} + +/** + * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc. + * @px: Page table/dir/etc to get a DMA map for + * @dev: drm device + * + * Page table allocations are unified across all gens. 
They always require a + * single 4k allocation, as well as a DMA mapping. If we keep the structs + * symmetric here, the simple macro covers us for every page table type. + * + * Return: 0 if success. + */ +#define i915_dma_map_single(px, dev) \ + i915_dma_map_page_single((px)->page, (dev), &(px)->daddr) + +static inline int i915_dma_map_page_single(struct vm_page *page, + struct drm_device *dev, + dma_addr_t *daddr) +{ + struct device *device = dev->pdev->dev; + + *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(device, *daddr)) + return -ENOMEM; + + return 0; +} + +static void unmap_and_free_pt(struct i915_page_table_entry *pt, + struct drm_device *dev) +{ + if (WARN_ON(!pt->page)) + return; + + i915_dma_unmap_single(pt, dev); + __free_page(pt->page); + kfree(pt->used_ptes); + kfree(pt); +} + +static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev) +{ + struct i915_page_table_entry *pt; + const size_t count = INTEL_INFO(dev)->gen >= 8 ? + GEN8_PTES : GEN6_PTES; + int ret = -ENOMEM; + + pt = kzalloc(sizeof(*pt), GFP_KERNEL); + if (!pt) + return ERR_PTR(-ENOMEM); + + pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes), + GFP_KERNEL); + + if (!pt->used_ptes) + goto fail_bitmap; + + pt->page = alloc_page(GFP_KERNEL); + if (!pt->page) + goto fail_page; + + ret = i915_dma_map_single(pt, dev); + if (ret) + goto fail_dma; + + return pt; + +fail_dma: + __free_page(pt->page); +fail_page: + kfree(pt->used_ptes); +fail_bitmap: + kfree(pt); + + return ERR_PTR(ret); +} + +/** + * alloc_pt_range() - Allocate a multiple page tables + * @pd: The page directory which will have at least @count entries + * available to point to the allocated page tables. + * @pde: First page directory entry for which we are allocating. + * @count: Number of pages to allocate. + * @dev: DRM device. + * + * Allocates multiple page table pages and sets the appropriate entries in the + * page table structure within the page directory. Function cleans up after + * itself on any failures. + * + * Return: 0 if allocation succeeded. + */ +static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count, + struct drm_device *dev) +{ + int i, ret; + + /* 512 is the max page tables per page_directory on any platform. 
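alloc_pt_single above is the usual allocate-then-unwind ladder: each failure label releases exactly what the earlier steps acquired, in reverse order. The same shape in self-contained userspace C, with malloc standing in for the page and DMA-map steps:

    #include <stdlib.h>

    struct pt { unsigned long *used_bitmap; void *page; };

    static struct pt *alloc_pt(size_t nbits)
    {
        struct pt *pt = calloc(1, sizeof(*pt));
        if (!pt)
            return NULL;

        pt->used_bitmap = calloc((nbits + 63) / 64, sizeof(unsigned long));
        if (!pt->used_bitmap)
            goto fail_bitmap;

        pt->page = malloc(4096);        /* stands in for alloc_page() */
        if (!pt->page)
            goto fail_page;

        return pt;                      /* the dma-map step would go here */

    fail_page:
        free(pt->used_bitmap);
    fail_bitmap:
        free(pt);
        return NULL;
    }

    int main(void)
    {
        struct pt *pt = alloc_pt(512);
        return pt ? 0 : 1;
    }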
*/
+	if (WARN_ON(pde + count > I915_PDES))
+		return -EINVAL;
+
+	for (i = pde; i < pde + count; i++) {
+		struct i915_page_table_entry *pt = alloc_pt_single(dev);
+
+		if (IS_ERR(pt)) {
+			ret = PTR_ERR(pt);
+			goto err_out;
+		}
+		WARN(pd->page_table[i],
+		     "Leaking page directory entry %d (%p)\n",
+		     i, pd->page_table[i]);
+		pd->page_table[i] = pt;
+	}
+
+	return 0;
+
+err_out:
+	while (i-- > pde)
+		unmap_and_free_pt(pd->page_table[i], dev);
+	return ret;
+}
+
+static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
+{
+	if (pd->page) {
+		__free_page(pd->page);
+		kfree(pd);
+	}
+}
+
+static struct i915_page_directory_entry *alloc_pd_single(void)
+{
+	struct i915_page_directory_entry *pd;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!pd->page) {
+		kfree(pd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return pd;
+}
+
 /* Broadwell Page Directory Pointer Descriptors */
 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
 			  uint64_t val)
@@ -304,10 +472,10 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int i, ret;
 
 	/* bit of a hack to find the actual last used pd */
-	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
+	int used_pd = ppgtt->num_pd_entries / I915_PDES;
 
 	for (i = used_pd - 1; i >= 0; i--) {
-		dma_addr_t addr = ppgtt->pd_dma_addr[i];
+		dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
 		ret = gen8_write_pdp(ring, i, addr);
 		if (ret)
 			return ret;
@@ -323,7 +491,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
+	gen8_pte_t *pt_vaddr, scratch_pte;
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -334,11 +502,28 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 				      I915_CACHE_LLC, use_scratch);
 
 	while (num_entries) {
-		struct vm_page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
+		struct i915_page_directory_entry *pd;
+		struct i915_page_table_entry *pt;
+		struct vm_page *page_table;
+
+		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+			continue;
+
+		pd = ppgtt->pdp.page_directory[pdpe];
+
+		if (WARN_ON(!pd->page_table[pde]))
+			continue;
+
+		pt = pd->page_table[pde];
+
+		if (WARN_ON(!pt->page))
+			continue;
+
+		page_table = pt->page;
 
 		last_pte = pte + num_entries;
-		if (last_pte > GEN8_PTES_PER_PAGE)
-			last_pte = GEN8_PTES_PER_PAGE;
+		if (last_pte > GEN8_PTES)
+			last_pte = GEN8_PTES;
 
 		pt_vaddr = kmap_atomic(page_table);
 
@@ -352,7 +537,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 		kunmap_atomic(pt_vaddr);
 
 		pte = 0;
-		if (++pde == GEN8_PDES_PER_PAGE) {
+		if (++pde == I915_PDES) {
 			pdpe++;
 			pde = 0;
 		}
@@ -367,7 +552,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	gen8_gtt_pte_t *pt_vaddr;
+	gen8_pte_t *pt_vaddr;
 	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
 	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
 	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
@@ -376,21 +561,26 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	pt_vaddr = NULL;
 	for (i=0;i<num_entries;i++) {
-		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
+		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
 			break;
 
-		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
+		if (pt_vaddr ==
NULL) { + struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe]; + struct i915_page_table_entry *pt = pd->page_table[pde]; + struct vm_page *page_table = pt->page; + + pt_vaddr = kmap_atomic(page_table); + } pt_vaddr[pte] = gen8_pte_encode(VM_PAGE_TO_PHYS(pages[i]), cache_level, true); - if (++pte == GEN8_PTES_PER_PAGE) { + if (++pte == GEN8_PTES) { if (!HAS_LLC(ppgtt->base.dev)) drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); kunmap_atomic(pt_vaddr); pt_vaddr = NULL; - if (++pde == GEN8_PDES_PER_PAGE) { + if (++pde == I915_PDES) { pdpe++; pde = 0; } @@ -404,29 +594,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, } } -static void gen8_free_page_tables(struct vm_page **pt_pages) +static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev) { int i; - if (pt_pages == NULL) + if (!pd->page) return; - for (i = 0; i < GEN8_PDES_PER_PAGE; i++) - if (pt_pages[i]) - __free_pages(pt_pages[i], 0); + for (i = 0; i < I915_PDES; i++) { + if (WARN_ON(!pd->page_table[i])) + continue; + + unmap_and_free_pt(pd->page_table[i], dev); + pd->page_table[i] = NULL; + } } -static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt) +static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt) { int i; for (i = 0; i < ppgtt->num_pd_pages; i++) { - gen8_free_page_tables(ppgtt->gen8_pt_pages[i]); - kfree(ppgtt->gen8_pt_pages[i]); - kfree(ppgtt->gen8_pt_dma_addr[i]); - } + if (WARN_ON(!ppgtt->pdp.page_directory[i])) + continue; - __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT)); + gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); + unmap_and_free_pd(ppgtt->pdp.page_directory[i]); + } } static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) @@ -437,14 +631,23 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) for (i = 0; i < ppgtt->num_pd_pages; i++) { /* TODO: In the future we'll support sparse mappings, so this * will have to change. 
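Both the clear and insert paths above walk pdp, then pd, then pt by slicing bit fields out of the GPU virtual offset. Assuming the usual gen8 layout (12-bit page offset, 9-bit pte and pde indices, 2-bit pdpe selecting one of the 4 legacy PDPs; double-check the shift macros in i915_gem_gtt.h), a standalone index calculator:

    #include <stdint.h>
    #include <stdio.h>

    #define GEN8_PTE_SHIFT   12
    #define GEN8_PDE_SHIFT   21
    #define GEN8_PDPE_SHIFT  30
    #define GEN8_PTE_MASK    0x1ff
    #define GEN8_PDE_MASK    0x1ff
    #define GEN8_PDPE_MASK   0x3   /* 4 legacy PDPs */

    int main(void)
    {
        uint64_t start = 0x4a5b6000;   /* arbitrary GPU virtual address */

        printf("pdpe=%llu pde=%llu pte=%llu\n",
               (unsigned long long)(start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK),
               (unsigned long long)(start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK),
               (unsigned long long)(start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK));
        return 0;
    }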
*/ - if (!ppgtt->pd_dma_addr[i]) + if (!ppgtt->pdp.page_directory[i]->daddr) continue; - pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE, + pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { - dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + for (j = 0; j < I915_PDES; j++) { + struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; + struct i915_page_table_entry *pt; + dma_addr_t addr; + + if (WARN_ON(!pd->page_table[j])) + continue; + + pt = pd->page_table[j]; + addr = pt->daddr; + if (addr) pci_unmap_page(hwdev, addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); @@ -461,86 +664,47 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm) gen8_ppgtt_free(ppgtt); } -static struct vm_page **__gen8_alloc_page_tables(void) +static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) { - struct vm_page **pt_pages; - int i; - - pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct vm_page *), GFP_KERNEL); - if (!pt_pages) - return ERR_PTR(-ENOMEM); - - for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { - pt_pages[i] = alloc_page(GFP_KERNEL); - if (!pt_pages[i]) - goto bail; - } - - return pt_pages; - -bail: - gen8_free_page_tables(pt_pages); - kfree(pt_pages); - return ERR_PTR(-ENOMEM); -} - -static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt, - const int max_pdp) -{ - struct vm_page **pt_pages[GEN8_LEGACY_PDPS]; int i, ret; - for (i = 0; i < max_pdp; i++) { - pt_pages[i] = __gen8_alloc_page_tables(); - if (IS_ERR(pt_pages[i])) { - ret = PTR_ERR(pt_pages[i]); + for (i = 0; i < ppgtt->num_pd_pages; i++) { + ret = alloc_pt_range(ppgtt->pdp.page_directory[i], + 0, I915_PDES, ppgtt->base.dev); + if (ret) goto unwind_out; - } } - /* NB: Avoid touching gen8_pt_pages until last to keep the allocation, - * "atomic" - for cleanup purposes. 
- */ - for (i = 0; i < max_pdp; i++) - ppgtt->gen8_pt_pages[i] = pt_pages[i]; - return 0; unwind_out: - while (i--) { - gen8_free_page_tables(pt_pages[i]); - kfree(pt_pages[i]); - } + while (i--) + gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev); - return ret; + return -ENOMEM; } -static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt) +static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, + const int max_pdp) { int i; - for (i = 0; i < ppgtt->num_pd_pages; i++) { - ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE, - sizeof(dma_addr_t), - GFP_KERNEL); - if (!ppgtt->gen8_pt_dma_addr[i]) - return -ENOMEM; + for (i = 0; i < max_pdp; i++) { + ppgtt->pdp.page_directory[i] = alloc_pd_single(); + if (IS_ERR(ppgtt->pdp.page_directory[i])) + goto unwind_out; } - return 0; -} + ppgtt->num_pd_pages = max_pdp; + BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES); -static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt, - const int max_pdp) -{ - ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT)); - if (!ppgtt->pd_pages) - return -ENOMEM; + return 0; - ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT); - BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS); +unwind_out: + while (i--) + unmap_and_free_pd(ppgtt->pdp.page_directory[i]); - return 0; + return -ENOMEM; } static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, @@ -552,18 +716,16 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp); - if (ret) { - __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT)); - return ret; - } + ret = gen8_ppgtt_allocate_page_tables(ppgtt); + if (ret) + goto err_out; - ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; + ppgtt->num_pd_entries = max_pdp * I915_PDES; - ret = gen8_ppgtt_allocate_dma(ppgtt); - if (ret) - gen8_ppgtt_free(ppgtt); + return 0; +err_out: + gen8_ppgtt_free(ppgtt); return ret; } @@ -574,14 +736,14 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt, int ret; pd_addr = pci_map_page(ppgtt->base.dev->pdev, - &ppgtt->pd_pages[pd], 0, + ppgtt->pdp.page_directory[pd]->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr); if (ret) return ret; - ppgtt->pd_dma_addr[pd] = pd_addr; + ppgtt->pdp.page_directory[pd]->daddr = pd_addr; return 0; } @@ -591,22 +753,23 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, const int pt) { dma_addr_t pt_addr; - struct vm_page *p; + struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd]; + struct i915_page_table_entry *ptab = pdir->page_table[pt]; + struct vm_page *p = ptab->page; int ret; - p = ppgtt->gen8_pt_pages[pd][pt]; pt_addr = pci_map_page(ppgtt->base.dev->pdev, p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr); if (ret) return ret; - ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr; + ptab->daddr = pt_addr; return 0; } -/** +/* * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers * with a net effect resembling a 2-level page table in normal x86 terms. 
Each * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address @@ -619,26 +782,30 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) { const int max_pdp = DIV_ROUND_UP(size, 1 << 30); - const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; + const int min_pt_pages = I915_PDES * max_pdp; int i, j, ret; if (size % (1<<30)) DRM_INFO("Pages will be wasted unless GTT size (%lu) is divisible by 1GB\n", size); - /* 1. Do all our allocations for page directories and page tables. */ - ret = gen8_ppgtt_alloc(ppgtt, max_pdp); + /* 1. Do all our allocations for page directories and page tables. + * We allocate more than was asked so that we can point the unused parts + * to valid entries that point to scratch page. Dynamic page tables + * will fix this eventually. + */ + ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES); if (ret) return ret; /* * 2. Create DMA mappings for the page directories and page tables. */ - for (i = 0; i < max_pdp; i++) { + for (i = 0; i < GEN8_LEGACY_PDPES; i++) { ret = gen8_ppgtt_setup_page_directories(ppgtt, i); if (ret) goto bail; - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + for (j = 0; j < I915_PDES; j++) { ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j); if (ret) goto bail; @@ -653,11 +820,13 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) * plugged in correctly. So we do that now/here. For aliasing PPGTT, we * will never need to touch the PDEs again. */ - for (i = 0; i < max_pdp; i++) { - gen8_ppgtt_pde_t *pd_vaddr; - pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]); - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { - dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j]; + for (i = 0; i < GEN8_LEGACY_PDPES; i++) { + struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; + gen8_pde_t *pd_vaddr; + pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); + for (j = 0; j < I915_PDES; j++) { + struct i915_page_table_entry *pt = pd->page_table[j]; + dma_addr_t addr = pt->daddr; pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, I915_CACHE_LLC); } @@ -671,9 +840,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; ppgtt->base.cleanup = gen8_ppgtt_cleanup; ppgtt->base.start = 0; - ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE; - ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + /* This is the area that we advertise as usable for the caller */ + ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE; + + /* Set all ptes to a valid scratch page. 
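The 4GB quoted a few lines up is just the product of the legacy limits, which a compile-time check can confirm (constants restated here for the sketch):

    #include <stdint.h>

    #define LEGACY_PDPES 4      /* PDP registers */
    #define PDES_PER_PD  512    /* page-directory entries */
    #define PTES_PER_PT  512    /* page-table entries */
    #define PAGE_SZ      4096

    _Static_assert((uint64_t)LEGACY_PDPES * PDES_PER_PD * PTES_PER_PT * PAGE_SZ
                   == (1ull << 32), "legacy gen8 ppgtt spans exactly 4 GiB");

    int main(void) { return 0; }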
Also above requested space */ + ppgtt->base.clear_range(&ppgtt->base, 0, + ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE, + true); DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); @@ -692,22 +866,23 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) { struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; struct i915_address_space *vm = &ppgtt->base; - gen6_gtt_pte_t __iomem *pd_addr; - gen6_gtt_pte_t scratch_pte; + gen6_pte_t __iomem *pd_addr; + gen6_pte_t scratch_pte; uint32_t pd_entry; int pte, pde; scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); - pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + - ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); + pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + + ppgtt->pd.pd_offset / sizeof(gen6_pte_t); seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, - ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries); + ppgtt->pd.pd_offset, + ppgtt->pd.pd_offset + ppgtt->num_pd_entries); for (pde = 0; pde < ppgtt->num_pd_entries; pde++) { u32 expected; - gen6_gtt_pte_t *pt_vaddr; - dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde]; + gen6_pte_t *pt_vaddr; + dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr; pd_entry = readl(pd_addr + pde); expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); @@ -718,10 +893,10 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) expected); seq_printf(m, "\tPDE: %x\n", pd_entry); - pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]); - for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) { + pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page); + for (pte = 0; pte < GEN6_PTES; pte+=4) { unsigned long va = - (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) + + (pde * PAGE_SIZE * GEN6_PTES) + (pte * PAGE_SIZE); int i; bool found = false; @@ -736,41 +911,51 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) if (pt_vaddr[pte + i] != scratch_pte) seq_printf(m, " %08x", pt_vaddr[pte + i]); else - seq_printf(m, " SCRATCH "); + seq_puts(m, " SCRATCH "); } - seq_printf(m, "\n"); + seq_puts(m, "\n"); } kunmap_atomic(pt_vaddr); } } -static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt) +/* Write pde (index) from the page directory @pd to the page table @pt */ +static void gen6_write_pde(struct i915_page_directory_entry *pd, + const int pde, struct i915_page_table_entry *pt) { - struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; - gen6_gtt_pte_t __iomem *pd_addr; - uint32_t pd_entry; - int i; + /* Caller needs to make sure the write completes if necessary */ + struct i915_hw_ppgtt *ppgtt = + container_of(pd, struct i915_hw_ppgtt, pd); + u32 pd_entry; - WARN_ON(ppgtt->pd_offset & 0x3f); - pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + - ppgtt->pd_offset / sizeof(gen6_gtt_pte_t); - for (i = 0; i < ppgtt->num_pd_entries; i++) { - dma_addr_t pt_addr; + pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr); + pd_entry |= GEN6_PDE_VALID; - pt_addr = ppgtt->pt_dma_addr[i]; - pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); - pd_entry |= GEN6_PDE_VALID; + writel(pd_entry, ppgtt->pd_addr + pde); +} - writel(pd_entry, pd_addr + i); - } - readl(pd_addr); +/* Write all the page tables found in the ppgtt structure to incrementing page + * directories. 
*/
+static void gen6_write_page_range(struct drm_i915_private *dev_priv,
+				  struct i915_page_directory_entry *pd,
+				  uint32_t start, uint32_t length)
+{
+	struct i915_page_table_entry *pt;
+	uint32_t pde, temp;
+
+	gen6_for_each_pde(pt, pd, start, length, temp, pde)
+		gen6_write_pde(pd, pde, pt);
+
+	/* Make sure write is complete before other code can use this page
+	 * table. Also require for WC mapped PTEs */
+	readl(dev_priv->gtt.gsm);
 }
 
 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 {
-	BUG_ON(ppgtt->pd_offset & 0x3f);
+	BUG_ON(ppgtt->pd.pd_offset & 0x3f);
 
-	return (ppgtt->pd_offset / 64) << 16;
+	return (ppgtt->pd.pd_offset / 64) << 16;
 }
 
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
@@ -798,6 +983,16 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	return 0;
 }
 
+static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
+			  struct intel_engine_cs *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
+
+	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
+	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
+	return 0;
+}
+
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct intel_engine_cs *ring)
 {
@@ -909,21 +1104,21 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
+	gen6_pte_t *pt_vaddr, scratch_pte;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
-	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
-	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned act_pt = first_entry / GEN6_PTES;
+	unsigned first_pte = first_entry % GEN6_PTES;
 	unsigned last_pte, i;
 
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
 
 	while (num_entries) {
 		last_pte = first_pte + num_entries;
-		if (last_pte > I915_PPGTT_PT_ENTRIES)
-			last_pte = I915_PPGTT_PT_ENTRIES;
+		if (last_pte > GEN6_PTES)
+			last_pte = GEN6_PTES;
 
-		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
 
 		for (i = first_pte; i < last_pte; i++)
 			pt_vaddr[i] = scratch_pte;
@@ -944,20 +1139,21 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	gen6_gtt_pte_t *pt_vaddr;
+	gen6_pte_t *pt_vaddr;
 	unsigned first_entry = start >> PAGE_SHIFT;
-	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
-	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
+	unsigned act_pt = first_entry / GEN6_PTES;
+	unsigned act_pte = first_entry % GEN6_PTES;
 
 	pt_vaddr = NULL;
 	for (int i=0;i<num_entries;i++) {
 		if (pt_vaddr == NULL)
-			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
+			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);
 
 		pt_vaddr[act_pte] =
 			vm->pte_encode(VM_PAGE_TO_PHYS(pages[i]),
 				       cache_level, true, flags);
-		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
+
+		if (++act_pte == GEN6_PTES) {
 			kunmap_atomic(pt_vaddr);
 			pt_vaddr = NULL;
 			act_pt++;
@@ -968,26 +1164,134 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
 		kunmap_atomic(pt_vaddr);
 }
 
-static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
+/* PDE TLBs are a pain to invalidate pre GEN8. It requires a context reload. If we
+ * are switching between contexts with the same LRCA, we also must do a force
+ * restore.
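The comment above explains why stale PDEs need a reload; mark_tlbs_dirty, which follows, flags every ring's page directory stale at once, and each ring clears its own bit when it actually emits a PD load (the clear_bit/test_and_clear_bit calls in do_switch). The bitmask protocol in miniature, with an invented ring count:

    #include <stdio.h>

    #define NUM_RINGS 5
    #define RING_MASK ((1u << NUM_RINGS) - 1)

    int main(void)
    {
        unsigned pd_dirty_rings = RING_MASK;  /* page tables changed: all stale */

        for (int ring = 0; ring < NUM_RINGS; ring++)
            if (pd_dirty_rings & (1u << ring))
                pd_dirty_rings &= ~(1u << ring);  /* PD load emitted here */

        printf("dirty after reloads: %x\n", pd_dirty_rings);
        return 0;
    }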
+ */ +static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) +{ + /* If current vm != vm, */ + ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; +} + +static void gen6_initialize_pt(struct i915_address_space *vm, + struct i915_page_table_entry *pt) { + gen6_pte_t *pt_vaddr, scratch_pte; int i; - if (ppgtt->pt_dma_addr) { - for (i = 0; i < ppgtt->num_pd_entries; i++) - pci_unmap_page(ppgtt->base.dev->pdev, - ppgtt->pt_dma_addr[i], - 4096, PCI_DMA_BIDIRECTIONAL); + WARN_ON(vm->scratch.addr == 0); + + scratch_pte = vm->pte_encode(vm->scratch.addr, + I915_CACHE_LLC, true, 0); + + pt_vaddr = kmap_atomic(pt->page); + + for (i = 0; i < GEN6_PTES; i++) + pt_vaddr[i] = scratch_pte; + + kunmap_atomic(pt_vaddr); +} + +static int gen6_alloc_va_range(struct i915_address_space *vm, + uint64_t start, uint64_t length) +{ + DECLARE_BITMAP(new_page_tables, I915_PDES); + struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + struct i915_page_table_entry *pt; + const uint32_t start_save = start, length_save = length; + uint32_t pde, temp; + int ret; + + WARN_ON(upper_32_bits(start)); + + bitmap_zero(new_page_tables, I915_PDES); + + /* The allocation is done in two stages so that we can bail out with + * minimal amount of pain. The first stage finds new page tables that + * need allocation. The second stage marks use ptes within the page + * tables. + */ + gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + if (pt != ppgtt->scratch_pt) { + WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); + continue; + } + + /* We've already allocated a page table */ + WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); + + pt = alloc_pt_single(dev); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto unwind_out; + } + + gen6_initialize_pt(vm, pt); + + ppgtt->pd.page_table[pde] = pt; + set_bit(pde, new_page_tables); + trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); + } + + start = start_save; + length = length_save; + + gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); + + bitmap_zero(tmp_bitmap, GEN6_PTES); + bitmap_set(tmp_bitmap, gen6_pte_index(start), + gen6_pte_count(start, length)); + + if (test_and_clear_bit(pde, new_page_tables)) + gen6_write_pde(&ppgtt->pd, pde, pt); + + trace_i915_page_table_entry_map(vm, pde, pt, + gen6_pte_index(start), + gen6_pte_count(start, length), + GEN6_PTES); + bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes, + GEN6_PTES); + } + + WARN_ON(!bitmap_empty(new_page_tables, I915_PDES)); + + /* Make sure write is complete before other code can use this page + * table. 
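gen6_alloc_va_range above is deliberately two-pass: the first pass allocates any missing page tables, recording them in new_page_tables so a failure can free exactly those and nothing else; the second pass commits the pte bitmaps and writes the pdes. The same shape reduced to userspace (sizes and names invented):

    #include <stdbool.h>
    #include <stdlib.h>

    #define NPDES 8

    int main(void)
    {
        void *pd[NPDES] = { 0 };
        bool is_new[NPDES] = { false };

        /* stage 1: allocate missing tables, remembering which were new */
        for (int pde = 2; pde <= 5; pde++) {
            if (pd[pde])
                continue;
            pd[pde] = malloc(4096);
            if (!pd[pde])
                goto unwind;
            is_new[pde] = true;
        }

        /* stage 2: mark ptes in use and write the pdes (elided) */
        return 0;

    unwind: /* free only what *this* call allocated */
        for (int pde = 0; pde < NPDES; pde++)
            if (is_new[pde]) {
                free(pd[pde]);
                pd[pde] = NULL;
            }
        return 1;
    }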
Also require for WC mapped PTEs */ + readl(dev_priv->gtt.gsm); + + mark_tlbs_dirty(ppgtt); + return 0; + +unwind_out: + for_each_set_bit(pde, new_page_tables, I915_PDES) { + struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde]; + + ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; + unmap_and_free_pt(pt, vm->dev); } + + mark_tlbs_dirty(ppgtt); + return ret; } static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) { int i; - kfree(ppgtt->pt_dma_addr); - for (i = 0; i < ppgtt->num_pd_entries; i++) - __free_page(ppgtt->pt_pages[i]); - kfree(ppgtt->pt_pages); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + struct i915_page_table_entry *pt = ppgtt->pd.page_table[i]; + + if (pt != ppgtt->scratch_pt) + unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev); + } + + unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); + unmap_and_free_pd(&ppgtt->pd); } static void gen6_ppgtt_cleanup(struct i915_address_space *vm) @@ -997,7 +1301,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) drm_mm_remove_node(&ppgtt->node); - gen6_ppgtt_unmap_pages(ppgtt); gen6_ppgtt_free(ppgtt); } @@ -1013,6 +1316,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) * size. We allocate at the top of the GTT to avoid fragmentation. */ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); + ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); + if (IS_ERR(ppgtt->scratch_pt)) + return PTR_ERR(ppgtt->scratch_pt); + + gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); + alloc: ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, &ppgtt->node, GEN6_PD_SIZE, @@ -1026,88 +1335,43 @@ alloc: 0, dev_priv->gtt.base.total, 0); if (ret) - return ret; + goto err_out; retried = true; goto alloc; } - if (ppgtt->node.start < dev_priv->gtt.mappable_end) - DRM_DEBUG("Forced to use aperture for PDEs\n"); - - ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; - return ret; -} - -static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) -{ - int i; + if (ret) + goto err_out; - ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct vm_page *), - GFP_KERNEL); - if (!ppgtt->pt_pages) - return -ENOMEM; - - for (i = 0; i < ppgtt->num_pd_entries; i++) { - ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); - if (!ppgtt->pt_pages[i]) { - gen6_ppgtt_free(ppgtt); - return -ENOMEM; - } - } + if (ppgtt->node.start < dev_priv->gtt.mappable_end) + DRM_DEBUG("Forced to use aperture for PDEs\n"); + ppgtt->num_pd_entries = I915_PDES; return 0; + +err_out: + unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); + return ret; } static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) { - int ret; - - ret = gen6_ppgtt_allocate_page_directories(ppgtt); - if (ret) - return ret; - - ret = gen6_ppgtt_allocate_page_tables(ppgtt); - if (ret) { - drm_mm_remove_node(&ppgtt->node); - return ret; - } - - ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t), - GFP_KERNEL); - if (!ppgtt->pt_dma_addr) { - drm_mm_remove_node(&ppgtt->node); - gen6_ppgtt_free(ppgtt); - return -ENOMEM; - } - - return 0; + return gen6_ppgtt_allocate_page_directories(ppgtt); } -static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt) +static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, + uint64_t start, uint64_t length) { - struct drm_device *dev = ppgtt->base.dev; - int i; - - for (i = 0; i < ppgtt->num_pd_entries; i++) { - dma_addr_t pt_addr; - - pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, - PCI_DMA_BIDIRECTIONAL); + struct i915_page_table_entry 
*unused; + uint32_t pde, temp; - if (pci_dma_mapping_error(dev->pdev, pt_addr)) { - gen6_ppgtt_unmap_pages(ppgtt); - return -EIO; - } - - ppgtt->pt_dma_addr[i] = pt_addr; - } - - return 0; + gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) + ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; } -static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) +static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing) { struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1123,40 +1387,57 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) } else BUG(); + if (intel_vgpu_active(dev)) + ppgtt->switch_mm = vgpu_mm_switch; + ret = gen6_ppgtt_alloc(ppgtt); if (ret) return ret; - ret = gen6_ppgtt_setup_page_tables(ppgtt); - if (ret) { - gen6_ppgtt_free(ppgtt); - return ret; + if (aliasing) { + /* preallocate all pts */ + ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries, + ppgtt->base.dev); + + if (ret) { + gen6_ppgtt_cleanup(&ppgtt->base); + return ret; + } } + ppgtt->base.allocate_va_range = gen6_alloc_va_range; ppgtt->base.clear_range = gen6_ppgtt_clear_range; ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.cleanup = gen6_ppgtt_cleanup; ppgtt->base.start = 0; - ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; + ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE; ppgtt->debug_dump = gen6_dump_ppgtt; - ppgtt->pd_offset = - ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); + ppgtt->pd.pd_offset = + ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); + + ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + + ppgtt->pd.pd_offset / sizeof(gen6_pte_t); - ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + if (aliasing) + ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + else + gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); + + gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n", ppgtt->node.size >> 20, ppgtt->node.start / PAGE_SIZE); - gen6_write_pdes(ppgtt); DRM_DEBUG("Adding PPGTT at offset %x\n", - ppgtt->pd_offset << 10); + ppgtt->pd.pd_offset << 10); return 0; } -static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt, + bool aliasing) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1164,7 +1445,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) ppgtt->base.scratch = dev_priv->gtt.base.scratch; if (INTEL_INFO(dev)->gen < 8) - return gen6_ppgtt_init(ppgtt); + return gen6_ppgtt_init(ppgtt, aliasing); else return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); } @@ -1173,7 +1454,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; - ret = __hw_ppgtt_init(dev, ppgtt); + ret = __hw_ppgtt_init(dev, ppgtt, false); if (ret == 0) { kref_init(&ppgtt->ref); drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, @@ -1425,15 +1706,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) return; } - list_for_each_entry(vm, &dev_priv->vm_list, global_link) { - /* TODO: Perhaps it shouldn't be gen6 specific */ - if (i915_is_ggtt(vm)) { - if (dev_priv->mm.aliasing_ppgtt) - gen6_write_pdes(dev_priv->mm.aliasing_ppgtt); - continue; - } + if (USES_PPGTT(dev)) { + list_for_each_entry(vm, 
&dev_priv->vm_list, global_link) { + /* TODO: Perhaps it shouldn't be gen6 specific */ + + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, + base); - gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); + if (i915_is_ggtt(vm)) + ppgtt = dev_priv->mm.aliasing_ppgtt; + + gen6_write_page_range(dev_priv, &ppgtt->pd, + 0, ppgtt->base.total); + } } i915_ggtt_flush(dev_priv); @@ -1454,7 +1740,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) return 0; } -static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) +static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { #if 0 writeq(pte, addr); @@ -1472,8 +1758,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; - gen8_gtt_pte_t __iomem *gtt_entries = - (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + gen8_pte_t __iomem *gtt_entries = + (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; dma_addr_t addr = 0; @@ -1516,8 +1802,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; - gen6_gtt_pte_t __iomem *gtt_entries = - (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + gen6_pte_t __iomem *gtt_entries = + (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; dma_addr_t addr = 0; /* shut up gcc */ @@ -1553,8 +1839,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; - gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = - (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + gen8_pte_t scratch_pte, __iomem *gtt_base = + (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; int i; @@ -1579,8 +1865,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; - gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = - (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + gen6_pte_t scratch_pte, __iomem *gtt_base = + (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; int i; @@ -1637,11 +1923,15 @@ static void ggtt_bind_vma(struct i915_vma *vma, struct drm_device *dev = vma->vm->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = vma->obj; + struct vm_page **pages = obj->pages; /* Currently applicable only to VLV */ if (obj->gt_ro) flags |= PTE_READ_ONLY; + if (i915_is_ggtt(vma->vm)) + pages = vma->ggtt_view.pages; + /* If there is no aliasing PPGTT, or the caller needs a global mapping, * or we have a global mapping already but the cacheability flags have * changed, set the global PTEs. 
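The comment above gates two nearly identical insert_entries calls, one into the global GTT and one into the aliasing PPGTT. Reduced to its conditions (vma state as booleans, bind flags as two wants; a loose paraphrase, not the driver's code):

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { bool bound_global, bound_local; };

    static bool write_global(const struct vma *v, bool has_aliasing,
                             bool want_global, bool cache_changed)
    {
        if (has_aliasing && !want_global)
            return false;               /* the aliasing ppgtt carries it */
        return !v->bound_global || cache_changed;
    }

    static bool write_aliasing(const struct vma *v, bool has_aliasing,
                               bool want_local, bool cache_changed)
    {
        return has_aliasing && want_local &&
               (!v->bound_local || cache_changed);
    }

    int main(void)
    {
        struct vma v = { false, false };
        printf("global=%d aliasing=%d\n",
               write_global(&v, true, true, false),
               write_aliasing(&v, true, false, false));
        return 0;
    }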
@@ -1656,7 +1946,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { if (!(vma->bound & GLOBAL_BIND) || (cache_level != obj->cache_level)) { - vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, + vma->vm->insert_entries(vma->vm, pages, vma->node.start, obj->base.size >> PAGE_SHIFT, cache_level, flags); @@ -1668,8 +1958,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, (!(vma->bound & LOCAL_BIND) || (cache_level != obj->cache_level))) { struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; - appgtt->base.insert_entries(&appgtt->base, - vma->ggtt_view.pages, + appgtt->base.insert_entries(&appgtt->base, pages, vma->node.start, obj->base.size >> PAGE_SHIFT, cache_level, flags); @@ -1767,6 +2056,16 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, /* Subtract the guard page ... */ drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE); + + dev_priv->gtt.base.start = start; + dev_priv->gtt.base.total = end - start; + + if (intel_vgpu_active(dev)) { + ret = intel_vgt_balloon(dev); + if (ret) + return ret; + } + if (!HAS_LLC(dev)) dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; @@ -1786,9 +2085,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, vma->bound |= GLOBAL_BIND; } - dev_priv->gtt.base.start = start; - dev_priv->gtt.base.total = end - start; - /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", @@ -1815,9 +2111,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, if (!ppgtt) return -ENOMEM; - ret = __hw_ppgtt_init(dev, ppgtt); - if (ret != 0) + ret = __hw_ppgtt_init(dev, ppgtt, true); + if (ret) { + kfree(ppgtt); return ret; + } dev_priv->mm.aliasing_ppgtt = ppgtt; } @@ -1848,6 +2146,9 @@ void i915_global_gtt_cleanup(struct drm_device *dev) } if (drm_mm_initialized(&vm->mm)) { + if (intel_vgpu_active(dev)) + intel_vgt_deballoon(); + drm_mm_takedown(&vm->mm); list_del(&vm->global_link); } @@ -1982,7 +2283,6 @@ static int ggtt_probe_common(struct drm_device *dev, gtt_phys_addr = pci_resource_start(dev->pdev, 0) + (pci_resource_len(dev->pdev, 0) / 2); - kprintf("gtt_probe_common: gtt_phys_addr=0x%lx\n", gtt_phys_addr); dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); if (!dev_priv->gtt.gsm) { DRM_ERROR("Failed to map the gtt page table\n"); @@ -2107,7 +2407,7 @@ static int gen8_gmch_probe(struct drm_device *dev, gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); } - *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; + *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT; if (IS_CHERRYVIEW(dev)) chv_setup_private_ppat(dev_priv); @@ -2154,7 +2454,7 @@ static int gen6_gmch_probe(struct drm_device *dev, *stolen = gen6_get_stolen_size(snb_gmch_ctl); gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; + *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT; ret = ggtt_probe_common(dev, gtt_size); @@ -2262,11 +2562,16 @@ int i915_gem_gtt_init(struct drm_device *dev) return 0; } -static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) +static struct i915_vma * +__i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *ggtt_view) { - struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); + struct i915_vma *vma; + + 
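/* Editor's note, not part of the patch: the WARN_ON below enforces the
 * new invariant that a GGTT address space always comes with a ggtt_view
 * and a PPGTT address space never does; the lookup helpers further down
 * pass &i915_ggtt_view_normal or NULL accordingly. */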
if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) + return ERR_PTR(-EINVAL); + vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (vma == NULL) return ERR_PTR(-ENOMEM); @@ -2275,10 +2580,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&vma->exec_list); vma->vm = vm; vma->obj = obj; - vma->ggtt_view = *view; if (INTEL_INFO(vm->dev)->gen >= 6) { if (i915_is_ggtt(vm)) { + vma->ggtt_view = *ggtt_view; + vma->unbind_vma = ggtt_unbind_vma; vma->bind_vma = ggtt_bind_vma; } else { @@ -2287,6 +2593,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, } } else { BUG_ON(!i915_is_ggtt(vm)); + vma->ggtt_view = *ggtt_view; vma->unbind_vma = i915_ggtt_unbind_vma; vma->bind_vma = i915_ggtt_bind_vma; } @@ -2299,38 +2606,174 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, } struct i915_vma * -i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + vma = i915_gem_obj_to_vma(obj, vm); + if (!vma) + vma = __i915_gem_vma_create(obj, vm, + i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL); + + return vma; +} + +struct i915_vma * +i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); struct i915_vma *vma; - vma = i915_gem_obj_to_vma_view(obj, vm, view); + if (WARN_ON(!view)) + return ERR_PTR(-EINVAL); + + vma = i915_gem_obj_to_ggtt_view(obj, view); + + if (IS_ERR(vma)) + return vma; + if (!vma) - vma = __i915_gem_vma_create(obj, vm, view); + vma = __i915_gem_vma_create(obj, ggtt, view); return vma; + } -static inline -int i915_get_vma_pages(struct i915_vma *vma) +#if 0 +static void +rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height, + struct sg_table *st) +{ + unsigned int column, row; + unsigned int src_idx; + struct scatterlist *sg = st->sgl; + + st->nents = 0; + + for (column = 0; column < width; column++) { + src_idx = width * (height - 1) + column; + for (row = 0; row < height; row++) { + st->nents++; + /* We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. + */ + sg_set_page(sg, NULL, PAGE_SIZE, 0); + sg_dma_address(sg) = in[src_idx]; + sg_dma_len(sg) = PAGE_SIZE; + sg = sg_next(sg); + src_idx -= width; + } + } +} + +static struct sg_table * +intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, + struct drm_i915_gem_object *obj) { + struct drm_device *dev = obj->base.dev; + struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; + unsigned long size, pages, rot_pages; + struct sg_page_iter sg_iter; + unsigned long i; + dma_addr_t *page_addr_list; + struct sg_table *st; + unsigned int tile_pitch, tile_height; + unsigned int width_pages, height_pages; + int ret = -ENOMEM; + + pages = obj->base.size / PAGE_SIZE; + + /* Calculate tiling geometry. */ + tile_height = intel_tile_height(dev, rot_info->pixel_format, + rot_info->fb_modifier); + tile_pitch = PAGE_SIZE / tile_height; + width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch); + height_pages = DIV_ROUND_UP(rot_info->height, tile_height); + rot_pages = width_pages * height_pages; + size = rot_pages * PAGE_SIZE; + + /* Allocate a temporary list of source pages for random access. 
*/ + page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t)); + if (!page_addr_list) + return ERR_PTR(ret); + + /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, rot_pages, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + /* Populate source page list from the object. */ + i = 0; + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { + page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); + i++; + } + + /* Rotate the pages. */ + rotate_pages(page_addr_list, width_pages, height_pages, st); + + DRM_DEBUG_KMS( + "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n", + size, rot_info->pitch, rot_info->height, + rot_info->pixel_format, width_pages, height_pages, + rot_pages); + + drm_free_large(page_addr_list); + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + drm_free_large(page_addr_list); + + DRM_DEBUG_KMS( + "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n", + size, ret, rot_info->pitch, rot_info->height, + rot_info->pixel_format, width_pages, height_pages, + rot_pages); + return ERR_PTR(ret); +} +#endif + +static inline int +i915_get_ggtt_vma_pages(struct i915_vma *vma) +{ + int ret = 0; + if (vma->ggtt_view.pages) return 0; if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) vma->ggtt_view.pages = vma->obj->pages; +#if 0 + else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) + vma->ggtt_view.pages = + intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj); +#endif else WARN_ONCE(1, "GGTT view %u not implemented!\n", vma->ggtt_view.type); if (!vma->ggtt_view.pages) { - DRM_ERROR("Failed to get pages for VMA view type %u!\n", + DRM_ERROR("Failed to get pages for GGTT view type %u!\n", vma->ggtt_view.type); - return -EINVAL; + ret = -EINVAL; + } else if (IS_ERR(vma->ggtt_view.pages)) { + ret = PTR_ERR(vma->ggtt_view.pages); + vma->ggtt_view.pages = NULL; + DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", + vma->ggtt_view.type, ret); } - return 0; + return ret; } /** @@ -2346,10 +2789,12 @@ int i915_get_vma_pages(struct i915_vma *vma) int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - int ret = i915_get_vma_pages(vma); + if (i915_is_ggtt(vma->vm)) { + int ret = i915_get_ggtt_vma_pages(vma); - if (ret) - return ret; + if (ret) + return ret; + } vma->bind_vma(vma, cache_level, flags); diff --git a/sys/dev/drm/i915/i915_gem_gtt.h b/sys/dev/drm/i915/i915_gem_gtt.h index 527ca854d5..0db1b68e94 100644 --- a/sys/dev/drm/i915/i915_gem_gtt.h +++ b/sys/dev/drm/i915/i915_gem_gtt.h @@ -34,17 +34,15 @@ #ifndef __I915_GEM_GTT_H__ #define __I915_GEM_GTT_H__ -#include - struct drm_i915_file_private; -typedef uint32_t gen6_gtt_pte_t; -typedef uint64_t gen8_gtt_pte_t; -typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; +typedef uint32_t gen6_pte_t; +typedef uint64_t gen8_pte_t; +typedef uint64_t gen8_pde_t; #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) -#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) + /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) @@ -53,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define GEN6_PTE_UNCACHED (1 << 1) #define GEN6_PTE_VALID (1 << 0) -#define GEN6_PPGTT_PD_ENTRIES 512 -#define GEN6_PD_SIZE 
(GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE) +#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len)) +#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1) +#define I915_PDES 512 +#define I915_PDE_MASK (I915_PDES - 1) +#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT)) + +#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t)) +#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE) #define GEN6_PD_ALIGN (PAGE_SIZE * 16) +#define GEN6_PDE_SHIFT 22 #define GEN6_PDE_VALID (1 << 0) #define GEN7_PTE_CACHE_L3_LLC (3 << 1) @@ -90,9 +95,8 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define GEN8_PDE_MASK 0x1ff #define GEN8_PTE_SHIFT 12 #define GEN8_PTE_MASK 0x1ff -#define GEN8_LEGACY_PDPS 4 -#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) -#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) +#define GEN8_LEGACY_PDPES 4 +#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ @@ -113,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; enum i915_ggtt_view_type { I915_GGTT_VIEW_NORMAL = 0, + I915_GGTT_VIEW_ROTATED +}; + +struct intel_rotation_info { + unsigned int height; + unsigned int pitch; + uint32_t pixel_format; + uint64_t fb_modifier; }; struct i915_ggtt_view { enum i915_ggtt_view_type type; struct vm_page **pages; + + union { + struct intel_rotation_info rotation_info; + }; }; extern const struct i915_ggtt_view i915_ggtt_view_normal; +extern const struct i915_ggtt_view i915_ggtt_view_rotated; enum i915_cache_level; @@ -189,6 +206,28 @@ struct i915_vma { u32 flags); }; +struct i915_page_table_entry { + struct vm_page *page; + dma_addr_t daddr; + + unsigned long *used_ptes; +}; + +struct i915_page_directory_entry { + struct vm_page *page; /* NULL for GEN6-GEN7 */ + union { + uint32_t pd_offset; + dma_addr_t daddr; + }; + + struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */ +}; + +struct i915_page_directory_pointer_entry { + /* struct vm_page *page; */ + struct i915_page_directory_entry *page_directory[GEN8_LEGACY_PDPES]; +}; + struct i915_address_space { struct drm_mm mm; struct drm_device *dev; @@ -225,9 +264,12 @@ struct i915_address_space { struct list_head inactive_list; /* FIXME: Need a more generic return type */ - gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 flags); /* Create a valid PTE */ + gen6_pte_t (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 flags); /* Create a valid PTE */ + int (*allocate_va_range)(struct i915_address_space *vm, + uint64_t start, + uint64_t length); void (*clear_range)(struct i915_address_space *vm, uint64_t start, uint64_t length, @@ -272,30 +314,90 @@ struct i915_hw_ppgtt { struct i915_address_space base; struct kref ref; struct drm_mm_node node; + unsigned long pd_dirty_rings; unsigned num_pd_entries; unsigned num_pd_pages; /* gen8+ */ union { - struct vm_page **pt_pages; - struct vm_page **gen8_pt_pages[GEN8_LEGACY_PDPS]; - }; - struct vm_page *pd_pages; - union { - uint32_t pd_offset; - dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPS]; - }; - union { - dma_addr_t *pt_dma_addr; - dma_addr_t *gen8_pt_dma_addr[4]; + struct i915_page_directory_pointer_entry pdp; + struct i915_page_directory_entry pd; }; + struct i915_page_table_entry *scratch_pt; + struct drm_i915_file_private *file_priv; + gen6_pte_t __iomem *pd_addr; + int (*enable)(struct i915_hw_ppgtt *ppgtt); int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *ring); void (*debug_dump)(struct 
i915_hw_ppgtt *ppgtt, struct seq_file *m); }; +/* For each pde: iterates over every pde between start and start + length. + * If start and start+length are not perfectly divisible, the macro will round + * down and up as needed. The macro modifies pde, start, and length. Dev is + * only used to differentiate shift values; temp is scratch space. On gen6/7, start = 0, + * and length = 2G effectively iterates over every PDE in the system. + * + * XXX: temp is not actually needed, but it saves doing the ALIGN operation. + */ +#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ + for (iter = gen6_pde_index(start); \ + pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ + iter++, \ + temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ + temp = min_t(unsigned, temp, length), \ + start += temp, length -= temp) + +static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) +{ + const uint32_t mask = NUM_PTE(pde_shift) - 1; + + return (address >> PAGE_SHIFT) & mask; +} + +/* Helper to count the number of PTEs within the given length. This count + * does not cross a page table boundary, so the max value would be + * GEN6_PTES for GEN6, and GEN8_PTES for GEN8. + */ +static inline uint32_t i915_pte_count(uint64_t addr, size_t length, + uint32_t pde_shift) +{ + const uint64_t mask = ~((1 << pde_shift) - 1); + uint64_t end; + + WARN_ON(length == 0); + WARN_ON(offset_in_page(addr|length)); + + end = addr + length; + + if ((addr & mask) != (end & mask)) + return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift); + + return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift); +} + +static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift) +{ + return (addr >> shift) & I915_PDE_MASK; +} + +static inline uint32_t gen6_pte_index(uint32_t addr) +{ + return i915_pte_index(addr, GEN6_PDE_SHIFT); +} + +static inline size_t gen6_pte_count(uint32_t addr, uint32_t length) +{ + return i915_pte_count(addr, length, GEN6_PDE_SHIFT); +} + +static inline uint32_t gen6_pde_index(uint32_t addr) +{ + return i915_pde_index(addr, GEN6_PDE_SHIFT); +} + int i915_gem_gtt_init(struct drm_device *dev); void i915_gem_init_global_gtt(struct drm_device *dev); void i915_global_gtt_cleanup(struct drm_device *dev); @@ -324,4 +426,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); +static inline bool +i915_ggtt_view_equal(const struct i915_ggtt_view *a, + const struct i915_ggtt_view *b) +{ + if (WARN_ON(!a || !b)) + return false; + + return a->type == b->type; +} + #endif diff --git a/sys/dev/drm/i915/i915_gem_shrinker.c b/sys/dev/drm/i915/i915_gem_shrinker.c new file mode 100644 index 0000000000..6840294fa5 --- /dev/null +++ b/sys/dev/drm/i915/i915_gem_shrinker.c @@ -0,0 +1,338 @@ +/* + * Copyright © 2008-2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or
substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#include +#include +#include +#include +#include + +#include "i915_drv.h" +#include "i915_trace.h" + +#if 0 +static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) +{ + if (!mutex_is_locked(mutex)) + return false; + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) + return mutex->owner == task; +#else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ + return false; +#endif +} +#endif + +/** + * i915_gem_shrink - Shrink buffer object caches + * @dev_priv: i915 device + * @target: amount of memory to make available, in pages + * @flags: control flags for selecting cache types + * + * This function is the main interface to the shrinker. It will try to release + * up to @target pages of main memory backing storage from buffer objects. + * Selection of the specific caches can be done with @flags. This is e.g. useful + * when purgeable objects should be removed from caches preferentially. + * + * Note that it's not guaranteed that the released amount is actually available as + * free system memory - the pages might still be in use due to other reasons + * (like cpu mmaps) or the mm core may have reused them before we could grab them. + * Therefore code that needs to explicitly shrink buffer object caches (e.g. to + * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). + * + * Also note that any kind of pinning (both per-vma address space pins and + * backing storage pins at the buffer object level) results in the shrinker code + * having to skip the object. + * + * Returns: + * The number of pages of backing storage actually released. + */ +unsigned long +i915_gem_shrink(struct drm_i915_private *dev_priv, + long target, unsigned flags) +{ + const struct { + struct list_head *list; + unsigned int bit; + } phases[] = { + { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, + { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, + { NULL, 0 }, + }, *phase; + unsigned long count = 0; + + /* + * As we may completely rewrite the (un)bound list whilst unbinding + * (due to retiring requests) we have to strictly process only + * one element of the list at a time, and recheck the list + * on every iteration. + * + * In particular, we must hold a reference whilst removing the + * object as we may end up waiting for and/or retiring the objects. + * This might release the final reference (held by the active list) + * and result in the object being freed from under us. This is + * similar to the precautions the eviction code must take whilst + * removing objects. + * + * Also note that although these lists do not hold a reference to + * the object we can safely grab one here: The final object + * unreferencing and the bound_list are both protected by the + * dev->struct_mutex and so we won't ever be able to observe an + * object on the bound_list with a reference count of 0.
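* (Editor's note, not part of the patch: this is also why the loop below moves each object onto a private still_in_list before processing it and splices the survivors back only once the phase completes — the main list may be rewritten underneath, but the walk never revisits or loses an entry.)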
+ */ + for (phase = phases; phase->list; phase++) { + struct list_head still_in_list; + + if ((flags & phase->bit) == 0) + continue; + + INIT_LIST_HEAD(&still_in_list); + while (count < target && !list_empty(phase->list)) { + struct drm_i915_gem_object *obj; + struct i915_vma *vma, *v; + + obj = list_first_entry(phase->list, + typeof(*obj), global_list); + list_move_tail(&obj->global_list, &still_in_list); + + if (flags & I915_SHRINK_PURGEABLE && + obj->madv != I915_MADV_DONTNEED) + continue; + + drm_gem_object_reference(&obj->base); + + /* For the unbound phase, this should be a no-op! */ + list_for_each_entry_safe(vma, v, + &obj->vma_list, vma_link) + if (i915_vma_unbind(vma)) + break; + + if (i915_gem_object_put_pages(obj) == 0) + count += obj->base.size >> PAGE_SHIFT; + + drm_gem_object_unreference(&obj->base); + } + list_splice(&still_in_list, phase->list); + } + + return count; +} + +/** + * i915_gem_shrink_all - Shrink buffer object caches completely + * @dev_priv: i915 device + * + * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all + * caches completely. It first waits for and retires all outstanding requests + * so that it can also release backing storage for active objects. + * + * This should only be used in code that intentionally quiesces the GPU or as a + * last-ditch effort when memory seems to have run out. + * + * Returns: + * The number of pages of backing storage actually released. + */ +unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) +{ + i915_gem_evict_everything(dev_priv->dev); + return i915_gem_shrink(dev_priv, LONG_MAX, + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); +} + +#if 0 +static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) +{ + if (!mutex_trylock(&dev->struct_mutex)) { + if (!mutex_is_locked_by(&dev->struct_mutex, current)) + return false; + + if (to_i915(dev)->mm.shrinker_no_lock_stealing) + return false; + + *unlock = false; + } else + *unlock = true; + + return true; +} + +static int num_vma_bound(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + int count = 0; + + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (drm_mm_node_allocated(&vma->node)) + count++; + + return count; +} + +static unsigned long +i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *dev_priv = + container_of(shrinker, struct drm_i915_private, mm.shrinker); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj; + unsigned long count; + bool unlock; + + if (!i915_gem_shrinker_lock(dev, &unlock)) + return 0; + + count = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) + if (obj->pages_pin_count == 0) + count += obj->base.size >> PAGE_SHIFT; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (!i915_gem_obj_is_pinned(obj) && + obj->pages_pin_count == num_vma_bound(obj)) + count += obj->base.size >> PAGE_SHIFT; + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + return count; +} + +static unsigned long +i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *dev_priv = + container_of(shrinker, struct drm_i915_private, mm.shrinker); + struct drm_device *dev = dev_priv->dev; + unsigned long freed; + bool unlock; + + if (!i915_gem_shrinker_lock(dev, &unlock)) + return SHRINK_STOP; + + freed = i915_gem_shrink(dev_priv, + sc->nr_to_scan, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_PURGEABLE); + if (freed <
sc->nr_to_scan) + freed += i915_gem_shrink(dev_priv, + sc->nr_to_scan - freed, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); + if (unlock) + mutex_unlock(&dev->struct_mutex); + + return freed; +} + +static int +i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct drm_i915_private *dev_priv = + container_of(nb, struct drm_i915_private, mm.oom_notifier); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj; + unsigned long timeout = msecs_to_jiffies(5000) + 1; + unsigned long pinned, bound, unbound, freed_pages; + bool was_interruptible; + bool unlock; + + while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { + schedule_timeout_killable(1); + if (fatal_signal_pending(current)) + return NOTIFY_DONE; + } + if (timeout == 0) { + pr_err("Unable to purge GPU memory due to lock contention.\n"); + return NOTIFY_DONE; + } + + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + freed_pages = i915_gem_shrink_all(dev_priv); + + dev_priv->mm.interruptible = was_interruptible; + + /* Because we may be allocating inside our own driver, we cannot + * assert that there are no objects with pinned pages that are not + * being pointed to by hardware. + */ + unbound = bound = pinned = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + if (!obj->base.filp) /* not backed by a freeable object */ + continue; + + if (obj->pages_pin_count) + pinned += obj->base.size; + else + unbound += obj->base.size; + } + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (!obj->base.filp) + continue; + + if (obj->pages_pin_count) + pinned += obj->base.size; + else + bound += obj->base.size; + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + if (freed_pages || unbound || bound) + pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", + freed_pages << PAGE_SHIFT, pinned); + if (unbound || bound) + pr_err("%lu and %lu bytes still available in the " + "bound and unbound GPU page lists.\n", + bound, unbound); + + *(unsigned long *)ptr += freed_pages; + return NOTIFY_DONE; +} +#endif + +/** + * i915_gem_shrinker_init - Initialize i915 shrinker + * @dev_priv: i915 device + * + * This function registers and sets up the i915 shrinker and OOM handler.
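* (Editor's note, not part of the patch: on this DragonFly port the actual shrinker and OOM-notifier registration is compiled out behind the #if 0 blocks above, so the function body below is effectively a stub until those Linux interfaces are hooked up natively.)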
+ */ +void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) +{ +#if 0 + dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; + dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; + dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&dev_priv->mm.shrinker); + + dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; + register_oom_notifier(&dev_priv->mm.oom_notifier); +#endif +} diff --git a/sys/dev/drm/i915/i915_gem_stolen.c b/sys/dev/drm/i915/i915_gem_stolen.c index 86ed28f079..6c971e3545 100644 --- a/sys/dev/drm/i915/i915_gem_stolen.c +++ b/sys/dev/drm/i915/i915_gem_stolen.c @@ -232,7 +232,7 @@ static int i915_setup_compression(struct drm_device *dev, int size, int fb_cpp) dev_priv->mm.stolen_base + compressed_llb->start); } - dev_priv->fbc.size = size / dev_priv->fbc.threshold; + dev_priv->fbc.uncompressed_size = size; DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", size); @@ -254,7 +254,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_c if (!drm_mm_initialized(&dev_priv->mm.stolen)) return -ENODEV; - if (size < dev_priv->fbc.size) + if (size <= dev_priv->fbc.uncompressed_size) return 0; /* Release any current block */ @@ -267,7 +267,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->fbc.size == 0) + if (dev_priv->fbc.uncompressed_size == 0) return; drm_mm_remove_node(&dev_priv->fbc.compressed_fb); @@ -277,7 +277,7 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev) kfree(dev_priv->fbc.compressed_llb); } - dev_priv->fbc.size = 0; + dev_priv->fbc.uncompressed_size = 0; } void i915_gem_cleanup_stolen(struct drm_device *dev) diff --git a/sys/dev/drm/i915/i915_irq.c b/sys/dev/drm/i915/i915_irq.c index 41f68f3cb4..8ae2ef0b0a 100644 --- a/sys/dev/drm/i915/i915_irq.c +++ b/sys/dev/drm/i915/i915_irq.c @@ -272,6 +272,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev) I915_WRITE(reg, dev_priv->pm_rps_events); I915_WRITE(reg, dev_priv->pm_rps_events); POSTING_READ(reg); + dev_priv->rps.pm_iir = 0; lockmgr(&dev_priv->irq_lock, LK_RELEASE); } @@ -325,12 +326,13 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & ~dev_priv->pm_rps_events); - I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); - I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); - - dev_priv->rps.pm_iir = 0; lockmgr(&dev_priv->irq_lock, LK_RELEASE); + + /* Wait for pending IRQ handlers to complete (on other CPUs) */ +#if 0 + synchronize_irq(dev->irq); +#endif } /** @@ -487,31 +489,6 @@ static void i915_enable_asle_pipestat(struct drm_device *dev) lockmgr(&dev_priv->irq_lock, LK_RELEASE); } -/** - * i915_pipe_enabled - check if a pipe is enabled - * @dev: DRM device - * @pipe: pipe to check - * - * Reading certain registers when the pipe is disabled can hang the chip. - * Use this routine to make sure the PLL is running and the pipe is active - * before reading such registers if unsure. - */ -static int -i915_pipe_enabled(struct drm_device *dev, int pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Locking is horribly broken here, but whatever. 
*/ - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - return intel_crtc->active; - } else { - return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; - } -} - /* * This timing diagram depicts the video signal in and * around the vertical blanking period. @@ -577,34 +554,16 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) unsigned long high_frame; unsigned long low_frame; u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; + struct intel_crtc *intel_crtc = + to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + const struct drm_display_mode *mode = + &intel_crtc->config->base.adjusted_mode; - if (!i915_pipe_enabled(dev, pipe)) { - DRM_DEBUG_DRIVER("trying to get vblank count for disabled " - "pipe %c\n", pipe_name(pipe)); - return 0; - } - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - struct intel_crtc *intel_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); - const struct drm_display_mode *mode = - &intel_crtc->config->base.adjusted_mode; - - htotal = mode->crtc_htotal; - hsync_start = mode->crtc_hsync_start; - vbl_start = mode->crtc_vblank_start; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) - vbl_start = DIV_ROUND_UP(vbl_start, 2); - } else { - enum transcoder cpu_transcoder = (enum transcoder) pipe; - - htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1; - hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1; - vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1; - if ((I915_READ(PIPECONF(cpu_transcoder)) & - PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE) - vbl_start = DIV_ROUND_UP(vbl_start, 2); - } + htotal = mode->crtc_htotal; + hsync_start = mode->crtc_hsync_start; + vbl_start = mode->crtc_vblank_start; + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + vbl_start = DIV_ROUND_UP(vbl_start, 2); /* Convert to pixel count */ vbl_start *= htotal; @@ -643,12 +602,6 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) struct drm_i915_private *dev_priv = dev->dev_private; int reg = PIPE_FRMCOUNT_GM45(pipe); - if (!i915_pipe_enabled(dev, pipe)) { - DRM_DEBUG_DRIVER("trying to get vblank count for disabled " - "pipe %c\n", pipe_name(pipe)); - return 0; - } - return I915_READ(reg); } @@ -833,7 +786,7 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, return -EINVAL; } - if (!crtc->enabled) { + if (!crtc->state->enable) { DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); return -EBUSY; } @@ -1037,129 +990,73 @@ static void notify_ring(struct drm_device *dev, wake_up_all(&ring->irq_queue); } -static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, - struct intel_rps_ei *rps_ei) +static void vlv_c0_read(struct drm_i915_private *dev_priv, + struct intel_rps_ei *ei) { - u32 cz_ts, cz_freq_khz; - u32 render_count, media_count; - u32 elapsed_render, elapsed_media, elapsed_time; - u32 residency = 0; - - cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); - cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); - - render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); - media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); - - if (rps_ei->cz_clock == 0) { - rps_ei->cz_clock = cz_ts; - rps_ei->render_c0 = render_count; - rps_ei->media_c0 = media_count; - - return dev_priv->rps.cur_freq; - } - - elapsed_time = cz_ts - rps_ei->cz_clock; - rps_ei->cz_clock = cz_ts; + ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); + ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); + ei->media_c0 = 
I915_READ(VLV_MEDIA_C0_COUNT); +} - elapsed_render = render_count - rps_ei->render_c0; - rps_ei->render_c0 = render_count; +static bool vlv_c0_above(struct drm_i915_private *dev_priv, + const struct intel_rps_ei *old, + const struct intel_rps_ei *now, + int threshold) +{ + u64 time, c0; - elapsed_media = media_count - rps_ei->media_c0; - rps_ei->media_c0 = media_count; + if (old->cz_clock == 0) + return false; - /* Convert all the counters into common unit of milli sec */ - elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; - elapsed_render /= cz_freq_khz; - elapsed_media /= cz_freq_khz; + time = now->cz_clock - old->cz_clock; + time *= threshold * dev_priv->mem_freq; - /* - * Calculate overall C0 residency percentage - * only if elapsed time is non zero + /* Workload can be split between render + media, e.g. SwapBuffers + * being blitted in X after being rendered in mesa. To account for + * this we need to combine both engines into our activity counter. */ - if (elapsed_time) { - residency = - ((max(elapsed_render, elapsed_media) * 100) - / elapsed_time); - } + c0 = now->render_c0 - old->render_c0; + c0 += now->media_c0 - old->media_c0; + c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000; - return residency; + return c0 >= time; } -/** - * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU - * busy-ness calculated from C0 counters of render & media power wells - * @dev_priv: DRM device private - * - */ -static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) { - u32 residency_C0_up = 0, residency_C0_down = 0; - int new_delay, adj; - - dev_priv->rps.ei_interrupt_count++; - - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - - - if (dev_priv->rps.up_ei.cz_clock == 0) { - vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); - vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); - return dev_priv->rps.cur_freq; - } + vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); + dev_priv->rps.up_ei = dev_priv->rps.down_ei; +} +static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) +{ + struct intel_rps_ei now; + u32 events = 0; - /* - * To down throttle, C0 residency should be less than down threshold - * for continous EI intervals. So calculate down EI counters - * once in VLV_INT_COUNT_FOR_DOWN_EI - */ - if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { + if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) + return 0; - dev_priv->rps.ei_interrupt_count = 0; + vlv_c0_read(dev_priv, &now); + if (now.cz_clock == 0) + return 0; - residency_C0_down = vlv_c0_residency(dev_priv, - &dev_priv->rps.down_ei); - } else { - residency_C0_up = vlv_c0_residency(dev_priv, - &dev_priv->rps.up_ei); + if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { + if (!vlv_c0_above(dev_priv, + &dev_priv->rps.down_ei, &now, + VLV_RP_DOWN_EI_THRESHOLD)) + events |= GEN6_PM_RP_DOWN_THRESHOLD; + dev_priv->rps.down_ei = now; } - new_delay = dev_priv->rps.cur_freq; - - adj = dev_priv->rps.last_adj; - /* C0 residency is greater than UP threshold. Increase Frequency */ - if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { - if (adj > 0) - adj *= 2; - else - adj = 1; - - if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) - new_delay = dev_priv->rps.cur_freq + adj; - - /* - * For better performance, jump directly - * to RPe if we're below it. 
- */ - if (new_delay < dev_priv->rps.efficient_freq) - new_delay = dev_priv->rps.efficient_freq; - - } else if (!dev_priv->rps.ei_interrupt_count && - (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { - if (adj < 0) - adj *= 2; - else - adj = -1; - /* - * This means, C0 residency is less than down threshold over - * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq - */ - if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) - new_delay = dev_priv->rps.cur_freq + adj; + if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { + if (vlv_c0_above(dev_priv, + &dev_priv->rps.up_ei, &now, + VLV_RP_UP_EI_THRESHOLD)) + events |= GEN6_PM_RP_UP_THRESHOLD; + dev_priv->rps.up_ei = now; } - return new_delay; + return events; } static void gen6_pm_rps_work(struct work_struct *work) @@ -1189,6 +1086,8 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); + pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); + adj = dev_priv->rps.last_adj; if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) @@ -1211,8 +1110,6 @@ static void gen6_pm_rps_work(struct work_struct *work) else new_delay = dev_priv->rps.min_freq_softlimit; adj = 0; - } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { - new_delay = vlv_calc_delay_from_C0_counters(dev_priv); } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; @@ -1234,10 +1131,7 @@ static void gen6_pm_rps_work(struct work_struct *work) dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq; - if (IS_VALLEYVIEW(dev_priv->dev)) - valleyview_set_rps(dev_priv->dev, new_delay); - else - gen6_set_rps(dev_priv->dev, new_delay); + intel_set_rps(dev_priv->dev, new_delay); mutex_unlock(&dev_priv->rps.hw_lock); } @@ -1734,11 +1628,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pip * the work queue. */ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { - /* TODO: RPS on GEN9+ is not supported yet. */ - if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, - "GEN9+: unexpected RPS IRQ\n")) - return; - if (pm_iir & dev_priv->pm_rps_events) { lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); @@ -2641,14 +2530,10 @@ static int i915_enable_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); if (INTEL_INFO(dev)->gen >= 4) i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); - else i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); lockmgr(&dev_priv->irq_lock, LK_RELEASE); @@ -2662,9 +2547,6 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe) uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); ironlake_enable_display_irq(dev_priv, bit); lockmgr(&dev_priv->irq_lock, LK_RELEASE); @@ -2676,9 +2558,6 @@ static int valleyview_enable_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); i915_enable_pipestat(dev_priv, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); @@ -2691,9 +2570,6 @@ static int gen8_enable_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!i915_pipe_enabled(dev, pipe)) - return -EINVAL; - lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); @@ -2741,9 +2617,6 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!i915_pipe_enabled(dev, pipe)) - return; - lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); @@ -3208,15 +3081,24 @@ static void gen8_irq_reset(struct drm_device *dev) ibx_irq_reset(dev); } -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, + unsigned int pipe_mask) { uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], - ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], - ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); + if (pipe_mask & 1 << PIPE_A) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, + dev_priv->de_irq_mask[PIPE_A], + ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); + if (pipe_mask & 1 << PIPE_B) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, + dev_priv->de_irq_mask[PIPE_B], + ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); + if (pipe_mask & 1 << PIPE_C) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, + dev_priv->de_irq_mask[PIPE_C], + ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); lockmgr(&dev_priv->irq_lock, LK_RELEASE); } @@ -3690,14 +3572,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev) ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); I915_WRITE16(IMR, dev_priv->irq_mask); I915_WRITE16(IER, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT); POSTING_READ16(IER); @@ -3858,14 +3738,12 @@ static int i915_irq_postinstall(struct drm_device *dev) I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); enable_mask = I915_ASLE_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | I915_USER_INTERRUPT; if (I915_HAS_HOTPLUG(dev)) { @@ -4328,7 +4206,7 @@ void intel_irq_init(struct 
drm_i915_private *dev_priv) /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) /* WaGsvRC0ResidencyMethod:vlv */ - dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; + dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; @@ -4358,10 +4236,8 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (!IS_GEN2(dev_priv)) dev->vblank_disable_immediate = true; - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; - dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; - } + dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; + dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; if (IS_CHERRYVIEW(dev_priv)) { dev->driver->irq_handler = cherryview_irq_handler; @@ -4445,7 +4321,7 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) list_for_each_entry(connector, &mode_config->connector_list, head) { struct intel_connector *intel_connector = to_intel_connector(connector); connector->polled = intel_connector->polled; - if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) + if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) connector->polled = DRM_CONNECTOR_POLL_HPD; } diff --git a/sys/dev/drm/i915/i915_params.c b/sys/dev/drm/i915/i915_params.c index 356357ab9b..a66c1d1369 100644 --- a/sys/dev/drm/i915/i915_params.c +++ b/sys/dev/drm/i915/i915_params.c @@ -27,7 +27,6 @@ struct i915_params i915 __read_mostly = { .modeset = -1, .panel_ignore_lid = 1, - .powersave = 1, .semaphores = -1, .lvds_downclock = 0, .lvds_channel_mode = 0, @@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = { .enable_ips = 1, .fastboot = 0, .prefault_disable = 0, + .load_detect_test = 0, .reset = 1, .invert_brightness = 0, .disable_display = 0, @@ -67,11 +67,6 @@ MODULE_PARM_DESC(panel_ignore_lid, "Override lid status (0=autodetect, 1=autodetect disabled [default], " "-1=force lid closed, -2=force lid open)"); -module_param_named(powersave, i915.powersave, int, 0600); -TUNABLE_INT("drm.i915.powersave", &i915.powersave); -MODULE_PARM_DESC(powersave, - "Enable powersavings, fbc, downclocking, etc. (default: true)"); - module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); TUNABLE_INT("drm.i915.semaphores", &i915.semaphores); MODULE_PARM_DESC(semaphores, @@ -158,11 +153,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600); MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time (default: false)"); -module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); +module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); MODULE_PARM_DESC(prefault_disable, "Disable page prefaulting for pread/pwrite/reloc (default:false). " "For developers only."); +module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600); +MODULE_PARM_DESC(load_detect_test, + "Force-enable the VGA load detect code for testing (default:false). 
" + "For developers only."); + module_param_named(invert_brightness, i915.invert_brightness, int, 0600); TUNABLE_INT("drm.i915.panel_invert_brightness", &i915.invert_brightness); MODULE_PARM_DESC(invert_brightness, @@ -187,10 +187,10 @@ TUNABLE_INT("drm.i915.use_mmio_flip", &i915.use_mmio_flip); MODULE_PARM_DESC(use_mmio_flip, "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); -module_param_named(mmio_debug, i915.mmio_debug, bool, 0600); +module_param_named(mmio_debug, i915.mmio_debug, int, 0600); MODULE_PARM_DESC(mmio_debug, - "Enable the MMIO debug code (default: false). This may negatively " - "affect performance."); + "Enable the MMIO debug code for the first N failures (default: off). " + "This may negatively affect performance."); module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); MODULE_PARM_DESC(verbose_state_checks, diff --git a/sys/dev/drm/i915/i915_reg.h b/sys/dev/drm/i915/i915_reg.h index 33b3d0a240..773d1d24e6 100644 --- a/sys/dev/drm/i915/i915_reg.h +++ b/sys/dev/drm/i915/i915_reg.h @@ -139,7 +139,21 @@ #define GEN8_RING_PDP_UDW(ring, n) ((ring)->mmio_base+0x270 + ((n) * 8 + 4)) #define GEN8_RING_PDP_LDW(ring, n) ((ring)->mmio_base+0x270 + (n) * 8) +#define GEN8_R_PWR_CLK_STATE 0x20C8 +#define GEN8_RPCS_ENABLE (1 << 31) +#define GEN8_RPCS_S_CNT_ENABLE (1 << 18) +#define GEN8_RPCS_S_CNT_SHIFT 15 +#define GEN8_RPCS_S_CNT_MASK (0x7 << GEN8_RPCS_S_CNT_SHIFT) +#define GEN8_RPCS_SS_CNT_ENABLE (1 << 11) +#define GEN8_RPCS_SS_CNT_SHIFT 8 +#define GEN8_RPCS_SS_CNT_MASK (0x7 << GEN8_RPCS_SS_CNT_SHIFT) +#define GEN8_RPCS_EU_MAX_SHIFT 4 +#define GEN8_RPCS_EU_MAX_MASK (0xf << GEN8_RPCS_EU_MAX_SHIFT) +#define GEN8_RPCS_EU_MIN_SHIFT 0 +#define GEN8_RPCS_EU_MIN_MASK (0xf << GEN8_RPCS_EU_MIN_SHIFT) + #define GAM_ECOCHK 0x4090 +#define BDW_DISABLE_HDC_INVALIDATION (1<<25) #define ECOCHK_SNB_BIT (1<<10) #define HSW_ECOCHK_ARB_PRIO_SOL (1<<6) #define ECOCHK_PPGTT_CACHE64B (0x3<<3) @@ -552,6 +566,9 @@ #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) #define DSPFREQGUAR_SHIFT 14 #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) +#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ +#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ +#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ #define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) #define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) #define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) @@ -586,6 +603,19 @@ enum punit_power_well { PUNIT_POWER_WELL_NUM, }; +enum skl_disp_power_wells { + SKL_DISP_PW_MISC_IO, + SKL_DISP_PW_DDI_A_E, + SKL_DISP_PW_DDI_B, + SKL_DISP_PW_DDI_C, + SKL_DISP_PW_DDI_D, + SKL_DISP_PW_1 = 14, + SKL_DISP_PW_2, +}; + +#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) +#define SKL_POWER_WELL_REQ(pw) (1 << (((pw) * 2) + 1)) + #define PUNIT_REG_PWRGT_CTRL 0x60 #define PUNIT_REG_PWRGT_STATUS 0x61 #define PUNIT_PWRGT_MASK(power_well) (3 << ((power_well) * 2)) @@ -614,6 +644,11 @@ enum punit_power_well { #define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 #define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 +#define PUNIT_REG_DDR_SETUP2 0x139 +#define FORCE_DDR_FREQ_REQ_ACK (1 << 8) +#define FORCE_DDR_LOW_FREQ (1 << 1) +#define FORCE_DDR_HIGH_FREQ (1 << 0) + #define PUNIT_GPU_STATUS_REG 0xdb #define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 #define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff @@ -638,7 +673,6 @@ enum punit_power_well { #define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 #define VLV_RP_UP_EI_THRESHOLD 90 #define VLV_RP_DOWN_EI_THRESHOLD 70 -#define VLV_INT_COUNT_FOR_DOWN_EI 5 /* vlv2 north clock has */ #define CCK_FUSE_REG 
0x8 @@ -1002,6 +1036,7 @@ enum punit_power_well { #define DPIO_CHV_FIRST_MOD (0 << 8) #define DPIO_CHV_SECOND_MOD (1 << 8) #define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0 +#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0) #define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1) #define _CHV_PLL_DW6_CH0 0x8018 @@ -1011,6 +1046,19 @@ enum punit_power_well { #define DPIO_CHV_PROP_COEFF_SHIFT 0 #define CHV_PLL_DW6(ch) _PIPE(ch, _CHV_PLL_DW6_CH0, _CHV_PLL_DW6_CH1) +#define _CHV_PLL_DW8_CH0 0x8020 +#define _CHV_PLL_DW8_CH1 0x81A0 +#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0 +#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0) +#define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1) + +#define _CHV_PLL_DW9_CH0 0x8024 +#define _CHV_PLL_DW9_CH1 0x81A4 +#define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */ +#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1) +#define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ +#define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) + #define _CHV_CMN_DW5_CH0 0x8114 #define CHV_BUFRIGHTENA1_DISABLE (0 << 20) #define CHV_BUFRIGHTENA1_NORMAL (1 << 20) @@ -1258,6 +1306,9 @@ enum punit_power_well { #define ERR_INT_FIFO_UNDERRUN_A (1<<0) #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) +#define GEN8_FAULT_TLB_DATA0 0x04b10 +#define GEN8_FAULT_TLB_DATA1 0x04b14 + #define FPGA_DBG 0x42300 #define FPGA_DBG_RM_NOCLAIM (1<<31) @@ -1314,6 +1365,8 @@ enum punit_power_well { #define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) #define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1) #define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) +#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (slice * 2)) +#define GEN9_IZ_HASHING(slice, val) ((val) << (slice * 2)) #define GFX_MODE 0x02520 #define GFX_MODE_GEN7 0x0229c @@ -1470,6 +1523,7 @@ enum punit_power_well { #define CACHE_MODE_1 0x7004 /* IVB+ */ #define PIXEL_SUBSPAN_COLLECT_OPT_DISABLE (1<<6) #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1<<6) +#define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1<<1) #define GEN6_BLITTER_ECOSKPD 0x221d0 #define GEN6_BLITTER_LOCK_SHIFT 16 @@ -1482,6 +1536,8 @@ enum punit_power_well { /* Fuse readout registers for GT */ #define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168) +#define CHV_FGT_DISABLE_SS0 (1 << 10) +#define CHV_FGT_DISABLE_SS1 (1 << 11) #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 #define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT) #define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20 @@ -1491,6 +1547,17 @@ enum punit_power_well { #define CHV_FGT_EU_DIS_SS1_R1_SHIFT 28 #define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) +#define GEN8_FUSE2 0x9120 +#define GEN8_F2_S_ENA_SHIFT 25 +#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT) + +#define GEN9_F2_SS_DIS_SHIFT 20 +#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) + +#define GEN8_EU_DISABLE0 0x9134 +#define GEN8_EU_DISABLE1 0x9138 +#define GEN8_EU_DISABLE2 0x913c + #define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050 #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) @@ -1740,6 +1807,7 @@ enum punit_power_well { #define GMBUS_CYCLE_INDEX (2<<25) #define GMBUS_CYCLE_STOP (4<<25) #define GMBUS_BYTE_COUNT_SHIFT 16 +#define GMBUS_BYTE_COUNT_MAX 256U #define GMBUS_SLAVE_INDEX_SHIFT 8 #define GMBUS_SLAVE_ADDR_SHIFT 1 #define GMBUS_SLAVE_READ (1<<0) @@ -2048,6 +2116,14 @@ enum punit_power_well { #define CDCLK_FREQ_SHIFT 4 #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) #define CZCLK_FREQ_MASK 0xf + +#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C) +#define 
PFI_CREDIT_63 (9 << 28) /* chv only */ +#define PFI_CREDIT_31 (8 << 28) /* chv only */ +#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */ +#define PFI_CREDIT_RESEND (1 << 27) +#define VGA_FAST_MODE_DISABLE (1 << 14) + #define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) /* @@ -2376,6 +2452,12 @@ enum punit_power_well { #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) +#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) +#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) +#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ + INTERVAL_1_33_US(us) : \ + INTERVAL_1_28_US(us)) + /* * Logical Context regs */ @@ -2968,7 +3050,7 @@ enum punit_power_well { /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 -/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC +/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 @@ -3865,6 +3947,7 @@ enum punit_power_well { #define PIPECONF_INTERLACE_MODE_MASK (7 << 21) #define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20) #define PIPECONF_CXSR_DOWNCLOCK (1<<16) +#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) #define PIPECONF_BPC_MASK (0x7 << 5) #define PIPECONF_8BPC (0<<5) @@ -4013,7 +4096,7 @@ enum punit_power_well { #define DPINVGTT_STATUS_MASK 0xff #define DPINVGTT_STATUS_MASK_CHV 0xfff -#define DSPARB 0x70030 +#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) @@ -4021,6 +4104,9 @@ enum punit_power_well { #define DSPARB_BEND_SHIFT 9 /* on 855 */ #define DSPARB_AEND_SHIFT 0 +#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ +#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ + /* pnv/gen4/g4x/vlv/chv */ #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) #define DSPFW_SR_SHIFT 23 @@ -4044,8 +4130,8 @@ enum punit_power_well { #define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */ #define DSPFW_CURSORA_SHIFT 8 #define DSPFW_CURSORA_MASK (0x3f<<8) -#define DSPFW_PLANEC_SHIFT_OLD 0 -#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */ +#define DSPFW_PLANEC_OLD_SHIFT 0 +#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */ #define DSPFW_SPRITEA_SHIFT 0 #define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ #define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ @@ -4084,25 +4170,25 @@ enum punit_power_well { #define DSPFW_SPRITED_WM1_SHIFT 24 #define DSPFW_SPRITED_WM1_MASK (0xff<<24) #define DSPFW_SPRITED_SHIFT 16 -#define DSPFW_SPRITED_MASK (0xff<<16) +#define DSPFW_SPRITED_MASK_VLV (0xff<<16) #define DSPFW_SPRITEC_WM1_SHIFT 8 #define DSPFW_SPRITEC_WM1_MASK (0xff<<8) #define DSPFW_SPRITEC_SHIFT 0 -#define DSPFW_SPRITEC_MASK (0xff<<0) +#define DSPFW_SPRITEC_MASK_VLV (0xff<<0) #define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) #define DSPFW_SPRITEF_WM1_SHIFT 24 #define DSPFW_SPRITEF_WM1_MASK (0xff<<24) #define DSPFW_SPRITEF_SHIFT 16 -#define DSPFW_SPRITEF_MASK (0xff<<16) +#define DSPFW_SPRITEF_MASK_VLV (0xff<<16) #define DSPFW_SPRITEE_WM1_SHIFT 8 #define DSPFW_SPRITEE_WM1_MASK (0xff<<8) #define DSPFW_SPRITEE_SHIFT 0 -#define DSPFW_SPRITEE_MASK (0xff<<0) +#define DSPFW_SPRITEE_MASK_VLV (0xff<<0) #define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? 
*/ #define DSPFW_PLANEC_WM1_SHIFT 24 #define DSPFW_PLANEC_WM1_MASK (0xff<<24) #define DSPFW_PLANEC_SHIFT 16 -#define DSPFW_PLANEC_MASK (0xff<<16) +#define DSPFW_PLANEC_MASK_VLV (0xff<<16) #define DSPFW_CURSORC_WM1_SHIFT 8 #define DSPFW_CURSORC_WM1_MASK (0x3f<<16) #define DSPFW_CURSORC_SHIFT 0 @@ -4111,7 +4197,7 @@ enum punit_power_well { /* vlv/chv high order bits */ #define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) #define DSPFW_SR_HI_SHIFT 24 -#define DSPFW_SR_HI_MASK (1<<24) +#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_HI_SHIFT 23 #define DSPFW_SPRITEF_HI_MASK (1<<23) #define DSPFW_SPRITEE_HI_SHIFT 22 @@ -4132,7 +4218,7 @@ enum punit_power_well { #define DSPFW_PLANEA_HI_MASK (1<<0) #define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) #define DSPFW_SR_WM1_HI_SHIFT 24 -#define DSPFW_SR_WM1_HI_MASK (1<<24) +#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_WM1_HI_SHIFT 23 #define DSPFW_SPRITEF_WM1_HI_MASK (1<<23) #define DSPFW_SPRITEE_WM1_HI_SHIFT 22 @@ -4153,21 +4239,17 @@ enum punit_power_well { #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) /* drain latency register values*/ -#define DRAIN_LATENCY_PRECISION_16 16 -#define DRAIN_LATENCY_PRECISION_32 32 -#define DRAIN_LATENCY_PRECISION_64 64 #define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) -#define DDL_CURSOR_PRECISION_HIGH (1<<31) -#define DDL_CURSOR_PRECISION_LOW (0<<31) #define DDL_CURSOR_SHIFT 24 -#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite))) -#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite))) #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) -#define DDL_PLANE_PRECISION_HIGH (1<<7) -#define DDL_PLANE_PRECISION_LOW (0<<7) #define DDL_PLANE_SHIFT 0 +#define DDL_PRECISION_HIGH (1<<7) +#define DDL_PRECISION_LOW (0<<7) #define DRAIN_LATENCY_MASK 0x7f +#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) +#define CBR_PND_DEADLINE_DISABLE (1<<31) + /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64 @@ -5221,14 +5303,22 @@ enum punit_power_well { #define HSW_NDE_RSTWRN_OPT 0x46408 #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) +#define FF_SLICE_CS_CHICKEN2 0x02e4 +#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) + /* GEN7 chicken */ #define GEN7_COMMON_SLICE_CHICKEN1 0x7010 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26)) +# define GEN9_RHWO_OPTIMIZATION_DISABLE (1<<14) #define COMMON_SLICE_CHICKEN2 0x7014 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1<<0) -#define HIZ_CHICKEN 0x7018 -# define CHV_HZ_8X8_MODE_IN_1X (1<<15) +#define HIZ_CHICKEN 0x7018 +# define CHV_HZ_8X8_MODE_IN_1X (1<<15) +# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE (1<<3) + +#define GEN9_SLICE_COMMON_ECO_CHICKEN0 0x7308 +#define DISABLE_PIXEL_MASK_CAMMING (1<<14) #define GEN7_L3SQCREG1 0xB010 #define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 @@ -5245,11 +5335,16 @@ enum punit_power_well { #define GEN7_L3SQCREG4 0xb034 #define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27) +#define GEN8_L3SQCREG4 0xb118 +#define GEN8_LQSC_RO_PERF_DIS (1<<27) + /* GEN8 chicken */ #define HDC_CHICKEN0 0x7300 -#define HDC_FORCE_NON_COHERENT (1<<4) -#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) #define HDC_FENCE_DEST_SLM_DISABLE (1<<14) +#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11) +#define HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT (1<<5) +#define HDC_FORCE_NON_COHERENT (1<<4) +#define HDC_BARRIER_PERFORMANCE_DISABLE (1<<10) /* WaCatErrorRejectionIssue */ #define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 @@ -5258,6 +5353,9 @@ enum punit_power_well { 
#define HSW_SCRATCH1 0xb038 #define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27) +#define BDW_SCRATCH1 0xb11c +#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1<<2) + /* PCH */ /* south display engine interrupt: IBX */ @@ -5976,10 +6074,13 @@ enum punit_power_well { #define GTFIFOCTL 0x120008 #define GT_FIFO_FREE_ENTRIES_MASK 0x7f #define GT_FIFO_NUM_RESERVED_ENTRIES 20 +#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) +#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) #define HSW_IDICR 0x9008 #define IDIHASHMSK(x) (((x) & 0x3f) << 16) #define HSW_EDRAM_PRESENT 0x120010 +#define EDRAM_ENABLED 0x1 #define GEN6_UCGCTL1 0x9400 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE (1 << 16) @@ -6003,6 +6104,7 @@ enum punit_power_well { #define GEN6_RSTCTL 0x9420 #define GEN8_UCGCTL6 0x9430 +#define GEN8_GAPSUNIT_CLOCK_GATE_DISABLE (1<<24) #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1<<14) #define GEN6_GFXPAUSE 0xA000 @@ -6010,6 +6112,7 @@ enum punit_power_well { #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) #define HSW_FREQUENCY(x) ((x)<<24) +#define GEN9_FREQUENCY(x) ((x)<<23) #define GEN6_OFFSET(x) ((x)<<19) #define GEN6_AGGRESSIVE_TURBO (0<<15) #define GEN6_RC_VIDEO_FREQ 0xA00C @@ -6028,8 +6131,10 @@ enum punit_power_well { #define GEN6_RPSTAT1 0xA01C #define GEN6_CAGF_SHIFT 8 #define HSW_CAGF_SHIFT 7 +#define GEN9_CAGF_SHIFT 23 #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) +#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) #define GEN6_RP_CONTROL 0xA024 #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_MEDIA_MODE_MASK (3<<9) @@ -6120,8 +6225,8 @@ enum punit_power_well { #define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6pp 0x138110 -#define VLV_RENDER_C0_COUNT_REG 0x138118 -#define VLV_MEDIA_C0_COUNT_REG 0x13811C +#define VLV_RENDER_C0_COUNT 0x138118 +#define VLV_MEDIA_C0_COUNT 0x13811C #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) @@ -6155,6 +6260,37 @@ enum punit_power_well { #define GEN6_RC6 3 #define GEN6_RC7 4 +#define CHV_POWER_SS0_SIG1 0xa720 +#define CHV_POWER_SS1_SIG1 0xa728 +#define CHV_SS_PG_ENABLE (1<<1) +#define CHV_EU08_PG_ENABLE (1<<9) +#define CHV_EU19_PG_ENABLE (1<<17) +#define CHV_EU210_PG_ENABLE (1<<25) + +#define CHV_POWER_SS0_SIG2 0xa724 +#define CHV_POWER_SS1_SIG2 0xa72c +#define CHV_EU311_PG_ENABLE (1<<1) + +#define GEN9_SLICE0_PGCTL_ACK 0x804c +#define GEN9_SLICE1_PGCTL_ACK 0x8050 +#define GEN9_SLICE2_PGCTL_ACK 0x8054 +#define GEN9_PGCTL_SLICE_ACK (1 << 0) + +#define GEN9_SLICE0_SS01_EU_PGCTL_ACK 0x805c +#define GEN9_SLICE0_SS23_EU_PGCTL_ACK 0x8060 +#define GEN9_SLICE1_SS01_EU_PGCTL_ACK 0x8064 +#define GEN9_SLICE1_SS23_EU_PGCTL_ACK 0x8068 +#define GEN9_SLICE2_SS01_EU_PGCTL_ACK 0x806c +#define GEN9_SLICE2_SS23_EU_PGCTL_ACK 0x8070 +#define GEN9_PGCTL_SSA_EU08_ACK (1 << 0) +#define GEN9_PGCTL_SSA_EU19_ACK (1 << 2) +#define GEN9_PGCTL_SSA_EU210_ACK (1 << 4) +#define GEN9_PGCTL_SSA_EU311_ACK (1 << 6) +#define GEN9_PGCTL_SSB_EU08_ACK (1 << 8) +#define GEN9_PGCTL_SSB_EU19_ACK (1 << 10) +#define GEN9_PGCTL_SSB_EU210_ACK (1 << 12) +#define GEN9_PGCTL_SSB_EU311_ACK (1 << 14) + #define GEN7_MISCCPCTL (0x9424) #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) @@ -6185,6 +6321,7 @@ enum punit_power_well { #define GEN9_HALF_SLICE_CHICKEN5 0xe188 #define GEN9_DG_MIRROR_FIX_ENABLE (1<<5) +#define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3) #define GEN8_ROW_CHICKEN 0xe4f0 #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) @@ -6200,8 +6337,12 @@ enum punit_power_well { #define HALF_SLICE_CHICKEN3 0xe184 
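/*
 * A minimal sketch (illustration only, not part of the patch): the
 * per-generation CAGF fields defined above carry the current actual GPU
 * frequency opcode in GEN6_RPSTAT1, and are decoded exactly as in the new
 * gt_act_freq_mhz_show() added to i915_sysfs.c later in this patch. The
 * helper name read_cagf_sketch is made up for illustration. Likewise, the
 * GT_INTERVAL_FROM_US() helpers above convert microseconds into units of
 * the 1.28 us (1.33 us on Gen9) RPS counter, e.g.
 * INTERVAL_1_28_US(640) = (640 * 100) >> 7 = 500 ticks (640 / 1.28), and
 * INTERVAL_1_33_US(640) = (640 * 3) >> 2 = 480 ticks (approx. 640 / 1.33).
 */
static u32 read_cagf_sketch(struct drm_i915_private *dev_priv)
{
	u32 rpstat = I915_READ(GEN6_RPSTAT1);

	if (IS_GEN9(dev_priv))
		return (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; /* 9 bits at bit 23 */
	else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; /* 7 bits at bit 7 */
	else
		return (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; /* 7 bits at bit 8 */
}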
#define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) +#define GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1<<5) #define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) +#define GEN9_HALF_SLICE_CHICKEN7 0xe194 +#define GEN9_ENABLE_YV12_BUGFIX (1<<4) + /* Audio */ #define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) #define INTEL_AUDIO_DEVCL 0x808629FB @@ -6351,6 +6492,13 @@ enum punit_power_well { #define HSW_PWR_WELL_FORCE_ON (1<<19) #define HSW_PWR_WELL_CTL6 0x45414 +/* SKL Fuse Status */ +#define SKL_FUSE_STATUS 0x42000 +#define SKL_FUSE_DOWNLOAD_STATUS (1<<31) +#define SKL_FUSE_PG0_DIST_STATUS (1<<27) +#define SKL_FUSE_PG1_DIST_STATUS (1<<26) +#define SKL_FUSE_PG2_DIST_STATUS (1<<25) + /* Per-pipe DDI Function Control */ #define TRANS_DDI_FUNC_CTL_A 0x60400 #define TRANS_DDI_FUNC_CTL_B 0x61400 diff --git a/sys/dev/drm/i915/i915_suspend.c b/sys/dev/drm/i915/i915_suspend.c index 9f19ed38cd..cf67f82f7b 100644 --- a/sys/dev/drm/i915/i915_suspend.c +++ b/sys/dev/drm/i915/i915_suspend.c @@ -29,166 +29,6 @@ #include "intel_drv.h" #include "i915_reg.h" -static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE8(index_port, reg); - return I915_READ8(data_port); -} - -static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_READ8(st01); - I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); - return I915_READ8(VGA_AR_DATA_READ); -} - -static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_READ8(st01); - I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); - I915_WRITE8(VGA_AR_DATA_WRITE, val); -} - -static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - I915_WRITE8(index_port, reg); - I915_WRITE8(data_port, val); -} - -static void i915_save_vga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - u16 cr_index, cr_data, st01; - - /* VGA state */ - dev_priv->regfile.saveVGA0 = I915_READ(VGA0); - dev_priv->regfile.saveVGA1 = I915_READ(VGA1); - dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD); - dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev)); - - /* VGA color palette registers */ - dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK); - - /* MSR bits */ - dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ); - if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { - cr_index = VGA_CR_INDEX_CGA; - cr_data = VGA_CR_DATA_CGA; - st01 = VGA_ST01_CGA; - } else { - cr_index = VGA_CR_INDEX_MDA; - cr_data = VGA_CR_DATA_MDA; - st01 = VGA_ST01_MDA; - } - - /* CRT controller regs */ - i915_write_indexed(dev, cr_index, cr_data, 0x11, - i915_read_indexed(dev, cr_index, cr_data, 0x11) & - (~0x80)); - for (i = 0; i <= 0x24; i++) - dev_priv->regfile.saveCR[i] = - i915_read_indexed(dev, cr_index, cr_data, i); - /* Make sure we don't turn off CR group 0 writes */ - dev_priv->regfile.saveCR[0x11] &= ~0x80; - - /* Attribute controller registers */ - I915_READ8(st01); - dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX); - for (i = 0; i <= 0x14; i++) - dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0); - I915_READ8(st01); - I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX); - I915_READ8(st01); - - /* 
Graphics controller registers */ - for (i = 0; i < 9; i++) - dev_priv->regfile.saveGR[i] = - i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); - - dev_priv->regfile.saveGR[0x10] = - i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); - dev_priv->regfile.saveGR[0x11] = - i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); - dev_priv->regfile.saveGR[0x18] = - i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); - - /* Sequencer registers */ - for (i = 0; i < 8; i++) - dev_priv->regfile.saveSR[i] = - i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); -} - -static void i915_restore_vga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - u16 cr_index, cr_data, st01; - - /* VGA state */ - I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL); - - I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); - I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); - I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); - POSTING_READ(VGA_PD); - udelay(150); - - /* MSR bits */ - I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR); - if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) { - cr_index = VGA_CR_INDEX_CGA; - cr_data = VGA_CR_DATA_CGA; - st01 = VGA_ST01_CGA; - } else { - cr_index = VGA_CR_INDEX_MDA; - cr_data = VGA_CR_DATA_MDA; - st01 = VGA_ST01_MDA; - } - - /* Sequencer registers, don't write SR07 */ - for (i = 0; i < 7; i++) - i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, - dev_priv->regfile.saveSR[i]); - - /* CRT controller regs */ - /* Enable CR group 0 writes */ - i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]); - for (i = 0; i <= 0x24; i++) - i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]); - - /* Graphics controller regs */ - for (i = 0; i < 9; i++) - i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, - dev_priv->regfile.saveGR[i]); - - i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, - dev_priv->regfile.saveGR[0x10]); - i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, - dev_priv->regfile.saveGR[0x11]); - i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, - dev_priv->regfile.saveGR[0x18]); - - /* Attribute controller registers */ - I915_READ8(st01); /* switch back to index mode */ - for (i = 0; i <= 0x14; i++) - i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0); - I915_READ8(st01); /* switch back to index mode */ - I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20); - I915_READ8(st01); - - /* VGA color palette registers */ - I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK); -} - static void i915_save_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -197,11 +37,6 @@ static void i915_save_display(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 4) dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); - /* This is only meaningful in non-KMS mode */ - /* Don't regfile.save them in KMS mode */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_save_display_reg(dev); - /* LVDS state */ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); @@ -224,9 +59,6 @@ static void i915_save_display(struct drm_device *dev) /* save FBC interval */ if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL); - - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_save_vga(dev); } static void i915_restore_display(struct drm_device *dev) @@ -238,11 +70,7 @@ static void 
i915_restore_display(struct drm_device *dev) if (INTEL_INFO(dev)->gen <= 4) I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_restore_display_reg(dev); - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - mask = ~LVDS_PORT_EN; + mask = ~LVDS_PORT_EN; /* LVDS state */ if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) @@ -270,10 +98,7 @@ static void i915_restore_display(struct drm_device *dev) if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev)) I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - i915_restore_vga(dev); - else - i915_redisable_vga(dev); + i915_redisable_vga(dev); } int i915_save_state(struct drm_device *dev) @@ -285,24 +110,6 @@ int i915_save_state(struct drm_device *dev) i915_save_display(dev); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Interrupt state */ - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveDEIER = I915_READ(DEIER); - dev_priv->regfile.saveDEIMR = I915_READ(DEIMR); - dev_priv->regfile.saveGTIER = I915_READ(GTIER); - dev_priv->regfile.saveGTIMR = I915_READ(GTIMR); - dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); - dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY = - I915_READ(RSTDBYCTL); - dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); - } else { - dev_priv->regfile.saveIER = I915_READ(IER); - dev_priv->regfile.saveIMR = I915_READ(IMR); - } - } - if (IS_GEN4(dev)) pci_read_config_word(dev->pdev, GCDGMBUS, &dev_priv->regfile.saveGCDGMBUS); @@ -341,24 +148,6 @@ int i915_restore_state(struct drm_device *dev) dev_priv->regfile.saveGCDGMBUS); i915_restore_display(dev); - if (!drm_core_check_feature(dev, DRIVER_MODESET)) { - /* Interrupt state */ - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(DEIER, dev_priv->regfile.saveDEIER); - I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR); - I915_WRITE(GTIER, dev_priv->regfile.saveGTIER); - I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR); - I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR); - I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR); - I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG); - I915_WRITE(RSTDBYCTL, - dev_priv->regfile.saveMCHBAR_RENDER_STANDBY); - } else { - I915_WRITE(IER, dev_priv->regfile.saveIER); - I915_WRITE(IMR, dev_priv->regfile.saveIMR); - } - } - /* Cache mode state */ if (INTEL_INFO(dev)->gen < 7) I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | diff --git a/sys/dev/drm/i915/i915_sysfs.c b/sys/dev/drm/i915/i915_sysfs.c new file mode 100644 index 0000000000..fe3b16da6e --- /dev/null +++ b/sys/dev/drm/i915/i915_sysfs.c @@ -0,0 +1,682 @@ +/* + * Copyright © 2012 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Ben Widawsky + * + */ + +#include +#include +#include +#include "intel_drv.h" +#include "i915_drv.h" + +#if 0 +#define dev_to_drm_minor(d) dev_get_drvdata((d)) + +#ifdef CONFIG_PM +static u32 calc_residency(struct drm_device *dev, const u32 reg) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u64 raw_time; /* 32b value may overflow during fixed point math */ + u64 units = 128ULL, div = 100000ULL, bias = 100ULL; + u32 ret; + + if (!intel_enable_rc6(dev)) + return 0; + + intel_runtime_pm_get(dev_priv); + + /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ + if (IS_VALLEYVIEW(dev)) { + u32 clk_reg, czcount_30ns; + + if (IS_CHERRYVIEW(dev)) + clk_reg = CHV_CLK_CTL1; + else + clk_reg = VLV_CLK_CTL2; + + czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT; + + if (!czcount_30ns) { + WARN(!czcount_30ns, "bogus CZ count value"); + ret = 0; + goto out; + } + + units = 0; + div = 1000000ULL; + + if (IS_CHERRYVIEW(dev)) { + /* Special case for 320Mhz */ + if (czcount_30ns == 1) { + div = 10000000ULL; + units = 3125ULL; + } else { + /* chv counts are one less */ + czcount_30ns += 1; + } + } + + if (units == 0) + units = DIV_ROUND_UP_ULL(30ULL * bias, + (u64)czcount_30ns); + + if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) + units <<= 8; + + div = div * bias; + } + + raw_time = I915_READ(reg) * units; + ret = DIV_ROUND_UP_ULL(raw_time, div); + +out: + intel_runtime_pm_put(dev_priv); + return ret; +} + +static ssize_t +show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = dev_to_drm_minor(kdev); + return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev)); +} + +static ssize_t +show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = dev_get_drvdata(kdev); + u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); +} + +static ssize_t +show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = dev_to_drm_minor(kdev); + u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency); +} + +static ssize_t +show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = dev_to_drm_minor(kdev); + u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency); +} + +static ssize_t +show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *dminor = dev_get_drvdata(kdev); + u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6); + return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency); +} + +static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL); +static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL); +static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL); +static 
DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL); +static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL); + +static struct attribute *rc6_attrs[] = { + &dev_attr_rc6_enable.attr, + &dev_attr_rc6_residency_ms.attr, + NULL +}; + +static struct attribute_group rc6_attr_group = { + .name = power_group_name, + .attrs = rc6_attrs +}; + +static struct attribute *rc6p_attrs[] = { + &dev_attr_rc6p_residency_ms.attr, + &dev_attr_rc6pp_residency_ms.attr, + NULL +}; + +static struct attribute_group rc6p_attr_group = { + .name = power_group_name, + .attrs = rc6p_attrs +}; + +static struct attribute *media_rc6_attrs[] = { + &dev_attr_media_rc6_residency_ms.attr, + NULL +}; + +static struct attribute_group media_rc6_attr_group = { + .name = power_group_name, + .attrs = media_rc6_attrs +}; +#endif + +static int l3_access_valid(struct drm_device *dev, loff_t offset) +{ + if (!HAS_L3_DPF(dev)) + return -EPERM; + + if (offset % 4 != 0) + return -EINVAL; + + if (offset >= GEN7_L3LOG_SIZE) + return -ENXIO; + + return 0; +} + +static ssize_t +i915_l3_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct drm_minor *dminor = dev_to_drm_minor(dev); + struct drm_device *drm_dev = dminor->dev; + struct drm_i915_private *dev_priv = drm_dev->dev_private; + int slice = (int)(uintptr_t)attr->private; + int ret; + + count = round_down(count, 4); + + ret = l3_access_valid(drm_dev, offset); + if (ret) + return ret; + + count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count); + + ret = i915_mutex_lock_interruptible(drm_dev); + if (ret) + return ret; + + if (dev_priv->l3_parity.remap_info[slice]) + memcpy(buf, + dev_priv->l3_parity.remap_info[slice] + (offset/4), + count); + else + memset(buf, 0, count); + + mutex_unlock(&drm_dev->struct_mutex); + + return count; +} + +static ssize_t +i915_l3_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t offset, size_t count) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct drm_minor *dminor = dev_to_drm_minor(dev); + struct drm_device *drm_dev = dminor->dev; + struct drm_i915_private *dev_priv = drm_dev->dev_private; + struct intel_context *ctx; + u32 *temp = NULL; /* Just here to make handling failures easy */ + int slice = (int)(uintptr_t)attr->private; + int ret; + + if (!HAS_HW_CONTEXTS(drm_dev)) + return -ENXIO; + + ret = l3_access_valid(drm_dev, offset); + if (ret) + return ret; + + ret = i915_mutex_lock_interruptible(drm_dev); + if (ret) + return ret; + + if (!dev_priv->l3_parity.remap_info[slice]) { + temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); + if (!temp) { + mutex_unlock(&drm_dev->struct_mutex); + return -ENOMEM; + } + } + + ret = i915_gpu_idle(drm_dev); + if (ret) { + kfree(temp); + mutex_unlock(&drm_dev->struct_mutex); + return ret; + } + + /* TODO: Ideally we really want a GPU reset here to make sure errors + * aren't propagated. Since I cannot find a stable way to reset the GPU + * at this point it is left as a TODO. 
+ */ + if (temp) + dev_priv->l3_parity.remap_info[slice] = temp; + + memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count); + + /* NB: We defer the remapping until we switch to the context */ + list_for_each_entry(ctx, &dev_priv->context_list, link) + ctx->remap_slice |= (1<<slice); + + mutex_unlock(&drm_dev->struct_mutex); + + return count; +} + +static struct bin_attribute dpf_attrs = { + .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)}, + .size = GEN7_L3LOG_SIZE, + .read = i915_l3_read, + .write = i915_l3_write, + .mmap = NULL, + .private = (void *)0 +}; + +static struct bin_attribute dpf_attrs_1 = { + .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)}, + .size = GEN7_L3LOG_SIZE, + .read = i915_l3_read, + .write = i915_l3_write, + .mmap = NULL, + .private = (void *)1 +}; + +static ssize_t gt_act_freq_mhz_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + intel_runtime_pm_get(dev_priv); + + mutex_lock(&dev_priv->rps.hw_lock); + if (IS_VALLEYVIEW(dev_priv->dev)) { + u32 freq; + freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); + ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); + } else { + u32 rpstat = I915_READ(GEN6_RPSTAT1); + if (IS_GEN9(dev_priv)) + ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; + else + ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; + ret = intel_gpu_freq(dev_priv, ret); + } + mutex_unlock(&dev_priv->rps.hw_lock); + + intel_runtime_pm_put(dev_priv); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t gt_cur_freq_mhz_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + intel_runtime_pm_get(dev_priv); + + mutex_lock(&dev_priv->rps.hw_lock); + ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq); + mutex_unlock(&dev_priv->rps.hw_lock); + + intel_runtime_pm_put(dev_priv); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, + struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + return snprintf(buf, PAGE_SIZE, + "%d\n", + intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); +} + +static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + mutex_lock(&dev_priv->rps.hw_lock); + ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit); + mutex_unlock(&dev_priv->rps.hw_lock); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t gt_max_freq_mhz_store(struct device *kdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private
*dev_priv = dev->dev_private; + u32 val; + ssize_t ret; + + ret = kstrtou32(buf, 0, &val); + if (ret) + return ret; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + mutex_lock(&dev_priv->rps.hw_lock); + + val = intel_freq_opcode(dev_priv, val); + + if (val < dev_priv->rps.min_freq || + val > dev_priv->rps.max_freq || + val < dev_priv->rps.min_freq_softlimit) { + mutex_unlock(&dev_priv->rps.hw_lock); + return -EINVAL; + } + + if (val > dev_priv->rps.rp0_freq) + DRM_DEBUG("User requested overclocking to %d\n", + intel_gpu_freq(dev_priv, val)); + + dev_priv->rps.max_freq_softlimit = val; + + val = clamp_t(int, dev_priv->rps.cur_freq, + dev_priv->rps.min_freq_softlimit, + dev_priv->rps.max_freq_softlimit); + + /* We still need *_set_rps to process the new max_delay and + * update the interrupt limits and PMINTRMSK even though + * frequency request may be unchanged. */ + intel_set_rps(dev, val); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return count; +} + +static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int ret; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + mutex_lock(&dev_priv->rps.hw_lock); + ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit); + mutex_unlock(&dev_priv->rps.hw_lock); + + return snprintf(buf, PAGE_SIZE, "%d\n", ret); +} + +static ssize_t gt_min_freq_mhz_store(struct device *kdev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + ssize_t ret; + + ret = kstrtou32(buf, 0, &val); + if (ret) + return ret; + + flush_delayed_work(&dev_priv->rps.delayed_resume_work); + + mutex_lock(&dev_priv->rps.hw_lock); + + val = intel_freq_opcode(dev_priv, val); + + if (val < dev_priv->rps.min_freq || + val > dev_priv->rps.max_freq || + val > dev_priv->rps.max_freq_softlimit) { + mutex_unlock(&dev_priv->rps.hw_lock); + return -EINVAL; + } + + dev_priv->rps.min_freq_softlimit = val; + + val = clamp_t(int, dev_priv->rps.cur_freq, + dev_priv->rps.min_freq_softlimit, + dev_priv->rps.max_freq_softlimit); + + /* We still need *_set_rps to process the new min_delay and + * update the interrupt limits and PMINTRMSK even though + * frequency request may be unchanged. 
*/ + intel_set_rps(dev, val); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return count; + +} + +static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL); +static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL); +static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); +static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); + +static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL); + +static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); +static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); +static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); +static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); + +/* For now we have a static number of RP states */ +static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) +{ + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 val; + + if (attr == &dev_attr_gt_RP0_freq_mhz) + val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq); + else if (attr == &dev_attr_gt_RP1_freq_mhz) + val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq); + else if (attr == &dev_attr_gt_RPn_freq_mhz) + val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq); + else + BUG(); + + return snprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static const struct attribute *gen6_attrs[] = { + &dev_attr_gt_act_freq_mhz.attr, + &dev_attr_gt_cur_freq_mhz.attr, + &dev_attr_gt_max_freq_mhz.attr, + &dev_attr_gt_min_freq_mhz.attr, + &dev_attr_gt_RP0_freq_mhz.attr, + &dev_attr_gt_RP1_freq_mhz.attr, + &dev_attr_gt_RPn_freq_mhz.attr, + NULL, +}; + +static const struct attribute *vlv_attrs[] = { + &dev_attr_gt_act_freq_mhz.attr, + &dev_attr_gt_cur_freq_mhz.attr, + &dev_attr_gt_max_freq_mhz.attr, + &dev_attr_gt_min_freq_mhz.attr, + &dev_attr_gt_RP0_freq_mhz.attr, + &dev_attr_gt_RP1_freq_mhz.attr, + &dev_attr_gt_RPn_freq_mhz.attr, + &dev_attr_vlv_rpe_freq_mhz.attr, + NULL, +}; + +static ssize_t error_state_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct i915_error_state_file_priv error_priv; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + memset(&error_priv, 0, sizeof(error_priv)); + + ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off); + if (ret) + return ret; + + error_priv.dev = dev; + i915_error_state_get(dev, &error_priv); + + ret = i915_error_state_to_str(&error_str, &error_priv); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? 
count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_put(&error_priv); + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +static ssize_t error_state_write(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + int ret; + + DRM_DEBUG_DRIVER("Resetting error state\n"); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; + + i915_destroy_error_state(dev); + mutex_unlock(&dev->struct_mutex); + + return count; +} + +static struct bin_attribute error_state_attr = { + .attr.name = "error", + .attr.mode = S_IRUSR | S_IWUSR, + .size = 0, + .read = error_state_read, + .write = error_state_write, +}; +#endif + +void i915_setup_sysfs(struct drm_device *dev) +{ +#if 0 + int ret; + +#ifdef CONFIG_PM + if (HAS_RC6(dev)) { + ret = sysfs_merge_group(&dev->primary->kdev->kobj, + &rc6_attr_group); + if (ret) + DRM_ERROR("RC6 residency sysfs setup failed\n"); + } + if (HAS_RC6p(dev)) { + ret = sysfs_merge_group(&dev->primary->kdev->kobj, + &rc6p_attr_group); + if (ret) + DRM_ERROR("RC6p residency sysfs setup failed\n"); + } + if (IS_VALLEYVIEW(dev)) { + ret = sysfs_merge_group(&dev->primary->kdev->kobj, + &media_rc6_attr_group); + if (ret) + DRM_ERROR("Media RC6 residency sysfs setup failed\n"); + } +#endif + if (HAS_L3_DPF(dev)) { + ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs); + if (ret) + DRM_ERROR("l3 parity sysfs setup failed\n"); + + if (NUM_L3_SLICES(dev) > 1) { + ret = device_create_bin_file(dev->primary->kdev, + &dpf_attrs_1); + if (ret) + DRM_ERROR("l3 parity slice 1 setup failed\n"); + } + } + + ret = 0; + if (IS_VALLEYVIEW(dev)) + ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs); + else if (INTEL_INFO(dev)->gen >= 6) + ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs); + if (ret) + DRM_ERROR("RPS sysfs setup failed\n"); + + ret = sysfs_create_bin_file(&dev->primary->kdev->kobj, + &error_state_attr); + if (ret) + DRM_ERROR("error_state sysfs setup failed\n"); +#endif +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ +#if 0 + sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr); + if (IS_VALLEYVIEW(dev)) + sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs); + else + sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs); + device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1); + device_remove_bin_file(dev->primary->kdev, &dpf_attrs); +#ifdef CONFIG_PM + sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group); + sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group); +#endif +#endif +} diff --git a/sys/dev/drm/i915/i915_trace.h b/sys/dev/drm/i915/i915_trace.h index 5acb61c903..2e3329fb90 100644 --- a/sys/dev/drm/i915/i915_trace.h +++ b/sys/dev/drm/i915/i915_trace.h @@ -83,4 +83,9 @@ trace_i915_gem_object_change_domain(struct drm_i915_gem_object *obj, u32 read, u #define trace_i915_ppgtt_create(base) #define trace_i915_ppgtt_release(base) +#define trace_i915_page_table_entry_alloc(a,b,c,d) +#define trace_i915_page_table_entry_map(a,b,c,d,e,f) + +#define trace_i915_va_alloc(a,b,c,d) + #endif /* _I915_TRACE_H_ */ diff --git a/sys/dev/drm/i915/i915_ums.c b/sys/dev/drm/i915/i915_ums.c deleted file mode 100644 index e62f9071b2..0000000000 --- a/sys/dev/drm/i915/i915_ums.c +++ /dev/null @@ -1,552 +0,0 @@ 
-/* - * - * Copyright 2008 (c) Intel Corporation - *   Jesse Barnes <jbarnes@virtuousgeek.org> - * Copyright 2013 (c) Intel Corporation - *   Daniel Vetter <daniel.vetter@ffwll.ch> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. - * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include <drm/drmP.h> -#include <drm/i915_drm.h> -#include "intel_drv.h" -#include "i915_reg.h" - -static bool i915_pipe_enabled(struct drm_device *dev, enum i915_pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - u32 dpll_reg; - - /* On IVB, 3rd pipe shares PLL with another one */ - if (pipe > 1) - return false; - - if (HAS_PCH_SPLIT(dev)) - dpll_reg = PCH_DPLL(pipe); - else - dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B; - - return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE); -} - -static void i915_save_palette(struct drm_device *dev, enum i915_pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); - u32 *array; - int i; - - if (!i915_pipe_enabled(dev, pipe)) - return; - - if (HAS_PCH_SPLIT(dev)) - reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; - - if (pipe == PIPE_A) - array = dev_priv->regfile.save_palette_a; - else - array = dev_priv->regfile.save_palette_b; - - for (i = 0; i < 256; i++) - array[i] = I915_READ(reg + (i << 2)); -} - -static void i915_restore_palette(struct drm_device *dev, enum i915_pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B); - u32 *array; - int i; - - if (!i915_pipe_enabled(dev, pipe)) - return; - - if (HAS_PCH_SPLIT(dev)) - reg = (pipe == PIPE_A) ?
_LGC_PALETTE_A : _LGC_PALETTE_B; - - if (pipe == PIPE_A) - array = dev_priv->regfile.save_palette_a; - else - array = dev_priv->regfile.save_palette_b; - - for (i = 0; i < 256; i++) - I915_WRITE(reg + (i << 2), array[i]); -} - -void i915_save_display_reg(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - /* Cursor state */ - dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR); - dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS); - dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE); - dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR); - dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS); - dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE); - if (IS_GEN2(dev)) - dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE); - - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); - dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); - } - - /* Pipe & plane A info */ - dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF); - dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC); - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0); - dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1); - dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A); - } else { - dev_priv->regfile.saveFPA0 = I915_READ(_FPA0); - dev_priv->regfile.saveFPA1 = I915_READ(_FPA1); - dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A); - } - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD); - dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A); - dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A); - dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A); - dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A); - dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A); - dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A); - if (!HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A); - - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); - dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); - dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); - dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); - - dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); - dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); - - dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1); - dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); - dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); - - dev_priv->regfile.saveTRANSACONF = I915_READ(_PCH_TRANSACONF); - dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_PCH_TRANS_HTOTAL_A); - dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_PCH_TRANS_HBLANK_A); - dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_PCH_TRANS_HSYNC_A); - dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_PCH_TRANS_VTOTAL_A); - dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_PCH_TRANS_VBLANK_A); - dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_PCH_TRANS_VSYNC_A); - } - - dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR); - dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE); - dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE); - dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS); - dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR); - if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF); - dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF); 
- } - i915_save_palette(dev, PIPE_A); - dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT); - - /* Pipe & plane B info */ - dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF); - dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC); - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0); - dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1); - dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B); - } else { - dev_priv->regfile.saveFPB0 = I915_READ(_FPB0); - dev_priv->regfile.saveFPB1 = I915_READ(_FPB1); - dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B); - } - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD); - dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B); - dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B); - dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B); - dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B); - dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B); - dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B); - if (!HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B); - - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); - dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); - dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); - dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); - - dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); - dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); - - dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1); - dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); - dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); - - dev_priv->regfile.saveTRANSBCONF = I915_READ(_PCH_TRANSBCONF); - dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_PCH_TRANS_HTOTAL_B); - dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_PCH_TRANS_HBLANK_B); - dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_PCH_TRANS_HSYNC_B); - dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_PCH_TRANS_VTOTAL_B); - dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_PCH_TRANS_VBLANK_B); - dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_PCH_TRANS_VSYNC_B); - } - - dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR); - dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); - dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE); - dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS); - dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR); - if (INTEL_INFO(dev)->gen >= 4) { - dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF); - dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); - } - i915_save_palette(dev, PIPE_B); - dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT); - - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - for (i = 0; i < 16; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - break; - case 3: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); - case 2: - for (i = 0; i < 8; i++) - dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - break; - } - - /* CRT state */ - if (HAS_PCH_SPLIT(dev)) - dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA); - else - dev_priv->regfile.saveADPA = 
I915_READ(ADPA); - - /* Display Port state */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - dev_priv->regfile.saveDP_B = I915_READ(DP_B); - dev_priv->regfile.saveDP_C = I915_READ(DP_C); - dev_priv->regfile.saveDP_D = I915_READ(DP_D); - dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_DATA_M_G4X); - dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_DATA_M_G4X); - dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_DATA_N_G4X); - dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_DATA_N_G4X); - dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_LINK_M_G4X); - dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_LINK_M_G4X); - dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_LINK_N_G4X); - dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_LINK_N_G4X); - } - /* FIXME: regfile.save TV & SDVO state */ - - /* Panel fitter */ - if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL); - dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - } - - /* Backlight */ - if (INTEL_INFO(dev)->gen <= 4) - pci_read_config_byte(dev->pdev, PCI_LBPC, - &dev_priv->regfile.saveLBB); - - if (HAS_PCH_SPLIT(dev)) { - dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); - dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); - dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); - } else { - dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); - if (INTEL_INFO(dev)->gen >= 4) - dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); - dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); - } - - return; -} - -void i915_restore_display_reg(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int dpll_a_reg, fpa0_reg, fpa1_reg; - int dpll_b_reg, fpb0_reg, fpb1_reg; - int i; - - /* Backlight */ - if (INTEL_INFO(dev)->gen <= 4) - pci_write_config_byte(dev->pdev, PCI_LBPC, - dev_priv->regfile.saveLBB); - - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL); - I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); - /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; - * otherwise we get blank eDP screen after S3 on some machines - */ - I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2); - I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL); - } else { - if (INTEL_INFO(dev)->gen >= 4) - I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2); - I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL); - I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL); - } - - /* Panel fitter */ - if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS); - I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL); - } - - /* Display port ratios (must be done before clock is set) */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(_PIPEA_DATA_M_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_M); - I915_WRITE(_PIPEB_DATA_M_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_M); - I915_WRITE(_PIPEA_DATA_N_G4X, dev_priv->regfile.savePIPEA_GMCH_DATA_N); - I915_WRITE(_PIPEB_DATA_N_G4X, dev_priv->regfile.savePIPEB_GMCH_DATA_N); - I915_WRITE(_PIPEA_LINK_M_G4X, dev_priv->regfile.savePIPEA_DP_LINK_M); - I915_WRITE(_PIPEB_LINK_M_G4X, dev_priv->regfile.savePIPEB_DP_LINK_M); - 
I915_WRITE(_PIPEA_LINK_N_G4X, dev_priv->regfile.savePIPEA_DP_LINK_N); - I915_WRITE(_PIPEB_LINK_N_G4X, dev_priv->regfile.savePIPEB_DP_LINK_N); - } - - /* Fences */ - switch (INTEL_INFO(dev)->gen) { - case 7: - case 6: - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); - break; - case 5: - case 4: - for (i = 0; i < 16; i++) - I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]); - break; - case 3: - case 2: - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]); - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]); - break; - } - - - if (HAS_PCH_SPLIT(dev)) { - dpll_a_reg = _PCH_DPLL_A; - dpll_b_reg = _PCH_DPLL_B; - fpa0_reg = _PCH_FPA0; - fpb0_reg = _PCH_FPB0; - fpa1_reg = _PCH_FPA1; - fpb1_reg = _PCH_FPB1; - } else { - dpll_a_reg = _DPLL_A; - dpll_b_reg = _DPLL_B; - fpa0_reg = _FPA0; - fpb0_reg = _FPB0; - fpa1_reg = _FPA1; - fpb1_reg = _FPB1; - } - - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL); - I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL); - } - - /* Pipe & plane A info */ - /* Prime the clock */ - if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A & - ~DPLL_VCO_ENABLE); - POSTING_READ(dpll_a_reg); - udelay(150); - } - I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0); - I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1); - /* Actually enable it */ - I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A); - POSTING_READ(dpll_a_reg); - udelay(150); - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD); - POSTING_READ(_DPLL_A_MD); - } - udelay(150); - - /* Restore mode */ - I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A); - I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A); - I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A); - I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A); - I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A); - I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A); - if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A); - - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1); - I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1); - I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1); - I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1); - - I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL); - I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL); - - I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1); - I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ); - I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS); - - I915_WRITE(_PCH_TRANSACONF, dev_priv->regfile.saveTRANSACONF); - I915_WRITE(_PCH_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A); - I915_WRITE(_PCH_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A); - I915_WRITE(_PCH_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A); - I915_WRITE(_PCH_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A); - I915_WRITE(_PCH_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A); - I915_WRITE(_PCH_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A); - } - - /* Restore plane info */ - I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE); - I915_WRITE(_DSPAPOS, 
dev_priv->regfile.saveDSPAPOS); - I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC); - I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR); - I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE); - if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF); - I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF); - } - - I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF); - - i915_restore_palette(dev, PIPE_A); - /* Enable the plane */ - I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR); - I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); - - /* Pipe & plane B info */ - if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) { - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B & - ~DPLL_VCO_ENABLE); - POSTING_READ(dpll_b_reg); - udelay(150); - } - I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0); - I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1); - /* Actually enable it */ - I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B); - POSTING_READ(dpll_b_reg); - udelay(150); - if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { - I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD); - POSTING_READ(_DPLL_B_MD); - } - udelay(150); - - /* Restore mode */ - I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B); - I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B); - I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B); - I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B); - I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B); - I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B); - if (!HAS_PCH_SPLIT(dev)) - I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B); - - if (HAS_PCH_SPLIT(dev)) { - I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1); - I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1); - I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1); - I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1); - - I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL); - I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL); - - I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1); - I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ); - I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS); - - I915_WRITE(_PCH_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF); - I915_WRITE(_PCH_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B); - I915_WRITE(_PCH_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B); - I915_WRITE(_PCH_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B); - I915_WRITE(_PCH_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B); - I915_WRITE(_PCH_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B); - I915_WRITE(_PCH_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B); - } - - /* Restore plane info */ - I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE); - I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS); - I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC); - I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR); - I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE); - if (INTEL_INFO(dev)->gen >= 4) { - I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF); - I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF); - } - - I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF); - - i915_restore_palette(dev, PIPE_B); - /* Enable the plane */ - I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR); - I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); - - /* Cursor state */ - I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS); - 
I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR); - I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE); - I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS); - I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR); - I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE); - if (IS_GEN2(dev)) - I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE); - - /* CRT state */ - if (HAS_PCH_SPLIT(dev)) - I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA); - else - I915_WRITE(ADPA, dev_priv->regfile.saveADPA); - - /* Display Port state */ - if (SUPPORTS_INTEGRATED_DP(dev)) { - I915_WRITE(DP_B, dev_priv->regfile.saveDP_B); - I915_WRITE(DP_C, dev_priv->regfile.saveDP_C); - I915_WRITE(DP_D, dev_priv->regfile.saveDP_D); - } - /* FIXME: restore TV & SDVO state */ - - return; -} diff --git a/sys/dev/drm/i915/i915_vgpu.c b/sys/dev/drm/i915/i915_vgpu.c new file mode 100644 index 0000000000..5eee75bff1 --- /dev/null +++ b/sys/dev/drm/i915/i915_vgpu.c @@ -0,0 +1,264 @@ +/* + * Copyright(c) 2011-2015 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "intel_drv.h" +#include "i915_vgpu.h" + +/** + * DOC: Intel GVT-g guest support + * + * Intel GVT-g is a graphics virtualization technology which shares the + * GPU among multiple virtual machines on a time-sharing basis. Each + * virtual machine is presented a virtual GPU (vGPU) whose features are + * equivalent to those of the underlying physical GPU (pGPU), so the i915 + * driver can run seamlessly in a virtual machine. This file provides + * vGPU-specific optimizations when running in a virtual machine, to reduce + * the complexity of vGPU emulation and to improve the overall performance. + * + * A primary function introduced here is the so-called "address space + * ballooning" technique. Intel GVT-g partitions global graphics memory among + * multiple VMs, so each VM can directly access a portion of the memory + * without the hypervisor's intervention, e.g. filling textures or queuing + * commands. However, with this partitioning an unmodified i915 driver would + * assume a smaller graphics memory space starting at address ZERO, which + * would require the vGPU emulation module to translate graphics addresses + * between the 'guest view' and the 'host view' for all registers and command + * opcodes that contain a graphics memory address.
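Before the DOC comment above continues, a minimal sketch of the translation burden it describes (a hypothetical helper for illustration, not part of this patch or of GVT-g): without ballooning, the emulation layer would have to rebase every graphics memory address (GMA) it intercepts between the guest's zero-based view and the host's partitioned view.

	#include <stdint.h>

	/* Hypothetical illustration only: rebase a guest GMA into the host
	 * view, where vgpu_gma_base is the start of this VM's slice. */
	static inline uint64_t gma_guest_to_host(uint64_t guest_gma,
						 uint64_t vgpu_gma_base)
	{
		return guest_gma + vgpu_gma_base;
	}

Ballooning removes exactly this step: the guest already works with host-view addresses, so the emulation module only has to scan and validate them.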
+ * To reduce this complexity, Intel GVT-g introduces "address space ballooning": + * the exact partitioning layout is made known to each guest i915 driver, which + * then reserves the non-allocated portions and prevents them from being + * allocated. Thus the vGPU emulation module only needs to scan and validate + * graphics addresses, without the complexity of address translation. + * + */ + +/** + * i915_check_vgpu - detect virtual GPU + * @dev: drm device * + * + * This function is called at the initialization stage, to detect whether + * the driver is running on a vGPU. + */ +void i915_check_vgpu(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + uint64_t magic; + uint32_t version; + + BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); + + if (!IS_HASWELL(dev)) + return; + + magic = readq(dev_priv->regs + vgtif_reg(magic)); + if (magic != VGT_MAGIC) + return; + + version = INTEL_VGT_IF_VERSION_ENCODE( + readw(dev_priv->regs + vgtif_reg(version_major)), + readw(dev_priv->regs + vgtif_reg(version_minor))); + if (version != INTEL_VGT_IF_VERSION) { + DRM_INFO("VGT interface version mismatch!\n"); + return; + } + + dev_priv->vgpu.active = true; + DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); +} + +struct _balloon_info_ { + /* + * There are up to 2 regions per mappable/unmappable graphic + * memory that might be ballooned. Here, index 0/1 is for mappable + * graphic memory, 2/3 for unmappable graphic memory. + */ + struct drm_mm_node space[4]; +}; + +static struct _balloon_info_ bl_info; + +/** + * intel_vgt_deballoon - deballoon reserved graphics address chunks + * + * This function is called to deallocate the ballooned-out graphic memory, when + * the driver is unloaded or when ballooning fails. + */ +void intel_vgt_deballoon(void) +{ + int i; + + DRM_DEBUG("VGT deballoon.\n"); + + for (i = 0; i < 4; i++) { + if (bl_info.space[i].allocated) + drm_mm_remove_node(&bl_info.space[i]); + } + + memset(&bl_info, 0, sizeof(bl_info)); +} + +static int vgt_balloon_space(struct drm_mm *mm, + struct drm_mm_node *node, + unsigned long start, unsigned long end) +{ + unsigned long size = end - start; + + if (start == end) + return -EINVAL; + + DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", + start, end, size / 1024); + + node->start = start; + node->size = size; + + return drm_mm_reserve_node(mm, node); +} + +/** + * intel_vgt_balloon - balloon out reserved graphics address chunks + * @dev: drm device + * + * This function is called at the initialization stage, to balloon out the + * graphic address space allocated to other vGPUs, by marking these spaces as + * reserved. The ballooning-related knowledge (starting address and size of + * the mappable/unmappable graphic memory) is described in the vgt_if structure + * in a reserved mmio range. + * + * To give an example, the drawing below depicts one typical scenario after + * ballooning. Here vGPU1 has two pieces of its graphic address space ballooned + * out, one each for the mappable and the non-mappable part. From the vGPU1 + * point of view, the total size is the same as the physical one, with the + * start address of its graphic space being zero. Yet some portions are + * ballooned out (the shaded parts, which are marked as reserved by the drm + * allocator). From the host point of view, the graphic address space is + * partitioned by multiple + * vGPUs in different VMs.
+ * + * vGPU1 view Host view + * 0 ------> +-----------+ +-----------+ + * ^ |///////////| | vGPU3 | + * | |///////////| +-----------+ + * | |///////////| | vGPU2 | + * | +-----------+ +-----------+ + * mappable GM | available | ==> | vGPU1 | + * | +-----------+ +-----------+ + * | |///////////| | | + * v |///////////| | Host | + * +=======+===========+ +===========+ + * ^ |///////////| | vGPU3 | + * | |///////////| +-----------+ + * | |///////////| | vGPU2 | + * | +-----------+ +-----------+ + * unmappable GM | available | ==> | vGPU1 | + * | +-----------+ +-----------+ + * | |///////////| | | + * | |///////////| | Host | + * v |///////////| | | + * total GM size ------> +-----------+ +-----------+ + * + * Returns: + * zero on success, non-zero if configuration invalid or ballooning failed + */ +int intel_vgt_balloon(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; + unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total; + + unsigned long mappable_base, mappable_size, mappable_end; + unsigned long unmappable_base, unmappable_size, unmappable_end; + int ret; + + mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); + mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); + unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); + unmappable_size = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.size)); + + mappable_end = mappable_base + mappable_size; + unmappable_end = unmappable_base + unmappable_size; + + DRM_INFO("VGT ballooning configuration:\n"); + DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n", + mappable_base, mappable_size / 1024); + DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n", + unmappable_base, unmappable_size / 1024); + + if (mappable_base < ggtt_vm->start || + mappable_end > dev_priv->gtt.mappable_end || + unmappable_base < dev_priv->gtt.mappable_end || + unmappable_end > ggtt_vm_end) { + DRM_ERROR("Invalid ballooning configuration!\n"); + return -EINVAL; + } + + /* Unmappable graphic memory ballooning */ + if (unmappable_base > dev_priv->gtt.mappable_end) { + ret = vgt_balloon_space(&ggtt_vm->mm, + &bl_info.space[2], + dev_priv->gtt.mappable_end, + unmappable_base); + + if (ret) + goto err; + } + + /* + * No need to partition out the last physical page, + * because it is reserved to the guard page. + */ + if (unmappable_end < ggtt_vm_end - PAGE_SIZE) { + ret = vgt_balloon_space(&ggtt_vm->mm, + &bl_info.space[3], + unmappable_end, + ggtt_vm_end - PAGE_SIZE); + if (ret) + goto err; + } + + /* Mappable graphic memory ballooning */ + if (mappable_base > ggtt_vm->start) { + ret = vgt_balloon_space(&ggtt_vm->mm, + &bl_info.space[0], + ggtt_vm->start, mappable_base); + + if (ret) + goto err; + } + + if (mappable_end < dev_priv->gtt.mappable_end) { + ret = vgt_balloon_space(&ggtt_vm->mm, + &bl_info.space[1], + mappable_end, + dev_priv->gtt.mappable_end); + + if (ret) + goto err; + } + + DRM_INFO("VGT balloon successfully\n"); + return 0; + +err: + DRM_ERROR("VGT balloon fail\n"); + intel_vgt_deballoon(); + return ret; +} diff --git a/sys/dev/drm/i915/i915_vgpu.h b/sys/dev/drm/i915/i915_vgpu.h new file mode 100644 index 0000000000..97a88b5f6a --- /dev/null +++ b/sys/dev/drm/i915/i915_vgpu.h @@ -0,0 +1,91 @@ +/* + * Copyright(c) 2011-2015 Intel Corporation. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _I915_VGPU_H_ +#define _I915_VGPU_H_ + +/* The MMIO offset of the shared info between guest and host emulator */ +#define VGT_PVINFO_PAGE 0x78000 +#define VGT_PVINFO_SIZE 0x1000 + +/* + * The following structure pages are defined in GEN MMIO space + * for virtualization. (One page for now) + */ +#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ +#define VGT_VERSION_MAJOR 1 +#define VGT_VERSION_MINOR 0 + +#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor)) +#define INTEL_VGT_IF_VERSION \ + INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR) + +struct vgt_if { + uint64_t magic; /* VGT_MAGIC */ + uint16_t version_major; + uint16_t version_minor; + uint32_t vgt_id; /* ID of vGT instance */ + uint32_t rsv1[12]; /* pad to offset 0x40 */ + /* + * Data structure to describe the ballooning info of resources. + * Each VM can only have one contiguous area for now. + * (Scattered resources may be supported in the future.) + * (starting from offset 0x40) + */ + struct { + /* Aperture register ballooning */ + struct { + uint32_t base; + uint32_t size; + } mappable_gmadr; /* aperture */ + /* GMADR register ballooning */ + struct { + uint32_t base; + uint32_t size; + } nonmappable_gmadr; /* non aperture */ + /* allowed fence registers */ + uint32_t fence_num; + uint32_t rsv2[3]; + } avail_rs; /* available/assigned resource */ + uint32_t rsv3[0x200 - 24]; /* pad to half page */ + /* + * The bottom half page is for responses from the Gfx driver to the + * hypervisor. These are treated as reserved fields for now.
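As a cross-check on the layout above (a worked example, assuming the __packed layout holds): the fields before rsv3 occupy 8 (magic) + 4 (version) + 4 (vgt_id) + 48 (rsv1) + 32 (avail_rs) = 96 bytes = 24 u32s, so rsv3[0x200 - 24] pads the top half to 0x200 u32s (2 KiB); rsv4, display_ready and rsv5[0x200 - 2] then fill the bottom 2 KiB, for 0x1000 bytes in total, exactly VGT_PVINFO_SIZE, which the BUILD_BUG_ON() in i915_check_vgpu() verifies at compile time. Likewise INTEL_VGT_IF_VERSION_ENCODE(1, 0) evaluates to (1 << 16) | 0 = 0x00010000, the value compared against the two readw() results during detection.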
+ */ + uint32_t rsv4; + uint32_t display_ready; /* ready for display owner switch */ + uint32_t rsv5[0x200 - 2]; /* pad to one page */ +} __packed; + +#define vgtif_reg(x) \ + (VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x) + +/* vGPU display status to be used by the host side */ +#define VGT_DRV_DISPLAY_NOT_READY 0 +#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */ + +extern void i915_check_vgpu(struct drm_device *dev); +extern int intel_vgt_balloon(struct drm_device *dev); +extern void intel_vgt_deballoon(void); + +#endif /* _I915_VGPU_H_ */ diff --git a/sys/dev/drm/i915/intel_atomic.c b/sys/dev/drm/i915/intel_atomic.c index 5014222722..80d5c2d9c3 100644 --- a/sys/dev/drm/i915/intel_atomic.c +++ b/sys/dev/drm/i915/intel_atomic.c @@ -134,9 +134,9 @@ int intel_atomic_commit(struct drm_device *dev, * FIXME: The proper sequence here will eventually be: * * drm_atomic_helper_swap_state(dev, state) - * drm_atomic_helper_commit_pre_planes(dev, state); + * drm_atomic_helper_commit_modeset_disables(dev, state); * drm_atomic_helper_commit_planes(dev, state); - * drm_atomic_helper_commit_post_planes(dev, state); + * drm_atomic_helper_commit_modeset_enables(dev, state); * drm_atomic_helper_wait_for_vblanks(dev, state); * drm_atomic_helper_cleanup_planes(dev, state); * drm_atomic_state_free(state); @@ -214,12 +214,18 @@ struct drm_crtc_state * intel_crtc_duplicate_state(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *crtc_state; if (WARN_ON(!intel_crtc->config)) - return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL); + crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); + else + crtc_state = kmemdup(intel_crtc->config, + sizeof(*intel_crtc->config), GFP_KERNEL); - return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config), - GFP_KERNEL); + if (crtc_state) + crtc_state->base.crtc = crtc; + + return &crtc_state->base; } /** diff --git a/sys/dev/drm/i915/intel_atomic_plane.c b/sys/dev/drm/i915/intel_atomic_plane.c index 9e6f727dfd..976b891565 100644 --- a/sys/dev/drm/i915/intel_atomic_plane.c +++ b/sys/dev/drm/i915/intel_atomic_plane.c @@ -203,16 +203,8 @@ intel_plane_atomic_get_property(struct drm_plane *plane, struct drm_property *property, uint64_t *val) { - struct drm_mode_config *config = &plane->dev->mode_config; - - if (property == config->rotation_property) { - *val = state->rotation; - } else { - DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name); - return -EINVAL; - } - - return 0; + DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name); + return -EINVAL; } /** @@ -233,14 +225,6 @@ intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_property *property, uint64_t val) { - struct drm_mode_config *config = &plane->dev->mode_config; - - if (property == config->rotation_property) { - state->rotation = val; - } else { - DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name); - return -EINVAL; - } - - return 0; + DRM_DEBUG_KMS("Unknown plane property '%s'\n", property->name); + return -EINVAL; } diff --git a/sys/dev/drm/i915/intel_bios.c b/sys/dev/drm/i915/intel_bios.c index a817b8d253..bf2a280a89 100644 --- a/sys/dev/drm/i915/intel_bios.c +++ b/sys/dev/drm/i915/intel_bios.c @@ -663,6 +663,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) edp_link_params->vswing); break; } + + if (bdb->version >= 173) { + uint8_t vswing; + + vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; + dev_priv->vbt.edp_low_vswing = vswing == 0; + } } static void 
diff --git a/sys/dev/drm/i915/intel_bios.h b/sys/dev/drm/i915/intel_bios.h index a6a8710f66..6afd5be333 100644 --- a/sys/dev/drm/i915/intel_bios.h +++ b/sys/dev/drm/i915/intel_bios.h @@ -554,6 +554,7 @@ struct bdb_edp { /* ith bit indicates enabled/disabled for (i+1)th panel */ u16 edp_s3d_feature; u16 edp_t3_optimization; + u64 edp_vswing_preemph; /* v173 */ } __packed; struct psr_table { diff --git a/sys/dev/drm/i915/intel_crt.c b/sys/dev/drm/i915/intel_crt.c index b34f90ab60..be39550115 100644 --- a/sys/dev/drm/i915/intel_crt.c +++ b/sys/dev/drm/i915/intel_crt.c @@ -689,7 +689,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev)) { + if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) { status = connector_status_disconnected; goto out; } @@ -705,9 +705,11 @@ intel_crt_detect(struct drm_connector *connector, bool force) if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; - else + else if (INTEL_INFO(dev)->gen < 4) status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(connector, &tmp); + else + status = connector_status_unknown; + intel_release_load_detect_pipe(connector, &tmp, &ctx); } else status = connector_status_unknown; @@ -793,6 +795,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { .destroy = intel_crt_destroy, .set_property = intel_crt_set_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_get_property = intel_connector_atomic_get_property, }; @@ -847,7 +850,7 @@ void intel_crt_init(struct drm_device *dev) if (!crt) return; - intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); + intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(crt); return; diff --git a/sys/dev/drm/i915/intel_ddi.c b/sys/dev/drm/i915/intel_ddi.c index cc5d6d7bd0..1aafc80230 100644 --- a/sys/dev/drm/i915/intel_ddi.c +++ b/sys/dev/drm/i915/intel_ddi.c @@ -140,18 +140,24 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { { 0x00004014, 0x00000087 }, }; +/* eDP 1.4 low vswing translation parameters */ +static const struct ddi_buf_trans skl_ddi_translations_edp[] = { + { 0x00000018, 0x000000a8 }, + { 0x00002016, 0x000000ab }, + { 0x00006012, 0x000000a2 }, + { 0x00008010, 0x00000088 }, + { 0x00000018, 0x000000ab }, + { 0x00004014, 0x000000a2 }, + { 0x00006012, 0x000000a6 }, + { 0x00000018, 0x000000a2 }, + { 0x00005013, 0x0000009c }, + { 0x00000018, 0x00000088 }, +}; + + static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { /* Idx NT mV T mV db */ - { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */ - { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */ - { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */ - { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */ - { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */ - { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */ - { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */ - { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */ - { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */ - { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */ + { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */ }; enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) @@ -188,7 +194,8 @@ static void 
intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; - int i, n_hdmi_entries, hdmi_800mV_0dB; + int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, + size; int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; const struct ddi_buf_trans *ddi_translations_fdi; const struct ddi_buf_trans *ddi_translations_dp; @@ -199,60 +206,85 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) if (IS_SKYLAKE(dev)) { ddi_translations_fdi = NULL; ddi_translations_dp = skl_ddi_translations_dp; - ddi_translations_edp = skl_ddi_translations_dp; + n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp); + if (dev_priv->vbt.edp_low_vswing) { + ddi_translations_edp = skl_ddi_translations_edp; + n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp); + } else { + ddi_translations_edp = skl_ddi_translations_dp; + n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp); + } + + /* + * On SKL, the recommendation from the hw team is to always use + * a certain type of level shifter (and thus the corresponding + * 800mV+2dB entry). Given that's the only validated entry, we + * override what is in the VBT, at least until further notice. + */ + hdmi_level = 0; ddi_translations_hdmi = skl_ddi_translations_hdmi; n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); - hdmi_800mV_0dB = 7; + hdmi_default_entry = 0; } else if (IS_BROADWELL(dev)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_edp = bdw_ddi_translations_edp; ddi_translations_hdmi = bdw_ddi_translations_hdmi; + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_800mV_0dB = 7; + hdmi_default_entry = 7; } else if (IS_HASWELL(dev)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; ddi_translations_edp = hsw_ddi_translations_dp; ddi_translations_hdmi = hsw_ddi_translations_hdmi; + n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); - hdmi_800mV_0dB = 6; + hdmi_default_entry = 6; } else { WARN(1, "ddi translation table missing\n"); ddi_translations_edp = bdw_ddi_translations_dp; ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; ddi_translations_hdmi = bdw_ddi_translations_hdmi; + n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); + n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_800mV_0dB = 7; + hdmi_default_entry = 7; } switch (port) { case PORT_A: ddi_translations = ddi_translations_edp; + size = n_edp_entries; break; case PORT_B: case PORT_C: ddi_translations = ddi_translations_dp; + size = n_dp_entries; break; case PORT_D: - if (intel_dp_is_edp(dev, PORT_D)) + if (intel_dp_is_edp(dev, PORT_D)) { ddi_translations = ddi_translations_edp; - else + size = n_edp_entries; + } else { ddi_translations = ddi_translations_dp; + size = n_dp_entries; + } break; case PORT_E: if (ddi_translations_fdi) ddi_translations = ddi_translations_fdi; else ddi_translations = ddi_translations_dp; + size = n_dp_entries; break; default: BUG(); } - for (i = 0, reg = DDI_BUF_TRANS(port); - i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) { + for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) { I915_WRITE(reg, ddi_translations[i].trans1); reg += 
4; I915_WRITE(reg, ddi_translations[i].trans2); @@ -262,7 +294,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) /* Choose a good default if VBT is badly populated */ if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || hdmi_level >= n_hdmi_entries) - hdmi_level = hdmi_800mV_0dB; + hdmi_level = hdmi_default_entry; /* Entry 9 is for HDMI: */ I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1); @@ -461,17 +493,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) } static struct intel_encoder * -intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc) +intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct intel_encoder *intel_encoder, *ret = NULL; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_encoder *ret = NULL; + struct drm_atomic_state *state; int num_encoders = 0; + int i; - for_each_intel_encoder(dev, intel_encoder) { - if (intel_encoder->new_crtc == crtc) { - ret = intel_encoder; - num_encoders++; - } + state = crtc_state->base.state; + + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i] || + state->connector_states[i]->crtc != crtc_state->base.crtc) + continue; + + ret = to_intel_encoder(state->connector_states[i]->best_encoder); + num_encoders++; } WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders, @@ -753,9 +791,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, case DPLL_CRTL1_LINK_RATE_810: link_clock = 81000; break; + case DPLL_CRTL1_LINK_RATE_1080: + link_clock = 108000; + break; case DPLL_CRTL1_LINK_RATE_1350: link_clock = 135000; break; + case DPLL_CRTL1_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CRTL1_LINK_RATE_2160: + link_clock = 216000; + break; case DPLL_CRTL1_LINK_RATE_2700: link_clock = 270000; break; @@ -1176,7 +1223,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, { struct drm_device *dev = intel_crtc->base.dev; struct intel_encoder *intel_encoder = - intel_ddi_get_crtc_new_encoder(intel_crtc); + intel_ddi_get_crtc_new_encoder(crtc_state); int clock = crtc_state->port_clock; if (IS_SKYLAKE(dev)) @@ -2154,7 +2201,7 @@ intel_ddi_init_dp_connector(struct intel_digital_port *intel_dig_port) struct intel_connector *connector; enum port port = intel_dig_port->port; - connector = kzalloc(sizeof(*connector), GFP_KERNEL); + connector = intel_connector_alloc(); if (!connector) return NULL; @@ -2173,7 +2220,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port) struct intel_connector *connector; enum port port = intel_dig_port->port; - connector = kzalloc(sizeof(*connector), GFP_KERNEL); + connector = intel_connector_alloc(); if (!connector) return NULL; diff --git a/sys/dev/drm/i915/intel_display.c b/sys/dev/drm/i915/intel_display.c index 58ede79020..d09236618a 100644 --- a/sys/dev/drm/i915/intel_display.c +++ b/sys/dev/drm/i915/intel_display.c @@ -79,7 +79,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *old_fb); + int x, int y, struct drm_framebuffer *old_fb, + struct drm_atomic_state *state); static int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd2 *mode_cmd, @@ -387,7 +388,7 @@ static const intel_limit_t intel_limits_chv = { * them would make no difference. 
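For orientation, the intel_limits_* tables here and the *_find_best_dpll() helpers later in this hunk all bound the same divider chain. A minimal sketch in the i9xx-style encoding (an illustration, not code from this patch; CHV differs in detail, e.g. its table keeps m2 in .22 fixed point, hence the << 22 bounds):

	/* Sketch of the dot clock the limits bound, i9xx-style dividers,
	 * all frequencies in kHz:
	 *   m = 5 * (m1 + 2) + (m2 + 2), vco = refclk * m / (n + 2),
	 *   dot = vco / (p1 * p2)
	 */
	static int i9xx_dot_clock_khz(int refclk, int n, int m1, int m2,
				      int p1, int p2)
	{
		int m = 5 * (m1 + 2) + (m2 + 2);
		int vco = refclk * m / (n + 2);

		return vco / (p1 * p2);
	}

The find_best_dpll() variants iterate those dividers within the per-platform limits and keep the candidate whose dot clock lands closest to the requested port clock.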
*/ .dot = { .min = 25000 * 5, .max = 540000 * 5}, - .vco = { .min = 4860000, .max = 6700000 }, + .vco = { .min = 4800000, .max = 6480000 }, .n = { .min = 1, .max = 1 }, .m1 = { .min = 2, .max = 2 }, .m2 = { .min = 24 << 22, .max = 175 << 22 }, @@ -426,25 +427,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) * intel_pipe_has_type() but looking at encoder->new_crtc instead of * encoder->crtc. */ -static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type) +static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, + int type) { - struct drm_device *dev = crtc->base.dev; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; struct intel_encoder *encoder; + int i, num_connectors = 0; + + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) + continue; + + num_connectors++; - for_each_intel_encoder(dev, encoder) - if (encoder->new_crtc == crtc && encoder->type == type) + encoder = to_intel_encoder(connector_state->best_encoder); + if (encoder->type == type) return true; + } + + WARN_ON(num_connectors == 0); return false; } -static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, - int refclk) +static const intel_limit_t * +intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const intel_limit_t *limit; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_is_dual_link_lvds(dev)) { if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; @@ -462,20 +479,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, return limit; } -static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) +static const intel_limit_t * +intel_g4x_limit(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const intel_limit_t *limit; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_is_dual_link_lvds(dev)) limit = &intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; - } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) || - intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) { + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { limit = &intel_limits_g4x_hdmi; - } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) { + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { limit = &intel_limits_g4x_sdvo; } else /* The option is for other outputs */ limit = &intel_limits_i9xx_sdvo; @@ -483,17 +501,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) return limit; } -static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) +static const intel_limit_t * +intel_limit(struct intel_crtc_state *crtc_state, int refclk) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const intel_limit_t *limit; if (HAS_PCH_SPLIT(dev)) - limit = intel_ironlake_limit(crtc, refclk); + limit = 
intel_ironlake_limit(crtc_state, refclk); else if (IS_G4X(dev)) { - limit = intel_g4x_limit(crtc); + limit = intel_g4x_limit(crtc_state); } else if (IS_PINEVIEW(dev)) { - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) limit = &intel_limits_pineview_lvds; else limit = &intel_limits_pineview_sdvo; @@ -502,14 +521,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) } else if (IS_VALLEYVIEW(dev)) { limit = &intel_limits_vlv; } else if (!IS_GEN2(dev)) { - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i9xx_lvds; else limit = &intel_limits_i9xx_sdvo; } else { - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; - else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO)) + else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) limit = &intel_limits_i8xx_dvo; else limit = &intel_limits_i8xx_dac; @@ -596,15 +615,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static bool -i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +i9xx_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int err = target; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. * We haven't figured out how to reliably set up different @@ -657,15 +678,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, } static bool -pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +pnv_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int err = target; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. 
* We haven't figured out how to reliably set up different @@ -716,10 +739,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, } static bool -g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +g4x_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int max_n; @@ -728,7 +753,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int err_most = (target >> 8) + (target >> 9); found = false; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_is_dual_link_lvds(dev)) clock.p2 = limit->p2.p2_fast; else @@ -772,11 +797,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, return found; } +/* + * Check if the calculated PLL configuration is more optimal compared to the + * best configuration and error found so far. Return the calculated error. + */ +static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, + const intel_clock_t *calculated_clock, + const intel_clock_t *best_clock, + unsigned int best_error_ppm, + unsigned int *error_ppm) +{ + /* + * For CHV ignore the error and consider only the P value. + * Prefer a bigger P value based on HW requirements. + */ + if (IS_CHERRYVIEW(dev)) { + *error_ppm = 0; + + return calculated_clock->p > best_clock->p; + } + + if (WARN_ON_ONCE(!target_freq)) + return false; + + *error_ppm = div_u64(1000000ULL * + abs(target_freq - calculated_clock->dot), + target_freq); + /* + * Prefer a better P value over a better (smaller) error if the error + * is small. Ensure this preference for future configurations too by + * setting the error to 0. 
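 * A worked example with hypothetical numbers: target_freq = 270000 kHz and
 * calculated_clock->dot = 269973 kHz give
 *   *error_ppm = 1000000 * 27 / 270000 = 100 ppm,
 * which just misses the "small error, prefer bigger P" window tested below.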
+ */ + if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { + *error_ppm = 0; + + return true; + } + + return *error_ppm + 10 < best_error_ppm; +} + static bool -vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +vlv_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; unsigned int bestppm = 1000000; @@ -796,7 +863,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, clock.p = clock.p1 * clock.p2; /* based on hardware requirement, prefer bigger m1,m2 values */ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { - unsigned int ppm, diff; + unsigned int ppm; clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, refclk * clock.m1); @@ -807,20 +874,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, &clock)) continue; - diff = abs(clock.dot - target); - ppm = div_u64(1000000ULL * diff, target); - - if (ppm < 100 && clock.p > best_clock->p) { - bestppm = 0; - *best_clock = clock; - found = true; - } + if (!vlv_PLL_is_optimal(dev, target, + &clock, + best_clock, + bestppm, &ppm)) + continue; - if (bestppm >= 10 && ppm < bestppm - 10) { - bestppm = ppm; - *best_clock = clock; - found = true; - } + *best_clock = clock; + bestppm = ppm; + found = true; } } } @@ -830,16 +892,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, } static bool -chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +chv_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; + unsigned int best_error_ppm; intel_clock_t clock; uint64_t m2; int found = false; memset(best_clock, 0, sizeof(*best_clock)); + best_error_ppm = 1000000; /* * Based on hardware doc, the n always set to 1, and m1 always @@ -853,6 +919,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; clock.p2 -= clock.p2 > 10 ? 2 : 1) { + unsigned int error_ppm; clock.p = clock.p1 * clock.p2; @@ -869,12 +936,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, if (!intel_PLL_is_valid(dev, limit, &clock)) continue; - /* based on hardware requirement, prefer bigger p - */ - if (clock.p > best_clock->p) { - *best_clock = clock; - found = true; - } + if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, + best_error_ppm, &error_ppm)) + continue; + + *best_clock = clock; + best_error_ppm = error_ppm; + found = true; } } @@ -893,8 +961,12 @@ bool intel_crtc_active(struct drm_crtc *crtc) * * We can ditch the crtc->primary->fb check as soon as we can * properly reconstruct framebuffers. + * + * FIXME: The intel_crtc->active here should be switched to + * crtc->state->active once we have proper CRTC states wired up + * for atomic. 
*/ - return intel_crtc->active && crtc->primary->fb && + return intel_crtc->active && crtc->primary->state->fb && intel_crtc->config->base.adjusted_mode.crtc_clock; } @@ -1297,14 +1369,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, u32 val; if (INTEL_INFO(dev)->gen >= 9) { - for_each_sprite(pipe, sprite) { + for_each_sprite(dev_priv, pipe, sprite) { val = I915_READ(PLANE_CTL(pipe, sprite)); I915_STATE_WARN(val & PLANE_CTL_ENABLE, "plane %d assertion failure, should be off on pipe %c but is still active\n", sprite, pipe_name(pipe)); } } else if (IS_VALLEYVIEW(dev)) { - for_each_sprite(pipe, sprite) { + for_each_sprite(dev_priv, pipe, sprite) { reg = SPCNTR(pipe, sprite); val = I915_READ(reg); I915_STATE_WARN(val & SP_ENABLE, @@ -2186,30 +2258,109 @@ static bool need_vtd_wa(struct drm_device *dev) return false; } -int -intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling) +unsigned int +intel_tile_height(struct drm_device *dev, uint32_t pixel_format, + uint64_t fb_format_modifier) +{ + unsigned int tile_height; + uint32_t pixel_bytes; + + switch (fb_format_modifier) { + case DRM_FORMAT_MOD_NONE: + tile_height = 1; + break; + case I915_FORMAT_MOD_X_TILED: + tile_height = IS_GEN2(dev) ? 16 : 8; + break; + case I915_FORMAT_MOD_Y_TILED: + tile_height = 32; + break; + case I915_FORMAT_MOD_Yf_TILED: + pixel_bytes = drm_format_plane_cpp(pixel_format, 0); + switch (pixel_bytes) { + default: + case 1: + tile_height = 64; + break; + case 2: + case 4: + tile_height = 32; + break; + case 8: + tile_height = 16; + break; + case 16: + WARN_ONCE(1, + "128-bit pixels are not supported for display!"); + tile_height = 16; + break; + } + break; + default: + MISSING_CASE(fb_format_modifier); + tile_height = 1; + break; + } + + return tile_height; +} + +unsigned int +intel_fb_align_height(struct drm_device *dev, unsigned int height, + uint32_t pixel_format, uint64_t fb_format_modifier) { - int tile_height; + return ALIGN(height, intel_tile_height(dev, pixel_format, + fb_format_modifier)); +} + +static int +intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state) +{ + struct intel_rotation_info *info = &view->rotation_info; + + *view = i915_ggtt_view_normal; + + if (!plane_state) + return 0; + + if (!intel_rotation_90_or_270(plane_state->rotation)) + return 0; + + *view = i915_ggtt_view_rotated; + + info->height = fb->height; + info->pixel_format = fb->pixel_format; + info->pitch = fb->pitches[0]; + info->fb_modifier = fb->modifier[0]; + + if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED || + info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) { + DRM_DEBUG_KMS( + "Y or Yf tiling is needed for 90/270 rotation!\n"); + return -EINVAL; + } - tile_height = tiling ? (IS_GEN2(dev) ? 
16 : 8) : 1; - return ALIGN(height, tile_height); + return 0; } int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state, struct intel_engine_cs *pipelined) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct i915_ggtt_view view; u32 alignment; int ret; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - switch (obj->tiling_mode) { - case I915_TILING_NONE: + switch (fb->modifier[0]) { + case DRM_FORMAT_MOD_NONE: if (INTEL_INFO(dev)->gen >= 9) alignment = 256 * 1024; else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) @@ -2219,7 +2370,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, else alignment = 64 * 1024; break; - case I915_TILING_X: + case I915_FORMAT_MOD_X_TILED: if (INTEL_INFO(dev)->gen >= 9) alignment = 256 * 1024; else { @@ -2227,13 +2378,22 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, alignment = 0; } break; - case I915_TILING_Y: - WARN(1, "Y tiled bo slipped through, driver bug!\n"); - return -EINVAL; + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + if (WARN_ONCE(INTEL_INFO(dev)->gen < 9, + "Y tiling bo slipped through, driver bug!\n")) + return -EINVAL; + alignment = 1 * 1024 * 1024; + break; default: - BUG(); + MISSING_CASE(fb->modifier[0]); + return -EINVAL; } + ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); + if (ret) + return ret; + /* Note that the w/a also requires 64 PTE of padding following the * bo. We currently fill all unused PTE with the shadow page and so * we should always have valid PTE following the scanout preventing @@ -2252,7 +2412,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, intel_runtime_pm_get(dev_priv); dev_priv->mm.interruptible = false; - ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); + ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, + &view); if (ret) goto err_interruptible; @@ -2272,19 +2433,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, return 0; err_unpin: - i915_gem_object_unpin_from_display_plane(obj); + i915_gem_object_unpin_from_display_plane(obj, &view); err_interruptible: dev_priv->mm.interruptible = true; intel_runtime_pm_put(dev_priv); return ret; } -void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) +static void intel_unpin_fb_obj(struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state) { + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct i915_ggtt_view view; + int ret; + WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); + ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); + WARN_ONCE(ret, "Couldn't get view from plane state!"); + i915_gem_object_unpin_fence(obj); - i915_gem_object_unpin_from_display_plane(obj); + i915_gem_object_unpin_from_display_plane(obj, &view); } /* Computes the linear offset to the base tile and adjusts x, y. 
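 * For the untiled case this works out to (a sketch of the math, assuming
 * the 4 KiB surface alignment used on gen4+):
 *   offset = y * pitch + x * cpp;
 *   base   = offset & ~4095;                 (page-aligned scanout base)
 *   y      = (offset & 4095) / pitch;        (residual rows inside the page)
 *   x      = ((offset & 4095) - y * pitch) / cpp;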
bytes per pixel @@ -2362,49 +2531,43 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) } static bool -intel_alloc_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) +intel_alloc_initial_plane_obj(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_gem_object *obj = NULL; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); - u32 size_aligned = round_up(plane_config->base + plane_config->size, - PAGE_SIZE); - - size_aligned -= base_aligned; + struct drm_framebuffer *fb = &plane_config->fb->base; + u32 base = plane_config->base; if (plane_config->size == 0) return false; - obj = i915_gem_object_create_stolen_for_preallocated(dev, - base_aligned, - base_aligned, - size_aligned); + obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, + plane_config->size); if (!obj) return false; obj->tiling_mode = plane_config->tiling; if (obj->tiling_mode == I915_TILING_X) - obj->stride = crtc->base.primary->fb->pitches[0]; + obj->stride = fb->pitches[0]; - mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format; - mode_cmd.width = crtc->base.primary->fb->width; - mode_cmd.height = crtc->base.primary->fb->height; - mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0]; + mode_cmd.pixel_format = fb->pixel_format; + mode_cmd.width = fb->width; + mode_cmd.height = fb->height; + mode_cmd.pitches[0] = fb->pitches[0]; + mode_cmd.modifier[0] = fb->modifier[0]; + mode_cmd.flags = DRM_MODE_FB_MODIFIERS; mutex_lock(&dev->struct_mutex); - - if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb), + if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), &mode_cmd, obj)) { DRM_DEBUG_KMS("intel fb init failed\n"); goto out_unref_obj; } - - obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG_KMS("plane fb obj %p\n", obj); + DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); return true; out_unref_obj: @@ -2417,35 +2580,37 @@ out_unref_obj: static void update_state_fb(struct drm_plane *plane) { - if (plane->fb != plane->state->fb) - drm_atomic_set_fb_for_plane(plane->state, plane->fb); + if (plane->fb == plane->state->fb) + return; + + if (plane->state->fb) + drm_framebuffer_unreference(plane->state->fb); + plane->state->fb = plane->fb; + if (plane->state->fb) + drm_framebuffer_reference(plane->state->fb); } static void -intel_find_plane_obj(struct intel_crtc *intel_crtc, - struct intel_initial_plane_config *plane_config) +intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, + struct intel_initial_plane_config *plane_config) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct intel_crtc *i; struct drm_i915_gem_object *obj; + struct drm_plane *primary = intel_crtc->base.primary; + struct drm_framebuffer *fb; - if (!intel_crtc->base.primary->fb) + if (!plane_config->fb) return; - if (intel_alloc_plane_obj(intel_crtc, plane_config)) { - struct drm_plane *primary = intel_crtc->base.primary; - - primary->state->crtc = &intel_crtc->base; - primary->crtc = &intel_crtc->base; - update_state_fb(primary); - - return; + if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { + fb = &plane_config->fb->base; + goto valid_fb; } - kfree(intel_crtc->base.primary->fb); - intel_crtc->base.primary->fb = NULL; + kfree(plane_config->fb); /* * Failed to alloc 
the obj, check to see if we should share @@ -2460,26 +2625,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, if (!i->active) continue; - obj = intel_fb_obj(c->primary->fb); - if (obj == NULL) + fb = c->primary->fb; + if (!fb) continue; + obj = intel_fb_obj(fb); if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { - struct drm_plane *primary = intel_crtc->base.primary; - - if (obj->tiling_mode != I915_TILING_NONE) - dev_priv->preserve_bios_swizzle = true; - - drm_framebuffer_reference(c->primary->fb); - primary->fb = c->primary->fb; - primary->state->crtc = &intel_crtc->base; - primary->crtc = &intel_crtc->base; - obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); - break; + drm_framebuffer_reference(fb); + goto valid_fb; } } - update_state_fb(intel_crtc->base.primary); + return; + +valid_fb: + obj = intel_fb_obj(fb); + if (obj->tiling_mode != I915_TILING_NONE) + dev_priv->preserve_bios_swizzle = true; + + primary->fb = fb; + primary->state->crtc = &intel_crtc->base; + primary->crtc = &intel_crtc->base; + update_state_fb(primary); + obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); } static void i9xx_update_primary_plane(struct drm_crtc *crtc, @@ -2600,9 +2768,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, I915_WRITE(reg, dspcntr); - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, - fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPSURF(plane), @@ -2704,9 +2869,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, I915_WRITE(reg, dspcntr); - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, - fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); @@ -2719,6 +2881,51 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, POSTING_READ(reg); } +u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, + uint32_t pixel_format) +{ + u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8; + + /* + * The stride is either expressed as a multiple of 64 bytes + * chunks for linear buffers or in number of tiles for tiled + * buffers. + */ + switch (fb_modifier) { + case DRM_FORMAT_MOD_NONE: + return 64; + case I915_FORMAT_MOD_X_TILED: + if (INTEL_INFO(dev)->gen == 2) + return 128; + return 512; + case I915_FORMAT_MOD_Y_TILED: + /* No need to check for old gens and Y tiling since this is + * about the display engine and those will be blocked before + * we get here. 
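 * A Y tile is 128 bytes wide (by 32 rows, matching intel_tile_height()
 * above), hence the 128-byte stride unit returned below. For example, a
 * hypothetical 3840-pixel-wide XRGB8888 Y-tiled scanout has pitches[0] =
 * 3840 * 4 = 15360 bytes, so PLANE_STRIDE would be programmed as
 * 15360 / 128 = 120 tiles.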
+ */ + return 128; + case I915_FORMAT_MOD_Yf_TILED: + if (bits_per_pixel == 8) + return 64; + else + return 128; + default: + MISSING_CASE(fb_modifier); + return 64; + } +} + +unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj) +{ + const struct i915_ggtt_view *view = &i915_ggtt_view_normal; + + if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) + view = &i915_ggtt_view_rotated; + + return i915_gem_obj_ggtt_offset_view(obj, view); +} + static void skylake_update_primary_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) @@ -2726,10 +2933,10 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; int pipe = intel_crtc->pipe; - u32 plane_ctl, stride; + u32 plane_ctl, stride_div; + unsigned long surf_addr; if (!intel_crtc->primary_enabled) { I915_WRITE(PLANE_CTL(pipe, 0), 0); @@ -2773,43 +2980,39 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, BUG(); } - intel_fb = to_intel_framebuffer(fb); - obj = intel_fb->obj; - - /* - * The stride is either expressed as a multiple of 64 bytes chunks for - * linear buffers or in number of tiles for tiled buffers. - */ - switch (obj->tiling_mode) { - case I915_TILING_NONE: - stride = fb->pitches[0] >> 6; + switch (fb->modifier[0]) { + case DRM_FORMAT_MOD_NONE: break; - case I915_TILING_X: + case I915_FORMAT_MOD_X_TILED: plane_ctl |= PLANE_CTL_TILED_X; - stride = fb->pitches[0] >> 9; + break; + case I915_FORMAT_MOD_Y_TILED: + plane_ctl |= PLANE_CTL_TILED_Y; + break; + case I915_FORMAT_MOD_Yf_TILED: + plane_ctl |= PLANE_CTL_TILED_YF; break; default: - BUG(); + MISSING_CASE(fb->modifier[0]); } plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) plane_ctl |= PLANE_CTL_ROTATE_180; - I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - - DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n", - i915_gem_obj_ggtt_offset(obj), - x, y, fb->width, fb->height, - fb->pitches[0]); + obj = intel_fb_obj(fb); + stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], + fb->pixel_format); + surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj); + I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); I915_WRITE(PLANE_POS(pipe, 0), 0); I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x); I915_WRITE(PLANE_SIZE(pipe, 0), (intel_crtc->config->pipe_src_h - 1) << 16 | (intel_crtc->config->pipe_src_w - 1)); - I915_WRITE(PLANE_STRIDE(pipe, 0), stride); - I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj)); + I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div); + I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -3060,38 +3263,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) FDI_FE_ERRC_ENABLE); } -static bool pipe_has_enabled_pch(struct intel_crtc *crtc) -{ - return crtc->base.enabled && crtc->active && - crtc->config->has_pch_encoder; -} - -static void ivb_modeset_global_resources(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - struct intel_crtc *pipe_C_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); - uint32_t temp; - - /* - * When everything is off disable fdi C so that we could enable fdi B 
- * with all lanes. Note that we don't care about enabled pipes without - * an enabled pch encoder. - */ - if (!pipe_has_enabled_pch(pipe_B_crtc) && - !pipe_has_enabled_pch(pipe_C_crtc)) { - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp = I915_READ(SOUTH_CHICKEN1); - temp &= ~FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("disabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - } -} - /* The FDI link training functions for ILK/Ibexpeak. */ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -3747,20 +3918,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, I915_READ(VSYNCSHIFT(cpu_transcoder))); } -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) +static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t temp; temp = I915_READ(SOUTH_CHICKEN1); - if (temp & FDI_BC_BIFURCATION_SELECT) + if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) return; WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("enabling fdi C rx\n"); + temp &= ~FDI_BC_BIFURCATION_SELECT; + if (enable) + temp |= FDI_BC_BIFURCATION_SELECT; + + DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); I915_WRITE(SOUTH_CHICKEN1, temp); POSTING_READ(SOUTH_CHICKEN1); } @@ -3768,20 +3942,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; switch (intel_crtc->pipe) { case PIPE_A: break; case PIPE_B: if (intel_crtc->config->fdi_lanes > 2) - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); + cpt_set_fdi_bc_bifurcation(dev, false); else - cpt_enable_fdi_bc_bifurcation(dev); + cpt_set_fdi_bc_bifurcation(dev, true); break; case PIPE_C: - cpt_enable_fdi_bc_bifurcation(dev); + cpt_set_fdi_bc_bifurcation(dev, true); break; default: @@ -4116,6 +4289,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc) } } +/* + * Disable a plane internally without actually modifying the plane's state. + * This will allow us to easily restore the plane later by just reprogramming + * its state. + */ +static void disable_plane_internal(struct drm_plane *plane) +{ + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_plane_state *state = + plane->funcs->atomic_duplicate_state(plane); + struct intel_plane_state *intel_state = to_intel_plane_state(state); + + intel_state->visible = false; + intel_plane->commit_plane(plane, intel_state); + + intel_plane_destroy_state(plane, state); +} + static void intel_disable_sprite_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -4125,8 +4316,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc) drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { intel_plane = to_intel_plane(plane); - if (intel_plane->pipe == pipe) - plane->funcs->disable_plane(plane); + if (plane->fb && intel_plane->pipe == pipe) + disable_plane_internal(plane); } } @@ -4200,7 +4391,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc) bool reenable_ips = false; /* The clocks have to be on to load the palette. 
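 * As a sketch of what the legacy load amounts to (an illustration from the
 * pre-atomic path, not new code in this patch), each of the 256 gamma
 * entries is written as one packed word:
 *   I915_WRITE(palreg + 4 * i,
 *              (lut_r[i] << 16) | (lut_g[i] << 8) | lut_b[i]);
 * which is why the pipe clocks must be running for the writes to stick.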
*/ - if (!crtc->enabled || !intel_crtc->active) + if (!crtc->state->enable || !intel_crtc->active) return; if (!HAS_PCH_SPLIT(dev_priv->dev)) { @@ -4284,11 +4475,10 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int pipe = intel_crtc->pipe; - int plane = intel_crtc->plane; intel_crtc_wait_for_pending_flips(crtc); - if (dev_priv->fbc.plane == plane) + if (dev_priv->fbc.crtc == intel_crtc) intel_fbc_disable(dev); hsw_disable_ips(intel_crtc); @@ -4314,7 +4504,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - WARN_ON(!crtc->enabled); + WARN_ON(!crtc->state->enable); if (intel_crtc->active) return; @@ -4323,7 +4513,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_prepare_shared_dpll(intel_crtc); if (intel_crtc->config->has_dp_encoder) - intel_dp_set_m_n(intel_crtc); + intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -4422,7 +4612,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - WARN_ON(!crtc->enabled); + WARN_ON(!crtc->state->enable); if (intel_crtc->active) return; @@ -4431,7 +4621,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_enable_shared_dpll(intel_crtc); if (intel_crtc->config->has_dp_encoder) - intel_dp_set_m_n(intel_crtc); + intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -4753,8 +4943,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) return mask; } -static void modeset_update_crtc_power_domains(struct drm_device *dev) +static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; struct intel_crtc *crtc; @@ -4766,7 +4957,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { enum intel_display_power_domain domain; - if (!crtc->base.enabled) + if (!crtc->base.state->enable) continue; pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base); @@ -4776,7 +4967,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev) } if (dev_priv->display.modeset_global_resources) - dev_priv->display.modeset_global_resources(dev); + dev_priv->display.modeset_global_resources(state); for_each_intel_crtc(dev, crtc) { enum intel_display_power_domain domain; @@ -4893,24 +5084,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); switch (cdclk) { - case 400000: - cmd = 3; - break; case 333333: case 320000: - cmd = 2; - break; case 266667: - cmd = 1; - break; case 200000: - cmd = 0; break; default: MISSING_CASE(cdclk); return; } + /* + * Specs are full of misinformation, but testing on actual + * hardware has shown that we just need to write the desired + * CCK divider into the Punit register. + */ + cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; + mutex_lock(&dev_priv->rps.hw_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); val &= ~DSPFREQGUAR_MASK_CHV; @@ -4930,27 +5120,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, int max_pixclk) { int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 
333333 : 320000; - - /* FIXME: Punit isn't quite ready yet */ - if (IS_CHERRYVIEW(dev_priv->dev)) - return 400000; + int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90; /* * Really only a few cases to deal with, as only 4 CDclks are supported: * 200MHz * 267MHz * 320/333MHz (depends on HPLL freq) - * 400MHz - * So we check to see whether we're above 90% of the lower bin and - * adjust if needed. + * 400MHz (VLV only) + * So we check to see whether we're above 90% (VLV) or 95% (CHV) + * of the lower bin and adjust if needed. * * We seem to get an unstable or solid color picture at 200MHz. * Not sure what's wrong. For now use 200MHz only when all pipes * are off. */ - if (max_pixclk > freq_320*9/10) + if (!IS_CHERRYVIEW(dev_priv) && + max_pixclk > freq_320*limit/100) return 400000; - else if (max_pixclk > 266667*9/10) + else if (max_pixclk > 266667*limit/100) return freq_320; else if (max_pixclk > 0) return 266667; @@ -4987,12 +5175,49 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev, /* disable/enable all currently active pipes while we change cdclk */ for_each_intel_crtc(dev, intel_crtc) - if (intel_crtc->base.enabled) + if (intel_crtc->base.state->enable) *prepare_pipes |= (1 << intel_crtc->pipe); } -static void valleyview_modeset_global_resources(struct drm_device *dev) +static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) +{ + unsigned int credits, default_credits; + + if (IS_CHERRYVIEW(dev_priv)) + default_credits = PFI_CREDIT(12); + else + default_credits = PFI_CREDIT(8); + + if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { + /* CHV suggested value is 31 or 63 */ + if (IS_CHERRYVIEW(dev_priv)) + credits = PFI_CREDIT_31; + else + credits = PFI_CREDIT(15); + } else { + credits = default_credits; + } + + /* + * WA - write default credits before re-programming + * FIXME: should we also set the resend bit here? + */ + I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | + default_credits); + + I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | + credits | PFI_CREDIT_RESEND); + + /* + * FIXME is this guaranteed to clear + * immediately or should we poll for it? 
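+ *
+ * If polling does turn out to be needed, a minimal sketch using the
+ * driver's wait_for() helper (the 1 ms timeout is an assumption, not
+ * a documented value) would be:
+ *
+ *   if (wait_for((I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND) == 0, 1))
+ *           DRM_ERROR("timed out waiting for PFI credit resend\n");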
+ */ + WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); +} + +static void valleyview_modeset_global_resources(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; int max_pixclk = intel_mode_max_pixclk(dev_priv); int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); @@ -5014,6 +5239,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev) else valleyview_set_cdclk(dev, req_cdclk); + vlv_program_pfi_credits(dev_priv); + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } } @@ -5027,7 +5254,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; bool is_dsi; - WARN_ON(!crtc->enabled); + WARN_ON(!crtc->state->enable); if (intel_crtc->active) return; @@ -5042,7 +5269,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) } if (intel_crtc->config->has_dp_encoder) - intel_dp_set_m_n(intel_crtc); + intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -5110,7 +5337,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - WARN_ON(!crtc->enabled); + WARN_ON(!crtc->state->enable); if (intel_crtc->active) return; @@ -5118,7 +5345,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) i9xx_set_pll_dividers(intel_crtc); if (intel_crtc->config->has_dp_encoder) - intel_dp_set_m_n(intel_crtc); + intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); @@ -5285,6 +5512,8 @@ void intel_crtc_control(struct drm_crtc *crtc, bool enable) intel_crtc->enabled_power_domains = 0; } } + + update_state_fb(intel_crtc->base.primary); } /** @@ -5309,7 +5538,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = dev->dev_private; /* crtc should still be enabled when we disable it. */ - WARN_ON(!crtc->enabled); + WARN_ON(!crtc->state->enable); dev_priv->display.crtc_disable(crtc); dev_priv->display.off(crtc); @@ -5387,7 +5616,8 @@ static void intel_connector_check_state(struct intel_connector *connector) crtc = encoder->base.crtc; - I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n"); + I915_STATE_WARN(!crtc->state->enable, + "crtc not enabled\n"); I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n"); I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe, "encoder active on the wrong pipe\n"); @@ -5395,6 +5625,34 @@ static void intel_connector_check_state(struct intel_connector *connector) } } +int intel_connector_init(struct intel_connector *connector) +{ + struct drm_connector_state *connector_state; + + connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL); + if (!connector_state) + return -ENOMEM; + + connector->base.state = connector_state; + return 0; +} + +struct intel_connector *intel_connector_alloc(void) +{ + struct intel_connector *connector; + + connector = kzalloc(sizeof *connector, GFP_KERNEL); + if (!connector) + return NULL; + + if (intel_connector_init(connector) < 0) { + kfree(connector); + return NULL; + } + + return connector; +} + /* Even simpler default implementation, if there's really no special case to * consider. 
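 *
 * Connectors are now expected to come from intel_connector_alloc()
 * above, so that connector->base.state is populated. A caller sketch
 * (hypothetical encoder init; 'funcs' and 'type' are placeholders,
 * not names from this patch):
 *
 *   struct intel_connector *intel_connector = intel_connector_alloc();
 *
 *   if (!intel_connector)
 *           return;
 *   drm_connector_init(dev, &intel_connector->base, &funcs, type);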
*/ void intel_connector_dpms(struct drm_connector *connector, int mode) @@ -5426,13 +5684,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector) return encoder->get_hw_state(encoder, &pipe); } +static int pipe_required_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe) +{ + struct intel_crtc *crtc = + to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); + + if (crtc->base.state->enable && + crtc->config->has_pch_encoder) + return crtc->config->fdi_lanes; + + return 0; +} + static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", pipe_name(pipe), pipe_config->fdi_lanes); if (pipe_config->fdi_lanes > 4) { @@ -5459,22 +5725,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe case PIPE_A: return true; case PIPE_B: - if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && - pipe_config->fdi_lanes > 2) { + if (pipe_config->fdi_lanes > 2 && + pipe_required_fdi_lanes(dev, PIPE_C) > 0) { DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", pipe_name(pipe), pipe_config->fdi_lanes); return false; } return true; case PIPE_C: - if (!pipe_has_enabled_pch(pipe_B_crtc) || - pipe_B_crtc->config->fdi_lanes <= 2) { - if (pipe_config->fdi_lanes > 2) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", - pipe_name(pipe), pipe_config->fdi_lanes); - return false; - } - } else { + if (pipe_config->fdi_lanes > 2) { + DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", + pipe_name(pipe), pipe_config->fdi_lanes); + return false; + } + if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) { DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); return false; } @@ -5574,7 +5838,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * - LVDS dual channel mode * - Double wide pipe */ - if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && + if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) pipe_config->pipe_src_w &= ~1; @@ -5608,10 +5872,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev) u32 val; int divider; - /* FIXME: Punit isn't quite ready yet */ - if (IS_CHERRYVIEW(dev)) - return 400000; - if (dev_priv->hpll_freq == 0) dev_priv->hpll_freq = valleyview_get_vco(dev_priv); @@ -5757,15 +6017,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors) +static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, + int num_connectors) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; int refclk; + WARN_ON(!crtc_state->base.state); + if (IS_VALLEYVIEW(dev)) { refclk = 100000; - } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -5808,8 +6071,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, crtc_state->dpll_hw_state.fp0 = 
fp; crtc->lowfreq_avail = false; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && - reduced_clock && i915.powersave) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + reduced_clock) { crtc_state->dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; } else { @@ -5877,7 +6140,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, * for gen < 8) and if DRRS is supported (to make sure the * registers are not unnecessarily accessed). */ - if (m2_n2 && INTEL_INFO(dev)->gen < 8 && + if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) && crtc->config->has_drrs) { I915_WRITE(PIPE_DATA_M2(transcoder), TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); @@ -5893,13 +6156,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, } } -void intel_dp_set_m_n(struct intel_crtc *crtc) +void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) { + struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; + + if (m_n == M1_N1) { + dp_m_n = &crtc->config->dp_m_n; + dp_m2_n2 = &crtc->config->dp_m2_n2; + } else if (m_n == M2_N2) { + + /* + * M2_N2 registers are not supported. Hence m2_n2 divider value + * needs to be programmed into M1_N1. + */ + dp_m_n = &crtc->config->dp_m2_n2; + } else { + DRM_ERROR("Unsupported divider value\n"); + return; + } + if (crtc->config->has_pch_encoder) intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); else - intel_cpu_transcoder_set_m_n(crtc, &crtc->config->dp_m_n, - &crtc->config->dp_m2_n2); + intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); } static void vlv_update_pll(struct intel_crtc *crtc, @@ -6037,9 +6316,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc, int pipe = crtc->pipe; int dpll_reg = DPLL(crtc->pipe); enum dpio_channel port = vlv_pipe_to_channel(pipe); - u32 loopfilter, intcoeff; + u32 loopfilter, tribuf_calcntr; u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; - int refclk; + u32 dpio_val; + int vco; bestn = pipe_config->dpll.n; bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; @@ -6047,6 +6327,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc, bestm2 = pipe_config->dpll.m2 >> 22; bestp1 = pipe_config->dpll.p1; bestp2 = pipe_config->dpll.p2; + vco = pipe_config->dpll.vco; + dpio_val = 0; + loopfilter = 0; /* * Enable Refclk and SSC @@ -6072,26 +6355,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc, 1 << DPIO_CHV_N_DIV_SHIFT); /* M2 fraction division */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); + if (bestm2_frac) + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); /* M2 fraction division enable */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), - DPIO_CHV_FRAC_DIV_EN | - (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); + dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); + dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); + if (bestm2_frac) + dpio_val |= DPIO_CHV_FRAC_DIV_EN; + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); + + /* Program digital lock detect threshold */ + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); + dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | + DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); + dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); + if (!bestm2_frac) + dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); /* Loop filter */ - refclk = i9xx_get_refclk(crtc, 0); - loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | - 2 << 
DPIO_CHV_GAIN_CTRL_SHIFT; - if (refclk == 100000) - intcoeff = 11; - else if (refclk == 38400) - intcoeff = 10; - else - intcoeff = 9; - loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT; + if (vco == 5400000) { + loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x9; + } else if (vco <= 6200000) { + loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x9; + } else if (vco <= 6480000) { + loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x8; + } else { + /* Not supported. Apply the same limits as in the max case */ + loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0; + } vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); + dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; + dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); + /* AFC Recal */ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | @@ -6116,6 +6429,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe, struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); struct intel_crtc_state pipe_config = { + .base.crtc = &crtc->base, .pixel_multiplier = 1, .dpll = *dpll, }; @@ -6160,12 +6474,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc, i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); - is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) || - intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI); + is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) || + intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI); dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -6208,7 +6522,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -6238,7 +6552,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; } else { if (clock->p1 == 2) @@ -6249,10 +6563,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO)) + if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= 
PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -6466,11 +6780,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, bool is_lvds = false, is_dsi = false; struct intel_encoder *encoder; const intel_limit_t *limit; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; + int i; - for_each_intel_encoder(dev, encoder) { - if (encoder->new_crtc != crtc) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector_state = state->connector_states[i]; + if (connector_state->crtc != &crtc->base) continue; + encoder = to_intel_encoder(connector_state->best_encoder); + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -6489,7 +6812,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, return 0; if (!crtc_state->clock_set) { - refclk = i9xx_get_refclk(crtc, num_connectors); + refclk = i9xx_get_refclk(crtc_state, num_connectors); /* * Returns a set of divisors for the desired target clock with @@ -6497,8 +6820,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + * 2) / p1 / p2. */ - limit = intel_limit(crtc, refclk); - ok = dev_priv->display.find_dpll(limit, crtc, + limit = intel_limit(crtc_state, refclk); + ok = dev_priv->display.find_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &clock); if (!ok) { @@ -6514,7 +6837,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, * we will disable the LVDS downclock feature. */ has_reduced_clock = - dev_priv->display.find_dpll(limit, crtc, + dev_priv->display.find_dpll(limit, crtc_state, dev_priv->lvds_downclock, refclk, &clock, &reduced_clock); @@ -6613,7 +6936,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, u32 val, base, offset; int pipe = crtc->pipe, plane = crtc->plane; int fourcc, pixel_format; - int aligned_height; + unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; @@ -6629,9 +6952,12 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - if (INTEL_INFO(dev)->gen >= 4) - if (val & DISPPLANE_TILED) + if (INTEL_INFO(dev)->gen >= 4) { + if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; + fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + } + } pixel_format = val & DISPPLANE_PIXFORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); @@ -6657,16 +6983,17 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_fb_align_height(dev, fb->height, - plane_config->tiling); + fb->pixel_format, + fb->modifier[0]); - plane_config->size = fb->pitches[0] * aligned_height; + plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), plane, fb->width, fb->height, fb->bits_per_pixel, base, fb->pitches[0], plane_config->size); - crtc->base.primary->fb = fb; + plane_config->fb = intel_fb; } static void chv_crtc_clock_get(struct intel_crtc *crtc, @@ -7140,18 +7467,26 @@ void intel_init_pch_refclk(struct drm_device *dev) lpt_init_pch_refclk(dev); } -static int ironlake_get_refclk(struct drm_crtc *crtc) +static int ironlake_get_refclk(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc_state->base.crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_atomic_state *state = crtc_state->base.state; + struct 
drm_connector_state *connector_state; struct intel_encoder *encoder; - int num_connectors = 0; + int num_connectors = 0, i; bool is_lvds = false; - for_each_intel_encoder(dev, encoder) { - if (encoder->new_crtc != to_intel_crtc(crtc)) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) continue; + encoder = to_intel_encoder(connector_state->best_encoder); + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -7338,22 +7673,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int refclk; const intel_limit_t *limit; bool ret, is_lvds = false; - is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS); + is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS); - refclk = ironlake_get_refclk(crtc); + refclk = ironlake_get_refclk(crtc_state); /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ - limit = intel_limit(intel_crtc, refclk); - ret = dev_priv->display.find_dpll(limit, intel_crtc, + limit = intel_limit(crtc_state, refclk); + ret = dev_priv->display.find_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, clock); if (!ret) @@ -7367,7 +7701,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, * downclock feature. */ *has_reduced_clock = - dev_priv->display.find_dpll(limit, intel_crtc, + dev_priv->display.find_dpll(limit, crtc_state, dev_priv->lvds_downclock, refclk, clock, reduced_clock); @@ -7400,16 +7734,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; + struct intel_encoder *encoder; uint32_t dpll; - int factor, num_connectors = 0; + int factor, num_connectors = 0, i; bool is_lvds = false, is_sdvo = false; - for_each_intel_encoder(dev, intel_encoder) { - if (intel_encoder->new_crtc != to_intel_crtc(crtc)) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) continue; - switch (intel_encoder->type) { + encoder = to_intel_encoder(connector_state->best_encoder); + + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; @@ -7538,7 +7880,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, } } - if (is_lvds && has_reduced_clock && i915.powersave) + if (is_lvds && has_reduced_clock) crtc->lowfreq_avail = true; else crtc->lowfreq_avail = false; @@ -7644,10 +7986,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - u32 val, base, offset, stride_mult; + u32 val, base, offset, stride_mult, tiling; int pipe = crtc->pipe; int fourcc, pixel_format; - int aligned_height; + unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; @@ -7663,9 +8005,6 @@ 
skylake_get_initial_plane_config(struct intel_crtc *crtc, if (!(val & PLANE_CTL_ENABLE)) goto error; - if (val & PLANE_CTL_TILED_MASK) - plane_config->tiling = I915_TILING_X; - pixel_format = val & PLANE_CTL_FORMAT_MASK; fourcc = skl_format_to_fourcc(pixel_format, val & PLANE_CTL_ORDER_RGBX, @@ -7673,6 +8012,26 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->pixel_format = fourcc; fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; + tiling = val & PLANE_CTL_TILED_MASK; + switch (tiling) { + case PLANE_CTL_TILED_LINEAR: + fb->modifier[0] = DRM_FORMAT_MOD_NONE; + break; + case PLANE_CTL_TILED_X: + plane_config->tiling = I915_TILING_X; + fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + break; + case PLANE_CTL_TILED_Y: + fb->modifier[0] = I915_FORMAT_MOD_Y_TILED; + break; + case PLANE_CTL_TILED_YF: + fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED; + break; + default: + MISSING_CASE(tiling); + goto error; + } + base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000; plane_config->base = base; @@ -7683,30 +8042,22 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->width = ((val >> 0) & 0x1fff) + 1; val = I915_READ(PLANE_STRIDE(pipe, 0)); - switch (plane_config->tiling) { - case I915_TILING_NONE: - stride_mult = 64; - break; - case I915_TILING_X: - stride_mult = 512; - break; - default: - MISSING_CASE(plane_config->tiling); - goto error; - } + stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0], + fb->pixel_format); fb->pitches[0] = (val & 0x3ff) * stride_mult; aligned_height = intel_fb_align_height(dev, fb->height, - plane_config->tiling); + fb->pixel_format, + fb->modifier[0]); - plane_config->size = fb->pitches[0] * aligned_height; + plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE); DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), fb->width, fb->height, fb->bits_per_pixel, base, fb->pitches[0], plane_config->size); - crtc->base.primary->fb = fb; + plane_config->fb = intel_fb; return; error: @@ -7746,7 +8097,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, u32 val, base, offset; int pipe = crtc->pipe; int fourcc, pixel_format; - int aligned_height; + unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; @@ -7762,9 +8113,12 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - if (INTEL_INFO(dev)->gen >= 4) - if (val & DISPPLANE_TILED) + if (INTEL_INFO(dev)->gen >= 4) { + if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; + fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + } + } pixel_format = val & DISPPLANE_PIXFORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); @@ -7790,16 +8144,17 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_fb_align_height(dev, fb->height, - plane_config->tiling); + fb->pixel_format, + fb->modifier[0]); - plane_config->size = fb->pitches[0] * aligned_height; + plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), fb->width, fb->height, fb->bits_per_pixel, base, fb->pitches[0], plane_config->size); - crtc->base.primary->fb = fb; + plane_config->fb = intel_fb; } static bool ironlake_get_pipe_config(struct intel_crtc *crtc, @@ -8285,8 +8640,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) uint32_t cntl = 0, size = 0; if (base) { - unsigned int width = 
intel_crtc->cursor_width; - unsigned int height = intel_crtc->cursor_height; + unsigned int width = intel_crtc->base.cursor->state->crtc_w; + unsigned int height = intel_crtc->base.cursor->state->crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; switch (stride) { @@ -8350,7 +8705,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) cntl = 0; if (base) { cntl = MCURSOR_GAMMA_ENABLE; - switch (intel_crtc->cursor_width) { + switch (intel_crtc->base.cursor->state->crtc_w) { case 64: cntl |= CURSOR_MODE_64_ARGB_AX; break; @@ -8361,7 +8716,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) cntl |= CURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(intel_crtc->cursor_width); + MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); return; } cntl |= pipe << 28; /* Connect to correct pipe */ @@ -8408,7 +8763,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, base = 0; if (x < 0) { - if (x + intel_crtc->cursor_width <= 0) + if (x + intel_crtc->base.cursor->state->crtc_w <= 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; @@ -8417,7 +8772,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, pos |= x << CURSOR_X_SHIFT; if (y < 0) { - if (y + intel_crtc->cursor_height <= 0) + if (y + intel_crtc->base.cursor->state->crtc_h <= 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; @@ -8433,8 +8788,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, /* ILK+ do this automagically */ if (HAS_GMCH_DISPLAY(dev) && crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { - base += (intel_crtc->cursor_height * - intel_crtc->cursor_width - 1) * 4; + base += (intel_crtc->base.cursor->state->crtc_h * + intel_crtc->base.cursor->state->crtc_w - 1) * 4; } if (IS_845G(dev) || IS_I865G(dev)) @@ -8626,6 +8981,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct drm_device *dev = encoder->dev; struct drm_framebuffer *fb; struct drm_mode_config *config = &dev->mode_config; + struct drm_atomic_state *state = NULL; + struct drm_connector_state *connector_state; int ret, i = -1; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", @@ -8673,7 +9030,7 @@ retry: i++; if (!(encoder->possible_crtcs & (1 << i))) continue; - if (possible_crtc->enabled) + if (possible_crtc->state->enable) continue; /* This can occur when applying the pipe A quirk on resume. 
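 *
 * (That is, the staged new_enabled may already be set for this crtc
 * while the committed crtc->state->enable is still false.)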
*/ if (to_intel_crtc(possible_crtc)->new_enabled) @@ -8707,6 +9064,21 @@ retry: old->load_detect_temp = true; old->release_fb = NULL; + state = drm_atomic_state_alloc(dev); + if (!state) + return false; + + state->acquire_ctx = ctx; + + connector_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(connector_state)) { + ret = PTR_ERR(connector_state); + goto fail; + } + + connector_state->crtc = crtc; + connector_state->best_encoder = &intel_encoder->base; + if (!mode) mode = &load_detect_mode; @@ -8729,7 +9101,7 @@ retry: goto fail; } - if (intel_set_mode(crtc, mode, 0, 0, fb)) { + if (intel_set_mode(crtc, mode, 0, 0, fb, state)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); @@ -8742,12 +9114,17 @@ retry: return true; fail: - intel_crtc->new_enabled = crtc->enabled; + intel_crtc->new_enabled = crtc->state->enable; if (intel_crtc->new_enabled) intel_crtc->new_config = intel_crtc->config; else intel_crtc->new_config = NULL; fail_unlock: + if (state) { + drm_atomic_state_free(state); + state = NULL; + } + if (ret == -EDEADLK) { drm_modeset_backoff(ctx); goto retry; @@ -8757,24 +9134,44 @@ fail_unlock: } void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old) + struct intel_load_detect_pipe *old, + struct drm_modeset_acquire_ctx *ctx) { + struct drm_device *dev = connector->dev; struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_atomic_state *state; + struct drm_connector_state *connector_state; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); if (old->load_detect_temp) { + state = drm_atomic_state_alloc(dev); + if (!state) + goto fail; + + state->acquire_ctx = ctx; + + connector_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(connector_state)) + goto fail; + to_intel_connector(connector)->new_encoder = NULL; intel_encoder->new_crtc = NULL; intel_crtc->new_enabled = false; intel_crtc->new_config = NULL; - intel_set_mode(crtc, NULL, 0, 0, NULL); + + connector_state->best_encoder = NULL; + connector_state->crtc = NULL; + + intel_set_mode(crtc, NULL, 0, 0, NULL, state); + + drm_atomic_state_free(state); if (old->release_fb) { drm_framebuffer_unregister_private(old->release_fb); @@ -8787,6 +9184,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) connector->funcs->dpms(connector, old->dpms_mode); + + return; +fail: + DRM_DEBUG_KMS("Couldn't release load detect pipe.\n"); + drm_atomic_state_free(state); } static int i9xx_pll_refclk(struct drm_device *dev, @@ -9025,6 +9427,8 @@ void intel_mark_busy(struct drm_device *dev) intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); + if (INTEL_INFO(dev)->gen >= 6) + gen6_rps_busy(dev_priv); dev_priv->mm.busy = true; } @@ -9038,9 +9442,6 @@ void intel_mark_idle(struct drm_device *dev) dev_priv->mm.busy = false; - if (!i915.powersave) - goto out; - for_each_crtc(dev, crtc) { if (!crtc->primary->fb) continue; @@ -9051,7 +9452,6 @@ void intel_mark_idle(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 6) gen6_rps_idle(dev->dev_private); -out: intel_runtime_pm_put(dev_priv); } @@ -9093,9 +9493,8 @@ static void 
intel_unpin_work_fn(struct work_struct *__work) enum i915_pipe pipe = to_intel_crtc(work->crtc)->pipe; mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(work->old_fb_obj); + intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state); drm_gem_object_unreference(&work->pending_flip_obj->base); - drm_gem_object_unreference(&work->old_fb_obj->base); intel_fbc_update(dev); @@ -9104,6 +9503,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_unlock(&dev->struct_mutex); intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + drm_framebuffer_unreference(work->old_fb); BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); @@ -9618,69 +10018,6 @@ static int intel_queue_mmio_flip(struct drm_device *dev, return 0; } -static int intel_gen9_queue_flip(struct drm_device *dev, - struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring, - uint32_t flags) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - uint32_t plane = 0, stride; - int ret; - - switch(intel_crtc->pipe) { - case PIPE_A: - plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A; - break; - case PIPE_B: - plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B; - break; - case PIPE_C: - plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C; - break; - default: - WARN_ONCE(1, "unknown plane in flip command\n"); - return -ENODEV; - } - - switch (obj->tiling_mode) { - case I915_TILING_NONE: - stride = fb->pitches[0] >> 6; - break; - case I915_TILING_X: - stride = fb->pitches[0] >> 9; - break; - default: - WARN_ONCE(1, "unknown tiling in flip command\n"); - return -ENODEV; - } - - ret = intel_ring_begin(ring, 10); - if (ret) - return ret; - - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, DERRMR); - intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | - DERRMR_PIPEB_PRI_FLIP_DONE | - DERRMR_PIPEC_PRI_FLIP_DONE)); - intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | - MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit(ring, DERRMR); - intel_ring_emit(ring, ring->scratch.gtt_offset + 256); - intel_ring_emit(ring, 0); - - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane); - intel_ring_emit(ring, stride << 6 | obj->tiling_mode); - intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); - - intel_mark_page_flip_active(intel_crtc); - __intel_ring_advance(ring); - - return 0; -} - static int intel_default_queue_flip(struct drm_device *dev, struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -9710,10 +10047,10 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev, !i915_gem_request_completed(work->flip_queued_req, true)) return false; - work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe); + work->flip_ready_vblank = drm_crtc_vblank_count(crtc); } - if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3) + if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) return false; /* Potential stall - if we see that the flip has happened, @@ -9742,7 +10079,8 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) lockmgr(&dev->event_lock, LK_EXCLUSIVE); if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", - intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe)); + intel_crtc->unpin_work->flip_queued_vblank, + drm_vblank_count(dev, pipe)); page_flip_completed(intel_crtc); } lockmgr(&dev->event_lock, LK_RELEASE); @@ -9794,7 +10132,7 @@ static 
int intel_crtc_page_flip(struct drm_crtc *crtc, work->event = event; work->crtc = crtc; - work->old_fb_obj = intel_fb_obj(old_fb); + work->old_fb = old_fb; INIT_WORK(&work->work, intel_unpin_work_fn); ret = drm_crtc_vblank_get(crtc); @@ -9825,19 +10163,26 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (atomic_read(&intel_crtc->unpin_work_count) >= 2) flush_workqueue(dev_priv->wq); - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto cleanup; - /* Reference the objects for the scheduled work. */ - drm_gem_object_reference(&work->old_fb_obj->base); + drm_framebuffer_reference(work->old_fb); drm_gem_object_reference(&obj->base); crtc->primary->fb = fb; update_state_fb(crtc->primary); + /* Keep state structure in sync */ + if (crtc->primary->state->fb) + drm_framebuffer_unreference(crtc->primary->state->fb); + crtc->primary->state->fb = fb; + if (crtc->primary->state->fb) + drm_framebuffer_reference(crtc->primary->state->fb); + work->pending_flip_obj = obj; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto cleanup; + atomic_inc(&intel_crtc->unpin_work_count); intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); @@ -9846,7 +10191,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (IS_VALLEYVIEW(dev)) { ring = &dev_priv->ring[BCS]; - if (obj->tiling_mode != work->old_fb_obj->tiling_mode) + if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) /* vlv: DISPLAY_FLIP fails to change tiling */ ring = NULL; } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { @@ -9859,12 +10204,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ring = &dev_priv->ring[RCS]; } - ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring); + ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, + crtc->primary->state, ring); if (ret) goto cleanup_pending; - work->gtt_offset = - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; + work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj) + + intel_crtc->dspaddr_offset; if (use_mmio_flip(ring, obj)) { ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, @@ -9884,10 +10230,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, intel_ring_get_request(ring)); } - work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe); + work->flip_queued_vblank = drm_crtc_vblank_count(crtc); work->enable_stall_check = true; - i915_gem_track_fb(work->old_fb_obj, obj, + i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, INTEL_FRONTBUFFER_PRIMARY(pipe)); intel_fbc_disable(dev); @@ -9899,16 +10245,17 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_unpin: - intel_unpin_fb_obj(obj); + intel_unpin_fb_obj(fb, crtc->primary->state); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); + mutex_unlock(&dev->struct_mutex); +cleanup: crtc->primary->fb = old_fb; update_state_fb(crtc->primary); - drm_gem_object_unreference(&work->old_fb_obj->base); - drm_gem_object_unreference(&obj->base); - mutex_unlock(&dev->struct_mutex); -cleanup: + drm_gem_object_unreference_unlocked(&obj->base); + drm_framebuffer_unreference(work->old_fb); + lockmgr(&dev->event_lock, LK_EXCLUSIVE); intel_crtc->unpin_work = NULL; lockmgr(&dev->event_lock, LK_RELEASE); @@ -9948,8 +10295,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { 
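		/* Stage the committed topology: each connector starts out
		 * pointing at the encoder it is currently using. */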
connector->new_encoder = to_intel_encoder(connector->base.encoder); } @@ -9960,7 +10306,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) } for_each_intel_crtc(dev, crtc) { - crtc->new_enabled = crtc->base.enabled; + crtc->new_enabled = crtc->base.state->enable; if (crtc->new_enabled) crtc->new_config = crtc->config; @@ -9969,6 +10315,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) } } +/* Transitional helper to copy current connector/encoder state to + * connector->state. This is needed so that code that is partially + * converted to atomic does the right thing. + */ +static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) +{ + struct intel_connector *connector; + + for_each_intel_connector(dev, connector) { + if (connector->base.encoder) { + connector->base.state->best_encoder = + connector->base.encoder; + connector->base.state->crtc = + connector->base.encoder->crtc; + } else { + connector->base.state->best_encoder = NULL; + connector->base.state->crtc = NULL; + } + } +} + /** * intel_modeset_commit_output_state * @@ -9980,8 +10347,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { connector->base.encoder = &connector->new_encoder->base; } @@ -9990,8 +10356,11 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) } for_each_intel_crtc(dev, crtc) { + crtc->base.state->enable = crtc->new_enabled; crtc->base.enabled = crtc->new_enabled; } + + intel_modeset_update_connector_atomic_state(dev); } static void @@ -10026,8 +10395,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; + struct drm_atomic_state *state; struct intel_connector *connector; - int bpp; + int bpp, i; switch (fb->pixel_format) { case DRM_FORMAT_C8: @@ -10067,11 +10437,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, pipe_config->pipe_bpp = bpp; + state = pipe_config->base.state; + /* Clamp display bpp to EDID value */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (!connector->new_encoder || - connector->new_encoder->new_crtc != crtc) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector = to_intel_connector(state->connectors[i]); + if (state->connector_states[i]->crtc != &crtc->base) continue; connected_sink_compute_bpp(connector, pipe_config); @@ -10196,8 +10570,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev) * list to detect the problem on ddi platforms * where there's just one encoder per digital port. 
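 *
 * The idea, in sketch form (the real loop below additionally filters
 * on encoder type):
 *
 *   used_ports = 0;
 *   for_each_intel_connector(dev, connector) {
 *           port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
 *           if (used_ports & port_mask)
 *                   return false;
 *           used_ports |= port_mask;
 *   }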
*/ - list_for_each_entry(connector, - &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, connector) { struct intel_encoder *encoder = connector->new_encoder; if (!encoder) @@ -10228,15 +10601,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev) return true; } +static void +clear_intel_crtc_state(struct intel_crtc_state *crtc_state) +{ + struct drm_crtc_state tmp_state; + + /* Clear only the intel specific part of the crtc state */ + tmp_state = crtc_state->base; + memset(crtc_state, 0, sizeof *crtc_state); + crtc_state->base = tmp_state; +} + static struct intel_crtc_state * intel_modeset_pipe_config(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_display_mode *mode) + struct drm_display_mode *mode, + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct intel_encoder *encoder; + struct intel_connector *connector; + struct drm_connector_state *connector_state; struct intel_crtc_state *pipe_config; int plane_bpp, ret = -EINVAL; + int i; bool retry = true; if (!check_encoder_cloning(to_intel_crtc(crtc))) { @@ -10249,10 +10637,13 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, return ERR_PTR(-EINVAL); } - pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); - if (!pipe_config) - return ERR_PTR(-ENOMEM); + pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc)); + if (IS_ERR(pipe_config)) + return pipe_config; + clear_intel_crtc_state(pipe_config); + + pipe_config->base.crtc = crtc; drm_mode_copy(&pipe_config->base.adjusted_mode, mode); drm_mode_copy(&pipe_config->base.mode, mode); @@ -10307,11 +10698,17 @@ encoder_retry: * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. */ - for_each_intel_encoder(dev, encoder) { + for (i = 0; i < state->num_connector; i++) { + connector = to_intel_connector(state->connectors[i]); + if (!connector) + continue; - if (&encoder->new_crtc->base != crtc) + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc) continue; + encoder = to_intel_encoder(connector_state->best_encoder); + if (!(encoder->compute_config(encoder, pipe_config))) { DRM_DEBUG_KMS("Encoder config failure\n"); goto fail; @@ -10347,7 +10744,6 @@ encoder_retry: return pipe_config; fail: - kfree(pipe_config); return ERR_PTR(ret); } @@ -10369,8 +10765,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, * to be part of the prepare_pipes mask. We don't (yet) support global * modeset across multiple crtcs, so modeset_pipes will only have one * bit set at most. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->base.encoder == &connector->new_encoder->base) continue; @@ -10401,7 +10796,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, /* Check for pipes that will be enabled/disabled ... */ for_each_intel_crtc(dev, intel_crtc) { - if (intel_crtc->base.enabled == intel_crtc->new_enabled) + if (intel_crtc->base.state->enable == intel_crtc->new_enabled) continue; if (!intel_crtc->new_enabled) @@ -10476,10 +10871,10 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) /* Double check state. 
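 *
 * i.e. after the commit the software bookkeeping must agree with
 * itself: state->enable matches intel_crtc_in_use(), and new_config
 * is non-NULL exactly for the enabled crtcs.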
*/ for_each_intel_crtc(dev, intel_crtc) { - WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); + WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base)); WARN_ON(intel_crtc->new_config && intel_crtc->new_config != intel_crtc->config); - WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); + WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config); } list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -10739,7 +11134,7 @@ static void check_wm_state(struct drm_device *dev) continue; /* planes */ - for_each_plane(pipe, plane) { + for_each_plane(dev_priv, pipe, plane) { hw_entry = &hw_ddb.plane[pipe][plane]; sw_entry = &sw_ddb->plane[pipe][plane]; @@ -10773,8 +11168,7 @@ check_connector_state(struct drm_device *dev) { struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { /* This also checks the encoder/connector hw state with the * ->get_hw_state callbacks. */ intel_connector_check_state(connector); @@ -10804,8 +11198,7 @@ check_encoder_state(struct drm_device *dev) I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, "encoder's active_connectors set, but no crtc\n"); - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->base.encoder != &encoder->base) continue; enabled = true; @@ -10866,7 +11259,7 @@ check_crtc_state(struct drm_device *dev) DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.base.id); - I915_STATE_WARN(crtc->active && !crtc->base.enabled, + I915_STATE_WARN(crtc->active && !crtc->base.state->enable, "active crtc, but not enabled in sw tracking\n"); for_each_intel_encoder(dev, encoder) { @@ -10880,9 +11273,10 @@ check_crtc_state(struct drm_device *dev) I915_STATE_WARN(active != crtc->active, "crtc's computed active state doesn't match tracked active state " "(expected %i, found %i)\n", active, crtc->active); - I915_STATE_WARN(enabled != crtc->base.enabled, + I915_STATE_WARN(enabled != crtc->base.state->enable, "crtc's computed enabled state doesn't match tracked enabled state " - "(expected %i, found %i)\n", enabled, crtc->base.enabled); + "(expected %i, found %i)\n", enabled, + crtc->base.state->enable); active = dev_priv->display.get_pipe_config(crtc, &pipe_config); @@ -10946,7 +11340,7 @@ check_shared_dpll_state(struct drm_device *dev) pll->on, active); for_each_intel_crtc(dev, crtc) { - if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) + if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll) enabled_crtcs++; if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) active_crtcs++; @@ -11028,17 +11422,30 @@ static struct intel_crtc_state * intel_modeset_compute_config(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_framebuffer *fb, + struct drm_atomic_state *state, unsigned *modeset_pipes, unsigned *prepare_pipes, unsigned *disable_pipes) { + struct drm_device *dev = crtc->dev; struct intel_crtc_state *pipe_config = NULL; + struct intel_crtc *intel_crtc; + int ret = 0; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret) + return ERR_PTR(ret); intel_modeset_affected_pipes(crtc, modeset_pipes, prepare_pipes, disable_pipes); - if ((*modeset_pipes) == 0) - goto out; + for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) { + pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); + if (IS_ERR(pipe_config)) + 
return pipe_config; + + pipe_config->base.enable = false; + } /* * Note this needs changes when we start tracking multiple modes * and crtcs. At that point we'll need to compute the whole config * (i.e. one pipe_config for each crtc) rather than just the one * for this crtc. */ - pipe_config = intel_modeset_pipe_config(crtc, fb, mode); - if (IS_ERR(pipe_config)) { - goto out; + for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) { + /* FIXME: For now we still expect modeset_pipes has at most + * one bit set. */ + if (WARN_ON(&intel_crtc->base != crtc)) + continue; + + pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state); + if (IS_ERR(pipe_config)) + return pipe_config; + + intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, + "[modeset]"); } - intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, - "[modeset]"); -out: - return pipe_config; + return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc)); } static int __intel_set_mode_setup_plls(struct drm_device *dev, @@ -11098,6 +11511,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *saved_mode; + struct intel_crtc_state *crtc_state_copy = NULL; struct intel_crtc *intel_crtc; int ret = 0; @@ -11105,6 +11519,12 @@ static int __intel_set_mode(struct drm_crtc *crtc, if (!saved_mode) return -ENOMEM; + crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), M_DRM, M_WAITOK); + if (!crtc_state_copy) { + ret = -ENOMEM; + goto done; + } + *saved_mode = crtc->mode; if (modeset_pipes) @@ -11132,7 +11552,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, intel_crtc_disable(&intel_crtc->base); for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { - if (intel_crtc->base.enabled) + if (intel_crtc->base.state->enable) dev_priv->display.crtc_disable(&intel_crtc->base); } @@ -11162,7 +11582,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, * update the the output configuration. */ intel_modeset_update_state(dev, prepare_pipes); - modeset_update_crtc_power_domains(dev); + modeset_update_crtc_power_domains(pipe_config->base.state); /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. @@ -11188,9 +11608,25 @@ static int __intel_set_mode(struct drm_crtc *crtc, /* FIXME: add subpixel order */ done: - if (ret && crtc->enabled) + if (ret && crtc->state->enable) crtc->mode = *saved_mode; + if (ret == 0 && pipe_config) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* The pipe_config will be freed with the atomic state, so + * make a copy.
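+ *
+ * (Until the memcpy below runs, intel_crtc->config still points into
+ * the atomic state owned by the caller; afterwards the crtc owns an
+ * independent copy and base.state is re-pointed at it.)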
*/ + memcpy(crtc_state_copy, intel_crtc->config, + sizeof *crtc_state_copy); + intel_crtc->config = crtc_state_copy; + intel_crtc->base.state = &crtc_state_copy->base; + + if (modeset_pipes) + intel_crtc->new_config = intel_crtc->config; + } else { + kfree(crtc_state_copy); + } + kfree(saved_mode); return ret; } @@ -11216,27 +11652,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc, static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *fb) + int x, int y, struct drm_framebuffer *fb, + struct drm_atomic_state *state) { struct intel_crtc_state *pipe_config; unsigned modeset_pipes, prepare_pipes, disable_pipes; + int ret = 0; - pipe_config = intel_modeset_compute_config(crtc, mode, fb, + pipe_config = intel_modeset_compute_config(crtc, mode, fb, state, &modeset_pipes, &prepare_pipes, &disable_pipes); - if (IS_ERR(pipe_config)) - return PTR_ERR(pipe_config); + if (IS_ERR(pipe_config)) { + ret = PTR_ERR(pipe_config); + goto out; + } + + ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config, + modeset_pipes, prepare_pipes, + disable_pipes); + if (ret) + goto out; - return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config, - modeset_pipes, prepare_pipes, - disable_pipes); +out: + return ret; } void intel_crtc_restore_mode(struct drm_crtc *crtc) { - intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); + struct drm_device *dev = crtc->dev; + struct drm_atomic_state *state; + struct intel_encoder *encoder; + struct intel_connector *connector; + struct drm_connector_state *connector_state; + + state = drm_atomic_state_alloc(dev); + if (!state) { + DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory", + crtc->base.id); + return; + } + + state->acquire_ctx = dev->mode_config.acquire_ctx; + + /* The force restore path in the HW readout code relies on the staged + * config still keeping the user requested config while the actual + * state has been overwritten by the configuration read from HW. We + * need to copy the staged config to the atomic state, otherwise the + * mode set will just reapply the state the HW is already in. 
*/ + for_each_intel_encoder(dev, encoder) { + if (&encoder->new_crtc->base != crtc) + continue; + + for_each_intel_connector(dev, connector) { + if (connector->new_encoder != encoder) + continue; + + connector_state = drm_atomic_get_connector_state(state, &connector->base); + if (IS_ERR(connector_state)) { + DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n", + connector->base.base.id, + connector->base.name, + PTR_ERR(connector_state)); + continue; + } + + connector_state->crtc = crtc; + connector_state->best_encoder = &encoder->base; + } + } + + intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb, + state); + + drm_atomic_state_free(state); } #undef for_each_intel_crtc_masked @@ -11284,7 +11774,7 @@ static int intel_set_config_save_state(struct drm_device *dev, */ count = 0; for_each_crtc(dev, crtc) { - config->save_crtc_enabled[count++] = crtc->enabled; + config->save_crtc_enabled[count++] = crtc->state->enable; } count = 0; @@ -11325,7 +11815,7 @@ static void intel_set_config_restore_state(struct drm_device *dev, } count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, connector) { connector->new_encoder = to_intel_encoder(config->save_connector_encoders[count++]); } @@ -11405,9 +11895,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, static int intel_modeset_stage_output_state(struct drm_device *dev, struct drm_mode_set *set, - struct intel_set_config *config) + struct intel_set_config *config, + struct drm_atomic_state *state) { struct intel_connector *connector; + struct drm_connector_state *connector_state; struct intel_encoder *encoder; struct intel_crtc *crtc; int ro; @@ -11417,8 +11909,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, WARN_ON(!set->fb && (set->num_connectors != 0)); WARN_ON(set->fb && (set->num_connectors == 0)); - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { /* Otherwise traverse passed in connector list and get encoders * for them. */ for (ro = 0; ro < set->num_connectors; ro++) { @@ -11443,15 +11934,16 @@ intel_modeset_stage_output_state(struct drm_device *dev, if (&connector->new_encoder->base != connector->base.encoder) { - DRM_DEBUG_KMS("encoder changed, full mode switch\n"); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n", + connector->base.base.id, + connector->base.name); config->mode_changed = true; } } /* connector->new_encoder is now updated for all connectors. */ /* Update crtc of enabled connectors. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { struct drm_crtc *new_crtc; if (!connector->new_encoder) @@ -11471,6 +11963,14 @@ intel_modeset_stage_output_state(struct drm_device *dev, } connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); + connector_state = + drm_atomic_get_connector_state(state, &connector->base); + if (IS_ERR(connector_state)) + return PTR_ERR(connector_state); + + connector_state->crtc = new_crtc; + connector_state->best_encoder = &connector->new_encoder->base; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.base.id, connector->base.name, @@ -11480,9 +11980,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* Check for any encoders that need to be disabled. 
*/ for_each_intel_encoder(dev, encoder) { int num_connectors = 0; - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->new_encoder == encoder) { WARN_ON(!connector->new_encoder->new_crtc); num_connectors++; @@ -11497,16 +11995,25 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* Only now check for crtc changes so we don't miss encoders * that will be disabled. */ if (&encoder->new_crtc->base != encoder->base.crtc) { - DRM_DEBUG_KMS("crtc changed, full mode switch\n"); + DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n", + encoder->base.base.id, + encoder->base.name); config->mode_changed = true; } } /* Now we've also updated encoder->new_crtc for all encoders. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { - if (connector->new_encoder) + for_each_intel_connector(dev, connector) { + connector_state = + drm_atomic_get_connector_state(state, &connector->base); + if (IS_ERR(connector_state)) + return PTR_ERR(connector_state); + + if (connector->new_encoder) { if (connector->new_encoder != connector->encoder) connector->encoder = connector->new_encoder; + } else { + connector_state->crtc = NULL; + } } for_each_intel_crtc(dev, crtc) { crtc->new_enabled = false; @@ -11518,8 +12025,9 @@ intel_modeset_stage_output_state(struct drm_device *dev, } } - if (crtc->new_enabled != crtc->base.enabled) { - DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", + if (crtc->new_enabled != crtc->base.state->enable) { + DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n", + crtc->base.base.id, crtc->new_enabled ? "en" : "dis"); config->mode_changed = true; } @@ -11542,7 +12050,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc) DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", pipe_name(crtc->pipe)); - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, connector) { if (connector->new_encoder && connector->new_encoder->new_crtc == crtc) connector->new_encoder = NULL; @@ -11561,6 +12069,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set) { struct drm_device *dev; struct drm_mode_set save_set; + struct drm_atomic_state *state = NULL; struct intel_set_config *config; struct intel_crtc_state *pipe_config; unsigned modeset_pipes, prepare_pipes, disable_pipes; @@ -11605,12 +12114,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set) * such cases. 
*/ intel_set_config_compute_mode_changes(set, config); - ret = intel_modeset_stage_output_state(dev, set, config); + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out_config; + } + + state->acquire_ctx = dev->mode_config.acquire_ctx; + + ret = intel_modeset_stage_output_state(dev, set, config, state); if (ret) goto fail; pipe_config = intel_modeset_compute_config(set->crtc, set->mode, - set->fb, + set->fb, state, &modeset_pipes, &prepare_pipes, &disable_pipes); @@ -11630,10 +12147,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set) */ } - /* set_mode will free it in the mode_changed case */ - if (!config->mode_changed) - kfree(pipe_config); - intel_update_pipe_size(to_intel_crtc(set->crtc)); if (config->mode_changed) { @@ -11679,6 +12192,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set) fail: intel_set_config_restore_state(dev, config); + drm_atomic_state_clear(state); + /* * HACK: if the pipe was on, but we didn't have a framebuffer, * force the pipe off to avoid oopsing in the modeset code @@ -11691,11 +12206,15 @@ fail: /* Try to restore the config */ if (config->mode_changed && intel_set_mode(save_set.crtc, save_set.mode, - save_set.x, save_set.y, save_set.fb)) + save_set.x, save_set.y, save_set.fb, + state)) DRM_ERROR("failed to restore config after modeset failure\n"); } out_config: + if (state) + drm_atomic_state_free(state); + intel_set_config_free(config); return ret; } @@ -11809,6 +12328,28 @@ static void intel_shared_dpll_init(struct drm_device *dev) BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); } +/** + * intel_wm_need_update - Check whether watermarks need updating + * @plane: drm plane + * @state: new plane state + * + * Check current plane state versus the new one to determine whether + * watermarks need to be recalculated. + * + * Returns true or false. + */ +bool intel_wm_need_update(struct drm_plane *plane, + struct drm_plane_state *state) +{ + /* Update watermarks on tiling changes. 
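For example (an illustrative case, not an exhaustive list): swapping in a fb whose modifier[0] differs, say linear vs. X-tiled, or changing the plane rotation must return true so the caller knows to recompute the watermarks. 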
*/ + if (!plane->state->fb || !state->fb || + plane->state->fb->modifier[0] != state->fb->modifier[0] || + plane->state->rotation != state->rotation) + return true; + + return false; +} + /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @plane: drm plane to prepare for @@ -11823,7 +12364,8 @@ static void intel_shared_dpll_init(struct drm_device *dev) */ int intel_prepare_plane_fb(struct drm_plane *plane, - struct drm_framebuffer *fb) + struct drm_framebuffer *fb, + const struct drm_plane_state *new_state) { struct drm_device *dev = plane->dev; struct intel_plane *intel_plane = to_intel_plane(plane); @@ -11857,7 +12399,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) DRM_DEBUG_KMS("failed to attach phys object\n"); } else { - ret = intel_pin_and_fence_fb_obj(plane, fb, NULL); + ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL); } if (ret == 0) @@ -11877,7 +12419,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, */ void intel_cleanup_plane_fb(struct drm_plane *plane, - struct drm_framebuffer *fb) + struct drm_framebuffer *fb, + const struct drm_plane_state *old_state) { struct drm_device *dev = plane->dev; struct drm_i915_gem_object *obj = intel_fb_obj(fb); @@ -11888,7 +12431,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, if (plane->type != DRM_PLANE_TYPE_CURSOR || !INTEL_INFO(dev)->cursor_needs_physical) { mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(obj); + intel_unpin_fb_obj(fb, old_state); mutex_unlock(&dev->struct_mutex); } } @@ -11933,7 +12476,7 @@ intel_check_primary_plane(struct drm_plane *plane, */ if (intel_crtc->primary_enabled && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && - dev_priv->fbc.plane == intel_crtc->plane && + dev_priv->fbc.crtc == intel_crtc && state->base.rotation != BIT(DRM_ROTATE_0)) { intel_crtc->atomic.disable_fbc = true; } @@ -11952,6 +12495,9 @@ intel_check_primary_plane(struct drm_plane *plane, INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); intel_crtc->atomic.update_fbc = true; + + if (intel_wm_need_update(plane, &state->base)) + intel_crtc->atomic.update_wm = true; } return 0; @@ -11966,8 +12512,6 @@ intel_commit_primary_plane(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_rect *src = &state->src; crtc = crtc ? 
crtc : plane->crtc; @@ -11977,8 +12521,6 @@ intel_commit_primary_plane(struct drm_plane *plane, crtc->x = src->x1 >> 16; crtc->y = src->y1 >> 16; - intel_plane->obj = obj; - if (intel_crtc->active) { if (state->visible) { /* FIXME: kill this fastboot hack */ @@ -12218,17 +12760,14 @@ intel_check_cursor_plane(struct drm_plane *plane, return -ENOMEM; } - /* we only need to pin inside GTT if cursor is non-phy */ - mutex_lock(&dev->struct_mutex); - if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { + if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { DRM_DEBUG_KMS("cursor cannot be tiled\n"); ret = -EINVAL; } - mutex_unlock(&dev->struct_mutex); finish: if (intel_crtc->active) { - if (intel_crtc->cursor_width != state->base.crtc_w) + if (plane->state->crtc_w != state->base.crtc_w) intel_crtc->atomic.update_wm = true; intel_crtc->atomic.fb_bits |= @@ -12245,7 +12784,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, struct drm_crtc *crtc = state->base.crtc; struct drm_device *dev = plane->dev; struct intel_crtc *intel_crtc; - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); uint32_t addr; @@ -12256,8 +12794,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, crtc->cursor_x = state->base.crtc_x; crtc->cursor_y = state->base.crtc_y; - intel_plane->obj = obj; - if (intel_crtc->cursor_bo == obj) goto update; @@ -12271,8 +12807,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = obj; update: - intel_crtc->cursor_width = state->base.crtc_w; - intel_crtc->cursor_height = state->base.crtc_h; if (intel_crtc->active) intel_crtc_update_cursor(crtc, state->visible); @@ -12342,6 +12876,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) if (!crtc_state) goto fail; intel_crtc_set_state(intel_crtc, crtc_state); + crtc_state->base.crtc = &intel_crtc->base; primary = intel_primary_plane_create(dev, pipe); if (!primary) @@ -12419,9 +12954,6 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_crtc *drmmode_crtc; struct intel_crtc *crtc; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); if (!drmmode_crtc) { @@ -12491,7 +13023,6 @@ static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; - struct drm_connector *connector; bool dpd_is_edp = false; intel_lvds_init(dev); @@ -12502,10 +13033,15 @@ static void intel_setup_outputs(struct drm_device *dev) if (HAS_DDI(dev)) { int found; - /* Haswell uses DDI functions to detect digital outputs */ + /* + * Haswell uses DDI functions to detect digital outputs. + * On SKL pre-D0 the strap isn't connected, so we assume + * it's there. + */ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; - /* DDI A only supports eDP */ - if (found) + /* WaIgnoreDDIAStrap: skl */ + if (found || + (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0)) intel_ddi_init(dev, PORT_A); /* DDI B, C and D detection is indicated by the SFUSE_STRAP @@ -12622,37 +13158,6 @@ static void intel_setup_outputs(struct drm_device *dev) if (SUPPORTS_TV(dev)) intel_tv_init(dev); - /* - * FIXME: We don't have full atomic support yet, but we want to be - * able to enable/test plane updates via the atomic interface in the - * meantime. 
However as soon as we flip DRIVER_ATOMIC on, the DRM core - * will take some atomic codepaths to lookup properties during - * drmModeGetConnector() that unconditionally dereference - * connector->state. - * - * We create a dummy connector state here for each connector to ensure - * the DRM core doesn't try to dereference a NULL connector->state. - * The actual connector properties will never be updated or contain - * useful information, but since we're doing this specifically for - * testing/debug of the plane operations (and only when a specific - * kernel module option is given), that shouldn't really matter. - * - * Once atomic support for crtc's + connectors lands, this loop should - * be removed since we'll be setting up real connector state, which - * will contain Intel-specific properties. - */ - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { - list_for_each_entry(connector, - &dev->mode_config.connector_list, - head) { - if (!WARN_ON(connector->state)) { - connector->state = - kzalloc(sizeof(*connector->state), - GFP_KERNEL); - } - } - } - intel_psr_init(dev); for_each_intel_encoder(dev, encoder) { @@ -12694,52 +13199,100 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { .create_handle = intel_user_framebuffer_create_handle, }; +static +u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, + uint32_t pixel_format) +{ + u32 gen = INTEL_INFO(dev)->gen; + + if (gen >= 9) { + /* "The stride in bytes must not exceed the size of 8K + * pixels and 32K bytes." + */ + return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768); + } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) { + return 32*1024; + } else if (gen >= 4) { + if (fb_modifier == I915_FORMAT_MOD_X_TILED) + return 16*1024; + else + return 32*1024; + } else if (gen >= 3) { + if (fb_modifier == I915_FORMAT_MOD_X_TILED) + return 8*1024; + else + return 16*1024; + } else { + /* XXX DSPC is limited to 4k tiled */ + return 8*1024; + } +} + static int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *intel_fb, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj) { - int aligned_height; - int pitch_limit; + unsigned int aligned_height; int ret; + u32 pitch_limit, stride_alignment; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - if (obj->tiling_mode == I915_TILING_Y) { - DRM_DEBUG("hardware does not support tiling Y\n"); - return -EINVAL; + if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { + /* Enforce that fb modifier and tiling mode match, but only for + * X-tiled. This is needed for FBC. */ + if (!!(obj->tiling_mode == I915_TILING_X) != + !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { + DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); + return -EINVAL; + } + } else { + if (obj->tiling_mode == I915_TILING_X) + mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; + else if (obj->tiling_mode == I915_TILING_Y) { + DRM_DEBUG("No Y tiling for legacy addfb\n"); + return -EINVAL; + } } - if (mode_cmd->pitches[0] & 63) { - DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", - mode_cmd->pitches[0]); + /* Passed in modifier sanity checking. 
*/ + switch (mode_cmd->modifier[0]) { + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + if (INTEL_INFO(dev)->gen < 9) { + DRM_DEBUG("Unsupported tiling 0x%lx!\n", + mode_cmd->modifier[0]); + return -EINVAL; + } + case DRM_FORMAT_MOD_NONE: + case I915_FORMAT_MOD_X_TILED: + break; + default: + DRM_DEBUG("Unsupported fb modifier 0x%lx!\n", + mode_cmd->modifier[0]); + return -EINVAL; } - if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { - pitch_limit = 32*1024; - } else if (INTEL_INFO(dev)->gen >= 4) { - if (obj->tiling_mode) - pitch_limit = 16*1024; - else - pitch_limit = 32*1024; - } else if (INTEL_INFO(dev)->gen >= 3) { - if (obj->tiling_mode) - pitch_limit = 8*1024; - else - pitch_limit = 16*1024; - } else - /* XXX DSPC is limited to 4k tiled */ - pitch_limit = 8*1024; + stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0], + mode_cmd->pixel_format); + if (mode_cmd->pitches[0] & (stride_alignment - 1)) { + DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n", + mode_cmd->pitches[0], stride_alignment); + return -EINVAL; + } + pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0], + mode_cmd->pixel_format); if (mode_cmd->pitches[0] > pitch_limit) { - DRM_DEBUG("%s pitch (%d) must be at less than %d\n", - obj->tiling_mode ? "tiled" : "linear", + DRM_DEBUG("%s pitch (%u) must be less than %d\n", + mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ? + "tiled" : "linear", mode_cmd->pitches[0], pitch_limit); return -EINVAL; } - if (obj->tiling_mode != I915_TILING_NONE && + if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && mode_cmd->pitches[0] != obj->stride) { DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", mode_cmd->pitches[0], obj->stride); @@ -12794,7 +13347,8 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; aligned_height = intel_fb_align_height(dev, mode_cmd->height, - obj->tiling_mode); + mode_cmd->pixel_format, + mode_cmd->modifier[0]); /* FIXME drm helper for size checks (especially planar formats)? */ if (obj->base.size < aligned_height * mode_cmd->pitches[0]) return -EINVAL; @@ -12947,8 +13501,6 @@ static void intel_init_display(struct drm_device *dev) } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - dev_priv->display.modeset_global_resources = - ivb_modeset_global_resources; } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; } else if (IS_VALLEYVIEW(dev)) { @@ -12956,9 +13508,6 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.queue_flip = valleyview_modeset_global_resources; } - /* Default just returns -ENODEV to indicate unsupported */ - dev_priv->display.queue_flip = intel_default_queue_flip; - switch (INTEL_INFO(dev)->gen) { case 2: dev_priv->display.queue_flip = intel_gen2_queue_flip; break; @@ -12981,8 +13530,10 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.queue_flip = intel_gen7_queue_flip; break; case 9: - dev_priv->display.queue_flip = intel_gen9_queue_flip; - break; + /* Drop through - unsupported since execlist only. 
*/ + default: + /* Default just returns -ENODEV to indicate unsupported */ + dev_priv->display.queue_flip = intel_default_queue_flip; } intel_panel_init_backlight_funcs(dev); @@ -13076,9 +13627,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = { }; static struct intel_quirk intel_quirks[] = { - /* HP Mini needs pipe A force quirk (LP: #322104) */ - { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, - /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, @@ -13209,6 +13757,8 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.preferred_depth = 24; dev->mode_config.prefer_shadow = 1; + dev->mode_config.allow_fb_modifiers = true; + dev->mode_config.funcs = &intel_mode_funcs; intel_init_quirks(dev); @@ -13251,7 +13801,7 @@ void intel_modeset_init(struct drm_device *dev) for_each_pipe(dev_priv, pipe) { intel_crtc_init(dev, pipe); - for_each_sprite(pipe, sprite) { + for_each_sprite(dev_priv, pipe, sprite) { ret = intel_plane_init(dev, pipe, sprite); if (ret) DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", @@ -13294,7 +13844,7 @@ void intel_modeset_init(struct drm_device *dev) * If the fb is shared between multiple heads, we'll * just get the first one. */ - intel_find_plane_obj(crtc, &crtc->plane_config); + intel_find_initial_plane_obj(crtc, &crtc->plane_config); } } } @@ -13309,9 +13859,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) /* We can't just switch on the pipe A, we need to set things up with a * proper mode and output configuration. As a gross hack, enable pipe A * by enabling the load detect pipe once. */ - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { crt = &connector->base; break; @@ -13322,7 +13870,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) return; if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) - intel_release_load_detect_pipe(crt, &load_detect_temp); + intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); } static bool @@ -13356,11 +13904,11 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); /* restore vblank interrupts to correct state */ + drm_crtc_vblank_reset(&crtc->base); if (crtc->active) { update_scanline_offset(crtc); - drm_vblank_on(dev, crtc->pipe); - } else - drm_vblank_off(dev, crtc->pipe); + drm_crtc_vblank_on(&crtc->base); + } /* We need to sanitize the plane -> pipe mapping first because this will * disable the crtc (and hence change the state) if it is wrong. Note @@ -13382,8 +13930,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) crtc->plane = plane; /* ... and break all links. 
*/ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->encoder->base.crtc != &crtc->base) continue; @@ -13392,14 +13939,14 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) } /* multiple connectors may have the same encoder: * handle them and break crtc link separately */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) + for_each_intel_connector(dev, connector) if (connector->encoder->base.crtc == &crtc->base) { connector->encoder->base.crtc = NULL; connector->encoder->connectors_active = false; } WARN_ON(crtc->active); + crtc->base.state->enable = false; crtc->base.enabled = false; } @@ -13416,7 +13963,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * have active connectors/encoders. */ intel_crtc_update_dpms(&crtc->base); - if (crtc->active != crtc->base.enabled) { + if (crtc->active != crtc->base.state->enable) { struct intel_encoder *encoder; /* This can happen either due to bugs in the get_hw_state @@ -13424,9 +13971,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * pipe A quirk. */ DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", crtc->base.base.id, - crtc->base.enabled ? "enabled" : "disabled", + crtc->base.state->enable ? "enabled" : "disabled", crtc->active ? "enabled" : "disabled"); + crtc->base.state->enable = crtc->active; crtc->base.enabled = crtc->active; /* Because we only establish the connector -> encoder -> @@ -13495,9 +14043,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * a bug in one of the get_hw_state functions. Or someplace else * in our code, like the register restore mess on resume. Clamp * things to off as a safer default. */ - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->encoder != encoder) continue; connector->base.dpms = DRM_MODE_DPMS_OFF; @@ -13563,6 +14109,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = dev_priv->display.get_pipe_config(crtc, crtc->config); + crtc->base.state->enable = crtc->active; crtc->base.enabled = crtc->active; crtc->primary_enabled = primary_get_hw_state(crtc); @@ -13611,8 +14158,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe_name(pipe)); } - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->get_hw_state(connector)) { connector->base.dpms = DRM_MODE_DPMS_ON; connector->encoder->connectors_active = true; @@ -13668,6 +14214,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, "[setup_hw_state]"); } + intel_modeset_update_connector_atomic_state(dev); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; @@ -13696,8 +14244,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, - crtc->primary->fb); + intel_crtc_restore_mode(crtc); } } else { intel_modeset_update_staged_output_state(dev); @@ -13711,6 +14258,7 @@ void intel_modeset_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct drm_i915_gem_object *obj; + int ret; mutex_lock(&dev->struct_mutex); intel_init_gt_powersave(dev); @@ -13735,15 +14283,18 @@ void intel_modeset_gem_init(struct 
drm_device *dev) * pinned & fenced. When we do the allocation it's too early * for this. */ - mutex_lock(&dev->struct_mutex); for_each_crtc(dev, c) { obj = intel_fb_obj(c->primary->fb); if (obj == NULL) continue; - if (intel_pin_and_fence_fb_obj(c->primary, - c->primary->fb, - NULL)) { + mutex_lock(&dev->struct_mutex); + ret = intel_pin_and_fence_fb_obj(c->primary, + c->primary->fb, + c->primary->state, + NULL); + mutex_unlock(&dev->struct_mutex); + if (ret) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); @@ -13751,7 +14302,6 @@ void intel_modeset_gem_init(struct drm_device *dev) update_state_fb(c->primary); } } - mutex_unlock(&dev->struct_mutex); intel_backlight_register(dev); } @@ -13792,8 +14342,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_fbc_disable(dev); - ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); /* flush any delayed tasks or pending work */ diff --git a/sys/dev/drm/i915/intel_dp.c b/sys/dev/drm/i915/intel_dp.c index 4b70287e69..be93020d10 100644 --- a/sys/dev/drm/i915/intel_dp.c +++ b/sys/dev/drm/i915/intel_dp.c @@ -85,6 +85,13 @@ static const struct dp_link_dpll chv_dpll[] = { { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */ { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } }; +/* Skylake supports the following rates */ +static const int gen9_rates[] = { 162000, 216000, 270000, + 324000, 432000, 540000 }; +static const int chv_rates[] = { 162000, 202500, 210000, 216000, + 243000, 270000, 324000, 405000, + 420000, 432000, 540000 }; +static const int default_rates[] = { 162000, 270000, 540000 }; /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) @@ -119,23 +126,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); static void vlv_steal_power_sequencer(struct drm_device *dev, enum i915_pipe pipe); -int -intel_dp_max_link_bw(struct intel_dp *intel_dp) +static int +intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; - struct drm_device *dev = intel_dp->attached_connector->base.dev; switch (max_link_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: - break; - case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || - INTEL_INFO(dev)->gen >= 8) && - intel_dp->dpcd[DP_DPCD_REV] >= 0x12) - max_link_bw = DP_LINK_BW_5_4; - else - max_link_bw = DP_LINK_BW_2_7; + case DP_LINK_BW_5_4: break; default: WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", @@ -211,7 +210,7 @@ intel_dp_mode_valid(struct drm_connector *connector, target_clock = fixed_mode->clock; } - max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); + max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); @@ -241,7 +240,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) return v; } -void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -884,10 +883,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, DP_AUX_CH_CTL_RECEIVE_ERROR)) continue; if (status & DP_AUX_CH_CTL_DONE) - break; + goto done; } - if (status & DP_AUX_CH_CTL_DONE) - break; } if ((status & DP_AUX_CH_CTL_DONE) == 0) { @@ -896,6 +893,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, goto out; } +done: /* Check for timeout or receive 
error. * Timeouts occur when the sink is not connected */ @@ -946,8 +944,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) size_t txsize, rxsize; int ret; - txbuf[0] = msg->request << 4; - txbuf[1] = msg->address >> 8; + txbuf[0] = (msg->request << 4) | + ((msg->address >> 16) & 0xf); + txbuf[1] = (msg->address >> 8) & 0xff; txbuf[2] = msg->address & 0xff; txbuf[3] = msg->size - 1; @@ -955,7 +954,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) case DP_AUX_NATIVE_WRITE: case DP_AUX_I2C_WRITE: txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; - rxsize = 1; + rxsize = 2; /* 0 or 1 data bytes */ if (WARN_ON(txsize > 20)) return -E2BIG; @@ -966,8 +965,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (ret > 0) { msg->reply = rxbuf[0] >> 4; - /* Return payload size. */ - ret = msg->size; + if (ret > 1) { + /* Number of bytes written in a short write. */ + ret = clamp_t(int, rxbuf[1], 0, msg->size); + } else { + /* Return payload size. */ + ret = msg->size; + } } break; @@ -1216,7 +1220,7 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, #endif static void -skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw) +skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) { u32 ctrl1; @@ -1225,19 +1229,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw) pipe_config->dpll_hw_state.cfgcr2 = 0; ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - switch (link_bw) { - case DP_LINK_BW_1_62: + switch (link_clock / 2) { + case 81000: ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, SKL_DPLL0); break; - case DP_LINK_BW_2_7: + case 135000: ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, SKL_DPLL0); break; - case DP_LINK_BW_5_4: + case 270000: ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, SKL_DPLL0); break; + case 162000: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620, + SKL_DPLL0); + break; + /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which + results in CDCLK change. 
Need to handle the change of CDCLK by + disabling pipes and re-enabling them */ + case 108000: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080, + SKL_DPLL0); + break; + case 216000: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160, + SKL_DPLL0); + break; + } pipe_config->dpll_hw_state.ctrl1 = ctrl1; } @@ -1258,6 +1278,42 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw) } } +static int +intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) +{ + if (intel_dp->num_sink_rates) { + *sink_rates = intel_dp->sink_rates; + return intel_dp->num_sink_rates; + } + + *sink_rates = default_rates; + + return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; +} + +static int +intel_dp_source_rates(struct drm_device *dev, const int **source_rates) +{ + if (INTEL_INFO(dev)->gen >= 9) { + *source_rates = gen9_rates; + return ARRAY_SIZE(gen9_rates); + } else if (IS_CHERRYVIEW(dev)) { + *source_rates = chv_rates; + return ARRAY_SIZE(chv_rates); + } + + *source_rates = default_rates; + + if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) + /* WaDisableHBR2:skl */ + return (DP_LINK_BW_2_7 >> 3) + 1; + else if (INTEL_INFO(dev)->gen >= 8 || + (IS_HASWELL(dev) && !IS_HSW_ULX(dev))) + return (DP_LINK_BW_5_4 >> 3) + 1; + else + return (DP_LINK_BW_2_7 >> 3) + 1; +} + static void intel_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, int link_bw) @@ -1291,6 +1347,113 @@ intel_dp_set_clock(struct intel_encoder *encoder, } } +static int intersect_rates(const int *source_rates, int source_len, + const int *sink_rates, int sink_len, + int *common_rates) +{ + int i = 0, j = 0, k = 0; + + while (i < source_len && j < sink_len) { + if (source_rates[i] == sink_rates[j]) { + if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) + return k; + common_rates[k] = source_rates[i]; + ++k; + ++i; + ++j; + } else if (source_rates[i] < sink_rates[j]) { + ++i; + } else { + ++j; + } + } + return k; +} + +static int intel_dp_common_rates(struct intel_dp *intel_dp, + int *common_rates) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + const int *source_rates, *sink_rates; + int source_len, sink_len; + + sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); + source_len = intel_dp_source_rates(dev, &source_rates); + + return intersect_rates(source_rates, source_len, + sink_rates, sink_len, + common_rates); +} + +static void snprintf_int_array(char *str, size_t len, + const int *array, int nelem) +{ + int i; + + str[0] = '\0'; + + for (i = 0; i < nelem; i++) { + int r = ksnprintf(str, len, "%d,", array[i]); + if (r >= len) + return; + str += r; + len -= r; + } +} + +static void intel_dp_print_rates(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + const int *source_rates, *sink_rates; + int source_len, sink_len, common_len; + int common_rates[DP_MAX_SUPPORTED_RATES]; + char str[128]; /* FIXME: too big for stack? 
*/ + + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + source_len = intel_dp_source_rates(dev, &source_rates); + snprintf_int_array(str, sizeof(str), source_rates, source_len); + DRM_DEBUG_KMS("source rates: %s\n", str); + + sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); + snprintf_int_array(str, sizeof(str), sink_rates, sink_len); + DRM_DEBUG_KMS("sink rates: %s\n", str); + + common_len = intel_dp_common_rates(intel_dp, common_rates); + snprintf_int_array(str, sizeof(str), common_rates, common_len); + DRM_DEBUG_KMS("common rates: %s\n", str); +} + +static int rate_to_index(int find, const int *rates) +{ + int i = 0; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) + if (find == rates[i]) + break; + + return i; +} + +int +intel_dp_max_link_rate(struct intel_dp *intel_dp) +{ + int rates[DP_MAX_SUPPORTED_RATES] = {}; + int len; + + len = intel_dp_common_rates(intel_dp, rates); + if (WARN_ON(len <= 0)) + return 162000; + + return rates[rate_to_index(0, rates) - 1]; +} + +int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) +{ + return rate_to_index(rate, intel_dp->sink_rates); +} + bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) @@ -1300,24 +1463,32 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; - struct intel_crtc *intel_crtc = encoder->new_crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; int min_lane_count = 1; int max_lane_count = intel_dp_max_lane_count(intel_dp); /* Conveniently, the link BW constants become indices with a shift...*/ int min_clock = 0; - int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; + int max_clock; int bpp, mode_rate; - static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; int link_avail, link_clock; + int common_rates[DP_MAX_SUPPORTED_RATES] = {}; + int common_len; + + common_len = intel_dp_common_rates(intel_dp, common_rates); + + /* No common link rates between source and sink */ + WARN_ON(common_len <= 0); + + max_clock = common_len - 1; if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) pipe_config->has_pch_encoder = true; pipe_config->has_dp_encoder = true; pipe_config->has_drrs = false; - pipe_config->has_audio = intel_dp->has_audio; + pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { intel_fixed_panel_mode(intel_connector->panel.fixed_mode, @@ -1334,8 +1505,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, return false; DRM_DEBUG_KMS("DP link computation with max lane count %i " - "max bw %02x pixel clock %iKHz\n", - max_lane_count, bws[max_clock], + "max bw %d pixel clock %iKHz\n", + max_lane_count, common_rates[max_clock], adjusted_mode->crtc_clock); /* Walk through all bpp values. 
Luckily they're all nicely spaced with 2 @@ -1364,8 +1535,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, bpp); for (clock = min_clock; clock <= max_clock; clock++) { - for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { - link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); + for (lane_count = min_lane_count; + lane_count <= max_lane_count; + lane_count <<= 1) { + + link_clock = common_rates[clock]; link_avail = intel_dp_max_data_rate(link_clock, lane_count); @@ -1394,10 +1568,20 @@ found: if (intel_dp->color_range) pipe_config->limited_color_range = true; - intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; + + if (intel_dp->num_sink_rates) { + intel_dp->link_bw = 0; + intel_dp->rate_select = + intel_dp_rate_select(intel_dp, common_rates[clock]); + } else { + intel_dp->link_bw = + drm_dp_link_rate_to_bw_code(common_rates[clock]); + intel_dp->rate_select = 0; + } + pipe_config->pipe_bpp = bpp; - pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + pipe_config->port_clock = common_rates[clock]; DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", intel_dp->link_bw, intel_dp->lane_count, @@ -1420,7 +1604,7 @@ found: } if (IS_SKYLAKE(dev) && is_edp(intel_dp)) - skl_edp_set_pll_config(pipe_config, intel_dp->link_bw); + skl_edp_set_pll_config(pipe_config, common_rates[clock]); else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); else @@ -2167,8 +2351,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, int dotclock; tmp = I915_READ(intel_dp->output_reg); - if (tmp & DP_AUDIO_OUTPUT_ENABLE) - pipe_config->has_audio = true; + + pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { if (tmp & DP_SYNC_HS_HIGH) @@ -2698,11 +2882,6 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder) /* Program Tx lane latency optimal setting*/ for (i = 0; i < 4; i++) { - /* Set the latency optimal bit */ - data = (i == 1) ? 0x0 : 0x6; - vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i), - data << DPIO_FRC_LATENCY_SHFIT); - /* Set the upar bit */ data = (i == 1) ? 
0x0 : 0x1; vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i), @@ -2728,6 +2907,8 @@ static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) enum i915_pipe pipe = intel_crtc->pipe; u32 val; + intel_dp_prepare(encoder); + mutex_lock(&dev_priv->dpio_lock); /* program left/right clock distribution */ @@ -2830,11 +3011,14 @@ static uint8_t intel_dp_voltage_max(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); + struct drm_i915_private *dev_priv = dev->dev_private; enum port port = dp_to_dig_port(intel_dp)->port; - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_INFO(dev)->gen >= 9) { + if (dev_priv->vbt.edp_low_vswing && port == PORT_A) + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; - else if (IS_VALLEYVIEW(dev)) + } else if (IS_VALLEYVIEW(dev)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; else if (IS_GEN7(dev) && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; @@ -2858,6 +3042,8 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) return DP_TRAIN_PRE_EMPH_LEVEL_2; case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: return DP_TRAIN_PRE_EMPH_LEVEL_1; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + return DP_TRAIN_PRE_EMPH_LEVEL_0; default: return DP_TRAIN_PRE_EMPH_LEVEL_0; } @@ -3340,6 +3526,9 @@ intel_hsw_signal_levels(uint8_t train_set) return DDI_BUF_TRANS_SELECT(7); case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1: return DDI_BUF_TRANS_SELECT(8); + + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0: + return DDI_BUF_TRANS_SELECT(9); default: DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:" "0x%x\n", signal_levels); @@ -3497,6 +3686,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); + if (intel_dp->num_sink_rates) + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, + &intel_dp->rate_select, 1); link_config[0] = 0; link_config[1] = DP_SET_ANSI_8B10B; @@ -3709,6 +3901,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + uint8_t rev; if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) @@ -3740,6 +3933,33 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } else intel_dp->use_tps3 = false; + /* Intermediate frequency support */ + if (is_edp(intel_dp) && + (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && + (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && + (rev >= 0x03)) { /* eDP v1.4 or higher */ + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + int i; + + intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_SUPPORTED_LINK_RATES, + sink_rates, + sizeof(sink_rates)); + + for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { + int val = le16_to_cpu(sink_rates[i]); + + if (val == 0) + break; + + /* Value read is in kHz while drm clock is saved in deca-kHz */ + intel_dp->sink_rates[i] = (val * 200) / 10; + } + intel_dp->num_sink_rates = i; + } + + intel_dp_print_rates(intel_dp); + if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ @@ -3932,7 +4152,7 @@ go_again: * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. 
Check link status on receipt of hot-plug interrupt */ -void +static void intel_dp_check_link_status(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -4105,41 +4325,23 @@ static enum drm_connector_status g4x_dp_detect(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - uint32_t bit; + int ret; - if (IS_VALLEYVIEW(dev)) { - switch (intel_dig_port->port) { - case PORT_B: - bit = PORTB_HOTPLUG_LIVE_STATUS_VLV; - break; - case PORT_C: - bit = PORTC_HOTPLUG_LIVE_STATUS_VLV; - break; - case PORT_D: - bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; - break; - default: - return connector_status_unknown; - } - } else { - switch (intel_dig_port->port) { - case PORT_B: - bit = PORTB_HOTPLUG_LIVE_STATUS_G4X; - break; - case PORT_C: - bit = PORTC_HOTPLUG_LIVE_STATUS_G4X; - break; - case PORT_D: - bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; - break; - default: - return connector_status_unknown; - } + /* Can't disconnect eDP, but you can close the lid... */ + if (is_edp(intel_dp)) { + enum drm_connector_status status; + + status = intel_panel_detect(dev); + if (status == connector_status_unknown) + status = connector_status_connected; + return status; } - if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) + ret = g4x_digital_port_connected(dev, intel_dig_port); + if (ret == -EINVAL) + return connector_status_unknown; + else if (ret == 0) return connector_status_disconnected; return intel_dp_detect_dpcd(intel_dp); @@ -4540,6 +4742,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .destroy = intel_dp_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { @@ -4556,9 +4759,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = { void intel_dp_hot_plug(struct intel_encoder *intel_encoder) { - struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); - - intel_dp_check_link_status(intel_dp); + return; } bool @@ -4595,7 +4796,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) if (long_hpd) { ret = true; - goto put_power; if (HAS_PCH_SPLIT(dev)) { if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) @@ -4608,16 +4808,32 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) if (!intel_dp_get_dpcd(intel_dp)) { goto mst_fail; } - } - /* - * we'll check the link status via the normal hot plug path later - - * but for short hpds we should check it now - */ - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - intel_dp_check_link_status(intel_dp); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - ret = false; + intel_dp_probe_oui(intel_dp); + +#if 0 + if (!intel_dp_probe_mst(intel_dp)) + goto mst_fail; +#endif + + } else { + if (intel_dp->is_mst) { +#if 0 + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) + goto mst_fail; +#endif + } + + if (!intel_dp->is_mst) { + /* + * we'll check the link status via the normal hot plug path later - + * but for short hpds we should check it now + */ + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + intel_dp_check_link_status(intel_dp); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } + } goto put_power; mst_fail: @@ 
-4625,7 +4841,9 @@ mst_fail: if (intel_dp->is_mst) { DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state); intel_dp->is_mst = false; +#if 0 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); +#endif } put_power: intel_display_power_put(dev_priv, power_domain); @@ -4876,6 +5094,18 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, I915_READ(pp_div_reg)); } +/** + * intel_dp_set_drrs_state - program registers for RR switch to take effect + * @dev: DRM device + * @refresh_rate: RR to be programmed + * + * This function gets called when refresh rate (RR) has to be changed from + * one frequency to another. Switches can be between high and low RR + * supported by the panel or to any other RR based on media playback (in + * this case, RR value needs to be passed from user space). + * + * The caller of this function needs to take a lock on dev_priv->drrs. + */ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4904,7 +5134,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) dig_port = dp_to_dig_port(intel_dp); encoder = &dig_port->base; - intel_crtc = encoder->new_crtc; + intel_crtc = to_intel_crtc(encoder->base.crtc); if (!intel_crtc) { DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); @@ -4933,14 +5163,32 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) return; } - if (INTEL_INFO(dev)->gen > 6 && INTEL_INFO(dev)->gen < 8) { + if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) { + switch (index) { + case DRRS_HIGH_RR: + intel_dp_set_m_n(intel_crtc, M1_N1); + break; + case DRRS_LOW_RR: + intel_dp_set_m_n(intel_crtc, M2_N2); + break; + case DRRS_MAX_RR: + default: + DRM_ERROR("Unsupported refreshrate type\n"); + } + } else if (INTEL_INFO(dev)->gen > 6) { reg = PIPECONF(intel_crtc->config->cpu_transcoder); val = I915_READ(reg); + if (index > DRRS_HIGH_RR) { - val |= PIPECONF_EDP_RR_MODE_SWITCH; - intel_dp_set_m_n(intel_crtc); + if (IS_VALLEYVIEW(dev)) + val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val |= PIPECONF_EDP_RR_MODE_SWITCH; } else { - val &= ~PIPECONF_EDP_RR_MODE_SWITCH; + if (IS_VALLEYVIEW(dev)) + val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + val &= ~PIPECONF_EDP_RR_MODE_SWITCH; } I915_WRITE(reg, val); } @@ -4950,6 +5198,12 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate); } +/** + * intel_edp_drrs_enable - init drrs struct if supported + * @intel_dp: DP struct + * + * Initializes frontbuffer_bits and drrs.dp + */ void intel_edp_drrs_enable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -4977,6 +5231,11 @@ unlock: mutex_unlock(&dev_priv->drrs.mutex); } +/** + * intel_edp_drrs_disable - Disable DRRS + * @intel_dp: DP struct + * + */ void intel_edp_drrs_disable(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); @@ -5032,10 +5291,20 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work) downclock_mode->vrefresh); unlock: - mutex_unlock(&dev_priv->drrs.mutex); } +/** + * intel_edp_drrs_invalidate - Invalidate DRRS + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * When there is a disturbance on screen (due to cursor movement/time + * update etc), DRRS needs to be invalidated, i.e. need to switch to + * high RR. 
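+ * + * As an illustration only (not a call site added by this patch), a cursor + * update on pipe A would be expected to reach DRRS roughly as: + * + * intel_edp_drrs_invalidate(dev, INTEL_FRONTBUFFER_CURSOR(PIPE_A)); + * ... cursor image / position changes ... + * intel_edp_drrs_flush(dev, INTEL_FRONTBUFFER_CURSOR(PIPE_A)); + * + * forcing the high RR before the update and allowing a downclock once the + * screen goes idle again.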
+ * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ void intel_edp_drrs_invalidate(struct drm_device *dev, unsigned frontbuffer_bits) { @@ -5043,15 +5312,21 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, struct drm_crtc *crtc; enum i915_pipe pipe; - if (!dev_priv->drrs.dp) + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) return; + cancel_delayed_work(&dev_priv->drrs.work); + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { - cancel_delayed_work_sync(&dev_priv->drrs.work); intel_dp_set_drrs_state(dev_priv->dev, dev_priv->drrs.dp->attached_connector->panel. fixed_mode->vrefresh); @@ -5063,6 +5338,17 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, mutex_unlock(&dev_priv->drrs.mutex); } +/** + * intel_edp_drrs_flush - Flush DRRS + * @dev: DRM device + * @frontbuffer_bits: frontbuffer plane tracking bits + * + * When there is no movement on screen, DRRS work can be scheduled. + * This DRRS work is responsible for setting relevant registers after a + * timeout of 1 second. + * + * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits. + */ void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits) { @@ -5070,16 +5356,21 @@ void intel_edp_drrs_flush(struct drm_device *dev, struct drm_crtc *crtc; enum i915_pipe pipe; - if (!dev_priv->drrs.dp) + if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED) return; + cancel_delayed_work(&dev_priv->drrs.work); + mutex_lock(&dev_priv->drrs.mutex); + if (!dev_priv->drrs.dp) { + mutex_unlock(&dev_priv->drrs.mutex); + return; + } + crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; - cancel_delayed_work_sync(&dev_priv->drrs.work); - if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR && !dev_priv->drrs.busy_frontbuffer_bits) schedule_delayed_work(&dev_priv->drrs.work, @@ -5087,6 +5378,56 @@ void intel_edp_drrs_flush(struct drm_device *dev, mutex_unlock(&dev_priv->drrs.mutex); } +/** + * DOC: Display Refresh Rate Switching (DRRS) + * + * Display Refresh Rate Switching (DRRS) is a power conservation feature + * which enables switching between low and high refresh rates, + * dynamically, based on the usage scenario. This feature is applicable + * for internal panels. + * + * Indication that the panel supports DRRS is given by the panel EDID, which + * would list multiple refresh rates for one resolution. + * + * DRRS is of 2 types - static and seamless. + * Static DRRS involves changing refresh rate (RR) by doing a full modeset + * (may appear as a blink on screen) and is used in dock-undock scenario. + * Seamless DRRS involves changing RR without any visual effect to the user + * and can be used during normal system usage. This is done by programming + * certain registers. + * + * Support for static/seamless DRRS may be indicated in the VBT based on + * inputs from the panel spec. + * + * DRRS saves power by switching to low RR based on usage scenarios. + * + * eDP DRRS:- + * The implementation is based on frontbuffer tracking. + * When there is a disturbance on the screen triggered by user activity or a + * periodic system activity, DRRS is disabled (RR is changed to high RR). 
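+ * An illustrative sequence: a cursor move triggers an invalidate, any + * pending downclock work is cancelled and the RR is forced back to high; + * once the matching flush lands and the screen stays idle long enough, the + * delayed work runs and the RR is lowered again.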
+ * When there is no movement on screen, after a timeout of 1 second, a switch + * to low RR is made. + * For integration with frontbuffer tracking code, + * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. + * + * DRRS can be further extended to support other internal panels and also + * the scenario of video playback wherein RR is set based on the rate + * requested by userspace. + */ + +/** + * intel_dp_drrs_init - Init basic DRRS work and mutex. + * @intel_connector: eDP connector + * @fixed_mode: preferred mode of panel + * + * This function is called only once at driver load to initialize basic + * DRRS stuff. + * + * Returns: + * Downclock mode if panel supports it, else return NULL. + * DRRS support is determined by the presence of downclock mode (apart + * from VBT setting). + */ static struct drm_display_mode * intel_dp_drrs_init(struct intel_connector *intel_connector, struct drm_display_mode *fixed_mode) @@ -5096,6 +5437,9 @@ intel_dp_drrs_init(struct intel_connector *intel_connector, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *downclock_mode = NULL; + INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); + lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE); + if (INTEL_INFO(dev)->gen <= 6) { DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n"); return NULL; @@ -5110,14 +5454,10 @@ intel_dp_drrs_init(struct intel_connector *intel_connector, (dev, fixed_mode, connector); if (!downclock_mode) { - DRM_DEBUG_KMS("DRRS not supported\n"); + DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n"); return NULL; } - INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); - - lockinit(&dev_priv->drrs.mutex, "i915dm", 0, LK_CANRECURSE); - dev_priv->drrs.type = dev_priv->vbt.drrs_type; dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR; @@ -5140,8 +5480,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct edid *edid; enum i915_pipe pipe = INVALID_PIPE; - dev_priv->drrs.type = DRRS_NOT_SUPPORTED; - if (!is_edp(intel_dp)) return true; @@ -5393,7 +5731,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) if (!intel_dig_port) return; - intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL); + intel_connector = intel_connector_alloc(); if (!intel_connector) { kfree(intel_dig_port); return; @@ -5452,6 +5790,28 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port) } } +#if 0 +void intel_dp_mst_suspend(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + /* disable MST */ + for (i = 0; i < I915_MAX_PORTS; i++) { + struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i]; + if (!intel_dig_port) + continue; + + if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { + if (!intel_dig_port->dp.can_mst) + continue; + if (intel_dig_port->dp.is_mst) + drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr); + } + } +} +#endif + void intel_dp_mst_resume(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; diff --git a/sys/dev/drm/i915/intel_dp_mst.c b/sys/dev/drm/i915/intel_dp_mst.c index 0e29fd2a29..93ba5a4b6c 100644 --- a/sys/dev/drm/i915/intel_dp_mst.c +++ b/sys/dev/drm/i915/intel_dp_mst.c @@ -37,11 +37,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = 
diff --git a/sys/dev/drm/i915/intel_dp_mst.c b/sys/dev/drm/i915/intel_dp_mst.c
index 0e29fd2a29..93ba5a4b6c 100644
--- a/sys/dev/drm/i915/intel_dp_mst.c
+++ b/sys/dev/drm/i915/intel_dp_mst.c
@@ -37,11 +37,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
 	struct intel_digital_port *intel_dig_port = intel_mst->primary;
 	struct intel_dp *intel_dp = &intel_dig_port->dp;
-	struct drm_device *dev = encoder->base.dev;
-	int bpp;
-	int lane_count, slots;
+	struct drm_atomic_state *state;
+	int bpp, i;
+	int lane_count, slots, rate;
 	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
-	struct intel_connector *found = NULL, *intel_connector;
+	struct intel_connector *found = NULL;
 	int mst_pbn;
 
 	pipe_config->dp_encoder_is_mst = true;
@@ -53,15 +53,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	 * seem to suggest we should do otherwise.
 	 */
 	lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
-	intel_dp->link_bw = intel_dp_max_link_bw(intel_dp);
+
+	rate = intel_dp_max_link_rate(intel_dp);
+
+	if (intel_dp->num_sink_rates) {
+		intel_dp->link_bw = 0;
+		intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
+	} else {
+		intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
+		intel_dp->rate_select = 0;
+	}
+
 	intel_dp->lane_count = lane_count;
 
 	pipe_config->pipe_bpp = 24;
-	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
+	pipe_config->port_clock = rate;
 
-	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
-		if (intel_connector->new_encoder == encoder) {
-			found = intel_connector;
+	state = pipe_config->base.state;
+
+	for (i = 0; i < state->num_connector; i++) {
+		if (!state->connectors[i])
+			continue;
+
+		if (state->connector_states[i]->best_encoder == &encoder->base) {
+			found = to_intel_connector(state->connectors[i]);
 			break;
 		}
 	}
@@ -141,7 +156,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
 	struct drm_crtc *crtc = encoder->base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
-	list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) {
+	for_each_intel_connector(dev, intel_connector) {
 		if (intel_connector->new_encoder == encoder) {
 			found = intel_connector;
 			break;
@@ -318,6 +333,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
 	.atomic_get_property = intel_connector_atomic_get_property,
 	.destroy = intel_dp_mst_connector_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
 
 static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -402,7 +418,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 	struct drm_connector *connector;
 	int i;
 
-	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	intel_connector = intel_connector_alloc();
 	if (!intel_connector)
 		return NULL;
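The compute_config change above is the MST side of the new eDP 1.4 link-rate handling mentioned in the commit message: when the sink publishes an explicit rate table via DP_SUPPORTED_LINK_RATES, the source selects an index into that table instead of a legacy bandwidth code. The same decision, isolated for clarity (the logic mirrors the hunk; the two helpers are the ones this patch introduces or already exist in the DP core):

/* intel_dp_rate_select() returns an index into intel_dp->sink_rates[];
 * drm_dp_link_rate_to_bw_code() maps e.g. 270000 kHz -> DP_LINK_BW_2_7. */
static void select_link_rate(struct intel_dp *intel_dp, int rate)
{
	if (intel_dp->num_sink_rates) {
		/* eDP 1.4: program DP_LINK_RATE_SET, zero DP_LINK_BW_SET */
		intel_dp->link_bw = 0;
		intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate);
	} else {
		/* classic DP: encode the rate as a link bandwidth code */
		intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate);
		intel_dp->rate_select = 0;
	}
}

Note that port_clock is now carried as a plain rate in kHz rather than being re-derived from the bandwidth code, which is what makes the intermediate eDP 1.4 rates representable at all.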
diff --git a/sys/dev/drm/i915/intel_drv.h b/sys/dev/drm/i915/intel_drv.h
index 32af893c6c..9758081da4 100644
--- a/sys/dev/drm/i915/intel_drv.h
+++ b/sys/dev/drm/i915/intel_drv.h
@@ -35,9 +35,7 @@
 #include
 #include
 #include
-
-#define DIV_ROUND_CLOSEST_ULL(ll, d) \
-({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
+#include
 
 /**
  * _wait_for - magic (register) wait macro
@@ -56,8 +54,8 @@
 			ret__ = -ETIMEDOUT;		\
 			break;				\
 		}					\
-		if (W && drm_can_sleep()) {		\
-			msleep(W);			\
+		if ((W) && drm_can_sleep()) {		\
+			usleep_range((W)*1000, (W)*2000); \
 		} else {				\
 			cpu_pause();			\
 		}					\
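Two fixes in one hunk: W is now parenthesized so the macro is safe with expression arguments, and the sleep uses usleep_range() instead of msleep(). For the short waits used here, msleep() can oversleep badly on a loaded system, while usleep_range((W)*1000, (W)*2000) bounds each poll interval between W and 2*W milliseconds. Typical use (the register and bit are chosen for illustration; wait_for() is the sleeping wrapper around _wait_for() defined nearby in this header):

/* Poll a status register for up to 100 ms, sleeping ~1 ms per iteration. */
if (wait_for((I915_READ(PP_STATUS) & PP_ON) != 0, 100))
	DRM_ERROR("panel power-on timed out\n");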
@@ -258,6 +256,7 @@ struct intel_plane_state {
 };
 
 struct intel_initial_plane_config {
+	struct intel_framebuffer *fb;
 	unsigned int tiling;
 	int size;
 	u32 base;
@@ -463,7 +462,6 @@ struct intel_crtc {
 
 	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
-	int16_t cursor_width, cursor_height;
 	uint32_t cursor_cntl;
 	uint32_t cursor_size;
 	uint32_t cursor_base;
@@ -500,16 +498,20 @@ struct intel_plane_wm_parameters {
 	uint8_t bytes_per_pixel;
 	bool enabled;
 	bool scaled;
+	u64 tiling;
+	unsigned int rotation;
 };
 
 struct intel_plane {
 	struct drm_plane base;
 	int plane;
 	enum i915_pipe pipe;
-	struct drm_i915_gem_object *obj;
 	bool can_scale;
 	int max_downscale;
 
+	/* FIXME convert to properties */
+	struct drm_intel_sprite_colorkey ckey;
+
 	/* Since we need to change the watermarks before/after
 	 * enabling/disabling the planes, we need to store the parameters here
 	 * as the other pieces of the struct may not reflect the values we want
@@ -526,7 +528,6 @@ struct intel_plane {
 	void (*update_plane)(struct drm_plane *plane,
 			     struct drm_crtc *crtc,
 			     struct drm_framebuffer *fb,
-			     struct drm_i915_gem_object *obj,
 			     int crtc_x, int crtc_y,
 			     unsigned int crtc_w, unsigned int crtc_h,
 			     uint32_t x, uint32_t y,
@@ -537,10 +538,6 @@ struct intel_plane {
 			   struct intel_plane_state *state);
 	void (*commit_plane)(struct drm_plane *plane,
 			     struct intel_plane_state *state);
-	int (*update_colorkey)(struct drm_plane *plane,
-			       struct drm_intel_sprite_colorkey *key);
-	void (*get_colorkey)(struct drm_plane *plane,
-			     struct drm_intel_sprite_colorkey *key);
 };
 
 struct intel_watermark_params {
@@ -563,6 +560,7 @@ struct cxsr_latency {
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
+#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
@@ -592,6 +590,26 @@ struct intel_hdmi {
 struct intel_dp_mst_encoder;
 #define DP_MAX_DOWNSTREAM_PORTS 0x10
 
+/*
+ * enum link_m_n_set:
+ * When the platform provides two sets of M_N registers for dp, we can
+ * program them and switch between them in case of DRRS.
+ * But when only one such register set is provided, we have to program the
+ * required divider value on that register set itself based on the DRRS state.
+ *
+ * M1_N1 : Program dp_m_n on M1_N1 registers
+ *	   dp_m2_n2 on M2_N2 registers (If supported)
+ *
+ * M2_N2 : Program dp_m2_n2 on M1_N1 registers
+ *	   M2_N2 registers are not supported
+ */
+
+enum link_m_n_set {
+	/* Sets the m1_n1 and m2_n2 */
+	M1_N1 = 0,
+	M2_N2
+};
+
 struct intel_dp {
 	uint32_t output_reg;
 	uint32_t aux_ch_ctl_reg;
@@ -601,10 +619,14 @@ struct intel_dp {
 	uint32_t color_range;
 	bool color_range_auto;
 	uint8_t link_bw;
+	uint8_t rate_select;
 	uint8_t lane_count;
 	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 	uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
 	uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+	/* sink rates as reported by DP_SUPPORTED_LINK_RATES */
+	uint8_t num_sink_rates;
+	int sink_rates[DP_MAX_SUPPORTED_RATES];
 	struct drm_dp_aux aux;
 	device_t dp_iic_bus;
 	uint8_t train_set[4];
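The new enum feeds the intel_dp_set_m_n() prototype changed further down in this header: a DRRS refresh-rate switch reprograms the DP M/N dividers without a full modeset. A hypothetical caller, just to show the intended use (the real switch lives in intel_dp.c's DRRS code, not in this excerpt):

/* High refresh rate -> program the M1/N1 values, low refresh rate ->
 * program the M2/N2 values; on single-register-set platforms M2_N2
 * rewrites the one M_N register set in place, per the comment above. */
static void drrs_program_m_n(struct intel_crtc *intel_crtc, bool low_rr)
{
	intel_dp_set_m_n(intel_crtc, low_rr ? M2_N2 : M1_N1);
}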
@@ -711,7 +733,7 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_crtc *crtc;
-	struct drm_i915_gem_object *old_fb_obj;
+	struct drm_framebuffer *old_fb;
 	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	atomic_t pending;
@@ -818,7 +840,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
 }
 
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
-void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
+void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
+				     unsigned int pipe_mask);
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
@@ -853,7 +876,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 
 /* intel_frontbuffer.c */
 void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     struct intel_engine_cs *ring);
+			     struct intel_engine_cs *ring,
+			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -878,10 +902,14 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 	intel_frontbuffer_flush(dev, frontbuffer_bits);
 }
 
-int intel_fb_align_height(struct drm_device *dev, int height,
-			  unsigned int tiling);
+unsigned int intel_fb_align_height(struct drm_device *dev,
+				   unsigned int height,
+				   uint32_t pixel_format,
+				   uint64_t fb_format_modifier);
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
+			      uint32_t pixel_format);
 
 /* intel_audio.c */
 void intel_init_audio(struct drm_device *dev);
@@ -900,6 +928,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc);
 void intel_crtc_control(struct drm_crtc *crtc, bool enable);
 void intel_crtc_update_dpms(struct drm_crtc *crtc);
 void intel_encoder_destroy(struct drm_encoder *encoder);
+int intel_connector_init(struct intel_connector *);
+struct intel_connector *intel_connector_alloc(void);
 void intel_connector_dpms(struct drm_connector *, int mode);
 bool intel_connector_get_hw_state(struct intel_connector *connector);
 void intel_modeset_check_state(struct drm_device *dev);
@@ -929,11 +959,12 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 				struct intel_load_detect_pipe *old,
 				struct drm_modeset_acquire_ctx *ctx);
 void intel_release_load_detect_pipe(struct drm_connector *connector,
-				    struct intel_load_detect_pipe *old);
+				    struct intel_load_detect_pipe *old,
+				    struct drm_modeset_acquire_ctx *ctx);
 int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 			       struct drm_framebuffer *fb,
+			       const struct drm_plane_state *plane_state,
 			       struct intel_engine_cs *pipelined);
-void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
 			   struct drm_mode_fb_cmd2 *mode_cmd,
@@ -943,9 +974,11 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe);
 void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 void intel_check_page_flip(struct drm_device *dev, int pipe);
 int intel_prepare_plane_fb(struct drm_plane *plane,
-			   struct drm_framebuffer *fb);
+			   struct drm_framebuffer *fb,
+			   const struct drm_plane_state *new_state);
 void intel_cleanup_plane_fb(struct drm_plane *plane,
-			    struct drm_framebuffer *fb);
+			    struct drm_framebuffer *fb,
+			    const struct drm_plane_state *old_state);
 int intel_plane_atomic_get_property(struct drm_plane *plane,
 				    const struct drm_plane_state *state,
 				    struct drm_property *property,
@@ -955,6 +988,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
 				    struct drm_property *property,
 				    uint64_t val);
 
+unsigned int
+intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
+		  uint64_t fb_format_modifier);
+
+static inline bool
+intel_rotation_90_or_270(unsigned int rotation)
+{
+	return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270));
+}
+
+bool intel_wm_need_update(struct drm_plane *plane,
+			  struct drm_plane_state *state);
+
 /* shared dpll functions */
 struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
 void assert_shared_dpll(struct drm_i915_private *dev_priv,
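intel_rotation_90_or_270() exists because a 90/270-degree plane rotation swaps the dimensions the watermark and tiling code must reason about, which is also why intel_plane_wm_parameters grew a rotation field above. A hedged sketch of the kind of caller this enables, assuming the plane state carries the same BIT(DRM_ROTATE_*) bitmask the helper tests:

/* Sketch: effective source size of a plane once rotation is applied.
 * src_w/src_h in drm_plane_state are 16.16 fixed point. */
static void plane_src_size(const struct drm_plane_state *state,
			   unsigned int *w, unsigned int *h)
{
	*w = state->src_w >> 16;
	*h = state->src_h >> 16;

	if (intel_rotation_90_or_270(state->rotation)) {
		unsigned int tmp = *w;	/* width and height swap */
		*w = *h;
		*h = tmp;
	}
}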
@@ -994,7 +1040,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
 		      struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc);
+void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
 int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
 void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
@@ -1009,6 +1055,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc);
 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
+unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
+				     struct drm_i915_gem_object *obj);
+
 /* intel_dp.c */
 void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -1018,7 +1067,6 @@ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-void intel_dp_check_link_status(struct intel_dp *intel_dp);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
 			     struct intel_crtc_state *pipe_config);
@@ -1033,17 +1081,11 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
 void intel_dp_mst_suspend(struct drm_device *dev);
 void intel_dp_mst_resume(struct drm_device *dev);
-int intel_dp_max_link_bw(struct intel_dp *intel_dp);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
 void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
-void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
-int intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-		       struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-		       unsigned int crtc_w, unsigned int crtc_h,
-		       uint32_t src_x, uint32_t src_y,
-		       uint32_t src_w, uint32_t src_h);
-int intel_disable_plane(struct drm_plane *plane);
 void intel_plane_destroy(struct drm_plane *plane);
 void intel_edp_drrs_enable(struct intel_dp *intel_dp);
 void intel_edp_drrs_disable(struct intel_dp *intel_dp);
@@ -1098,7 +1140,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
 void intel_fbc_update(struct drm_device *dev);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
 void intel_fbc_disable(struct drm_device *dev);
-void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits,
+			  enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+		     unsigned int frontbuffer_bits);
 
 /* intel_hdmi.c */
 void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
@@ -1214,8 +1260,9 @@ void intel_enable_gt_powersave(struct drm_device *dev);
 void intel_disable_gt_powersave(struct drm_device *dev);
 void intel_suspend_gt_powersave(struct drm_device *dev);
 void intel_reset_gt_powersave(struct drm_device *dev);
-void ironlake_teardown_rc6(struct drm_device *dev);
 void gen6_update_ring_freq(struct drm_device *dev);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_device *dev);
@@ -1232,14 +1279,9 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob);
 int intel_plane_init(struct drm_device *dev, enum i915_pipe pipe, int plane);
 void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
 			       enum plane plane);
-int intel_plane_set_property(struct drm_plane *plane,
-			     struct drm_property *prop,
-			     uint64_t val);
 int intel_plane_restore(struct drm_plane *plane);
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
-int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
 bool intel_pipe_update_start(struct intel_crtc *crtc,
 			     uint32_t *start_vbl_count);
 void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
@@ -1262,6 +1304,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector,
 struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc);
 void intel_crtc_destroy_state(struct drm_crtc *crtc,
 			      struct drm_crtc_state *state);
+static inline struct intel_crtc_state *
+intel_atomic_get_crtc_state(struct drm_atomic_state *state,
+			    struct intel_crtc *crtc)
+{
+	struct drm_crtc_state *crtc_state;
+	crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
+	if (IS_ERR(crtc_state))
+		return ERR_PTR(PTR_ERR(crtc_state));
+
+	return to_intel_crtc_state(crtc_state);
+}
 
 /* intel_atomic_plane.c */
 struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
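The intel_atomic_get_crtc_state() wrapper above only adds type safety over drm_atomic_get_crtc_state(), which can fail with -EDEADLK when lock acquisition has to back off. Callers follow the usual atomic pattern:

/* Usage sketch inside an atomic check/compute function: on
 * ERR_PTR(-EDEADLK) the atomic core drops all locks and retries
 * the whole transaction. */
struct intel_crtc_state *crtc_state;

crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
if (IS_ERR(crtc_state))
	return PTR_ERR(crtc_state);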
diff --git a/sys/dev/drm/i915/intel_dsi.c b/sys/dev/drm/i915/intel_dsi.c
index 03d76799e7..8bed6a3b68 100644
--- a/sys/dev/drm/i915/intel_dsi.c
+++ b/sys/dev/drm/i915/intel_dsi.c
@@ -854,7 +854,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
 
 	/* recovery disables */
-	I915_WRITE(MIPI_EOT_DISABLE(port), val);
+	I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
 
 	/* in terms of low power clock */
 	I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
@@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_connector_atomic_get_property,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 };
 
 void intel_dsi_init(struct drm_device *dev)
@@ -1006,7 +1007,7 @@ void intel_dsi_init(struct drm_device *dev)
 	if (!intel_dsi)
 		return;
 
-	intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
+	intel_connector = intel_connector_alloc();
 	if (!intel_connector) {
 		kfree(intel_dsi);
 		return;
diff --git a/sys/dev/drm/i915/intel_dsi_cmd.h b/sys/dev/drm/i915/intel_dsi_cmd.h
index 886779030f..e69de29bb2 100644
--- a/sys/dev/drm/i915/intel_dsi_cmd.h
+++ b/sys/dev/drm/i915/intel_dsi_cmd.h
@@ -1,39 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Author: Jani Nikula
- */
-
-#ifndef _INTEL_DSI_DSI_H
-#define _INTEL_DSI_DSI_H
-
-#include
-#include
-#include
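A note on the recurring kzalloc() -> intel_connector_alloc() conversions in this patch (eDP, DSI, and DP MST above): the newly registered .atomic_duplicate_state helpers duplicate connector->state, so every connector must be created with a base atomic state attached. A plausible shape of the two helpers declared in intel_drv.h (sketch only; their bodies are not part of this excerpt):

/* kzalloc of the drm_connector_state mirrors what
 * drm_atomic_helper_connector_duplicate_state() expects to find
 * in connector->state. */
int intel_connector_init(struct intel_connector *connector)
{
	struct drm_connector_state *connector_state;

	connector_state = kzalloc(sizeof(*connector_state), GFP_KERNEL);
	if (!connector_state)
		return -ENOMEM;

	connector->base.state = connector_state;
	return 0;
}

struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof(*connector), GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}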