/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/module.h>
#include <machine/clock.h>
#include <drm/i915_powerwell.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differ in the
 * latency required to enter and leave RC6 and in the voltage consumed by the
 * GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and the
 * one which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
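/* Illustrative example (not a value taken from this file): a platform that
 * can safely use RC6 and deep RC6, but not the deepest state, would be
 * described by the mask (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); the actual
 * mask is chosen per-platform by the RC6 enabling code elsewhere. */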
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc parameter
 */
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;
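	/* Worked example with illustrative numbers: a 1920-byte fb pitch that
	 * is smaller than the compressed-buffer pitch gives cfb_pitch = 1920,
	 * which in 64B units is programmed as 1920 / 64 - 1 = 29. */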
	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this param in other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}
	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}
	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}
	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}
	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;
	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;
out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
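/* Worked example, with illustrative numbers only: a 148500 kHz pixel clock
 * at 4 bytes per pixel with latency_ns = 5000 drains (148500 / 1000) * 4 *
 * 5000 / 1000 = 2960 bytes while memory is busy; with 64-byte cachelines
 * that is DIV_ROUND_UP(2960, 64) = 47 entries, so a hypothetical 96-entry
 * FIFO with a guard of 2 would be programmed with 96 - (47 + 2) = 47. */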
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 128) ?
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / entries;

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 128) ?
		DRAIN_LATENCY_PRECISION_64 : DRAIN_LATENCY_PRECISION_32;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / entries;

	return true;
}
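/* Illustrative example, assuming DRAIN_LATENCY_PRECISION_64 == 64: at a
 * 148500 kHz pixel clock and 4 bytes per pixel, entries = (148500 / 1000) *
 * 4 = 592 > 128, so the 64x precision multiplier is selected and
 * plane_dl = (64 * 64 * 4) / 592 = 27. */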
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
						  either 32 or 64 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_64;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_64;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_64;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_64;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) is_power_of_2(mask)
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
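/* Example with illustrative numbers: pixel_rate = 148500 (kHz), 4 bytes per
 * pixel and a latency of 4 (i.e. 0.4us in 0.1us units) gives 148500 * 4 * 4
 * = 2376000, and DIV_ROUND_UP_ULL(2376000, 64 * 10000) + 2 = 4 + 2 = 6. */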
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
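/* Example with illustrative numbers: latency = 4 (0.4us), pixel_rate =
 * 148500 and htotal = 2200 give (4 * 148500) / (2200 * 10000) = 0 complete
 * lines, so (0 + 1) * 1920 * 4 = 7680 bytes for a 1920-pixel wide plane,
 * and DIV_ROUND_UP(7680, 64) + 2 = 120 + 2 = 122. */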
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else if (!is_sprite)
			fifo_size /= 2;
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
2042 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2044 struct drm_i915_private *dev_priv = dev->dev_private;
2045 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2046 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2047 u32 linetime, ips_linetime;
2049 if (!intel_crtc_active(crtc))
2052 /* The WMs are computed based on how long it takes to fill a single
2053 * row at the given clock rate, multiplied by 8.
2055 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2057 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2058 intel_ddi_get_cdclk_freq(dev_priv));
2060 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2061 PIPE_WM_LINETIME_TIME(linetime);
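/*
 * Worked example (assuming the elided divisor is the mode's pixel
 * clock in kHz): htotal = 2200 and clock = 148500 kHz (1080p@60Hz)
 * give linetime = round(2200 * 1000 * 8 / 148500) = 119, i.e. a
 * ~14.8us line time expressed in 1/8 us units.
 */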
2064 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2066 struct drm_i915_private *dev_priv = dev->dev_private;
2068 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2069 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2071 wm[0] = (sskpd >> 56) & 0xFF;
2073 wm[0] = sskpd & 0xF;
2074 wm[1] = (sskpd >> 4) & 0xFF;
2075 wm[2] = (sskpd >> 12) & 0xFF;
2076 wm[3] = (sskpd >> 20) & 0x1FF;
2077 wm[4] = (sskpd >> 32) & 0x1FF;
2078 } else if (INTEL_INFO(dev)->gen >= 6) {
2079 uint32_t sskpd = I915_READ(MCH_SSKPD);
2081 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2082 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2083 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2084 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2085 } else if (INTEL_INFO(dev)->gen >= 5) {
2086 uint32_t mltr = I915_READ(MLTR_ILK);
2088 /* ILK primary LP0 latency is 700 ns */
2090 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2091 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
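/*
 * Note on units: level 0 values are kept in 0.1us steps (the 700 ns
 * ILK primary latency above would be stored as 7), while the WM1+
 * fields are in 0.5us steps, so e.g. a WM1 field of 4 means 2us.
 * intel_print_wm_latency() converts both to usec for logging.
 */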
2095 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2097 /* ILK sprite LP0 latency is 1300 ns */
2098 if (INTEL_INFO(dev)->gen == 5)
2102 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2104 /* ILK cursor LP0 latency is 1300 ns */
2105 if (INTEL_INFO(dev)->gen == 5)
2108 /* WaDoubleCursorLP3Latency:ivb */
2109 if (IS_IVYBRIDGE(dev))
2113 int ilk_wm_max_level(const struct drm_device *dev)
2115 /* how many WM levels are we expecting */
2116 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2118 else if (INTEL_INFO(dev)->gen >= 6)
2124 static void intel_print_wm_latency(struct drm_device *dev,
2125 const char *name,
2126 const uint16_t wm[5])
2128 int level, max_level = ilk_wm_max_level(dev);
2130 for (level = 0; level <= max_level; level++) {
2131 unsigned int latency = wm[level];
2134 DRM_ERROR("%s WM%d latency not provided\n",
2139 /* WM1+ latency values in 0.5us units */
2140 if (level > 0)
2141 latency *= 5;
2143 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2144 name, level, wm[level],
2145 latency / 10, latency % 10);
2149 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2150 uint16_t wm[5], uint16_t min)
2152 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2157 wm[0] = max(wm[0], min);
2158 for (level = 1; level <= max_level; level++)
2159 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
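/*
 * Worked example: snb_wm_latency_quirk() below passes min = 12, i.e.
 * 1.2us in wm[0]'s 0.1us units. The WM1+ values are kept in 0.5us
 * units, so they are raised to at least DIV_ROUND_UP(12, 5) = 3,
 * i.e. 1.5us, the smallest 0.5us step covering the 1.2us floor.
 */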
2164 static void snb_wm_latency_quirk(struct drm_device *dev)
2166 struct drm_i915_private *dev_priv = dev->dev_private;
2170 * The BIOS provided WM memory latency values are often
2171 * inadequate for high resolution displays. Adjust them.
2173 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2174 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2175 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2180 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2181 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2182 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2183 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2186 static void ilk_setup_wm_latency(struct drm_device *dev)
2188 struct drm_i915_private *dev_priv = dev->dev_private;
2190 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2192 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2193 sizeof(dev_priv->wm.pri_latency));
2194 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2195 sizeof(dev_priv->wm.pri_latency));
2197 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2198 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2200 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2201 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2202 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2205 snb_wm_latency_quirk(dev);
2208 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2209 struct ilk_pipe_wm_parameters *p)
2211 struct drm_device *dev = crtc->dev;
2212 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2213 enum i915_pipe pipe = intel_crtc->pipe;
2214 struct drm_plane *plane;
2216 if (!intel_crtc_active(crtc))
2220 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2221 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2222 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2223 p->cur.bytes_per_pixel = 4;
2224 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2225 p->cur.horiz_pixels = intel_crtc->cursor_width;
2226 /* TODO: for now, assume primary and cursor planes are always enabled. */
2227 p->pri.enabled = true;
2228 p->cur.enabled = true;
2230 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2231 struct intel_plane *intel_plane = to_intel_plane(plane);
2233 if (intel_plane->pipe == pipe) {
2234 p->spr = intel_plane->wm;
2240 static void ilk_compute_wm_config(struct drm_device *dev,
2241 struct intel_wm_config *config)
2243 struct intel_crtc *intel_crtc;
2245 /* Compute the currently _active_ config */
2246 for_each_intel_crtc(dev, intel_crtc) {
2247 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2249 if (!wm->pipe_enabled)
2252 config->sprites_enabled |= wm->sprites_enabled;
2253 config->sprites_scaled |= wm->sprites_scaled;
2254 config->num_pipes_active++;
2258 /* Compute new watermarks for the pipe */
2259 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2260 const struct ilk_pipe_wm_parameters *params,
2261 struct intel_pipe_wm *pipe_wm)
2263 struct drm_device *dev = crtc->dev;
2264 const struct drm_i915_private *dev_priv = dev->dev_private;
2265 int level, max_level = ilk_wm_max_level(dev);
2266 /* LP0 watermark maximums depend on this pipe alone */
2267 struct intel_wm_config config = {
2268 .num_pipes_active = 1,
2269 .sprites_enabled = params->spr.enabled,
2270 .sprites_scaled = params->spr.scaled,
2272 struct ilk_wm_maximums max;
2274 pipe_wm->pipe_enabled = params->active;
2275 pipe_wm->sprites_enabled = params->spr.enabled;
2276 pipe_wm->sprites_scaled = params->spr.scaled;
2278 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2279 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2282 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2283 if (params->spr.scaled)
2286 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2288 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2289 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2291 /* LP0 watermarks always use 1/2 DDB partitioning */
2292 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2294 /* At least LP0 must be valid */
2295 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2298 ilk_compute_wm_reg_maximums(dev, 1, &max);
2300 for (level = 1; level <= max_level; level++) {
2301 struct intel_wm_level wm = {};
2303 ilk_compute_wm_level(dev_priv, level, params, &wm);
2306 * Disable any watermark level that exceeds the
2307 * register maximums since such watermarks are
2308 * always invalid.
2309 */
2310 if (!ilk_validate_wm_level(level, &max, &wm))
2313 pipe_wm->wm[level] = wm;
2320 * Merge the watermarks from all active pipes for a specific level.
2322 static void ilk_merge_wm_level(struct drm_device *dev,
2323 int level,
2324 struct intel_wm_level *ret_wm)
2326 struct intel_crtc *intel_crtc;
2328 ret_wm->enable = true;
2330 for_each_intel_crtc(dev, intel_crtc) {
2331 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2332 const struct intel_wm_level *wm = &active->wm[level];
2334 if (!active->pipe_enabled)
2338 * The watermark values may have been used in the past,
2339 * so we must maintain them in the registers for some
2340 * time even if the level is now disabled.
2341 */
2342 if (!wm->enable)
2343 ret_wm->enable = false;
2345 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2346 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2347 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2348 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2353 * Merge all low power watermarks for all active pipes.
2355 static void ilk_wm_merge(struct drm_device *dev,
2356 const struct intel_wm_config *config,
2357 const struct ilk_wm_maximums *max,
2358 struct intel_pipe_wm *merged)
2360 int level, max_level = ilk_wm_max_level(dev);
2361 int last_enabled_level = max_level;
2363 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2364 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2365 config->num_pipes_active > 1)
2368 /* ILK: FBC WM must be disabled always */
2369 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2371 /* merge each WM1+ level */
2372 for (level = 1; level <= max_level; level++) {
2373 struct intel_wm_level *wm = &merged->wm[level];
2375 ilk_merge_wm_level(dev, level, wm);
2377 if (level > last_enabled_level)
2379 else if (!ilk_validate_wm_level(level, max, wm))
2380 /* make sure all following levels get disabled */
2381 last_enabled_level = level - 1;
2384 * The spec says it is preferred to disable
2385 * FBC WMs instead of disabling a WM level.
2387 if (wm->fbc_val > max->fbc) {
2389 merged->fbc_wm_enabled = false;
2394 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2396 * FIXME this is racy. FBC might get enabled later.
2397 * What we should check here is whether FBC can be
2398 * enabled sometime later.
2400 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2401 for (level = 2; level <= max_level; level++) {
2402 struct intel_wm_level *wm = &merged->wm[level];
2409 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2411 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2412 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
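/*
 * Example: with wm[4] enabled LP1/LP2/LP3 map to levels 1/3/4
 * (level 2 is skipped); otherwise they map straight to 1/2/3.
 */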
2415 /* The value we need to program into the WM_LPx latency field */
2416 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2418 struct drm_i915_private *dev_priv = dev->dev_private;
2420 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2423 return dev_priv->wm.pri_latency[level];
2426 static void ilk_compute_wm_results(struct drm_device *dev,
2427 const struct intel_pipe_wm *merged,
2428 enum intel_ddb_partitioning partitioning,
2429 struct ilk_wm_values *results)
2431 struct intel_crtc *intel_crtc;
2434 results->enable_fbc_wm = merged->fbc_wm_enabled;
2435 results->partitioning = partitioning;
2437 /* LP1+ register values */
2438 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2439 const struct intel_wm_level *r;
2441 level = ilk_wm_lp_to_level(wm_lp, merged);
2443 r = &merged->wm[level];
2446 * Maintain the watermark values even if the level is
2447 * disabled. Doing otherwise could cause underruns.
2449 results->wm_lp[wm_lp - 1] =
2450 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2451 (r->pri_val << WM1_LP_SR_SHIFT) |
2455 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2457 if (INTEL_INFO(dev)->gen >= 8)
2458 results->wm_lp[wm_lp - 1] |=
2459 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2461 results->wm_lp[wm_lp - 1] |=
2462 r->fbc_val << WM1_LP_FBC_SHIFT;
2465 * Always set WM1S_LP_EN when spr_val != 0, even if the
2466 * level is disabled. Doing otherwise could cause underruns.
2468 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2469 WARN_ON(wm_lp != 1);
2470 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2472 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2475 /* LP0 register values */
2476 for_each_intel_crtc(dev, intel_crtc) {
2477 enum i915_pipe pipe = intel_crtc->pipe;
2478 const struct intel_wm_level *r =
2479 &intel_crtc->wm.active.wm[0];
2481 if (WARN_ON(!r->enable))
2484 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2486 results->wm_pipe[pipe] =
2487 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2488 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2493 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2494 * case both are at the same level. Prefer r1 in case they're the same. */
2495 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2496 struct intel_pipe_wm *r1,
2497 struct intel_pipe_wm *r2)
2499 int level, max_level = ilk_wm_max_level(dev);
2500 int level1 = 0, level2 = 0;
2502 for (level = 1; level <= max_level; level++) {
2503 if (r1->wm[level].enable)
2505 if (r2->wm[level].enable)
2509 if (level1 == level2) {
2510 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2514 } else if (level1 > level2) {
2521 /* dirty bits used to track which watermarks need changes */
2522 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2523 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2524 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2525 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2526 #define WM_DIRTY_FBC (1 << 24)
2527 #define WM_DIRTY_DDB (1 << 25)
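/*
 * Resulting bit layout: bit (pipe) for the pipe WMs, bit (8 + pipe)
 * for the linetimes, bits 16-18 for LP1-LP3 (WM_DIRTY_LP(1) is
 * bit 16), bit 24 for FBC and bit 25 for DDB repartitioning.
 */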
2529 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2530 const struct ilk_wm_values *old,
2531 const struct ilk_wm_values *new)
2533 unsigned int dirty = 0;
2534 enum i915_pipe pipe;
2537 for_each_pipe(pipe) {
2538 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2539 dirty |= WM_DIRTY_LINETIME(pipe);
2540 /* Must disable LP1+ watermarks too */
2541 dirty |= WM_DIRTY_LP_ALL;
2544 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2545 dirty |= WM_DIRTY_PIPE(pipe);
2546 /* Must disable LP1+ watermarks too */
2547 dirty |= WM_DIRTY_LP_ALL;
2551 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2552 dirty |= WM_DIRTY_FBC;
2553 /* Must disable LP1+ watermarks too */
2554 dirty |= WM_DIRTY_LP_ALL;
2557 if (old->partitioning != new->partitioning) {
2558 dirty |= WM_DIRTY_DDB;
2559 /* Must disable LP1+ watermarks too */
2560 dirty |= WM_DIRTY_LP_ALL;
2563 /* LP1+ watermarks already deemed dirty, no need to continue */
2564 if (dirty & WM_DIRTY_LP_ALL)
2567 /* Find the lowest numbered LP1+ watermark in need of an update... */
2568 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2569 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2570 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2574 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2575 for (; wm_lp <= 3; wm_lp++)
2576 dirty |= WM_DIRTY_LP(wm_lp);
2581 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2582 unsigned int dirty)
2584 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2585 bool changed = false;
2587 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2588 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2589 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2592 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2593 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2594 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2597 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2598 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2599 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2604 * Don't touch WM1S_LP_EN here.
2605 * Doing so could cause underruns.
2612 * The spec says we shouldn't write when we don't need to, because every write
2613 * causes WMs to be re-evaluated, expending some power.
2615 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2616 struct ilk_wm_values *results)
2618 struct drm_device *dev = dev_priv->dev;
2619 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2623 dirty = ilk_compute_wm_dirty(dev, previous, results);
2627 _ilk_disable_lp_wm(dev_priv, dirty);
2629 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2630 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2631 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2632 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2633 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2634 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2636 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2637 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2638 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2639 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2640 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2641 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2643 if (dirty & WM_DIRTY_DDB) {
2644 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2645 val = I915_READ(WM_MISC);
2646 if (results->partitioning == INTEL_DDB_PART_1_2)
2647 val &= ~WM_MISC_DATA_PARTITION_5_6;
2649 val |= WM_MISC_DATA_PARTITION_5_6;
2650 I915_WRITE(WM_MISC, val);
2652 val = I915_READ(DISP_ARB_CTL2);
2653 if (results->partitioning == INTEL_DDB_PART_1_2)
2654 val &= ~DISP_DATA_PARTITION_5_6;
2656 val |= DISP_DATA_PARTITION_5_6;
2657 I915_WRITE(DISP_ARB_CTL2, val);
2661 if (dirty & WM_DIRTY_FBC) {
2662 val = I915_READ(DISP_ARB_CTL);
2663 if (results->enable_fbc_wm)
2664 val &= ~DISP_FBC_WM_DIS;
2666 val |= DISP_FBC_WM_DIS;
2667 I915_WRITE(DISP_ARB_CTL, val);
2670 if (dirty & WM_DIRTY_LP(1) &&
2671 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2672 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2674 if (INTEL_INFO(dev)->gen >= 7) {
2675 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2676 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2677 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2678 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2681 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2682 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2683 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2684 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2685 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2686 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2688 dev_priv->wm.hw = *results;
2691 static bool ilk_disable_lp_wm(struct drm_device *dev)
2693 struct drm_i915_private *dev_priv = dev->dev_private;
2695 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2698 static void ilk_update_wm(struct drm_crtc *crtc)
2700 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2701 struct drm_device *dev = crtc->dev;
2702 struct drm_i915_private *dev_priv = dev->dev_private;
2703 struct ilk_wm_maximums max;
2704 struct ilk_pipe_wm_parameters params = {};
2705 struct ilk_wm_values results = {};
2706 enum intel_ddb_partitioning partitioning;
2707 struct intel_pipe_wm pipe_wm = {};
2708 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2709 struct intel_wm_config config = {};
2711 ilk_compute_wm_parameters(crtc, ¶ms);
2713 intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm);
2715 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2718 intel_crtc->wm.active = pipe_wm;
2720 ilk_compute_wm_config(dev, &config);
2722 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2723 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2725 /* 5/6 split only in single pipe config on IVB+ */
2726 if (INTEL_INFO(dev)->gen >= 7 &&
2727 config.num_pipes_active == 1 && config.sprites_enabled) {
2728 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2729 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2731 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2733 best_lp_wm = &lp_wm_1_2;
2736 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2737 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2739 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2741 ilk_write_wm_values(dev_priv, &results);
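/*
 * Flow summary: recompute this CRTC's watermarks; if they changed,
 * merge all pipes for the 1:2 DDB split and, when eligible, the 5:6
 * split, pick the better result and program only the dirty registers.
 */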
2745 ilk_update_sprite_wm(struct drm_plane *plane,
2746 struct drm_crtc *crtc,
2747 uint32_t sprite_width, uint32_t sprite_height,
2748 int pixel_size, bool enabled, bool scaled)
2750 struct drm_device *dev = plane->dev;
2751 struct intel_plane *intel_plane = to_intel_plane(plane);
2753 intel_plane->wm.enabled = enabled;
2754 intel_plane->wm.scaled = scaled;
2755 intel_plane->wm.horiz_pixels = sprite_width;
2756 intel_plane->wm.vert_pixels = sprite_height;
2757 intel_plane->wm.bytes_per_pixel = pixel_size;
2760 * IVB workaround: must disable low power watermarks for at least
2761 * one frame before enabling scaling. LP watermarks can be re-enabled
2762 * when scaling is disabled.
2764 * WaCxSRDisabledForSpriteScaling:ivb
2766 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2767 intel_wait_for_vblank(dev, intel_plane->pipe);
2769 ilk_update_wm(crtc);
2772 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2774 struct drm_device *dev = crtc->dev;
2775 struct drm_i915_private *dev_priv = dev->dev_private;
2776 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2777 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2778 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2779 enum i915_pipe pipe = intel_crtc->pipe;
2780 static const unsigned int wm0_pipe_reg[] = {
2781 [PIPE_A] = WM0_PIPEA_ILK,
2782 [PIPE_B] = WM0_PIPEB_ILK,
2783 [PIPE_C] = WM0_PIPEC_IVB,
2786 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2787 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2788 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2790 active->pipe_enabled = intel_crtc_active(crtc);
2792 if (active->pipe_enabled) {
2793 u32 tmp = hw->wm_pipe[pipe];
2796 * For active pipes LP0 watermark is marked as
2797 * enabled, and LP1+ watermarks as disabled since
2798 * we can't really reverse compute them in case
2799 * multiple pipes are active.
2801 active->wm[0].enable = true;
2802 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2803 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2804 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2805 active->linetime = hw->wm_linetime[pipe];
2807 int level, max_level = ilk_wm_max_level(dev);
2810 * For inactive pipes, all watermark levels
2811 * should be marked as enabled but zeroed,
2812 * which is what we'd compute them to.
2814 for (level = 0; level <= max_level; level++)
2815 active->wm[level].enable = true;
2819 void ilk_wm_get_hw_state(struct drm_device *dev)
2821 struct drm_i915_private *dev_priv = dev->dev_private;
2822 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2823 struct drm_crtc *crtc;
2825 for_each_crtc(dev, crtc)
2826 ilk_pipe_wm_get_hw_state(crtc);
2828 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2829 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2830 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2832 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2833 if (INTEL_INFO(dev)->gen >= 7) {
2834 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2835 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2838 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2839 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2840 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2841 else if (IS_IVYBRIDGE(dev))
2842 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2843 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2846 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2850 * intel_update_watermarks - update FIFO watermark values based on current modes
2852 * Calculate watermark values for the various WM regs based on current mode
2853 * and plane configuration.
2855 * There are several cases to deal with here:
2856 * - normal (i.e. non-self-refresh)
2857 * - self-refresh (SR) mode
2858 * - lines are large relative to FIFO size (buffer can hold up to 2)
2859 * - lines are small relative to FIFO size (buffer can hold more than 2
2860 * lines), so need to account for TLB latency
2862 * The normal calculation is:
2863 * watermark = dotclock * bytes per pixel * latency
2864 * where latency is platform & configuration dependent (we assume pessimal
2865 * values here).
2866 *
2867 * The SR calculation is:
2868 * watermark = (trunc(latency/line time)+1) * surface width *
2869 * bytes per pixel
2870 * where
2871 * line time = htotal / dotclock
2872 * surface width = hdisplay for normal plane and 64 for cursor
2873 * and latency is assumed to be high, as above.
2875 * The final value programmed to the register should always be rounded up,
2876 * and include an extra 2 entries to account for clock crossings.
2878 * We don't use the sprite, so we can ignore that. And on Crestline we have
2879 * to set the non-SR watermarks to 8.
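*
* Worked SR example (illustrative, assuming 64-byte FIFO entries):
* latency 12us, htotal 2200, dotclock 148.5MHz -> line time =
* 2200/148500kHz ~= 14.8us, so trunc(12/14.8) + 1 = 1 line; a
* 1920-wide 4bpp surface then needs 1920*4 = 7680 bytes = 120
* entries, plus 2 for clock crossings, giving 122 after rounding up.
*/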
2881 void intel_update_watermarks(struct drm_crtc *crtc)
2883 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
2885 if (dev_priv->display.update_wm)
2886 dev_priv->display.update_wm(crtc);
2889 void intel_update_sprite_watermarks(struct drm_plane *plane,
2890 struct drm_crtc *crtc,
2891 uint32_t sprite_width,
2892 uint32_t sprite_height,
2893 int pixel_size,
2894 bool enabled, bool scaled)
2896 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2898 if (dev_priv->display.update_sprite_wm)
2899 dev_priv->display.update_sprite_wm(plane, crtc,
2900 sprite_width, sprite_height,
2901 pixel_size, enabled, scaled);
2904 static struct drm_i915_gem_object *
2905 intel_alloc_context_page(struct drm_device *dev)
2907 struct drm_i915_gem_object *ctx;
2910 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2912 ctx = i915_gem_alloc_object(dev, 4096);
2914 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2918 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2920 DRM_ERROR("failed to pin power context: %d\n", ret);
2924 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2926 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2933 i915_gem_object_ggtt_unpin(ctx);
2935 drm_gem_object_unreference(&ctx->base);
2940 * Lock protecting IPS related data structures
2942 struct lock mchdev_lock;
2943 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE);
2945 /* Global for IPS driver to get at the current i915 device. Protected by
2946 * mchdev_lock. */
2947 static struct drm_i915_private *i915_mch_dev;
2949 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2951 struct drm_i915_private *dev_priv = dev->dev_private;
2954 assert_spin_locked(&mchdev_lock);
2956 rgvswctl = I915_READ16(MEMSWCTL);
2957 if (rgvswctl & MEMCTL_CMD_STS) {
2958 DRM_DEBUG("gpu busy, RCS change rejected\n");
2959 return false; /* still busy with another command */
2962 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2963 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2964 I915_WRITE16(MEMSWCTL, rgvswctl);
2965 POSTING_READ16(MEMSWCTL);
2967 rgvswctl |= MEMCTL_CMD_STS;
2968 I915_WRITE16(MEMSWCTL, rgvswctl);
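/*
 * Sequence note: the first MEMSWCTL write latches the new frequency
 * request with the command/status bit clear, the posting read flushes
 * it, and re-writing with MEMCTL_CMD_STS set kicks off the change.
 * Hardware clears the bit again when the command completes (see the
 * wait in ironlake_enable_drps()).
 */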
2973 static void ironlake_enable_drps(struct drm_device *dev)
2975 struct drm_i915_private *dev_priv = dev->dev_private;
2976 u32 rgvmodectl = I915_READ(MEMMODECTL);
2977 u8 fmax, fmin, fstart, vstart;
2979 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2981 /* Enable temp reporting */
2982 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2983 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2985 /* 100ms RC evaluation intervals */
2986 I915_WRITE(RCUPEI, 100000);
2987 I915_WRITE(RCDNEI, 100000);
2989 /* Set max/min thresholds to 90ms and 80ms respectively */
2990 I915_WRITE(RCBMAXAVG, 90000);
2991 I915_WRITE(RCBMINAVG, 80000);
2993 I915_WRITE(MEMIHYST, 1);
2995 /* Set up min, max, and cur for interrupt handling */
2996 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2997 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2998 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2999 MEMMODE_FSTART_SHIFT;
3001 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
3004 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
3005 dev_priv->ips.fstart = fstart;
3007 dev_priv->ips.max_delay = fstart;
3008 dev_priv->ips.min_delay = fmin;
3009 dev_priv->ips.cur_delay = fstart;
3011 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
3012 fmax, fmin, fstart);
3014 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
3017 * Interrupts will be enabled in ironlake_irq_postinstall
3020 I915_WRITE(VIDSTART, vstart);
3021 POSTING_READ(VIDSTART);
3023 rgvmodectl |= MEMMODE_SWMODE_EN;
3024 I915_WRITE(MEMMODECTL, rgvmodectl);
3026 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
3027 DRM_ERROR("stuck trying to change perf mode\n");
3030 ironlake_set_drps(dev, fstart);
3032 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
3034 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
3035 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3036 getrawmonotonic(&dev_priv->ips.last_time2);
3038 lockmgr(&mchdev_lock, LK_RELEASE);
3041 static void ironlake_disable_drps(struct drm_device *dev)
3043 struct drm_i915_private *dev_priv = dev->dev_private;
3046 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
3048 rgvswctl = I915_READ16(MEMSWCTL);
3050 /* Ack interrupts, disable EFC interrupt */
3051 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3052 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3053 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3054 I915_WRITE(DEIIR, DE_PCU_EVENT);
3055 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3057 /* Go back to the starting frequency */
3058 ironlake_set_drps(dev, dev_priv->ips.fstart);
3060 rgvswctl |= MEMCTL_CMD_STS;
3061 I915_WRITE(MEMSWCTL, rgvswctl);
3064 lockmgr(&mchdev_lock, LK_RELEASE);
3067 /* There's a funny hw issue where the hw returns all 0 when reading from
3068 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3069 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3070 * all limits and the GPU getting stuck at whatever frequency it is at atm).
3072 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3076 /* Only set the down limit when we've reached the lowest level to avoid
3077 * getting more interrupts, otherwise leave this clear. This prevents a
3078 * race in the hw when coming out of rc6: There's a tiny window where
3079 * the hw runs at the minimal clock before selecting the desired
3080 * frequency, if the down threshold expires in that window we will not
3081 * receive a down interrupt. */
3082 limits = dev_priv->rps.max_freq_softlimit << 24;
3083 if (val <= dev_priv->rps.min_freq_softlimit)
3084 limits |= dev_priv->rps.min_freq_softlimit << 16;
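/*
 * Layout example: a max softlimit of 0x16 with the min softlimit of
 * 0x0b reached gives limits = (0x16 << 24) | (0x0b << 16) =
 * 0x160b0000, i.e. max in bits 31:24 and min in bits 23:16 of
 * GEN6_RP_INTERRUPT_LIMITS.
 */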
3089 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3093 new_power = dev_priv->rps.power;
3094 switch (dev_priv->rps.power) {
3096 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3097 new_power = BETWEEN;
3101 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3102 new_power = LOW_POWER;
3103 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3104 new_power = HIGH_POWER;
3108 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3109 new_power = BETWEEN;
3112 /* Max/min bins are special */
3113 if (val == dev_priv->rps.min_freq_softlimit)
3114 new_power = LOW_POWER;
3115 if (val == dev_priv->rps.max_freq_softlimit)
3116 new_power = HIGH_POWER;
3117 if (new_power == dev_priv->rps.power)
3120 /* Note the units here are not exactly 1us, but 1280ns. */
3121 switch (new_power) {
3123 /* Upclock if more than 95% busy over 16ms */
3124 I915_WRITE(GEN6_RP_UP_EI, 12500);
3125 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3127 /* Downclock if less than 85% busy over 32ms */
3128 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3129 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3131 I915_WRITE(GEN6_RP_CONTROL,
3132 GEN6_RP_MEDIA_TURBO |
3133 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3134 GEN6_RP_MEDIA_IS_GFX |
3136 GEN6_RP_UP_BUSY_AVG |
3137 GEN6_RP_DOWN_IDLE_AVG);
3141 /* Upclock if more than 90% busy over 13ms */
3142 I915_WRITE(GEN6_RP_UP_EI, 10250);
3143 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3145 /* Downclock if less than 75% busy over 32ms */
3146 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3147 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3149 I915_WRITE(GEN6_RP_CONTROL,
3150 GEN6_RP_MEDIA_TURBO |
3151 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3152 GEN6_RP_MEDIA_IS_GFX |
3154 GEN6_RP_UP_BUSY_AVG |
3155 GEN6_RP_DOWN_IDLE_AVG);
3159 /* Upclock if more than 85% busy over 10ms */
3160 I915_WRITE(GEN6_RP_UP_EI, 8000);
3161 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3163 /* Downclock if less than 60% busy over 32ms */
3164 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3165 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3167 I915_WRITE(GEN6_RP_CONTROL,
3168 GEN6_RP_MEDIA_TURBO |
3169 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3170 GEN6_RP_MEDIA_IS_GFX |
3172 GEN6_RP_UP_BUSY_AVG |
3173 GEN6_RP_DOWN_IDLE_AVG);
3177 dev_priv->rps.power = new_power;
3178 dev_priv->rps.last_adj = 0;
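/*
 * Unit check for the thresholds above: GEN6_RP_UP_EI = 12500 ticks *
 * 1.28us = 16ms, and an up threshold of 11800/12500 = 94.4% busy is
 * the "more than 95% busy over 16ms" case, to rounding.
 */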
3181 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3185 if (val > dev_priv->rps.min_freq_softlimit)
3186 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3187 if (val < dev_priv->rps.max_freq_softlimit)
3188 mask |= GEN6_PM_RP_UP_THRESHOLD;
3190 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
3191 mask &= dev_priv->pm_rps_events;
3193 /* IVB and SNB hard hang on a looping batchbuffer
3194 * if GEN6_PM_UP_EI_EXPIRED is masked.
3196 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3197 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3199 if (IS_GEN8(dev_priv->dev))
3200 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
3205 /* gen6_set_rps is called to update the frequency request, but should also be
3206 * called when the range (min_delay and max_delay) is modified so that we can
3207 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3208 void gen6_set_rps(struct drm_device *dev, u8 val)
3210 struct drm_i915_private *dev_priv = dev->dev_private;
3212 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3213 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3214 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3216 /* min/max delay may still have been modified so be sure to
3217 * write the limits value.
3219 if (val != dev_priv->rps.cur_freq) {
3220 gen6_set_rps_thresholds(dev_priv, val);
3222 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3223 I915_WRITE(GEN6_RPNSWREQ,
3224 HSW_FREQUENCY(val));
3226 I915_WRITE(GEN6_RPNSWREQ,
3227 GEN6_FREQUENCY(val) |
3229 GEN6_AGGRESSIVE_TURBO);
3232 /* Make sure we continue to get interrupts
3233 * until we hit the minimum or maximum frequencies.
3235 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3236 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3238 POSTING_READ(GEN6_RPNSWREQ);
3240 dev_priv->rps.cur_freq = val;
3241 trace_intel_gpu_freq_change(val * 50);
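/*
 * Frequencies here are in 50MHz units (hence val * 50 in the
 * tracepoint): e.g. requesting val = 18 asks the PCU for 900MHz.
 */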
3244 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3246 * * If Gfx is Idle, then
3247 * 1. Mask Turbo interrupts
3248 * 2. Bring up Gfx clock
3249 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3250 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
3251 * 5. Unmask Turbo interrupts
3253 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3257 struct drm_device *dev = dev_priv->dev;
3259 /* Latest VLV doesn't need to force the gfx clock */
3260 revision = pci_read_config(dev->dev, PCIR_REVID, 1);
3261 if (revision >= 0xd) {
3262 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3267 * When we are idle, drop to the min voltage state.
3270 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3273 /* Mask turbo interrupts so that they will not come in between */
3274 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3276 vlv_force_gfx_clock(dev_priv, true);
3278 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3280 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3281 dev_priv->rps.min_freq_softlimit);
3283 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3284 & GENFREQSTATUS) == 0, 5))
3285 DRM_ERROR("timed out waiting for Punit\n");
3287 vlv_force_gfx_clock(dev_priv, false);
3289 I915_WRITE(GEN6_PMINTRMSK,
3290 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3293 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3295 struct drm_device *dev = dev_priv->dev;
3297 mutex_lock(&dev_priv->rps.hw_lock);
3298 if (dev_priv->rps.enabled) {
3299 if (IS_CHERRYVIEW(dev))
3300 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3301 else if (IS_VALLEYVIEW(dev))
3302 vlv_set_rps_idle(dev_priv);
3304 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3305 dev_priv->rps.last_adj = 0;
3307 mutex_unlock(&dev_priv->rps.hw_lock);
3310 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3312 struct drm_device *dev = dev_priv->dev;
3314 mutex_lock(&dev_priv->rps.hw_lock);
3315 if (dev_priv->rps.enabled) {
3316 if (IS_VALLEYVIEW(dev))
3317 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3319 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3320 dev_priv->rps.last_adj = 0;
3322 mutex_unlock(&dev_priv->rps.hw_lock);
3325 void valleyview_set_rps(struct drm_device *dev, u8 val)
3327 struct drm_i915_private *dev_priv = dev->dev_private;
3329 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3330 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3331 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3333 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3334 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3335 dev_priv->rps.cur_freq,
3336 vlv_gpu_freq(dev_priv, val), val);
3338 if (val != dev_priv->rps.cur_freq)
3339 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3341 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3343 dev_priv->rps.cur_freq = val;
3344 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3347 static void gen8_disable_rps_interrupts(struct drm_device *dev)
3349 struct drm_i915_private *dev_priv = dev->dev_private;
3351 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
3352 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
3353 ~dev_priv->pm_rps_events);
3354 /* Complete PM interrupt masking here doesn't race with the rps work
3355 * item again unmasking PM interrupts because that is using a different
3356 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
3357 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
3358 * gen8_enable_rps will clean up. */
3360 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3361 dev_priv->rps.pm_iir = 0;
3362 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3364 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3367 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3369 struct drm_i915_private *dev_priv = dev->dev_private;
3371 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3372 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3373 ~dev_priv->pm_rps_events);
3374 /* Complete PM interrupt masking here doesn't race with the rps work
3375 * item again unmasking PM interrupts because that is using a different
3376 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3377 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3379 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3380 dev_priv->rps.pm_iir = 0;
3381 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3383 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3386 static void gen6_disable_rps(struct drm_device *dev)
3388 struct drm_i915_private *dev_priv = dev->dev_private;
3390 I915_WRITE(GEN6_RC_CONTROL, 0);
3391 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3393 if (IS_BROADWELL(dev))
3394 gen8_disable_rps_interrupts(dev);
3396 gen6_disable_rps_interrupts(dev);
3399 static void cherryview_disable_rps(struct drm_device *dev)
3401 struct drm_i915_private *dev_priv = dev->dev_private;
3403 I915_WRITE(GEN6_RC_CONTROL, 0);
3405 gen8_disable_rps_interrupts(dev);
3408 static void valleyview_disable_rps(struct drm_device *dev)
3410 struct drm_i915_private *dev_priv = dev->dev_private;
3412 I915_WRITE(GEN6_RC_CONTROL, 0);
3414 gen6_disable_rps_interrupts(dev);
3417 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3419 if (IS_VALLEYVIEW(dev)) {
3420 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3421 mode = GEN6_RC_CTL_RC6_ENABLE;
3425 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3426 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3427 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3428 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3431 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3433 /* No RC6 before Ironlake */
3434 if (INTEL_INFO(dev)->gen < 5)
3437 /* RC6 is only on Ironlake mobile not on desktop */
3438 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3441 /* Respect the kernel parameter if it is set */
3442 if (enable_rc6 >= 0) {
3445 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
3446 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3449 mask = INTEL_RC6_ENABLE;
3451 if ((enable_rc6 & mask) != enable_rc6)
3452 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3453 enable_rc6 & mask, enable_rc6, mask);
3455 return enable_rc6 & mask;
3458 /* Disable RC6 on Ironlake */
3459 if (INTEL_INFO(dev)->gen == 5)
3462 if (IS_IVYBRIDGE(dev))
3463 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3465 return INTEL_RC6_ENABLE;
3468 int intel_enable_rc6(const struct drm_device *dev)
3470 return i915.enable_rc6;
3473 static void gen8_enable_rps_interrupts(struct drm_device *dev)
3475 struct drm_i915_private *dev_priv = dev->dev_private;
3477 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3478 WARN_ON(dev_priv->rps.pm_iir);
3479 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3480 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3481 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3484 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3486 struct drm_i915_private *dev_priv = dev->dev_private;
3488 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3489 WARN_ON(dev_priv->rps.pm_iir);
3490 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3491 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3492 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3495 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3497 /* All of these values are in units of 50MHz */
3498 dev_priv->rps.cur_freq = 0;
3499 /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
3500 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3501 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3502 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3503 /* XXX: only BYT has a special efficient freq */
3504 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3505 /* hw_max = RP0 until we check for overclocking */
3506 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3508 /* Preserve min/max settings in case of re-init */
3509 if (dev_priv->rps.max_freq_softlimit == 0)
3510 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3512 if (dev_priv->rps.min_freq_softlimit == 0)
3513 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
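/*
 * Decode example: rp_state_cap = 0x000a0e16 -> RP0 = 0x16 (1100MHz),
 * RP1 = 0x0e (700MHz), RPn = 0x0a (500MHz), all in 50MHz units.
 */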
3516 static void gen8_enable_rps(struct drm_device *dev)
3518 struct drm_i915_private *dev_priv = dev->dev_private;
3519 struct intel_engine_cs *ring;
3520 uint32_t rc6_mask = 0, rp_state_cap;
3523 /* 1a: Software RC state - RC0 */
3524 I915_WRITE(GEN6_RC_STATE, 0);
3526 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3527 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3528 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3530 /* 2a: Disable RC states. */
3531 I915_WRITE(GEN6_RC_CONTROL, 0);
3533 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3534 parse_rp_state_cap(dev_priv, rp_state_cap);
3536 /* 2b: Program RC6 thresholds.*/
3537 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3538 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3539 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3540 for_each_ring(ring, dev_priv, unused)
3541 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3542 I915_WRITE(GEN6_RC_SLEEP, 0);
3543 if (IS_BROADWELL(dev))
3544 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
3546 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3549 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3550 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3551 intel_print_rc6_info(dev, rc6_mask);
3552 if (IS_BROADWELL(dev))
3553 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3554 GEN7_RC_CTL_TO_MODE |
3557 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3558 GEN6_RC_CTL_EI_MODE(1) |
3561 /* 4 Program defaults and thresholds for RPS*/
3562 I915_WRITE(GEN6_RPNSWREQ,
3563 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3564 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3565 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3566 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3567 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3569 /* Docs recommend 900MHz, and 300 MHz respectively */
3570 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3571 dev_priv->rps.max_freq_softlimit << 24 |
3572 dev_priv->rps.min_freq_softlimit << 16);
3574 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3575 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3576 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3577 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3579 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3582 I915_WRITE(GEN6_RP_CONTROL,
3583 GEN6_RP_MEDIA_TURBO |
3584 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3585 GEN6_RP_MEDIA_IS_GFX |
3587 GEN6_RP_UP_BUSY_AVG |
3588 GEN6_RP_DOWN_IDLE_AVG);
3590 /* 6: Ring frequency + overclocking (our driver does this later) */
3592 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3594 gen8_enable_rps_interrupts(dev);
3596 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3599 static void gen6_enable_rps(struct drm_device *dev)
3601 struct drm_i915_private *dev_priv = dev->dev_private;
3602 struct intel_engine_cs *ring;
3605 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3610 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3612 /* Here begins a magic sequence of register writes to enable
3613 * auto-downclocking.
3615 * Perhaps there might be some value in exposing these to
3616 * userspace...
3617 */
3618 I915_WRITE(GEN6_RC_STATE, 0);
3620 /* Clear the DBG now so we don't confuse earlier errors */
3621 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3622 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3623 I915_WRITE(GTFIFODBG, gtfifodbg);
3626 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3628 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3629 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3631 parse_rp_state_cap(dev_priv, rp_state_cap);
3633 /* disable the counters and set deterministic thresholds */
3634 I915_WRITE(GEN6_RC_CONTROL, 0);
3636 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3637 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3638 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3639 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3640 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3642 for_each_ring(ring, dev_priv, i)
3643 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3645 I915_WRITE(GEN6_RC_SLEEP, 0);
3646 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3647 if (IS_IVYBRIDGE(dev))
3648 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3650 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3651 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3652 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3654 /* Check if we are enabling RC6 */
3655 rc6_mode = intel_enable_rc6(dev_priv->dev);
3656 if (rc6_mode & INTEL_RC6_ENABLE)
3657 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3659 /* We don't use those on Haswell */
3660 if (!IS_HASWELL(dev)) {
3661 if (rc6_mode & INTEL_RC6p_ENABLE)
3662 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3664 if (rc6_mode & INTEL_RC6pp_ENABLE)
3665 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3668 intel_print_rc6_info(dev, rc6_mask);
3670 I915_WRITE(GEN6_RC_CONTROL,
3672 GEN6_RC_CTL_EI_MODE(1) |
3673 GEN6_RC_CTL_HW_ENABLE);
3675 /* Power down if completely idle for over 50ms */
3676 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3677 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3679 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3681 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3683 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3684 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3685 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3686 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3687 (pcu_mbox & 0xff) * 50);
3688 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3691 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3692 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3694 gen6_enable_rps_interrupts(dev);
3697 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3698 if (IS_GEN6(dev) && ret) {
3699 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3700 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3701 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3702 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3703 rc6vids &= 0xffff00;
3704 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3705 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3707 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3710 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3713 static void __gen6_update_ring_freq(struct drm_device *dev)
3715 struct drm_i915_private *dev_priv = dev->dev_private;
3717 unsigned int gpu_freq;
3718 unsigned int max_ia_freq, min_ring_freq;
3719 int scaling_factor = 180;
3721 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3724 policy = cpufreq_cpu_get(0);
3726 max_ia_freq = policy->cpuinfo.max_freq;
3727 cpufreq_cpu_put(policy);
3730 * Default to measured freq if none found, PCU will ensure we
3731 * don't go over
3732 */
3733 max_ia_freq = tsc_khz;
3736 max_ia_freq = tsc_frequency / 1000;
3739 /* Convert from kHz to MHz */
3740 max_ia_freq /= 1000;
3742 min_ring_freq = I915_READ(DCLK) & 0xf;
3743 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3744 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3747 * For each potential GPU frequency, load a ring frequency we'd like
3748 * to use for memory access. We do this by specifying the IA frequency
3749 * the PCU should use as a reference to determine the ring frequency.
3751 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3753 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3754 unsigned int ia_freq = 0, ring_freq = 0;
3756 if (INTEL_INFO(dev)->gen >= 8) {
3757 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3758 ring_freq = max(min_ring_freq, gpu_freq);
3759 } else if (IS_HASWELL(dev)) {
3760 ring_freq = mult_frac(gpu_freq, 5, 4);
3761 ring_freq = max(min_ring_freq, ring_freq);
3762 /* leave ia_freq as the default, chosen by cpufreq */
3764 /* On older processors, there is no separate ring
3765 * clock domain, so in order to boost the bandwidth
3766 * of the ring, we need to upclock the CPU (ia_freq).
3768 * For GPU frequencies less than 750MHz,
3769 * just use the lowest ring freq.
3771 if (gpu_freq < min_freq)
3774 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3775 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3778 sandybridge_pcode_write(dev_priv,
3779 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3780 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3781 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3786 void gen6_update_ring_freq(struct drm_device *dev)
3788 struct drm_i915_private *dev_priv = dev->dev_private;
3790 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
3793 mutex_lock(&dev_priv->rps.hw_lock);
3794 __gen6_update_ring_freq(dev);
3795 mutex_unlock(&dev_priv->rps.hw_lock);
3798 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3802 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3803 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3808 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3812 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
3813 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
3818 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
3822 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3823 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3828 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3832 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3833 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
3837 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
3841 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3843 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
3848 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3852 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3854 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3856 rp0 = min_t(u32, rp0, 0xea);
3861 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3865 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3866 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3867 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3868 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3873 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3875 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3878 /* Check that the pctx buffer wasn't moved under us. */
3879 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3881 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3883 /* DragonFly - if EDID fails vlv_pctx can wind up NULL */
3884 if (WARN_ON(!dev_priv->vlv_pctx))
3887 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3888 dev_priv->vlv_pctx->stolen->start);
3892 /* Check that the pcbr address is not empty. */
3893 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
3895 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3897 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
3900 static void cherryview_setup_pctx(struct drm_device *dev)
3902 struct drm_i915_private *dev_priv = dev->dev_private;
3903 unsigned long pctx_paddr, paddr;
3904 struct i915_gtt *gtt = &dev_priv->gtt;
3906 int pctx_size = 32*1024;
3908 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3910 pcbr = I915_READ(VLV_PCBR);
3911 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
3912 paddr = (dev_priv->mm.stolen_base +
3913 (gtt->stolen_size - pctx_size));
3915 pctx_paddr = (paddr & (~4095));
3916 I915_WRITE(VLV_PCBR, pctx_paddr);
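/*
 * The & ~4095 keeps the power context 4KiB-aligned: VLV_PCBR's
 * address field starts at bit 12 (VLV_PCBR_ADDR_SHIFT), so the low
 * 12 bits must stay clear.
 */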
3920 static void valleyview_setup_pctx(struct drm_device *dev)
3922 struct drm_i915_private *dev_priv = dev->dev_private;
3923 struct drm_i915_gem_object *pctx;
3924 unsigned long pctx_paddr;
3926 int pctx_size = 24*1024;
3928 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3930 pcbr = I915_READ(VLV_PCBR);
3932 /* BIOS set it up already, grab the pre-alloc'd space */
3935 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3936 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3938 I915_GTT_OFFSET_NONE,
3944 * From the Gunit register HAS:
3945 * The Gfx driver is expected to program this register and ensure
3946 * proper allocation within Gfx stolen memory. For example, this
3947 * register should be programmed such that the PCBR range does not
3948 * overlap with other ranges, such as the frame buffer, protected
3949 * memory, or any other relevant ranges.
3951 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3953 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3957 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3958 I915_WRITE(VLV_PCBR, pctx_paddr);
3961 dev_priv->vlv_pctx = pctx;
3964 static void valleyview_cleanup_pctx(struct drm_device *dev)
3966 struct drm_i915_private *dev_priv = dev->dev_private;
3968 if (WARN_ON(!dev_priv->vlv_pctx))
3971 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3972 dev_priv->vlv_pctx = NULL;
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void cherryview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	cherryview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}

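/*
 * The numbered comments in the CHV enable path below (1a/1b, 2a, 3, 4, 5)
 * track the documented RC6/RPS enabling sequence: forcewake, RC6
 * thresholds, RC6 enable, RPS thresholds, RPS enable.
 */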
static void cherryview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have. */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Program RC6 thresholds. */
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN6_RC_CTL_EI_MODE(1);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4: Program defaults and thresholds for RPS */
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* WaDisablePwrmtrEvent:chv (pre-production hw) */
	I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
	I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen8_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}

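/*
 * Ironlake RC6 needs two GTT-pinned context pages: renderctx for the
 * MI_SET_CONTEXT save area and pwrctx for the power context pointed at by
 * PWRCTXA. Teardown unpins and releases both.
 */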
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}

static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}

static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}

static void ironlake_enable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring = &dev_priv->ring[RCS];
	bool was_interruptible;
	int ret;

	/* rc6 disabled by default due to repeated reports of hanging during
	 * boot and resume.
	 */
	if (!intel_enable_rc6(dev))
		return;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ret = ironlake_setup_rc6(dev);
	if (ret)
		return;

	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	/*
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
	ret = intel_ring_begin(ring, 6);
	if (ret) {
		ironlake_teardown_rc6(dev);
		dev_priv->mm.interruptible = was_interruptible;
		return;
	}

	intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
	intel_ring_emit(ring, MI_SET_CONTEXT);
	intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
			MI_MM_SPACE_GTT |
			MI_SAVE_EXT_STATE_EN |
			MI_RESTORE_EXT_STATE_EN |
			MI_RESTORE_INHIBIT);
	intel_ring_emit(ring, MI_SUSPEND_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_advance(ring);

	/*
	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush, combined with MI_FLUSH above, it should be
	 * safe to assume that renderctx is valid
	 */
	ret = intel_ring_idle(ring);
	dev_priv->mm.interruptible = was_interruptible;
	if (ret) {
		DRM_ERROR("failed to enable ironlake power savings\n");
		ironlake_teardown_rc6(dev);
		return;
	}

	I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);

	intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
}

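/*
 * Decode a PXVFREQ register value into a frequency: the divider, post
 * divider and pre divider fields scale the 133 MHz reference, i.e.
 * freq = (div * 133333) / ((1 << post) * pre). As an illustrative example,
 * div = 24, post = 1, pre = 1 gives 24 * 133333 / 2 = 1599996.
 */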
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}

static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

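/*
 * Chipset power is estimated with a linear model: the DMI/DDR/CSI event
 * counters are summed, converted to a rate per millisecond, and fed
 * through (m * rate + c) / 10, with the slope/offset pair (m, c) picked
 * from cparams[] by matching the cached c_m and r_t configuration values.
 */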
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	val = __i915_chipset_val(dev_priv);

	lockmgr(&mchdev_lock, LK_RELEASE);

	return val;
}

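/*
 * i915_mch_val() derives the current thermal reading from the TSFS fuse
 * register: a slope (m) and intercept (b) applied to the TR1 sensor value
 * x, as ((m * x) / 127) - b.
 */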
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}

static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* table of per-PXVID voltage entries elided in this copy */
	};

	if (INTEL_INFO(dev)->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}

static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->ips.last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	__i915_update_gfx_val(dev_priv);

	lockmgr(&mchdev_lock, LK_RELEASE);
}

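/*
 * Graphics power is the sum of the counter-based running average kept in
 * ips.gfx_power and a static estimate derived from the current P-state
 * voltage (state1) and temperature, run through the empirically derived
 * correction factors below.
 */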
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	val = __i915_gfx_val(dev_priv);

	lockmgr(&mchdev_lock, LK_RELEASE);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}

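/*
 * Note that the ips.max_delay/min_delay values are delay codes, which scale
 * inversely with frequency: raising the GPU frequency limit therefore means
 * decrementing max_delay, and lowering it means incrementing max_delay.
 */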
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *ring;
	bool ret = false;
	int i;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	lockmgr(&mchdev_lock, LK_RELEASE);

	return ret;
}

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	i915_mch_dev = dev_priv;
	lockmgr(&mchdev_lock, LK_RELEASE);
}

void intel_gpu_ips_teardown(void)
{
	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
	i915_mch_dev = NULL;
	lockmgr(&mchdev_lock, LK_RELEASE);
}

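/*
 * intel_init_emon() programs the energy monitor (EMON) used by the IPS
 * heuristics above: per-event energy weights, per-P-state weights scaled
 * by voltage squared times frequency, and finally the correction fuse
 * that __i915_gfx_val() folds into its power estimate.
 */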
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_CHERRYVIEW(dev))
		cherryview_init_gt_powersave(dev);
	else if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_CHERRYVIEW(dev))
		return;
	else if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev: drm device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	cancel_work_sync(&dev_priv->rps.work);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(intel_irqs_enabled(dev_priv));

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		intel_suspend_gt_powersave(dev);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_CHERRYVIEW(dev))
			cherryview_disable_rps(dev);
		else if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_CHERRYVIEW(dev)) {
		cherryview_enable_rps(dev);
	} else if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}

void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.enabled = false;
	intel_enable_gt_powersave(dev);
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}

static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}

static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}

static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch:snb */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

static void gen8_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* FIXME(BDW): Check all the w/a, some might only apply to
	 * pre-production hw. */

	/* WaDisablePartialInstShootdown:bdw */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:bdw */
	/* FIXME: Unclear whether we really need this on production bdw. */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/*
	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
	 * pre-production hardware
	 */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));

	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));

	I915_WRITE(COMMON_SLICE_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));

	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));

	/* WaDisableDopClockGating:bdw May not be needed for production */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	I915_WRITE(HDC_CHICKEN0,
		   I915_READ(HDC_CHICKEN0) |
		   _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}

static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}

static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}

static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}

static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 2) & 0x7) {
	case 0:
	case 1:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
		dev_priv->mem_freq = 1600;
		break;
	case 2:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
		dev_priv->mem_freq = 1600;
		break;
	case 3:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
		dev_priv->mem_freq = 2000;
		break;
	case 4:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
		dev_priv->mem_freq = 1600;
		break;
	case 5:
		dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaDisablePartialInstShootdown:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:chv */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSamplerPowerBypass:chv (pre-production hw) */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));

	/* WaDisableGunitClockGating:chv (pre-production hw) */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
		   GINT_DIS);

	/* WaDisableFfDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));

	/* WaDisableDopClockGating:chv (pre-production hw) */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}

#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
	     ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			\
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							\
		if ((power_well)->domains & (domain_mask))

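/*
 * Both iterators above walk the power_wells[] array (forward and in
 * reverse) and use the trailing if as a filter, so the caller's loop body
 * only runs for wells whose domain mask intersects domain_mask.
 */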
/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
					  enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->hw_enabled) {
			is_enabled = false;
			break;
		}
	}

	return is_enabled;
}

bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	bool ret;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);
	ret = intel_display_power_enabled_unlocked(dev_priv, domain);
	mutex_unlock(&power_domains->lock);

	return ret;
}

/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So here we touch the VGA MSR register, making
	 * sure vgacon can keep working normally without triggering interrupts
	 * and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev))
		gen8_irq_power_well_post_enable(dev_priv);
}

static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");
		}
	}
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}

static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}

6184 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6185 struct i915_power_well *power_well)
6187 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6189 vlv_set_power_well(dev_priv, power_well, true);
6191 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
6192 valleyview_enable_display_irqs(dev_priv);
6193 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
6196 * During driver initialization/resume we can avoid restoring the
6197 * part of the HW/SW state that will be initialized explicitly anyway.
6199 if (dev_priv->power_domains.initializing)
6200 return;
6202 intel_hpd_init(dev_priv->dev);
6204 i915_redisable_vga_power_on(dev_priv->dev);
6207 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6208 struct i915_power_well *power_well)
6210 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6212 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
6213 valleyview_disable_display_irqs(dev_priv);
6214 lockmgr(&dev_priv->irq_lock, LK_RELEASE);
6216 vlv_set_power_well(dev_priv, power_well, false);
6219 static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6220 struct i915_power_well *power_well)
6222 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6225 * Enable the CRI clock source so we can get at the
6226 * display and the reference clock for VGA
6227 * hotplug / manual detection.
6229 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6230 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6231 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6233 vlv_set_power_well(dev_priv, power_well, true);
6236 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6237 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6238 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6239 * b. The other bits such as sfr settings / modesel may all
6240 * be set to 0.
6242 * This should only be done on init and resume from S3 with
6243 * both PLLs disabled, or we risk losing DPIO and PLL
6244 * synchronization.
6246 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6249 static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6250 struct i915_power_well *power_well)
6252 struct drm_device *dev = dev_priv->dev;
6253 enum i915_pipe pipe;
6255 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6257 for_each_pipe(pipe)
6258 assert_pll_disabled(dev_priv, pipe);
6260 /* Assert common reset */
6261 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6263 vlv_set_power_well(dev_priv, power_well, false);
6266 static void check_power_well_state(struct drm_i915_private *dev_priv,
6267 struct i915_power_well *power_well)
6269 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
6271 if (power_well->always_on || !i915.disable_power_well) {
6272 if (!enabled)
6273 goto mismatch;
6275 return;
6276 }
6278 if (enabled != (power_well->count > 0))
6279 goto mismatch;
6281 return;
6283 mismatch:
6284 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
6285 power_well->name, power_well->always_on, enabled,
6286 power_well->count, i915.disable_power_well);
6289 void intel_display_power_get(struct drm_i915_private *dev_priv,
6290 enum intel_display_power_domain domain)
6292 struct i915_power_domains *power_domains;
6293 struct i915_power_well *power_well;
6294 int i;
6296 intel_runtime_pm_get(dev_priv);
6298 power_domains = &dev_priv->power_domains;
6300 mutex_lock(&power_domains->lock);
6302 for_each_power_well(i, power_well, BIT(domain), power_domains) {
6303 if (!power_well->count++) {
6304 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
6305 power_well->ops->enable(dev_priv, power_well);
6306 power_well->hw_enabled = true;
6309 check_power_well_state(dev_priv, power_well);
6312 power_domains->domain_use_count[domain]++;
6314 mutex_unlock(&power_domains->lock);
6317 void intel_display_power_put(struct drm_i915_private *dev_priv,
6318 enum intel_display_power_domain domain)
6320 struct i915_power_domains *power_domains;
6321 struct i915_power_well *power_well;
6322 int i;
6324 power_domains = &dev_priv->power_domains;
6326 mutex_lock(&power_domains->lock);
6328 WARN_ON(!power_domains->domain_use_count[domain]);
6329 power_domains->domain_use_count[domain]--;
6331 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6332 WARN_ON(!power_well->count);
6334 if (!--power_well->count && i915.disable_power_well) {
6335 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6336 power_well->hw_enabled = false;
6337 power_well->ops->disable(dev_priv, power_well);
6340 check_power_well_state(dev_priv, power_well);
6343 mutex_unlock(&power_domains->lock);
6345 intel_runtime_pm_put(dev_priv);
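/*
 * Minimal usage sketch for the refcounted domain API above (the domain
 * picked is illustrative):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	... access hardware backed by that domain ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 *
 * The first get on a well enables it; the last put may disable it again
 * (only when i915.disable_power_well is set). Get/put also bracket a
 * runtime PM reference, so the device cannot runtime-suspend while any
 * display power domain is held.
 */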
6348 static struct i915_power_domains *hsw_pwr;
6350 /* Display audio driver power well request */
6351 int i915_request_power_well(void)
6353 struct drm_i915_private *dev_priv;
6355 if (!hsw_pwr)
6356 return -ENODEV;
6358 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6359 power_domains);
6360 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6361 return 0;
6362 }
6364 /* Display audio driver power well release */
6365 int i915_release_power_well(void)
6367 struct drm_i915_private *dev_priv;
6369 if (!hsw_pwr)
6370 return -ENODEV;
6372 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6373 power_domains);
6374 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6375 return 0;
6376 }
6379 * Private interface for the audio driver to get CDCLK in kHz.
6381 * Caller must request power well using i915_request_power_well() prior to
6382 * making the call.
6384 int i915_get_cdclk_freq(void)
6386 struct drm_i915_private *dev_priv;
6388 if (!hsw_pwr)
6389 return -ENODEV;
6391 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6392 power_domains);
6394 return intel_ddi_get_cdclk_freq(dev_priv);
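/*
 * Expected call sequence from the audio driver side, as a sketch (the
 * error handling is illustrative):
 *
 *	if (i915_request_power_well() == 0) {
 *		int cdclk = i915_get_cdclk_freq();	// in kHz
 *		... program the audio clocks ...
 *		i915_release_power_well();
 *	}
 */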
6397 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6399 #define HSW_ALWAYS_ON_POWER_DOMAINS ( \
6400 BIT(POWER_DOMAIN_PIPE_A) | \
6401 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
6402 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
6403 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
6404 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6405 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6406 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6407 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6408 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6409 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6410 BIT(POWER_DOMAIN_PORT_CRT) | \
6411 BIT(POWER_DOMAIN_PLLS) | \
6412 BIT(POWER_DOMAIN_INIT))
6413 #define HSW_DISPLAY_POWER_DOMAINS ( \
6414 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
6415 BIT(POWER_DOMAIN_INIT))
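/*
 * I.e. the display well owns exactly the domains the always-on well
 * does not, plus POWER_DOMAIN_INIT, which is deliberately part of both
 * sets so that grabbing the INIT domain powers everything up during
 * driver init/resume (see intel_power_domains_init_hw() below).
 */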
6417 #define BDW_ALWAYS_ON_POWER_DOMAINS ( \
6418 HSW_ALWAYS_ON_POWER_DOMAINS | \
6419 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
6420 #define BDW_DISPLAY_POWER_DOMAINS ( \
6421 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
6422 BIT(POWER_DOMAIN_INIT))
6424 #define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
6425 #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
6427 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
6428 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6429 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6430 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6431 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6432 BIT(POWER_DOMAIN_PORT_CRT) | \
6433 BIT(POWER_DOMAIN_INIT))
6435 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
6436 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6437 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6438 BIT(POWER_DOMAIN_INIT))
6440 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
6441 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6442 BIT(POWER_DOMAIN_INIT))
6444 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
6445 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6446 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6447 BIT(POWER_DOMAIN_INIT))
6449 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
6450 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6451 BIT(POWER_DOMAIN_INIT))
6453 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6454 .sync_hw = i9xx_always_on_power_well_noop,
6455 .enable = i9xx_always_on_power_well_noop,
6456 .disable = i9xx_always_on_power_well_noop,
6457 .is_enabled = i9xx_always_on_power_well_enabled,
6460 static struct i915_power_well i9xx_always_on_power_well[] = {
6462 .name = "always-on",
6464 .domains = POWER_DOMAIN_MASK,
6465 .ops = &i9xx_always_on_power_well_ops,
6469 static const struct i915_power_well_ops hsw_power_well_ops = {
6470 .sync_hw = hsw_power_well_sync_hw,
6471 .enable = hsw_power_well_enable,
6472 .disable = hsw_power_well_disable,
6473 .is_enabled = hsw_power_well_enabled,
6476 static struct i915_power_well hsw_power_wells[] = {
6478 .name = "always-on",
6480 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6481 .ops = &i9xx_always_on_power_well_ops,
6485 .domains = HSW_DISPLAY_POWER_DOMAINS,
6486 .ops = &hsw_power_well_ops,
6490 static struct i915_power_well bdw_power_wells[] = {
6492 .name = "always-on",
6494 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6495 .ops = &i9xx_always_on_power_well_ops,
6499 .domains = BDW_DISPLAY_POWER_DOMAINS,
6500 .ops = &hsw_power_well_ops,
6504 static const struct i915_power_well_ops vlv_display_power_well_ops = {
6505 .sync_hw = vlv_power_well_sync_hw,
6506 .enable = vlv_display_power_well_enable,
6507 .disable = vlv_display_power_well_disable,
6508 .is_enabled = vlv_power_well_enabled,
6511 static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6512 .sync_hw = vlv_power_well_sync_hw,
6513 .enable = vlv_dpio_cmn_power_well_enable,
6514 .disable = vlv_dpio_cmn_power_well_disable,
6515 .is_enabled = vlv_power_well_enabled,
6518 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6519 .sync_hw = vlv_power_well_sync_hw,
6520 .enable = vlv_power_well_enable,
6521 .disable = vlv_power_well_disable,
6522 .is_enabled = vlv_power_well_enabled,
6525 static struct i915_power_well vlv_power_wells[] = {
6527 .name = "always-on",
6529 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6530 .ops = &i9xx_always_on_power_well_ops,
6534 .domains = VLV_DISPLAY_POWER_DOMAINS,
6535 .data = PUNIT_POWER_WELL_DISP2D,
6536 .ops = &vlv_display_power_well_ops,
6539 .name = "dpio-tx-b-01",
6540 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6541 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6542 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6543 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6544 .ops = &vlv_dpio_power_well_ops,
6545 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6548 .name = "dpio-tx-b-23",
6549 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6550 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6551 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6552 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6553 .ops = &vlv_dpio_power_well_ops,
6554 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6557 .name = "dpio-tx-c-01",
6558 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6559 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6560 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6561 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6562 .ops = &vlv_dpio_power_well_ops,
6563 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6566 .name = "dpio-tx-c-23",
6567 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6568 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6569 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6570 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6571 .ops = &vlv_dpio_power_well_ops,
6572 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6575 .name = "dpio-common",
6576 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6577 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6578 .ops = &vlv_dpio_cmn_power_well_ops,
6582 static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6583 enum punit_power_well power_well_id)
6585 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6586 struct i915_power_well *power_well;
6587 int i;
6589 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6590 if (power_well->data == power_well_id)
6591 return power_well;
6592 }
6594 return NULL;
6595 }
6597 #define set_power_wells(power_domains, __power_wells) ({ \
6598 (power_domains)->power_wells = (__power_wells); \
6599 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
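/*
 * set_power_wells() must stay a macro: ARRAY_SIZE() needs to see the
 * real array object, and passing __power_wells through a function
 * parameter would decay it to a pointer and lose the element count.
 */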
6602 int intel_power_domains_init(struct drm_i915_private *dev_priv)
6604 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6606 lockinit(&power_domains->lock, "i915pl", 0, LK_CANRECURSE);
6609 * The enabling order will be from lower to higher indexed wells,
6610 * the disabling order is reversed.
6612 if (IS_HASWELL(dev_priv->dev)) {
6613 set_power_wells(power_domains, hsw_power_wells);
6614 hsw_pwr = power_domains;
6615 } else if (IS_BROADWELL(dev_priv->dev)) {
6616 set_power_wells(power_domains, bdw_power_wells);
6617 hsw_pwr = power_domains;
6618 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
6619 set_power_wells(power_domains, vlv_power_wells);
6620 } else {
6621 set_power_wells(power_domains, i9xx_always_on_power_well);
6622 }
6624 return 0;
6625 }
6627 void intel_power_domains_remove(struct drm_i915_private *dev_priv)
6628 {
6629 hsw_pwr = NULL;
6630 }
6632 static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
6634 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6635 struct i915_power_well *power_well;
6636 int i;
6638 mutex_lock(&power_domains->lock);
6639 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6640 power_well->ops->sync_hw(dev_priv, power_well);
6641 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
6642 power_well);
6643 }
6644 mutex_unlock(&power_domains->lock);
6647 static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
6649 struct i915_power_well *cmn =
6650 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
6651 struct i915_power_well *disp2d =
6652 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
6654 /* nothing to do if common lane is already off */
6655 if (!cmn->ops->is_enabled(dev_priv, cmn))
6656 return;
6658 /* If the display might already be active, skip this */
6659 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
6660 I915_READ(DPIO_CTL) & DPIO_CMNRST)
6661 return;
6663 DRM_DEBUG_KMS("toggling display PHY side reset\n");
6665 /* cmnlane needs DPLL registers */
6666 disp2d->ops->enable(dev_priv, disp2d);
6669 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
6670 * Need to assert and de-assert PHY SB reset by gating the
6671 * common lane power, then un-gating it.
6672 * Simply ungating isn't enough to reset the PHY enough to get
6673 * ports and lanes running.
6675 cmn->ops->disable(dev_priv, cmn);
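/*
 * Only the gating half of the reset cycle happens here; the un-gate
 * follows once display init takes a domain reference again, which runs
 * vlv_dpio_cmn_power_well_enable() and de-asserts cmn_reset.
 */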
6678 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
6680 struct drm_device *dev = dev_priv->dev;
6681 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6683 power_domains->initializing = true;
6685 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
6686 mutex_lock(&power_domains->lock);
6687 vlv_cmnlane_wa(dev_priv);
6688 mutex_unlock(&power_domains->lock);
6691 /* For now, we need the power well to be always enabled. */
6692 intel_display_set_init_power(dev_priv, true);
6693 intel_power_domains_resume(dev_priv);
6694 power_domains->initializing = false;
6697 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
6699 intel_runtime_pm_get(dev_priv);
6702 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
6704 intel_runtime_pm_put(dev_priv);
6707 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
6710 struct drm_device *dev = dev_priv->dev;
6711 struct device *device = &dev->pdev->dev;
6713 if (!HAS_RUNTIME_PM(dev))
6714 return;
6716 pm_runtime_get_sync(device);
6717 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
6721 void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
6723 struct drm_device *dev = dev_priv->dev;
6725 struct device *device = &dev->pdev->dev;
6728 if (!HAS_RUNTIME_PM(dev))
6729 return;
6731 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
6733 pm_runtime_get_noresume(device);
6737 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
6740 struct drm_device *dev = dev_priv->dev;
6741 struct device *device = &dev->pdev->dev;
6743 if (!HAS_RUNTIME_PM(dev))
6744 return;
6746 pm_runtime_mark_last_busy(device);
6747 pm_runtime_put_autosuspend(device);
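/*
 * Usage sketch for the runtime PM reference helpers above
 * (illustrative):
 *
 *	intel_runtime_pm_get(dev_priv);	// device guaranteed awake here
 *	... MMIO that requires the device powered ...
 *	intel_runtime_pm_put(dev_priv);	// marks busy, rearms autosuspend
 */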
6751 void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
6753 struct drm_device *dev = dev_priv->dev;
6755 struct device *device = &dev->pdev->dev;
6758 if (!HAS_RUNTIME_PM(dev))
6759 return;
6762 pm_runtime_set_active(device);
6765 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
6766 * requirement.
6768 if (!intel_enable_rc6(dev)) {
6769 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
6770 return;
6771 }
6773 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
6774 pm_runtime_mark_last_busy(device);
6775 pm_runtime_use_autosuspend(device);
6777 pm_runtime_put_autosuspend(device);
6781 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
6784 struct drm_device *dev = dev_priv->dev;
6785 struct device *device = &dev->pdev->dev;
6787 if (!HAS_RUNTIME_PM(dev))
6788 return;
6790 if (!intel_enable_rc6(dev))
6791 return;
6793 /* Make sure we're not suspended first. */
6794 pm_runtime_get_sync(device);
6795 pm_runtime_disable(device);
6799 /* Set up chip-specific power management-related functions */
6800 void intel_init_pm(struct drm_device *dev)
6802 struct drm_i915_private *dev_priv = dev->dev_private;
6805 if (INTEL_INFO(dev)->gen >= 7) {
6806 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6807 dev_priv->display.enable_fbc = gen7_enable_fbc;
6808 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6809 } else if (INTEL_INFO(dev)->gen >= 5) {
6810 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6811 dev_priv->display.enable_fbc = ironlake_enable_fbc;
6812 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6813 } else if (IS_GM45(dev)) {
6814 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
6815 dev_priv->display.enable_fbc = g4x_enable_fbc;
6816 dev_priv->display.disable_fbc = g4x_disable_fbc;
6818 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
6819 dev_priv->display.enable_fbc = i8xx_enable_fbc;
6820 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6822 /* This value was pulled out of someone's hat */
6823 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
6828 if (IS_PINEVIEW(dev))
6829 i915_pineview_get_mem_freq(dev);
6830 else if (IS_GEN5(dev))
6831 i915_ironlake_get_mem_freq(dev);
6833 /* For FIFO watermark updates */
6834 if (HAS_PCH_SPLIT(dev)) {
6835 ilk_setup_wm_latency(dev);
6837 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6838 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6839 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6840 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6841 dev_priv->display.update_wm = ilk_update_wm;
6842 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6844 DRM_DEBUG_KMS("Failed to read display plane latency. "
6849 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6850 else if (IS_GEN6(dev))
6851 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6852 else if (IS_IVYBRIDGE(dev))
6853 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6854 else if (IS_HASWELL(dev))
6855 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6856 else if (INTEL_INFO(dev)->gen == 8)
6857 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6858 } else if (IS_CHERRYVIEW(dev)) {
6859 dev_priv->display.update_wm = valleyview_update_wm;
6860 dev_priv->display.init_clock_gating =
6861 cherryview_init_clock_gating;
6862 } else if (IS_VALLEYVIEW(dev)) {
6863 dev_priv->display.update_wm = valleyview_update_wm;
6864 dev_priv->display.init_clock_gating =
6865 valleyview_init_clock_gating;
6866 } else if (IS_PINEVIEW(dev)) {
6867 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
6870 dev_priv->mem_freq)) {
6871 DRM_INFO("failed to find known CxSR latency "
6872 "(found ddr%s fsb freq %d, mem freq %d), "
6874 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6875 dev_priv->fsb_freq, dev_priv->mem_freq);
6876 /* Disable CxSR and never update its watermark again */
6877 intel_set_memory_cxsr(dev_priv, false);
6878 dev_priv->display.update_wm = NULL;
6880 dev_priv->display.update_wm = pineview_update_wm;
6881 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6882 } else if (IS_G4X(dev)) {
6883 dev_priv->display.update_wm = g4x_update_wm;
6884 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
6885 } else if (IS_GEN4(dev)) {
6886 dev_priv->display.update_wm = i965_update_wm;
6887 if (IS_CRESTLINE(dev))
6888 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
6889 else if (IS_BROADWATER(dev))
6890 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
6891 } else if (IS_GEN3(dev)) {
6892 dev_priv->display.update_wm = i9xx_update_wm;
6893 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6894 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6895 } else if (IS_GEN2(dev)) {
6896 if (INTEL_INFO(dev)->num_pipes == 1) {
6897 dev_priv->display.update_wm = i845_update_wm;
6898 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6900 dev_priv->display.update_wm = i9xx_update_wm;
6901 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6904 if (IS_I85X(dev) || IS_I865G(dev))
6905 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6907 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6909 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6913 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
6915 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6917 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6918 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
6919 return -EAGAIN;
6920 }
6922 I915_WRITE(GEN6_PCODE_DATA, *val);
6923 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6925 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6926 500)) {
6927 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
6928 return -ETIMEDOUT;
6929 }
6931 *val = I915_READ(GEN6_PCODE_DATA);
6932 I915_WRITE(GEN6_PCODE_DATA, 0);
6934 return 0;
6935 }
6937 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
6939 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6941 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6942 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
6943 return -EAGAIN;
6944 }
6946 I915_WRITE(GEN6_PCODE_DATA, val);
6947 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6949 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6950 500)) {
6951 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
6952 return -ETIMEDOUT;
6953 }
6955 I915_WRITE(GEN6_PCODE_DATA, 0);
6957 return 0;
6958 }
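/*
 * Example round trip through the mailbox helpers above; the mailbox id
 * shown (GEN6_PCODE_READ_MIN_FREQ_TABLE, from the register headers) is
 * only illustrative:
 *
 *	u32 val = 0;
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_MIN_FREQ_TABLE, &val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 *
 * Both helpers assert that rps.hw_lock is held, so concurrent mailbox
 * cycles cannot interleave their READY/data phases.
 */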
6960 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
6965 switch (dev_priv->mem_freq) {
6979 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
6982 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
6987 switch (dev_priv->mem_freq) {
7001 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
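/*
 * Worked example for the two conversions above, assuming the elided
 * switch selects div = mul = 10 for mem_freq == 800 (the values used
 * upstream; treat them as an assumption here):
 *
 *	byt_gpu_freq(0xd2)   = 800 * (0xd2 + 6 - 0xbd) / (4 * 10) = 540 MHz
 *	byt_freq_opcode(540) = 4 * 10 * 540 / 800 + 0xbd - 6      = 0xd2
 *
 * i.e. the two functions are exact inverses for valid opcodes.
 */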
7004 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7008 switch (dev_priv->rps.cz_freq) {
7024 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
7026 return freq;
7027 }
7029 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7033 switch (dev_priv->rps.cz_freq) {
7049 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
7051 return opcode;
7052 }
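/*
 * Note the symmetry with chv_gpu_freq() above: CHV opcodes are kept
 * even, so chv_freq_opcode() computes in half steps and doubles the
 * result, while chv_gpu_freq() halves after the closest-division.
 */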
7054 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7056 int ret = -1;
7058 if (IS_CHERRYVIEW(dev_priv->dev))
7059 ret = chv_gpu_freq(dev_priv, val);
7060 else if (IS_VALLEYVIEW(dev_priv->dev))
7061 ret = byt_gpu_freq(dev_priv, val);
7063 return ret;
7064 }
7066 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7068 int ret = -1;
7070 if (IS_CHERRYVIEW(dev_priv->dev))
7071 ret = chv_freq_opcode(dev_priv, val);
7072 else if (IS_VALLEYVIEW(dev_priv->dev))
7073 ret = byt_freq_opcode(dev_priv, val);
7075 return ret;
7076 }
7078 void intel_pm_setup(struct drm_device *dev)
7080 struct drm_i915_private *dev_priv = dev->dev_private;
7082 lockinit(&dev_priv->rps.hw_lock, "i915 rps.hw_lock", 0, LK_CANRECURSE);
7084 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
7085 intel_gen6_powersave_work);
7087 dev_priv->pm.suspended = false;
7088 dev_priv->pm._irqs_disabled = false;