/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 */

#include "intel_drv.h"
void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (_intel_wait_for(dev,
	    (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
		DRM_DEBUG_KMS("FBC idle timed out\n");

	DRM_DEBUG_KMS("disabled FBC\n");
}
void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int plane, i;
	u32 fbc_ctl, fbc_ctl2;

	cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 64B units */
	cfb_pitch = (cfb_pitch / 64) - 1;
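	/*
	 * Worked example with hypothetical numbers: if FBC_LL_SIZE is 1536
	 * and the compressed buffer is 384KB, cfb_size / FBC_LL_SIZE is 256
	 * bytes; a larger framebuffer stride is clamped down to 256, and
	 * (256 / 64) - 1 = 3 is programmed, i.e. four 64-byte units per
	 * compressed line.
	 */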
	plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	/* Set it up... */
	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
	fbc_ctl2 |= plane;
	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
	I915_WRITE(FBC_FENCE_OFF, crtc->y);

	/* Enable it */
	fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d\n",
		      cfb_pitch, crtc->y, intel_crtc->plane);
}
bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
	I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* Enable it */
	I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes.  The FBC_NOTIFY bit is
	 * first unlocked through its mirror at GEN6_BLITTER_LOCK_SHIFT, then
	 * set, then locked again. */
	gen6_gt_force_wake_get(dev_priv);
	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);
	gen6_gt_force_wake_put(dev_priv);
}
void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
	unsigned long stall_watermark = 200;
	u32 dpfc_ctl;

	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	dpfc_ctl &= DPFC_RESERVED;
	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
	/* Set persistent mode for front-buffer rendering, ala X. */
	dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
	dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
	I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);

	I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);

	/* Enable it */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;
	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (work == dev_priv->fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc,
						     work->interval);

			dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->cfb_fb = work->crtc->fb->base.id;
			dev_priv->cfb_y = work->crtc->y;
		}

		dev_priv->fbc_work = NULL;
	}

	drm_free(work, DRM_MEM_KMS);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc_work->work))
		/* tasklet was killed before being run, clean up */
		drm_free(dev_priv->fbc_work, DRM_MEM_KMS);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc_work = NULL;
}
void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kmalloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);

	work->crtc = crtc;
	work->fb = crtc->fb;
	work->interval = interval;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc_work = work;

	DRM_DEBUG_KMS("scheduling delayed FBC enable\n");

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->cfb_plane = -1;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - framebuffer <= 2048 in width, 1536 in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int enable_fbc;

	if (!I915_HAS_FBC(dev))
		return;

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (tmp_crtc->enabled && tmp_crtc->fb) {
			if (crtc) {
				DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->fb == NULL) {
		DRM_DEBUG_KMS("no output, disabling\n");
		dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	enable_fbc = i915_enable_fbc;
	if (enable_fbc < 0) {
		DRM_DEBUG_KMS("fbc set to per-chip default\n");
		enable_fbc = 1;
		if (INTEL_INFO(dev)->gen <= 6)
			enable_fbc = 0;
	}
	if (!enable_fbc) {
		DRM_DEBUG_KMS("fbc disabled per module param\n");
		dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
		goto out_disable;
	}
	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
		DRM_DEBUG_KMS("framebuffer too large, disabling "
			      "compression\n");
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		goto out_disable;
	}
	if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
	    (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
		DRM_DEBUG_KMS("mode incompatible with compression, "
			      "disabling\n");
		dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
		goto out_disable;
	}
	if ((crtc->mode.hdisplay > 2048) ||
	    (crtc->mode.vdisplay > 1536)) {
		DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
		goto out_disable;
	}
	if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
		DRM_DEBUG_KMS("plane not 0, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
		goto out_disable;
	}
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		dev_priv->no_fbc_reason = FBC_NOT_TILED;
		goto out_disable;
	}
	/* If the kernel debugger is active, always disable compression */

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->cfb_plane == intel_crtc->plane &&
	    dev_priv->cfb_fb == fb->base.id &&
	    dev_priv->cfb_y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc, 500);
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
}
511 void i915_ironlake_get_mem_freq(struct drm_device *dev);
512 void i915_pineview_get_mem_freq(struct drm_device *dev);
514 void i915_pineview_get_mem_freq(struct drm_device *dev)
516 drm_i915_private_t *dev_priv = dev->dev_private;
519 tmp = I915_READ(CLKCFG);
521 switch (tmp & CLKCFG_FSB_MASK) {
523 dev_priv->fsb_freq = 533; /* 133*4 */
526 dev_priv->fsb_freq = 800; /* 200*4 */
529 dev_priv->fsb_freq = 667; /* 167*4 */
532 dev_priv->fsb_freq = 400; /* 100*4 */
536 switch (tmp & CLKCFG_MEM_MASK) {
538 dev_priv->mem_freq = 533;
541 dev_priv->mem_freq = 667;
544 dev_priv->mem_freq = 800;
548 /* detect pineview DDR3 setting */
549 tmp = I915_READ(CSHRDDR3CTL);
550 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
553 void i915_ironlake_get_mem_freq(struct drm_device *dev)
555 drm_i915_private_t *dev_priv = dev->dev_private;
558 ddrpll = I915_READ16(DDRMPLL1);
559 csipll = I915_READ16(CSIPLL0);
561 switch (ddrpll & 0xff) {
563 dev_priv->mem_freq = 800;
566 dev_priv->mem_freq = 1066;
569 dev_priv->mem_freq = 1333;
572 dev_priv->mem_freq = 1600;
575 DRM_DEBUG("unknown memory frequency 0x%02x\n",
577 dev_priv->mem_freq = 0;
581 dev_priv->r_t = dev_priv->mem_freq;
583 switch (csipll & 0x3ff) {
585 dev_priv->fsb_freq = 3200;
588 dev_priv->fsb_freq = 3733;
591 dev_priv->fsb_freq = 4266;
594 dev_priv->fsb_freq = 4800;
597 dev_priv->fsb_freq = 5333;
600 dev_priv->fsb_freq = 5866;
603 dev_priv->fsb_freq = 6400;
606 DRM_DEBUG("unknown fsb frequency 0x%04x\n",
608 dev_priv->fsb_freq = 0;
612 if (dev_priv->fsb_freq == 3200) {
614 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
621 /* Pineview has different values for various configs */
622 static const struct intel_watermark_params pineview_display_wm = {
623 PINEVIEW_DISPLAY_FIFO,
627 PINEVIEW_FIFO_LINE_SIZE
629 static const struct intel_watermark_params pineview_display_hplloff_wm = {
630 PINEVIEW_DISPLAY_FIFO,
632 PINEVIEW_DFT_HPLLOFF_WM,
634 PINEVIEW_FIFO_LINE_SIZE
636 static const struct intel_watermark_params pineview_cursor_wm = {
637 PINEVIEW_CURSOR_FIFO,
638 PINEVIEW_CURSOR_MAX_WM,
639 PINEVIEW_CURSOR_DFT_WM,
640 PINEVIEW_CURSOR_GUARD_WM,
641 PINEVIEW_FIFO_LINE_SIZE,
643 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
644 PINEVIEW_CURSOR_FIFO,
645 PINEVIEW_CURSOR_MAX_WM,
646 PINEVIEW_CURSOR_DFT_WM,
647 PINEVIEW_CURSOR_GUARD_WM,
648 PINEVIEW_FIFO_LINE_SIZE
650 static const struct intel_watermark_params g4x_wm_info = {
657 static const struct intel_watermark_params g4x_cursor_wm_info = {
664 static const struct intel_watermark_params i965_cursor_wm_info = {
671 static const struct intel_watermark_params i945_wm_info = {
678 static const struct intel_watermark_params i915_wm_info = {
685 static const struct intel_watermark_params i855_wm_info = {
692 static const struct intel_watermark_params i830_wm_info = {
700 static const struct intel_watermark_params ironlake_display_wm_info = {
707 static const struct intel_watermark_params ironlake_cursor_wm_info = {
714 static const struct intel_watermark_params ironlake_display_srwm_info = {
716 ILK_DISPLAY_MAX_SRWM,
717 ILK_DISPLAY_DFT_SRWM,
721 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
729 static const struct intel_watermark_params sandybridge_display_wm_info = {
736 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
743 static const struct intel_watermark_params sandybridge_display_srwm_info = {
745 SNB_DISPLAY_MAX_SRWM,
746 SNB_DISPLAY_DFT_SRWM,
750 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
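/*
 * Worked example with hypothetical numbers: a 100,000 kHz dot clock at
 * 4 bytes per pixel and 5000 ns of latency drains
 * (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes while memory responds,
 * i.e. 32 entries of a 64-byte cacheline FIFO.  With a 96-entry FIFO and a
 * guard of 2, the watermark comes out as 96 - (32 + 2) = 62.
 */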
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = howmany(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
struct cxsr_latency {
	int is_desktop;
	int is_ddr3;
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long display_sr;
	unsigned long display_hpll_disable;
	unsigned long cursor_sr;
	unsigned long cursor_hpll_disable;
};
820 static const struct cxsr_latency cxsr_latency_table[] = {
821 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
822 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
823 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
824 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
825 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
827 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
828 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
829 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
830 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
831 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
833 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
834 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
835 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
836 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
837 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
839 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
840 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
841 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
842 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
843 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
845 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
846 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
847 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
848 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
849 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
851 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
852 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
853 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
854 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
855 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
    int is_ddr3, int fsb, int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
	return NULL;
}
void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
906 int i9xx_get_fifo_size(struct drm_device *dev, int plane)
908 struct drm_i915_private *dev_priv = dev->dev_private;
909 uint32_t dsparb = I915_READ(DSPARB);
912 size = dsparb & 0x7f;
914 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
916 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
917 plane ? "B" : "A", size);
922 int i85x_get_fifo_size(struct drm_device *dev, int plane)
924 struct drm_i915_private *dev_priv = dev->dev_private;
925 uint32_t dsparb = I915_READ(DSPARB);
928 size = dsparb & 0x1ff;
930 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
931 size >>= 1; /* Convert to cachelines */
933 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
934 plane ? "B" : "A", size);
939 int i845_get_fifo_size(struct drm_device *dev, int plane)
941 struct drm_i915_private *dev_priv = dev->dev_private;
942 uint32_t dsparb = I915_READ(DSPARB);
945 size = dsparb & 0x7f;
946 size >>= 2; /* Convert to cachelines */
948 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
955 int i830_get_fifo_size(struct drm_device *dev, int plane)
957 struct drm_i915_private *dev_priv = dev->dev_private;
958 uint32_t dsparb = I915_READ(DSPARB);
961 size = dsparb & 0x7f;
962 size >>= 1; /* Convert to cachelines */
964 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
965 plane ? "B" : "A", size);
970 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
972 struct drm_crtc *crtc, *enabled = NULL;
974 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
975 if (crtc->enabled && crtc->fb) {
985 void pineview_update_wm(struct drm_device *dev)
987 struct drm_i915_private *dev_priv = dev->dev_private;
988 struct drm_crtc *crtc;
989 const struct cxsr_latency *latency;
993 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
994 dev_priv->fsb_freq, dev_priv->mem_freq);
996 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
997 pineview_disable_cxsr(dev);
1001 crtc = single_enabled_crtc(dev);
1003 int clock = crtc->mode.clock;
1004 int pixel_size = crtc->fb->bits_per_pixel / 8;
1007 wm = intel_calculate_wm(clock, &pineview_display_wm,
1008 pineview_display_wm.fifo_size,
1009 pixel_size, latency->display_sr);
1010 reg = I915_READ(DSPFW1);
1011 reg &= ~DSPFW_SR_MASK;
1012 reg |= wm << DSPFW_SR_SHIFT;
1013 I915_WRITE(DSPFW1, reg);
1014 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1017 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1018 pineview_display_wm.fifo_size,
1019 pixel_size, latency->cursor_sr);
1020 reg = I915_READ(DSPFW3);
1021 reg &= ~DSPFW_CURSOR_SR_MASK;
1022 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1023 I915_WRITE(DSPFW3, reg);
1025 /* Display HPLL off SR */
1026 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1027 pineview_display_hplloff_wm.fifo_size,
1028 pixel_size, latency->display_hpll_disable);
1029 reg = I915_READ(DSPFW3);
1030 reg &= ~DSPFW_HPLL_SR_MASK;
1031 reg |= wm & DSPFW_HPLL_SR_MASK;
1032 I915_WRITE(DSPFW3, reg);
1034 /* cursor HPLL off SR */
1035 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1036 pineview_display_hplloff_wm.fifo_size,
1037 pixel_size, latency->cursor_hpll_disable);
1038 reg = I915_READ(DSPFW3);
1039 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1040 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1041 I915_WRITE(DSPFW3, reg);
1042 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1046 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1047 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1049 pineview_disable_cxsr(dev);
1050 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1054 static bool g4x_compute_wm0(struct drm_device *dev,
1056 const struct intel_watermark_params *display,
1057 int display_latency_ns,
1058 const struct intel_watermark_params *cursor,
1059 int cursor_latency_ns,
1063 struct drm_crtc *crtc;
1064 int htotal, hdisplay, clock, pixel_size;
1065 int line_time_us, line_count;
1066 int entries, tlb_miss;
1068 crtc = intel_get_crtc_for_plane(dev, plane);
1069 if (crtc->fb == NULL || !crtc->enabled) {
1070 *cursor_wm = cursor->guard_size;
1071 *plane_wm = display->guard_size;
1075 htotal = crtc->mode.htotal;
1076 hdisplay = crtc->mode.hdisplay;
1077 clock = crtc->mode.clock;
1078 pixel_size = crtc->fb->bits_per_pixel / 8;
1080 /* Use the small buffer method to calculate plane watermark */
1081 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1082 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1084 entries += tlb_miss;
1085 entries = howmany(entries, display->cacheline_size);
1086 *plane_wm = entries + display->guard_size;
1087 if (*plane_wm > (int)display->max_wm)
1088 *plane_wm = display->max_wm;
1090 /* Use the large buffer method to calculate cursor watermark */
1091 line_time_us = ((htotal * 1000) / clock);
1092 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1093 entries = line_count * 64 * pixel_size;
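	/*
	 * The cursor is modelled as a 64 pixel wide surface; e.g. a
	 * hypothetical 400 ns cursor latency with a 10 us line time gives
	 * line_count = 1, so 64 * pixel_size bytes are assumed outstanding
	 * before the cacheline round-up below.
	 */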
1094 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1096 entries += tlb_miss;
1097 entries = howmany(entries, cursor->cacheline_size);
1098 *cursor_wm = entries + cursor->guard_size;
1099 if (*cursor_wm > (int)cursor->max_wm)
1100 *cursor_wm = (int)cursor->max_wm;
1106 * Check the wm result.
1108 * If any calculated watermark values is larger than the maximum value that
1109 * can be programmed into the associated watermark register, that watermark
1112 static bool g4x_check_srwm(struct drm_device *dev,
1113 int display_wm, int cursor_wm,
1114 const struct intel_watermark_params *display,
1115 const struct intel_watermark_params *cursor)
1117 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1118 display_wm, cursor_wm);
1120 if (display_wm > display->max_wm) {
1121 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1122 display_wm, display->max_wm);
1126 if (cursor_wm > cursor->max_wm) {
1127 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1128 cursor_wm, cursor->max_wm);
1132 if (!(display_wm || cursor_wm)) {
1133 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1140 static bool g4x_compute_srwm(struct drm_device *dev,
1143 const struct intel_watermark_params *display,
1144 const struct intel_watermark_params *cursor,
1145 int *display_wm, int *cursor_wm)
1147 struct drm_crtc *crtc;
1148 int hdisplay, htotal, pixel_size, clock;
1149 unsigned long line_time_us;
1150 int line_count, line_size;
1155 *display_wm = *cursor_wm = 0;
1159 crtc = intel_get_crtc_for_plane(dev, plane);
1160 hdisplay = crtc->mode.hdisplay;
1161 htotal = crtc->mode.htotal;
1162 clock = crtc->mode.clock;
1163 pixel_size = crtc->fb->bits_per_pixel / 8;
1165 line_time_us = (htotal * 1000) / clock;
1166 line_count = (latency_ns / line_time_us + 1000) / 1000;
1167 line_size = hdisplay * pixel_size;
1169 /* Use the minimum of the small and large buffer method for primary */
1170 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1171 large = line_count * line_size;
1173 entries = howmany(min(small, large), display->cacheline_size);
1174 *display_wm = entries + display->guard_size;
1176 /* calculate the self-refresh watermark for display cursor */
1177 entries = line_count * pixel_size * 64;
1178 entries = howmany(entries, cursor->cacheline_size);
1179 *cursor_wm = entries + cursor->guard_size;
1181 return g4x_check_srwm(dev,
1182 *display_wm, *cursor_wm,
#define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
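/*
 * e.g. an enabled-plane mask of 0x1 or 0x2 (exactly one bit set) counts as
 * single-plane, while 0x0 and 0x3 do not, so the self-refresh watermark is
 * only computed when exactly one display plane is active.
 */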
1188 void g4x_update_wm(struct drm_device *dev)
1190 static const int sr_latency_ns = 12000;
1191 struct drm_i915_private *dev_priv = dev->dev_private;
1192 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1193 int plane_sr, cursor_sr;
1194 unsigned int enabled = 0;
1196 if (g4x_compute_wm0(dev, 0,
1197 &g4x_wm_info, latency_ns,
1198 &g4x_cursor_wm_info, latency_ns,
1199 &planea_wm, &cursora_wm))
1202 if (g4x_compute_wm0(dev, 1,
1203 &g4x_wm_info, latency_ns,
1204 &g4x_cursor_wm_info, latency_ns,
1205 &planeb_wm, &cursorb_wm))
1208 plane_sr = cursor_sr = 0;
1209 if (single_plane_enabled(enabled) &&
1210 g4x_compute_srwm(dev, ffs(enabled) - 1,
1213 &g4x_cursor_wm_info,
1214 &plane_sr, &cursor_sr))
1215 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1217 I915_WRITE(FW_BLC_SELF,
1218 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1220 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1221 planea_wm, cursora_wm,
1222 planeb_wm, cursorb_wm,
1223 plane_sr, cursor_sr);
1226 (plane_sr << DSPFW_SR_SHIFT) |
1227 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1228 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1231 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1232 (cursora_wm << DSPFW_CURSORA_SHIFT));
1233 /* HPLL off in SR has some issues on G4x... disable it */
1235 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1236 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1239 void i965_update_wm(struct drm_device *dev)
1241 struct drm_i915_private *dev_priv = dev->dev_private;
1242 struct drm_crtc *crtc;
1246 /* Calc sr entries for one plane configs */
1247 crtc = single_enabled_crtc(dev);
1249 /* self-refresh has much higher latency */
1250 static const int sr_latency_ns = 12000;
1251 int clock = crtc->mode.clock;
1252 int htotal = crtc->mode.htotal;
1253 int hdisplay = crtc->mode.hdisplay;
1254 int pixel_size = crtc->fb->bits_per_pixel / 8;
1255 unsigned long line_time_us;
1258 line_time_us = ((htotal * 1000) / clock);
1260 /* Use ns/us then divide to preserve precision */
1261 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1262 pixel_size * hdisplay;
1263 entries = howmany(entries, I915_FIFO_LINE_SIZE);
1264 srwm = I965_FIFO_SIZE - entries;
1268 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1271 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1273 entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
1274 cursor_sr = i965_cursor_wm_info.fifo_size -
1275 (entries + i965_cursor_wm_info.guard_size);
1277 if (cursor_sr > i965_cursor_wm_info.max_wm)
1278 cursor_sr = i965_cursor_wm_info.max_wm;
1280 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1281 "cursor %d\n", srwm, cursor_sr);
1283 if (IS_CRESTLINE(dev))
1284 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1286 /* Turn off self refresh if both pipes are enabled */
1287 if (IS_CRESTLINE(dev))
1288 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1292 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1295 /* 965 has limitations... */
1296 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1297 (8 << 16) | (8 << 8) | (8 << 0));
1298 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1299 /* update cursor SR watermark */
1300 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1303 void i9xx_update_wm(struct drm_device *dev)
1305 struct drm_i915_private *dev_priv = dev->dev_private;
1306 const struct intel_watermark_params *wm_info;
1311 int planea_wm, planeb_wm;
1312 struct drm_crtc *crtc, *enabled = NULL;
1315 wm_info = &i945_wm_info;
1316 else if (!IS_GEN2(dev))
1317 wm_info = &i915_wm_info;
1319 wm_info = &i855_wm_info;
1321 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1322 crtc = intel_get_crtc_for_plane(dev, 0);
1323 if (crtc->enabled && crtc->fb) {
1324 planea_wm = intel_calculate_wm(crtc->mode.clock,
1326 crtc->fb->bits_per_pixel / 8,
1330 planea_wm = fifo_size - wm_info->guard_size;
1332 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1333 crtc = intel_get_crtc_for_plane(dev, 1);
1334 if (crtc->enabled && crtc->fb) {
1335 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1337 crtc->fb->bits_per_pixel / 8,
1339 if (enabled == NULL)
1344 planeb_wm = fifo_size - wm_info->guard_size;
1346 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1349 * Overlay gets an aggressive default since video jitter is bad.
1353 /* Play safe and disable self-refresh before adjusting watermarks. */
1354 if (IS_I945G(dev) || IS_I945GM(dev))
1355 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1356 else if (IS_I915GM(dev))
1357 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1359 /* Calc sr entries for one plane configs */
1360 if (HAS_FW_BLC(dev) && enabled) {
1361 /* self-refresh has much higher latency */
1362 static const int sr_latency_ns = 6000;
1363 int clock = enabled->mode.clock;
1364 int htotal = enabled->mode.htotal;
1365 int hdisplay = enabled->mode.hdisplay;
1366 int pixel_size = enabled->fb->bits_per_pixel / 8;
1367 unsigned long line_time_us;
1370 line_time_us = (htotal * 1000) / clock;
1372 /* Use ns/us then divide to preserve precision */
1373 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1374 pixel_size * hdisplay;
1375 entries = howmany(entries, wm_info->cacheline_size);
1376 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1377 srwm = wm_info->fifo_size - entries;
1381 if (IS_I945G(dev) || IS_I945GM(dev))
1382 I915_WRITE(FW_BLC_SELF,
1383 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1384 else if (IS_I915GM(dev))
1385 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1388 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1389 planea_wm, planeb_wm, cwm, srwm);
1391 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1392 fwater_hi = (cwm & 0x1f);
1394 /* Set request length to 8 cachelines per fetch */
1395 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1396 fwater_hi = fwater_hi | (1 << 8);
1398 I915_WRITE(FW_BLC, fwater_lo);
1399 I915_WRITE(FW_BLC2, fwater_hi);
1401 if (HAS_FW_BLC(dev)) {
1403 if (IS_I945G(dev) || IS_I945GM(dev))
1404 I915_WRITE(FW_BLC_SELF,
1405 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1406 else if (IS_I915GM(dev))
1407 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1408 DRM_DEBUG_KMS("memory self refresh enabled\n");
1410 DRM_DEBUG_KMS("memory self refresh disabled\n");
1414 void i830_update_wm(struct drm_device *dev)
1416 struct drm_i915_private *dev_priv = dev->dev_private;
1417 struct drm_crtc *crtc;
1421 crtc = single_enabled_crtc(dev);
1425 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1426 dev_priv->display.get_fifo_size(dev, 0),
1427 crtc->fb->bits_per_pixel / 8,
1429 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1430 fwater_lo |= (3<<8) | planea_wm;
1432 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1434 I915_WRITE(FW_BLC, fwater_lo);
1437 #define ILK_LP0_PLANE_LATENCY 700
1438 #define ILK_LP0_CURSOR_LATENCY 1300
1441 * Check the wm result.
1443 * If any calculated watermark values is larger than the maximum value that
1444 * can be programmed into the associated watermark register, that watermark
1447 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1448 int fbc_wm, int display_wm, int cursor_wm,
1449 const struct intel_watermark_params *display,
1450 const struct intel_watermark_params *cursor)
1452 struct drm_i915_private *dev_priv = dev->dev_private;
1454 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1455 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1457 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1458 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1459 fbc_wm, SNB_FBC_MAX_SRWM, level);
		/* fbc has its own way to disable FBC WM */
1462 I915_WRITE(DISP_ARB_CTL,
1463 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1467 if (display_wm > display->max_wm) {
1468 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1469 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1473 if (cursor_wm > cursor->max_wm) {
1474 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1475 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1479 if (!(fbc_wm || display_wm || cursor_wm)) {
1480 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1488 * Compute watermark values of WM[1-3],
1490 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1492 const struct intel_watermark_params *display,
1493 const struct intel_watermark_params *cursor,
1494 int *fbc_wm, int *display_wm, int *cursor_wm)
1496 struct drm_crtc *crtc;
1497 unsigned long line_time_us;
1498 int hdisplay, htotal, pixel_size, clock;
1499 int line_count, line_size;
1504 *fbc_wm = *display_wm = *cursor_wm = 0;
1508 crtc = intel_get_crtc_for_plane(dev, plane);
1509 hdisplay = crtc->mode.hdisplay;
1510 htotal = crtc->mode.htotal;
1511 clock = crtc->mode.clock;
1512 pixel_size = crtc->fb->bits_per_pixel / 8;
1514 line_time_us = (htotal * 1000) / clock;
1515 line_count = (latency_ns / line_time_us + 1000) / 1000;
1516 line_size = hdisplay * pixel_size;
1518 /* Use the minimum of the small and large buffer method for primary */
1519 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1520 large = line_count * line_size;
1522 entries = howmany(min(small, large), display->cacheline_size);
1523 *display_wm = entries + display->guard_size;
	/*
	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
	 */
	*fbc_wm = howmany(*display_wm * 64, line_size) + 2;
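	/*
	 * e.g. a hypothetical display_wm of 100 on a 1920-pixel-wide, 4 bpp
	 * plane gives howmany(100 * 64, 7680) + 2 = 1 + 2 = 3 FBC lines.
	 */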
1531 /* calculate the self-refresh watermark for display cursor */
1532 entries = line_count * pixel_size * 64;
1533 entries = howmany(entries, cursor->cacheline_size);
1534 *cursor_wm = entries + cursor->guard_size;
1536 return ironlake_check_srwm(dev, level,
1537 *fbc_wm, *display_wm, *cursor_wm,
1541 void ironlake_update_wm(struct drm_device *dev)
1543 struct drm_i915_private *dev_priv = dev->dev_private;
1544 int fbc_wm, plane_wm, cursor_wm;
1545 unsigned int enabled;
1548 if (g4x_compute_wm0(dev, 0,
1549 &ironlake_display_wm_info,
1550 ILK_LP0_PLANE_LATENCY,
1551 &ironlake_cursor_wm_info,
1552 ILK_LP0_CURSOR_LATENCY,
1553 &plane_wm, &cursor_wm)) {
1554 I915_WRITE(WM0_PIPEA_ILK,
1555 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1556 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1557 " plane %d, " "cursor: %d\n",
1558 plane_wm, cursor_wm);
1562 if (g4x_compute_wm0(dev, 1,
1563 &ironlake_display_wm_info,
1564 ILK_LP0_PLANE_LATENCY,
1565 &ironlake_cursor_wm_info,
1566 ILK_LP0_CURSOR_LATENCY,
1567 &plane_wm, &cursor_wm)) {
1568 I915_WRITE(WM0_PIPEB_ILK,
1569 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1570 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1571 " plane %d, cursor: %d\n",
1572 plane_wm, cursor_wm);
1577 * Calculate and update the self-refresh watermark only when one
1578 * display plane is used.
1580 I915_WRITE(WM3_LP_ILK, 0);
1581 I915_WRITE(WM2_LP_ILK, 0);
1582 I915_WRITE(WM1_LP_ILK, 0);
1584 if (!single_plane_enabled(enabled))
1586 enabled = ffs(enabled) - 1;
1589 if (!ironlake_compute_srwm(dev, 1, enabled,
1590 ILK_READ_WM1_LATENCY() * 500,
1591 &ironlake_display_srwm_info,
1592 &ironlake_cursor_srwm_info,
1593 &fbc_wm, &plane_wm, &cursor_wm))
1596 I915_WRITE(WM1_LP_ILK,
1598 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1599 (fbc_wm << WM1_LP_FBC_SHIFT) |
1600 (plane_wm << WM1_LP_SR_SHIFT) |
1604 if (!ironlake_compute_srwm(dev, 2, enabled,
1605 ILK_READ_WM2_LATENCY() * 500,
1606 &ironlake_display_srwm_info,
1607 &ironlake_cursor_srwm_info,
1608 &fbc_wm, &plane_wm, &cursor_wm))
1611 I915_WRITE(WM2_LP_ILK,
1613 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1614 (fbc_wm << WM1_LP_FBC_SHIFT) |
1615 (plane_wm << WM1_LP_SR_SHIFT) |
1619 * WM3 is unsupported on ILK, probably because we don't have latency
1620 * data for that power state
1624 void sandybridge_update_wm(struct drm_device *dev)
1626 struct drm_i915_private *dev_priv = dev->dev_private;
1627 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1629 int fbc_wm, plane_wm, cursor_wm;
1630 unsigned int enabled;
1633 if (g4x_compute_wm0(dev, 0,
1634 &sandybridge_display_wm_info, latency,
1635 &sandybridge_cursor_wm_info, latency,
1636 &plane_wm, &cursor_wm)) {
1637 val = I915_READ(WM0_PIPEA_ILK);
1638 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1639 I915_WRITE(WM0_PIPEA_ILK, val |
1640 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1641 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1642 " plane %d, " "cursor: %d\n",
1643 plane_wm, cursor_wm);
1647 if (g4x_compute_wm0(dev, 1,
1648 &sandybridge_display_wm_info, latency,
1649 &sandybridge_cursor_wm_info, latency,
1650 &plane_wm, &cursor_wm)) {
1651 val = I915_READ(WM0_PIPEB_ILK);
1652 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1653 I915_WRITE(WM0_PIPEB_ILK, val |
1654 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1655 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1656 " plane %d, cursor: %d\n",
1657 plane_wm, cursor_wm);
1661 /* IVB has 3 pipes */
1662 if (IS_IVYBRIDGE(dev) &&
1663 g4x_compute_wm0(dev, 2,
1664 &sandybridge_display_wm_info, latency,
1665 &sandybridge_cursor_wm_info, latency,
1666 &plane_wm, &cursor_wm)) {
1667 val = I915_READ(WM0_PIPEC_IVB);
1668 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1669 I915_WRITE(WM0_PIPEC_IVB, val |
1670 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1671 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1672 " plane %d, cursor: %d\n",
1673 plane_wm, cursor_wm);
1678 * Calculate and update the self-refresh watermark only when one
1679 * display plane is used.
	 * SNB supports 3 levels of watermark.
	 *
	 * WM1/WM2/WM3 watermarks have to be enabled in the ascending order,
	 * and disabled in the descending order
1687 I915_WRITE(WM3_LP_ILK, 0);
1688 I915_WRITE(WM2_LP_ILK, 0);
1689 I915_WRITE(WM1_LP_ILK, 0);
1691 if (!single_plane_enabled(enabled) ||
1692 dev_priv->sprite_scaling_enabled)
1694 enabled = ffs(enabled) - 1;
1697 if (!ironlake_compute_srwm(dev, 1, enabled,
1698 SNB_READ_WM1_LATENCY() * 500,
1699 &sandybridge_display_srwm_info,
1700 &sandybridge_cursor_srwm_info,
1701 &fbc_wm, &plane_wm, &cursor_wm))
1704 I915_WRITE(WM1_LP_ILK,
1706 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1707 (fbc_wm << WM1_LP_FBC_SHIFT) |
1708 (plane_wm << WM1_LP_SR_SHIFT) |
1712 if (!ironlake_compute_srwm(dev, 2, enabled,
1713 SNB_READ_WM2_LATENCY() * 500,
1714 &sandybridge_display_srwm_info,
1715 &sandybridge_cursor_srwm_info,
1716 &fbc_wm, &plane_wm, &cursor_wm))
1719 I915_WRITE(WM2_LP_ILK,
1721 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1722 (fbc_wm << WM1_LP_FBC_SHIFT) |
1723 (plane_wm << WM1_LP_SR_SHIFT) |
1727 if (!ironlake_compute_srwm(dev, 3, enabled,
1728 SNB_READ_WM3_LATENCY() * 500,
1729 &sandybridge_display_srwm_info,
1730 &sandybridge_cursor_srwm_info,
1731 &fbc_wm, &plane_wm, &cursor_wm))
1734 I915_WRITE(WM3_LP_ILK,
1736 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1737 (fbc_wm << WM1_LP_FBC_SHIFT) |
1738 (plane_wm << WM1_LP_SR_SHIFT) |
1743 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1744 uint32_t sprite_width, int pixel_size,
1745 const struct intel_watermark_params *display,
1746 int display_latency_ns, int *sprite_wm)
1748 struct drm_crtc *crtc;
1750 int entries, tlb_miss;
1752 crtc = intel_get_crtc_for_plane(dev, plane);
1753 if (crtc->fb == NULL || !crtc->enabled) {
1754 *sprite_wm = display->guard_size;
1758 clock = crtc->mode.clock;
1760 /* Use the small buffer method to calculate the sprite watermark */
1761 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1762 tlb_miss = display->fifo_size*display->cacheline_size -
1765 entries += tlb_miss;
1766 entries = howmany(entries, display->cacheline_size);
1767 *sprite_wm = entries + display->guard_size;
1768 if (*sprite_wm > (int)display->max_wm)
1769 *sprite_wm = display->max_wm;
1775 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1776 uint32_t sprite_width, int pixel_size,
1777 const struct intel_watermark_params *display,
1778 int latency_ns, int *sprite_wm)
1780 struct drm_crtc *crtc;
1781 unsigned long line_time_us;
1783 int line_count, line_size;
1792 crtc = intel_get_crtc_for_plane(dev, plane);
1793 clock = crtc->mode.clock;
1799 line_time_us = (sprite_width * 1000) / clock;
1800 if (!line_time_us) {
1805 line_count = (latency_ns / line_time_us + 1000) / 1000;
1806 line_size = sprite_width * pixel_size;
1808 /* Use the minimum of the small and large buffer method for primary */
1809 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1810 large = line_count * line_size;
1812 entries = howmany(min(small, large), display->cacheline_size);
1813 *sprite_wm = entries + display->guard_size;
1815 return *sprite_wm > 0x3ff ? false : true;
1818 void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
1819 uint32_t sprite_width, int pixel_size)
1821 struct drm_i915_private *dev_priv = dev->dev_private;
1822 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1829 reg = WM0_PIPEA_ILK;
1832 reg = WM0_PIPEB_ILK;
1835 reg = WM0_PIPEC_IVB;
1838 return; /* bad pipe */
1841 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
1842 &sandybridge_display_wm_info,
1843 latency, &sprite_wm);
1845 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
1850 val = I915_READ(reg);
1851 val &= ~WM0_PIPE_SPRITE_MASK;
1852 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
1853 DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
1856 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1858 &sandybridge_display_srwm_info,
1859 SNB_READ_WM1_LATENCY() * 500,
1862 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
1866 I915_WRITE(WM1S_LP_ILK, sprite_wm);
1868 /* Only IVB has two more LP watermarks for sprite */
1869 if (!IS_IVYBRIDGE(dev))
1872 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1874 &sandybridge_display_srwm_info,
1875 SNB_READ_WM2_LATENCY() * 500,
1878 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
1882 I915_WRITE(WM2S_LP_IVB, sprite_wm);
1884 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1886 &sandybridge_display_srwm_info,
1887 SNB_READ_WM3_LATENCY() * 500,
1890 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
1894 I915_WRITE(WM3S_LP_IVB, sprite_wm);
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
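/*
 * Worked SR example with hypothetical numbers: 12000 ns of latency and a
 * line time of htotal / dotclock = 2200 / 148500 kHz ~= 14.8 us give
 * trunc(12.0 / 14.8) + 1 = 1 line; at 1920 pixels and 4 bytes per pixel
 * that is 7680 bytes, or 120 64-byte FIFO entries before the guard and
 * clock-crossing entries are added.
 */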
void intel_update_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(dev);
}
void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
				    uint32_t sprite_width, int pixel_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
						   pixel_size);
}
1947 static struct drm_i915_gem_object *
1948 intel_alloc_context_page(struct drm_device *dev)
1950 struct drm_i915_gem_object *ctx;
1953 DRM_LOCK_ASSERT(dev);
1955 ctx = i915_gem_alloc_object(dev, 4096);
1957 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
1961 ret = i915_gem_object_pin(ctx, 4096, true);
1963 DRM_ERROR("failed to pin power context: %d\n", ret);
1967 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
1969 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
1976 i915_gem_object_unpin(ctx);
1978 drm_gem_object_unreference(&ctx->base);
/*
 * Lock protecting IPS related data structures
 */
struct lock mchdev_lock;
LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
2015 void ironlake_enable_drps(struct drm_device *dev)
2017 struct drm_i915_private *dev_priv = dev->dev_private;
2018 u32 rgvmodectl = I915_READ(MEMMODECTL);
2019 u8 fmax, fmin, fstart, vstart;
2021 /* Enable temp reporting */
2022 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2023 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2025 /* 100ms RC evaluation intervals */
2026 I915_WRITE(RCUPEI, 100000);
2027 I915_WRITE(RCDNEI, 100000);
2029 /* Set max/min thresholds to 90ms and 80ms respectively */
2030 I915_WRITE(RCBMAXAVG, 90000);
2031 I915_WRITE(RCBMINAVG, 80000);
2033 I915_WRITE(MEMIHYST, 1);
2035 /* Set up min, max, and cur for interrupt handling */
2036 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2037 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2038 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2039 MEMMODE_FSTART_SHIFT;
2041 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2044 dev_priv->fmax = fmax; /* IPS callback will increase this */
2045 dev_priv->fstart = fstart;
2047 dev_priv->rps.max_delay = fstart;
2048 dev_priv->rps.min_delay = fmin;
2049 dev_priv->rps.cur_delay = fstart;
2051 DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
2052 fmax, fmin, fstart);
2054 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2057 * Interrupts will be enabled in ironlake_irq_postinstall
2060 I915_WRITE(VIDSTART, vstart);
2061 POSTING_READ(VIDSTART);
2063 rgvmodectl |= MEMMODE_SWMODE_EN;
2064 I915_WRITE(MEMMODECTL, rgvmodectl);
2066 if (_intel_wait_for(dev,
2067 (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
2069 DRM_ERROR("stuck trying to change perf mode\n");
2072 ironlake_set_drps(dev, fstart);
2074 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2076 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2077 dev_priv->last_count2 = I915_READ(0x112f4);
2078 nanotime(&dev_priv->last_time2);
2081 void ironlake_disable_drps(struct drm_device *dev)
2083 struct drm_i915_private *dev_priv = dev->dev_private;
2084 u16 rgvswctl = I915_READ16(MEMSWCTL);
2086 /* Ack interrupts, disable EFC interrupt */
2087 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2088 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2089 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2090 I915_WRITE(DEIIR, DE_PCU_EVENT);
2091 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2093 /* Go back to the starting frequency */
2094 ironlake_set_drps(dev, dev_priv->fstart);
2096 rgvswctl |= MEMCTL_CMD_STS;
2097 I915_WRITE(MEMSWCTL, rgvswctl);
2102 void gen6_set_rps(struct drm_device *dev, u8 val)
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2107 swreq = (val & 0x3ff) << 25;
2108 I915_WRITE(GEN6_RPNSWREQ, swreq);
2111 void gen6_disable_rps(struct drm_device *dev)
2113 struct drm_i915_private *dev_priv = dev->dev_private;
2115 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2116 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2117 I915_WRITE(GEN6_PMIER, 0);
2118 /* Complete PM interrupt masking here doesn't race with the rps work
2119 * item again unmasking PM interrupts because that is using a different
2120 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2121 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
2123 spin_lock(&dev_priv->rps.lock);
2124 dev_priv->rps.pm_iir = 0;
2125 spin_unlock(&dev_priv->rps.lock);
2127 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
static unsigned long intel_pxfreq(u32 vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;
	return (div * 133333) / ((1 << post) * pre);
}
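/*
 * e.g. a hypothetical vidfreq encoding with div = 4, post = 1 and pre = 2
 * decodes to (4 * 133333) / ((1 << 1) * 2) = 133333 in the register's base
 * units.
 */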
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
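/*
 * In i915_chipset_val() below the first column is matched against
 * dev_priv->c_m and the second against dev_priv->r_t (the memory clock);
 * the last two are the slope and intercept of the linear power estimate
 * ret = m * diff + c.
 */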
2159 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2161 u64 total_count, diff, ret;
2162 u32 count1, count2, count3, m = 0, c = 0;
2163 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2166 diff1 = now - dev_priv->last_time1;
2168 * sysctl(8) reads the value of sysctl twice in rapid
2169 * succession. There is high chance that it happens in the
2170 * same timer tick. Use the cached value to not divide by
2171 * zero and give the hw a chance to gather more samples.
2174 return (dev_priv->chipset_power);
2176 count1 = I915_READ(DMIEC);
2177 count2 = I915_READ(DDREC);
2178 count3 = I915_READ(CSIEC);
2180 total_count = count1 + count2 + count3;
2182 /* FIXME: handle per-counter overflow */
2183 if (total_count < dev_priv->last_count1) {
2184 diff = ~0UL - dev_priv->last_count1;
2185 diff += total_count;
2187 diff = total_count - dev_priv->last_count1;
2190 for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
2191 if (cparams[i].i == dev_priv->c_m &&
2192 cparams[i].t == dev_priv->r_t) {
2199 diff = diff / diff1;
2200 ret = ((m * diff) + c);
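	/* Linear fit: counter delta per millisecond, scaled by m, offset by c */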
2203 dev_priv->last_count1 = total_count;
2204 dev_priv->last_time1 = now;
2206 dev_priv->chipset_power = ret;
2210 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2212 unsigned long m, x, b;
2215 tsfs = I915_READ(TSFS);
2217 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2218 x = I915_READ8(TR1);
2220 b = tsfs & TSFS_INTR_MASK;
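	/*
	 * TSFS supplies a slope/intercept pair and TR1 the raw thermal
	 * reading; i915_gfx_val() feeds the result into its correction
	 * factor calculation.
	 */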
2222 return ((m * x) / 127) - b;
2225 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2227 static const struct v_table {
		u16 vd; /* desktop voltage, in 0.1 mV units */
		u16 vm; /* mobile voltage, in 0.1 mV units */
2360 if (dev_priv->info->is_mobile)
2361 return v_table[pxvid].vm;
2363 return v_table[pxvid].vd;
2366 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2368 struct timespec now, diff1;
2370 unsigned long diffms;
2373 if (dev_priv->info->gen != 5)
2378 timespecsub(&diff1, &dev_priv->last_time2);
2380 /* Don't divide by 0 */
2381 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2385 count = I915_READ(GFXEC);
2387 if (count < dev_priv->last_count2) {
2388 diff = ~0UL - dev_priv->last_count2;
2391 diff = count - dev_priv->last_count2;
2394 dev_priv->last_count2 = count;
2395 dev_priv->last_time2 = now;
2397 /* More magic constants... */
2399 diff = diff / (diffms * 10);
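	/*
	 * diff is now the GFXEC energy-counter delta normalised per unit of
	 * elapsed time; i915_gfx_val() adds its voltage/thermal correction on
	 * top of this running value.
	 */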
2400 dev_priv->gfx_power = diff;
2403 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2405 unsigned long t, corr, state1, corr2, state2;
2408 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
2409 pxvid = (pxvid >> 24) & 0x7f;
2410 ext_v = pvid_to_extvid(dev_priv, pxvid);
2414 t = i915_mch_val(dev_priv);
2416 /* Revel in the empirically derived constants */
2418 /* Correction factor in 1/100000 units */
2420 corr = ((t * 2349) + 135940);
2422 corr = ((t * 964) + 29317);
2424 corr = ((t * 301) + 1004);
2426 corr = corr * ((150142 * state1) / 10000 - 78642);
2428 corr2 = (corr * dev_priv->corr);
2430 state2 = (corr2 * state1) / 10000;
2431 state2 /= 100; /* convert to mW */
2433 i915_update_gfx_val(dev_priv);
2435 return dev_priv->gfx_power + state2;
2439 * i915_read_mch_val - return value for IPS use
2441 * Calculate and return a value for the IPS driver to use when deciding whether
2442 * we have thermal and power headroom to increase CPU or GPU power budget.
2444 unsigned long i915_read_mch_val(void)
2446 struct drm_i915_private *dev_priv;
2447 unsigned long chipset_val, graphics_val, ret = 0;
2449 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2452 dev_priv = i915_mch_dev;
2454 chipset_val = i915_chipset_val(dev_priv);
2455 graphics_val = i915_gfx_val(dev_priv);
2457 ret = chipset_val + graphics_val;
2460 lockmgr(&mchdev_lock, LK_RELEASE);
2466 * i915_gpu_raise - raise GPU frequency limit
2468 * Raise the limit; IPS indicates we have thermal headroom.
2470 bool i915_gpu_raise(void)
2472 struct drm_i915_private *dev_priv;
2475 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2476 if (!i915_mch_dev) {
2480 dev_priv = i915_mch_dev;
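	/*
	 * Lower delay values mean higher frequencies here, so raising the
	 * limit walks max_delay down towards fmax, and i915_gpu_lower()
	 * below walks it back up towards min_delay.
	 */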
2482 if (dev_priv->rps.max_delay > dev_priv->fmax)
2483 dev_priv->rps.max_delay--;
2486 lockmgr(&mchdev_lock, LK_RELEASE);
2492 * i915_gpu_lower - lower GPU frequency limit
2494 * IPS indicates we're close to a thermal limit, so throttle back the GPU
2495 * frequency maximum.
2497 bool i915_gpu_lower(void)
2499 struct drm_i915_private *dev_priv;
2502 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2503 if (!i915_mch_dev) {
2507 dev_priv = i915_mch_dev;
2509 if (dev_priv->rps.max_delay < dev_priv->rps.min_delay)
2510 dev_priv->rps.max_delay++;
2513 lockmgr(&mchdev_lock, LK_RELEASE);
 * i915_gpu_busy - indicate GPU busyness to IPS
2521 * Tell the IPS driver whether or not the GPU is busy.
2523 bool i915_gpu_busy(void)
2525 struct drm_i915_private *dev_priv;
2528 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2531 dev_priv = i915_mch_dev;
2533 ret = dev_priv->busy;
2536 lockmgr(&mchdev_lock, LK_RELEASE);
2542 * i915_gpu_turbo_disable - disable graphics turbo
2544 * Disable graphics turbo by resetting the max frequency and setting the
2545 * current frequency to the default.
2547 bool i915_gpu_turbo_disable(void)
2549 struct drm_i915_private *dev_priv;
2552 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2553 if (!i915_mch_dev) {
2557 dev_priv = i915_mch_dev;
2559 dev_priv->rps.max_delay = dev_priv->fstart;
2561 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
2565 lockmgr(&mchdev_lock, LK_RELEASE);
2570 void intel_init_emon(struct drm_device *dev)
2572 struct drm_i915_private *dev_priv = dev->dev_private;
	/* Disable PMON while we program the event weights */
2581 /* Program energy weights for various events */
2582 I915_WRITE(SDEW, 0x15040d00);
2583 I915_WRITE(CSIEW0, 0x007f0000);
2584 I915_WRITE(CSIEW1, 0x1e220004);
2585 I915_WRITE(CSIEW2, 0x04000004);
2587 for (i = 0; i < 5; i++)
2588 I915_WRITE(PEW + (i * 4), 0);
2589 for (i = 0; i < 3; i++)
2590 I915_WRITE(DEW + (i * 4), 0);
2592 /* Program P-state weights to account for frequency power adjustment */
2593 for (i = 0; i < 16; i++) {
2594 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
2595 unsigned long freq = intel_pxfreq(pxvidfreq);
2596 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
2601 val *= (freq / 1000);
2603 val /= (127*127*900);
2605 DRM_ERROR("bad pxval: %ld\n", val);
2608 /* Render standby states get 0 weight */
2612 for (i = 0; i < 4; i++) {
2613 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
2614 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
2615 I915_WRITE(PXW + (i * 4), val);
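		/* i.e. the 16 weights are packed four per PXW register, MSB first */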
2618 /* Adjust magic regs to magic values (more experimental results) */
2619 I915_WRITE(OGW0, 0);
2620 I915_WRITE(OGW1, 0);
2621 I915_WRITE(EG0, 0x00007f00);
2622 I915_WRITE(EG1, 0x0000000e);
2623 I915_WRITE(EG2, 0x000e0000);
2624 I915_WRITE(EG3, 0x68000300);
2625 I915_WRITE(EG4, 0x42000000);
2626 I915_WRITE(EG5, 0x00140031);
2630 for (i = 0; i < 8; i++)
2631 I915_WRITE(PXWL + (i * 4), 0);
2633 /* Enable PMON + select events */
2634 I915_WRITE(ECR, 0x80000019);
2636 lcfuse = I915_READ(LCFUSE02);
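	/*
	 * The fused correction factor stored here is applied later by
	 * i915_gfx_val() when converting the raw energy estimate to mW.
	 */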
2638 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
2641 static int intel_enable_rc6(struct drm_device *dev)
2644 * Respect the kernel parameter if it is set
2646 if (i915_enable_rc6 >= 0)
2647 return i915_enable_rc6;
2650 * Disable RC6 on Ironlake
2652 if (INTEL_INFO(dev)->gen == 5)
2656 * Enable rc6 on Sandybridge if DMA remapping is disabled
2658 if (INTEL_INFO(dev)->gen == 6) {
2660 "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
2661 intel_iommu_enabled ? "true" : "false",
2662 !intel_iommu_enabled ? "en" : "dis");
2663 return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
2665 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
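	/*
	 * The return value is a bitmask; gen6_enable_rps() translates it into
	 * the matching GEN6_RC_CTL_RC6*_ENABLE bits.
	 */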
2666 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2669 void gen6_enable_rps(struct drm_i915_private *dev_priv)
2671 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2672 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2673 u32 pcu_mbox, rc6_mask = 0;
2675 int cur_freq, min_freq, max_freq;
2679 /* Here begins a magic sequence of register writes to enable
2680 * auto-downclocking.
	 * Perhaps there is some value in exposing these to
2685 I915_WRITE(GEN6_RC_STATE, 0);
2687 /* Clear the DBG now so we don't confuse earlier errors */
2688 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2689 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2690 I915_WRITE(GTFIFODBG, gtfifodbg);
2693 gen6_gt_force_wake_get(dev_priv);
2695 /* disable the counters and set deterministic thresholds */
2696 I915_WRITE(GEN6_RC_CONTROL, 0);
2698 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2699 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2700 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2701 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2702 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2704 for (i = 0; i < I915_NUM_RINGS; i++)
2705 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
2707 I915_WRITE(GEN6_RC_SLEEP, 0);
2708 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2709 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2710 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2711 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
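	/*
	 * The thresholds above control how long the GPU must sit idle before
	 * the hardware promotes it into RC6/RC6p/RC6pp, while the wake rate
	 * limits bound how often it may pop back out.
	 */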
2713 rc6_mode = intel_enable_rc6(dev_priv->dev);
2714 if (rc6_mode & INTEL_RC6_ENABLE)
2715 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2717 if (rc6_mode & INTEL_RC6p_ENABLE)
2718 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2720 if (rc6_mode & INTEL_RC6pp_ENABLE)
2721 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2723 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2724 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
2725 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
2726 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
2728 I915_WRITE(GEN6_RC_CONTROL,
2730 GEN6_RC_CTL_EI_MODE(1) |
2731 GEN6_RC_CTL_HW_ENABLE);
2733 I915_WRITE(GEN6_RPNSWREQ,
2734 GEN6_FREQUENCY(10) |
2736 GEN6_AGGRESSIVE_TURBO);
2737 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2738 GEN6_FREQUENCY(12));
2740 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2741 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2744 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2745 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2746 I915_WRITE(GEN6_RP_UP_EI, 100000);
2747 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2748 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2749 I915_WRITE(GEN6_RP_CONTROL,
2750 GEN6_RP_MEDIA_TURBO |
2751 GEN6_RP_MEDIA_HW_MODE |
2752 GEN6_RP_MEDIA_IS_GFX |
2754 GEN6_RP_UP_BUSY_AVG |
2755 GEN6_RP_DOWN_IDLE_CONT);
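	/*
	 * From here on the RP unit evaluates the busy/idle windows programmed
	 * above and signals up/down events through the PM interrupts enabled
	 * at the end of this function; the rps work item then applies the new
	 * frequency via gen6_set_rps().
	 */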
2757 if (_intel_wait_for(dev,
2758 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2760 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2762 I915_WRITE(GEN6_PCODE_DATA, 0);
2763 I915_WRITE(GEN6_PCODE_MAILBOX,
2765 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2766 if (_intel_wait_for(dev,
2767 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2769 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2771 min_freq = (rp_state_cap & 0xff0000) >> 16;
2772 max_freq = rp_state_cap & 0xff;
2773 cur_freq = (gt_perf_status & 0xff00) >> 8;
2775 /* Check for overclock support */
2776 if (_intel_wait_for(dev,
2777 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2779 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2780 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2781 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2782 if (_intel_wait_for(dev,
2783 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2785 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2786 if (pcu_mbox & (1<<31)) { /* OC supported */
2787 max_freq = pcu_mbox & 0xff;
		DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
	/* In units of 50MHz */
2792 dev_priv->rps.max_delay = max_freq;
2793 dev_priv->rps.min_delay = min_freq;
2794 dev_priv->rps.cur_delay = cur_freq;
2796 /* requires MSI enabled */
2797 I915_WRITE(GEN6_PMIER,
2798 GEN6_PM_MBOX_EVENT |
2799 GEN6_PM_THERMAL_EVENT |
2800 GEN6_PM_RP_DOWN_TIMEOUT |
2801 GEN6_PM_RP_UP_THRESHOLD |
2802 GEN6_PM_RP_DOWN_THRESHOLD |
2803 GEN6_PM_RP_UP_EI_EXPIRED |
2804 GEN6_PM_RP_DOWN_EI_EXPIRED);
2805 spin_lock(&dev_priv->rps.lock);
2806 WARN_ON(dev_priv->rps.pm_iir != 0);
2807 I915_WRITE(GEN6_PMIMR, 0);
2808 spin_unlock(&dev_priv->rps.lock);
2809 /* enable all PM interrupts */
2810 I915_WRITE(GEN6_PMINTRMSK, 0);
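	/*
	 * PMIER selects which PM events may raise interrupts at all, while
	 * PMIMR/PMINTRMSK mask them; clearing both masks here is the
	 * counterpart of the blanket masking done in gen6_disable_rps().
	 */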
2812 gen6_gt_force_wake_put(dev_priv);
2815 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2817 struct drm_device *dev;
2819 int gpu_freq, ia_freq, max_ia_freq;
2820 int scaling_factor = 180;
2823 dev = dev_priv->dev;
2825 max_ia_freq = cpufreq_quick_get_max(0);
	 * Default to measured freq if none found; the PCU will ensure we don't go
2831 max_ia_freq = tsc_freq;
2833 /* Convert from Hz to MHz */
2834 max_ia_freq /= 1000;
2836 tsc_freq = atomic_load_acq_64(&tsc_freq);
2837 max_ia_freq = tsc_freq / 1000 / 1000;
2843 * For each potential GPU frequency, load a ring frequency we'd like
2844 * to use for memory access. We do this by specifying the IA frequency
2845 * the PCU should use as a reference to determine the ring frequency.
2847 for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
2849 int diff = dev_priv->rps.max_delay - gpu_freq;
2853 * For GPU frequencies less than 750MHz, just use the lowest
2856 if (gpu_freq < min_freq)
2859 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2861 ia_freq = (ia_freq + d / 2) / d;
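		/*
		 * Each GPU ratio step below the maximum nudges the requested
		 * IA reference down by scaling_factor/2 MHz, rounded to the
		 * nearest multiple of d before being handed to the PCU.
		 */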
2863 I915_WRITE(GEN6_PCODE_DATA,
2864 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2866 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2867 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2868 if (_intel_wait_for(dev,
2869 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2871 DRM_ERROR("pcode write of freq table timed out\n");
2879 void ironlake_init_clock_gating(struct drm_device *dev)
2881 struct drm_i915_private *dev_priv = dev->dev_private;
2882 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
2884 /* Required for FBC */
2885 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
2886 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
2887 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
2889 I915_WRITE(PCH_3DCGDIS0,
2890 MARIUNIT_CLOCK_GATE_DISABLE |
2891 SVSMUNIT_CLOCK_GATE_DISABLE);
2892 I915_WRITE(PCH_3DCGDIS1,
2893 VFMUNIT_CLOCK_GATE_DISABLE);
2896 * According to the spec the following bits should be set in
2897 * order to enable memory self-refresh
2898 * The bit 22/21 of 0x42004
2899 * The bit 5 of 0x42020
2900 * The bit 15 of 0x45000
2902 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2903 (I915_READ(ILK_DISPLAY_CHICKEN2) |
2904 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
2905 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
2906 I915_WRITE(DISP_ARB_CTL,
2907 (I915_READ(DISP_ARB_CTL) |
2909 I915_WRITE(WM3_LP_ILK, 0);
2910 I915_WRITE(WM2_LP_ILK, 0);
2911 I915_WRITE(WM1_LP_ILK, 0);
	 * Based on documentation from the hardware team, the following bits
	 * should be set unconditionally in order to enable FBC.
2916 * The bit 22 of 0x42000
2917 * The bit 22 of 0x42004
2918 * The bit 7,8,9 of 0x42020.
2920 if (IS_IRONLAKE_M(dev)) {
2921 I915_WRITE(ILK_DISPLAY_CHICKEN1,
2922 I915_READ(ILK_DISPLAY_CHICKEN1) |
2924 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2925 I915_READ(ILK_DISPLAY_CHICKEN2) |
2929 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
2931 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2932 I915_READ(ILK_DISPLAY_CHICKEN2) |
2933 ILK_ELPIN_409_SELECT);
2934 I915_WRITE(_3D_CHICKEN2,
2935 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
2936 _3D_CHICKEN2_WM_READ_PIPELINED);
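	/*
	 * _3D_CHICKEN2 is a masked register: a bit only changes if the same
	 * bit is also set in the upper 16 bits of the write, hence the
	 * bit << 16 | bit pattern (and the _MASKED_BIT_* helpers elsewhere).
	 */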
2938 /* WaDisableRenderCachePipelinedFlush */
2939 I915_WRITE(CACHE_MODE_0,
2940 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
2942 ibx_init_clock_gating(dev);
2945 void cpt_init_clock_gating(struct drm_device *dev)
2947 struct drm_i915_private *dev_priv = dev->dev_private;
2951 * On Ibex Peak and Cougar Point, we need to disable clock
2952 * gating for the panel power sequencer or it will fail to
2953 * start up when no ports are active.
2955 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
2956 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
2957 DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes a weird display corruption (a few pixels shifted
	 * downward) seen only on the LVDS panel of some HP Ivy Bridge laptops.
2962 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
2963 /* WADP0ClockGatingDisable */
2964 for_each_pipe(pipe) {
2965 I915_WRITE(TRANS_CHICKEN1(pipe),
2966 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
2970 void gen6_init_clock_gating(struct drm_device *dev)
2972 struct drm_i915_private *dev_priv = dev->dev_private;
2974 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
2976 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
2978 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2979 I915_READ(ILK_DISPLAY_CHICKEN2) |
2980 ILK_ELPIN_409_SELECT);
2982 /* WaDisableHiZPlanesWhenMSAAEnabled */
2983 I915_WRITE(_3D_CHICKEN,
2984 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
2986 /* WaSetupGtModeTdRowDispatch */
2987 if (IS_SNB_GT1(dev))
2988 I915_WRITE(GEN6_GT_MODE,
2989 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
2991 I915_WRITE(WM3_LP_ILK, 0);
2992 I915_WRITE(WM2_LP_ILK, 0);
2993 I915_WRITE(WM1_LP_ILK, 0);
2995 I915_WRITE(CACHE_MODE_0,
2996 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
2998 I915_WRITE(GEN6_UCGCTL1,
2999 I915_READ(GEN6_UCGCTL1) |
3000 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
3001 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
3003 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3004 * gating disable must be set. Failure to set it results in
3005 * flickering pixels due to Z write ordering failures after
3006 * some amount of runtime in the Mesa "fire" demo, and Unigine
3007 * Sanctuary and Tropics, and apparently anything else with
3008 * alpha test or pixel discard.
3010 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we have not debugged an actual testcase that demonstrates it.
3013 * Also apply WaDisableVDSUnitClockGating and
3014 * WaDisableRCPBUnitClockGating.
3016 I915_WRITE(GEN6_UCGCTL2,
3017 GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3018 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3019 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3021 /* Bspec says we need to always set all mask bits. */
3022 I915_WRITE(_3D_CHICKEN3, (0xFFFF << 16) |
3023 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL);
3026 * According to the spec the following bits should be
3027 * set in order to enable memory self-refresh and fbc:
3028 * The bit21 and bit22 of 0x42000
3029 * The bit21 and bit22 of 0x42004
3030 * The bit5 and bit7 of 0x42020
3031 * The bit14 of 0x70180
3032 * The bit14 of 0x71180
3034 I915_WRITE(ILK_DISPLAY_CHICKEN1,
3035 I915_READ(ILK_DISPLAY_CHICKEN1) |
3036 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
3037 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3038 I915_READ(ILK_DISPLAY_CHICKEN2) |
3039 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3040 I915_WRITE(ILK_DSPCLK_GATE_D,
3041 I915_READ(ILK_DSPCLK_GATE_D) |
3042 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
3043 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
3045 /* WaMbcDriverBootEnable */
3046 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3047 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3049 for_each_pipe(pipe) {
3050 I915_WRITE(DSPCNTR(pipe),
3051 I915_READ(DSPCNTR(pipe)) |
3052 DISPPLANE_TRICKLE_FEED_DISABLE);
3053 intel_flush_display_plane(dev_priv, pipe);
3056 /* The default value should be 0x200 according to docs, but the two
3057 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
3058 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
3059 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
3061 cpt_init_clock_gating(dev);
3064 void ivybridge_init_clock_gating(struct drm_device *dev)
3066 struct drm_i915_private *dev_priv = dev->dev_private;
3068 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3070 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3072 I915_WRITE(WM3_LP_ILK, 0);
3073 I915_WRITE(WM2_LP_ILK, 0);
3074 I915_WRITE(WM1_LP_ILK, 0);
3076 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3077 * This implements the WaDisableRCZUnitClockGating workaround.
3079 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3081 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3083 I915_WRITE(IVB_CHICKEN3,
3084 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3085 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3087 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3088 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3089 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3091 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3092 I915_WRITE(GEN7_L3CNTLREG1,
3093 GEN7_WA_FOR_GEN7_L3_CONTROL);
3094 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3095 GEN7_WA_L3_CHICKEN_MODE);
3097 /* This is required by WaCatErrorRejectionIssue */
3098 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3099 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3100 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3102 for_each_pipe(pipe) {
3103 I915_WRITE(DSPCNTR(pipe),
3104 I915_READ(DSPCNTR(pipe)) |
3105 DISPPLANE_TRICKLE_FEED_DISABLE);
3106 intel_flush_display_plane(dev_priv, pipe);
3110 void g4x_init_clock_gating(struct drm_device *dev)
3112 struct drm_i915_private *dev_priv = dev->dev_private;
3113 uint32_t dspclk_gate;
3115 I915_WRITE(RENCLK_GATE_D1, 0);
3116 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3117 GS_UNIT_CLOCK_GATE_DISABLE |
3118 CL_UNIT_CLOCK_GATE_DISABLE);
3119 I915_WRITE(RAMCLK_GATE_D, 0);
3120 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3121 OVRUNIT_CLOCK_GATE_DISABLE |
3122 OVCUNIT_CLOCK_GATE_DISABLE;
3124 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3125 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3128 void crestline_init_clock_gating(struct drm_device *dev)
3130 struct drm_i915_private *dev_priv = dev->dev_private;
3132 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
3133 I915_WRITE(RENCLK_GATE_D2, 0);
3134 I915_WRITE(DSPCLK_GATE_D, 0);
3135 I915_WRITE(RAMCLK_GATE_D, 0);
3136 I915_WRITE16(DEUC, 0);
3139 void broadwater_init_clock_gating(struct drm_device *dev)
3141 struct drm_i915_private *dev_priv = dev->dev_private;
3143 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
3144 I965_RCC_CLOCK_GATE_DISABLE |
3145 I965_RCPB_CLOCK_GATE_DISABLE |
3146 I965_ISC_CLOCK_GATE_DISABLE |
3147 I965_FBC_CLOCK_GATE_DISABLE);
3148 I915_WRITE(RENCLK_GATE_D2, 0);
3151 void gen3_init_clock_gating(struct drm_device *dev)
3153 struct drm_i915_private *dev_priv = dev->dev_private;
3154 u32 dstate = I915_READ(D_STATE);
3156 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
3157 DSTATE_DOT_CLOCK_GATING;
3158 I915_WRITE(D_STATE, dstate);
3161 void i85x_init_clock_gating(struct drm_device *dev)
3163 struct drm_i915_private *dev_priv = dev->dev_private;
3165 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
3168 void i830_init_clock_gating(struct drm_device *dev)
3170 struct drm_i915_private *dev_priv = dev->dev_private;
3172 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3175 void ibx_init_clock_gating(struct drm_device *dev)
3177 struct drm_i915_private *dev_priv = dev->dev_private;
3180 * On Ibex Peak and Cougar Point, we need to disable clock
3181 * gating for the panel power sequencer or it will fail to
3182 * start up when no ports are active.
3184 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3187 static void ironlake_teardown_rc6(struct drm_device *dev)
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3191 if (dev_priv->renderctx) {
3192 i915_gem_object_unpin(dev_priv->renderctx);
3193 drm_gem_object_unreference(&dev_priv->renderctx->base);
3194 dev_priv->renderctx = NULL;
3197 if (dev_priv->pwrctx) {
3198 i915_gem_object_unpin(dev_priv->pwrctx);
3199 drm_gem_object_unreference(&dev_priv->pwrctx->base);
3200 dev_priv->pwrctx = NULL;
3204 void ironlake_disable_rc6(struct drm_device *dev)
3206 struct drm_i915_private *dev_priv = dev->dev_private;
3208 if (I915_READ(PWRCTXA)) {
3209 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3210 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3211 (void)_intel_wait_for(dev,
3212 ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3215 I915_WRITE(PWRCTXA, 0);
3216 POSTING_READ(PWRCTXA);
3218 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3219 POSTING_READ(RSTDBYCTL);
3222 ironlake_teardown_rc6(dev);
3225 static int ironlake_setup_rc6(struct drm_device *dev)
3227 struct drm_i915_private *dev_priv = dev->dev_private;
3229 if (dev_priv->renderctx == NULL)
3230 dev_priv->renderctx = intel_alloc_context_page(dev);
3231 if (!dev_priv->renderctx)
3234 if (dev_priv->pwrctx == NULL)
3235 dev_priv->pwrctx = intel_alloc_context_page(dev);
3236 if (!dev_priv->pwrctx) {
3237 ironlake_teardown_rc6(dev);
3244 void ironlake_enable_rc6(struct drm_device *dev)
3246 struct drm_i915_private *dev_priv = dev->dev_private;
3249 /* rc6 disabled by default due to repeated reports of hanging during
3252 if (!intel_enable_rc6(dev))
3256 ret = ironlake_setup_rc6(dev);
3263 * GPU can automatically power down the render unit if given a page
3266 ret = BEGIN_LP_RING(6);
3268 ironlake_teardown_rc6(dev);
3273 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3274 OUT_RING(MI_SET_CONTEXT);
3275 OUT_RING(dev_priv->renderctx->gtt_offset |
3277 MI_SAVE_EXT_STATE_EN |
3278 MI_RESTORE_EXT_STATE_EN |
3279 MI_RESTORE_INHIBIT);
3280 OUT_RING(MI_SUSPEND_FLUSH);
3286 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
	 * does an implicit flush; combined with the MI_FLUSH above, it should
	 * be safe to assume that renderctx is valid.
3290 ret = intel_wait_ring_idle(LP_RING(dev_priv));
3292 DRM_ERROR("failed to enable ironlake power savings\n");
3293 ironlake_teardown_rc6(dev);
3298 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
3299 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
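	/*
	 * With the power context latched in PWRCTXA (PWRCTX_EN set) and the
	 * software-exit bit cleared in RSTDBYCTL, the render unit is free to
	 * drop into RC6 whenever it goes idle.
	 */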
3303 void intel_init_clock_gating(struct drm_device *dev)
3305 struct drm_i915_private *dev_priv = dev->dev_private;
3307 dev_priv->display.init_clock_gating(dev);
3309 if (dev_priv->display.init_pch_clock_gating)
3310 dev_priv->display.init_pch_clock_gating(dev);