2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
29 #include "intel_drv.h"
32 void i8xx_disable_fbc(struct drm_device *dev)
34 struct drm_i915_private *dev_priv = dev->dev_private;
37 /* Disable compression */
38 fbc_ctl = I915_READ(FBC_CONTROL);
39 if ((fbc_ctl & FBC_CTL_EN) == 0)
42 fbc_ctl &= ~FBC_CTL_EN;
43 I915_WRITE(FBC_CONTROL, fbc_ctl);
45 /* Wait for compressing bit to clear */
46 if (_intel_wait_for(dev,
47 (I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10,
49 DRM_DEBUG_KMS("FBC idle timed out\n");
53 DRM_DEBUG_KMS("disabled FBC\n");
56 void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
58 struct drm_device *dev = crtc->dev;
59 struct drm_i915_private *dev_priv = dev->dev_private;
60 struct drm_framebuffer *fb = crtc->fb;
61 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
62 struct drm_i915_gem_object *obj = intel_fb->obj;
63 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
66 u32 fbc_ctl, fbc_ctl2;
68 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
69 if (fb->pitches[0] < cfb_pitch)
70 cfb_pitch = fb->pitches[0];
72 /* FBC_CTL wants 64B units */
73 cfb_pitch = (cfb_pitch / 64) - 1;
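/*
 * Worked example (illustrative numbers): with a CFB pitch of 2048 bytes the
 * line above programs (2048 / 64) - 1 = 31, i.e. the stride field is written
 * in 64-byte units, minus one.
 */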
74 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
77 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
78 I915_WRITE(FBC_TAG + (i * 4), 0);
81 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
83 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
84 I915_WRITE(FBC_FENCE_OFF, crtc->y);
87 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
89 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
90 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
91 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
92 fbc_ctl |= obj->fence_reg;
93 I915_WRITE(FBC_CONTROL, fbc_ctl);
95 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
96 cfb_pitch, crtc->y, intel_crtc->plane);
99 bool i8xx_fbc_enabled(struct drm_device *dev)
101 struct drm_i915_private *dev_priv = dev->dev_private;
103 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
106 void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
108 struct drm_device *dev = crtc->dev;
109 struct drm_i915_private *dev_priv = dev->dev_private;
110 struct drm_framebuffer *fb = crtc->fb;
111 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
112 struct drm_i915_gem_object *obj = intel_fb->obj;
113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
114 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
115 unsigned long stall_watermark = 200;
118 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
119 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
120 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
122 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
123 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
124 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
125 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
128 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
130 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
133 void g4x_disable_fbc(struct drm_device *dev)
135 struct drm_i915_private *dev_priv = dev->dev_private;
138 /* Disable compression */
139 dpfc_ctl = I915_READ(DPFC_CONTROL);
140 if (dpfc_ctl & DPFC_CTL_EN) {
141 dpfc_ctl &= ~DPFC_CTL_EN;
142 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
144 DRM_DEBUG_KMS("disabled FBC\n");
148 bool g4x_fbc_enabled(struct drm_device *dev)
150 struct drm_i915_private *dev_priv = dev->dev_private;
152 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
155 static void sandybridge_blit_fbc_update(struct drm_device *dev)
157 struct drm_i915_private *dev_priv = dev->dev_private;
160 /* Make sure blitter notifies FBC of writes */
161 gen6_gt_force_wake_get(dev_priv);
162 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
163 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
164 GEN6_BLITTER_LOCK_SHIFT;
165 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
166 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
167 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
168 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
169 GEN6_BLITTER_LOCK_SHIFT);
170 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
171 POSTING_READ(GEN6_BLITTER_ECOSKPD);
172 gen6_gt_force_wake_put(dev_priv);
175 void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
177 struct drm_device *dev = crtc->dev;
178 struct drm_i915_private *dev_priv = dev->dev_private;
179 struct drm_framebuffer *fb = crtc->fb;
180 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
181 struct drm_i915_gem_object *obj = intel_fb->obj;
182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
183 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
184 unsigned long stall_watermark = 200;
187 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
188 dpfc_ctl &= DPFC_RESERVED;
189 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
190 /* Set persistent mode for front-buffer rendering, ala X. */
191 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
192 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
193 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
195 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
196 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
197 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
198 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
199 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
201 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
204 I915_WRITE(SNB_DPFC_CTL_SA,
205 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
206 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
207 sandybridge_blit_fbc_update(dev);
210 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
213 void ironlake_disable_fbc(struct drm_device *dev)
215 struct drm_i915_private *dev_priv = dev->dev_private;
218 /* Disable compression */
219 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
220 if (dpfc_ctl & DPFC_CTL_EN) {
221 dpfc_ctl &= ~DPFC_CTL_EN;
222 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
224 DRM_DEBUG_KMS("disabled FBC\n");
228 bool ironlake_fbc_enabled(struct drm_device *dev)
230 struct drm_i915_private *dev_priv = dev->dev_private;
232 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
235 bool intel_fbc_enabled(struct drm_device *dev)
237 struct drm_i915_private *dev_priv = dev->dev_private;
239 if (!dev_priv->display.fbc_enabled)
242 return dev_priv->display.fbc_enabled(dev);
245 static void intel_fbc_work_fn(void *arg, int pending)
247 struct intel_fbc_work *work = arg;
248 struct drm_device *dev = work->crtc->dev;
249 struct drm_i915_private *dev_priv = dev->dev_private;
252 if (work == dev_priv->fbc_work) {
/* Double check that we haven't switched fb without cancelling
 * the prior work.
 */
256 if (work->crtc->fb == work->fb) {
257 dev_priv->display.enable_fbc(work->crtc,
260 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
261 dev_priv->cfb_fb = work->crtc->fb->base.id;
262 dev_priv->cfb_y = work->crtc->y;
265 dev_priv->fbc_work = NULL;
269 drm_free(work, DRM_MEM_KMS);
272 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
276 if (dev_priv->fbc_work == NULL)
279 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
281 /* Synchronisation is provided by struct_mutex and checking of
282 * dev_priv->fbc_work, so we can perform the cancellation
283 * entirely asynchronously.
285 if (taskqueue_cancel_timeout(dev_priv->tq, &dev_priv->fbc_work->task,
287 /* tasklet was killed before being run, clean up */
288 drm_free(dev_priv->fbc_work, DRM_MEM_KMS);
/* Mark the work as no longer wanted so that if it does
 * wake up (because the work was already running and waiting
 * for our mutex), it will discover that it is no longer
 * necessary.
 */
295 dev_priv->fbc_work = NULL;
298 void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
300 struct intel_fbc_work *work;
301 struct drm_device *dev = crtc->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private;
304 if (!dev_priv->display.enable_fbc)
307 intel_cancel_fbc_work(dev_priv);
309 work = kmalloc(sizeof(*work), DRM_MEM_KMS, M_WAITOK | M_ZERO);
312 work->interval = interval;
313 TIMEOUT_TASK_INIT(dev_priv->tq, &work->task, 0, intel_fbc_work_fn,
316 dev_priv->fbc_work = work;
318 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
320 /* Delay the actual enabling to let pageflipping cease and the
321 * display to settle before starting the compression. Note that
322 * this delay also serves a second purpose: it allows for a
323 * vblank to pass after disabling the FBC before we attempt
324 * to modify the control registers.
326 * A more complicated solution would involve tracking vblanks
327 * following the termination of the page-flipping sequence
328 * and indeed performing the enable as a co-routine and not
329 * waiting synchronously upon the vblank.
331 taskqueue_enqueue_timeout(dev_priv->tq, &work->task,
332 msecs_to_jiffies(50));
335 void intel_disable_fbc(struct drm_device *dev)
337 struct drm_i915_private *dev_priv = dev->dev_private;
339 intel_cancel_fbc_work(dev_priv);
341 if (!dev_priv->display.disable_fbc)
344 dev_priv->display.disable_fbc(dev);
345 dev_priv->cfb_plane = -1;
349 * intel_update_fbc - enable/disable FBC as needed
350 * @dev: the drm_device
352 * Set up the framebuffer compression hardware at mode set time. We
353 * enable it if possible:
354 * - plane A only (on pre-965)
 * - no pixel multiply/line duplication
356 * - no alpha buffer discard
358 * - framebuffer <= 2048 in width, 1536 in height
360 * We can't assume that any compression will take place (worst case),
361 * so the compressed buffer has to be the same size as the uncompressed
362 * one. It also must reside (along with the line length buffer) in
365 * We need to enable/disable FBC on a global basis.
367 void intel_update_fbc(struct drm_device *dev)
369 struct drm_i915_private *dev_priv = dev->dev_private;
370 struct drm_crtc *crtc = NULL, *tmp_crtc;
371 struct intel_crtc *intel_crtc;
372 struct drm_framebuffer *fb;
373 struct intel_framebuffer *intel_fb;
374 struct drm_i915_gem_object *obj;
382 if (!I915_HAS_FBC(dev))
386 * If FBC is already on, we just have to verify that we can
387 * keep it that way...
388 * Need to disable if:
389 * - more than one pipe is active
390 * - changing FBC params (stride, fence, mode)
391 * - new fb is too large to fit in compressed buffer
392 * - going to an unsupported config (interlace, pixel multiply, etc.)
394 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
395 if (tmp_crtc->enabled && tmp_crtc->fb) {
397 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
398 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
405 if (!crtc || crtc->fb == NULL) {
406 DRM_DEBUG_KMS("no output, disabling\n");
407 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
411 intel_crtc = to_intel_crtc(crtc);
413 intel_fb = to_intel_framebuffer(fb);
416 enable_fbc = i915_enable_fbc;
417 if (enable_fbc < 0) {
418 DRM_DEBUG_KMS("fbc set to per-chip default\n");
420 if (INTEL_INFO(dev)->gen <= 6)
424 DRM_DEBUG_KMS("fbc disabled per module param\n");
425 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
428 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
429 DRM_DEBUG_KMS("framebuffer too large, disabling "
431 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
434 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
435 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
436 DRM_DEBUG_KMS("mode incompatible with compression, "
438 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
441 if ((crtc->mode.hdisplay > 2048) ||
442 (crtc->mode.vdisplay > 1536)) {
443 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
444 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
447 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
448 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
449 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
452 if (obj->tiling_mode != I915_TILING_X ||
453 obj->fence_reg == I915_FENCE_REG_NONE) {
454 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
455 dev_priv->no_fbc_reason = FBC_NOT_TILED;
460 /* If the kernel debugger is active, always disable compression */
465 /* If the scanout has not changed, don't modify the FBC settings.
466 * Note that we make the fundamental assumption that the fb->obj
467 * cannot be unpinned (and have its GTT offset and fence revoked)
468 * without first being decoupled from the scanout and FBC disabled.
470 if (dev_priv->cfb_plane == intel_crtc->plane &&
471 dev_priv->cfb_fb == fb->base.id &&
472 dev_priv->cfb_y == crtc->y)
475 if (intel_fbc_enabled(dev)) {
476 /* We update FBC along two paths, after changing fb/crtc
477 * configuration (modeswitching) and after page-flipping
478 * finishes. For the latter, we know that not only did
479 * we disable the FBC at the start of the page-flip
480 * sequence, but also more than one vblank has passed.
482 * For the former case of modeswitching, it is possible
483 * to switch between two FBC valid configurations
484 * instantaneously so we do need to disable the FBC
485 * before we can modify its control registers. We also
486 * have to wait for the next vblank for that to take
487 * effect. However, since we delay enabling FBC we can
488 * assume that a vblank has passed since disabling and
489 * that we can safely alter the registers in the deferred
492 * In the scenario that we go from a valid to invalid
493 * and then back to valid FBC configuration we have
494 * no strict enforcement that a vblank occurred since
495 * disabling the FBC. However, along all current pipe
496 * disabling paths we do need to wait for a vblank at
497 * some point. And we wait before enabling FBC anyway.
499 DRM_DEBUG_KMS("disabling active FBC for update\n");
500 intel_disable_fbc(dev);
503 intel_enable_fbc(crtc, 500);
507 /* Multiple disables should be harmless */
508 if (intel_fbc_enabled(dev)) {
509 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
510 intel_disable_fbc(dev);
514 void i915_ironlake_get_mem_freq(struct drm_device *dev);
515 void i915_pineview_get_mem_freq(struct drm_device *dev)
517 drm_i915_private_t *dev_priv = dev->dev_private;
520 tmp = I915_READ(CLKCFG);
522 switch (tmp & CLKCFG_FSB_MASK) {
524 dev_priv->fsb_freq = 533; /* 133*4 */
527 dev_priv->fsb_freq = 800; /* 200*4 */
530 dev_priv->fsb_freq = 667; /* 167*4 */
533 dev_priv->fsb_freq = 400; /* 100*4 */
537 switch (tmp & CLKCFG_MEM_MASK) {
539 dev_priv->mem_freq = 533;
542 dev_priv->mem_freq = 667;
545 dev_priv->mem_freq = 800;
549 /* detect pineview DDR3 setting */
550 tmp = I915_READ(CSHRDDR3CTL);
551 dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
554 void i915_ironlake_get_mem_freq(struct drm_device *dev)
556 drm_i915_private_t *dev_priv = dev->dev_private;
559 ddrpll = I915_READ16(DDRMPLL1);
560 csipll = I915_READ16(CSIPLL0);
562 switch (ddrpll & 0xff) {
564 dev_priv->mem_freq = 800;
567 dev_priv->mem_freq = 1066;
570 dev_priv->mem_freq = 1333;
573 dev_priv->mem_freq = 1600;
576 DRM_DEBUG("unknown memory frequency 0x%02x\n",
578 dev_priv->mem_freq = 0;
582 dev_priv->r_t = dev_priv->mem_freq;
584 switch (csipll & 0x3ff) {
586 dev_priv->fsb_freq = 3200;
589 dev_priv->fsb_freq = 3733;
592 dev_priv->fsb_freq = 4266;
595 dev_priv->fsb_freq = 4800;
598 dev_priv->fsb_freq = 5333;
601 dev_priv->fsb_freq = 5866;
604 dev_priv->fsb_freq = 6400;
607 DRM_DEBUG("unknown fsb frequency 0x%04x\n",
609 dev_priv->fsb_freq = 0;
613 if (dev_priv->fsb_freq == 3200) {
615 } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
622 /* Pineview has different values for various configs */
623 static const struct intel_watermark_params pineview_display_wm = {
624 PINEVIEW_DISPLAY_FIFO,
628 PINEVIEW_FIFO_LINE_SIZE
630 static const struct intel_watermark_params pineview_display_hplloff_wm = {
631 PINEVIEW_DISPLAY_FIFO,
633 PINEVIEW_DFT_HPLLOFF_WM,
635 PINEVIEW_FIFO_LINE_SIZE
637 static const struct intel_watermark_params pineview_cursor_wm = {
638 PINEVIEW_CURSOR_FIFO,
639 PINEVIEW_CURSOR_MAX_WM,
640 PINEVIEW_CURSOR_DFT_WM,
641 PINEVIEW_CURSOR_GUARD_WM,
642 PINEVIEW_FIFO_LINE_SIZE,
644 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
645 PINEVIEW_CURSOR_FIFO,
646 PINEVIEW_CURSOR_MAX_WM,
647 PINEVIEW_CURSOR_DFT_WM,
648 PINEVIEW_CURSOR_GUARD_WM,
649 PINEVIEW_FIFO_LINE_SIZE
651 static const struct intel_watermark_params g4x_wm_info = {
658 static const struct intel_watermark_params g4x_cursor_wm_info = {
665 static const struct intel_watermark_params i965_cursor_wm_info = {
672 static const struct intel_watermark_params i945_wm_info = {
679 static const struct intel_watermark_params i915_wm_info = {
686 static const struct intel_watermark_params i855_wm_info = {
693 static const struct intel_watermark_params i830_wm_info = {
701 static const struct intel_watermark_params ironlake_display_wm_info = {
708 static const struct intel_watermark_params ironlake_cursor_wm_info = {
715 static const struct intel_watermark_params ironlake_display_srwm_info = {
717 ILK_DISPLAY_MAX_SRWM,
718 ILK_DISPLAY_DFT_SRWM,
722 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
730 static const struct intel_watermark_params sandybridge_display_wm_info = {
737 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
744 static const struct intel_watermark_params sandybridge_display_srwm_info = {
746 SNB_DISPLAY_MAX_SRWM,
747 SNB_DISPLAY_DFT_SRWM,
751 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
761 * intel_calculate_wm - calculate watermark level
762 * @clock_in_khz: pixel clock
763 * @wm: chip FIFO params
764 * @pixel_size: display pixel size
765 * @latency_ns: memory latency for the platform
767 * Calculate the watermark level (the level at which the display plane will
768 * start fetching from memory again). Each chip has a different display
769 * FIFO size and allocation, so the caller needs to figure that out and pass
770 * in the correct intel_watermark_params structure.
772 * As the pixel clock runs, the FIFO will be drained at a rate that depends
773 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
775 * past the watermark point. If the FIFO drains completely, a FIFO underrun
776 * will occur, and a display engine hang could result.
778 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
779 const struct intel_watermark_params *wm,
782 unsigned long latency_ns)
784 long entries_required, wm_size;
787 * Note: we need to make sure we don't overflow for various clock &
789 * clocks go from a few thousand to several hundred thousand.
790 * latency is usually a few thousand
792 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
794 entries_required = howmany(entries_required, wm->cacheline_size);
796 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
798 wm_size = fifo_size - (entries_required + wm->guard_size);
800 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
802 /* Don't promote wm_size to unsigned... */
803 if (wm_size > (long)wm->max_wm)
804 wm_size = wm->max_wm;
806 wm_size = wm->default_wm;
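/*
 * Worked example for the calculation above (illustrative numbers only, not
 * tied to a specific platform): clock_in_khz = 108000, pixel_size = 4,
 * latency_ns = 5000, a 96-entry FIFO, 64-byte cachelines and a guard of 2:
 *   entries_required = (108000 / 1000) * 4 * 5000 / 1000 = 2160 bytes
 *                    -> howmany(2160, 64) = 34 cachelines,
 *   wm_size = 96 - (34 + 2) = 60.
 */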
810 struct cxsr_latency {
813 unsigned long fsb_freq;
814 unsigned long mem_freq;
815 unsigned long display_sr;
816 unsigned long display_hpll_disable;
817 unsigned long cursor_sr;
818 unsigned long cursor_hpll_disable;
821 static const struct cxsr_latency cxsr_latency_table[] = {
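/* Each row follows the struct cxsr_latency member order: the two leading
 * flags are is_desktop and is_ddr3 (see intel_get_cxsr_latency() below),
 * then fsb_freq, mem_freq, display_sr, display_hpll_disable, cursor_sr
 * and cursor_hpll_disable.
 */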
822 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
823 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
824 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
825 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
826 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
828 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
829 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
830 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
831 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
832 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
834 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
835 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
836 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
837 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
838 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
840 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
841 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
842 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
843 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
844 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
846 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
847 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
848 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
849 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
850 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
852 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
853 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
854 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
855 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
856 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
859 const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
864 const struct cxsr_latency *latency;
867 if (fsb == 0 || mem == 0)
870 for (i = 0; i < DRM_ARRAY_SIZE(cxsr_latency_table); i++) {
871 latency = &cxsr_latency_table[i];
872 if (is_desktop == latency->is_desktop &&
873 is_ddr3 == latency->is_ddr3 &&
874 fsb == latency->fsb_freq && mem == latency->mem_freq)
878 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
883 void pineview_disable_cxsr(struct drm_device *dev)
885 struct drm_i915_private *dev_priv = dev->dev_private;
887 /* deactivate cxsr */
888 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
892 * Latency for FIFO fetches is dependent on several factors:
893 * - memory configuration (speed, channels)
895 * - current MCH state
896 * It can be fairly high in some situations, so here we assume a fairly
897 * pessimal value. It's a tradeoff between extra memory fetches (if we
898 * set this value too high, the FIFO will fetch frequently to stay full)
899 * and power consumption (set it too low to save power and we might see
900 * FIFO underruns and display "flicker").
902 * A value of 5us seems to be a good balance; safe for very low end
903 * platforms but not overly aggressive on lower latency configs.
905 static const int latency_ns = 5000;
907 int i9xx_get_fifo_size(struct drm_device *dev, int plane)
909 struct drm_i915_private *dev_priv = dev->dev_private;
910 uint32_t dsparb = I915_READ(DSPARB);
913 size = dsparb & 0x7f;
915 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
917 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
918 plane ? "B" : "A", size);
923 int i85x_get_fifo_size(struct drm_device *dev, int plane)
925 struct drm_i915_private *dev_priv = dev->dev_private;
926 uint32_t dsparb = I915_READ(DSPARB);
929 size = dsparb & 0x1ff;
931 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
932 size >>= 1; /* Convert to cachelines */
934 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
935 plane ? "B" : "A", size);
940 int i845_get_fifo_size(struct drm_device *dev, int plane)
942 struct drm_i915_private *dev_priv = dev->dev_private;
943 uint32_t dsparb = I915_READ(DSPARB);
946 size = dsparb & 0x7f;
947 size >>= 2; /* Convert to cachelines */
949 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
956 int i830_get_fifo_size(struct drm_device *dev, int plane)
958 struct drm_i915_private *dev_priv = dev->dev_private;
959 uint32_t dsparb = I915_READ(DSPARB);
962 size = dsparb & 0x7f;
963 size >>= 1; /* Convert to cachelines */
965 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
966 plane ? "B" : "A", size);
971 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
973 struct drm_crtc *crtc, *enabled = NULL;
975 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
976 if (crtc->enabled && crtc->fb) {
986 void pineview_update_wm(struct drm_device *dev)
988 struct drm_i915_private *dev_priv = dev->dev_private;
989 struct drm_crtc *crtc;
990 const struct cxsr_latency *latency;
994 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
995 dev_priv->fsb_freq, dev_priv->mem_freq);
997 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
998 pineview_disable_cxsr(dev);
1002 crtc = single_enabled_crtc(dev);
1004 int clock = crtc->mode.clock;
1005 int pixel_size = crtc->fb->bits_per_pixel / 8;
1008 wm = intel_calculate_wm(clock, &pineview_display_wm,
1009 pineview_display_wm.fifo_size,
1010 pixel_size, latency->display_sr);
1011 reg = I915_READ(DSPFW1);
1012 reg &= ~DSPFW_SR_MASK;
1013 reg |= wm << DSPFW_SR_SHIFT;
1014 I915_WRITE(DSPFW1, reg);
1015 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1018 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1019 pineview_display_wm.fifo_size,
1020 pixel_size, latency->cursor_sr);
1021 reg = I915_READ(DSPFW3);
1022 reg &= ~DSPFW_CURSOR_SR_MASK;
1023 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1024 I915_WRITE(DSPFW3, reg);
1026 /* Display HPLL off SR */
1027 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1028 pineview_display_hplloff_wm.fifo_size,
1029 pixel_size, latency->display_hpll_disable);
1030 reg = I915_READ(DSPFW3);
1031 reg &= ~DSPFW_HPLL_SR_MASK;
1032 reg |= wm & DSPFW_HPLL_SR_MASK;
1033 I915_WRITE(DSPFW3, reg);
1035 /* cursor HPLL off SR */
1036 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1037 pineview_display_hplloff_wm.fifo_size,
1038 pixel_size, latency->cursor_hpll_disable);
1039 reg = I915_READ(DSPFW3);
1040 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1041 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1042 I915_WRITE(DSPFW3, reg);
1043 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1047 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1048 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1050 pineview_disable_cxsr(dev);
1051 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1055 static bool g4x_compute_wm0(struct drm_device *dev,
1057 const struct intel_watermark_params *display,
1058 int display_latency_ns,
1059 const struct intel_watermark_params *cursor,
1060 int cursor_latency_ns,
1064 struct drm_crtc *crtc;
1065 int htotal, hdisplay, clock, pixel_size;
1066 int line_time_us, line_count;
1067 int entries, tlb_miss;
1069 crtc = intel_get_crtc_for_plane(dev, plane);
1070 if (crtc->fb == NULL || !crtc->enabled) {
1071 *cursor_wm = cursor->guard_size;
1072 *plane_wm = display->guard_size;
1076 htotal = crtc->mode.htotal;
1077 hdisplay = crtc->mode.hdisplay;
1078 clock = crtc->mode.clock;
1079 pixel_size = crtc->fb->bits_per_pixel / 8;
1081 /* Use the small buffer method to calculate plane watermark */
1082 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1083 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1085 entries += tlb_miss;
1086 entries = howmany(entries, display->cacheline_size);
1087 *plane_wm = entries + display->guard_size;
1088 if (*plane_wm > (int)display->max_wm)
1089 *plane_wm = display->max_wm;
1091 /* Use the large buffer method to calculate cursor watermark */
1092 line_time_us = ((htotal * 1000) / clock);
1093 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1094 entries = line_count * 64 * pixel_size;
1095 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1097 entries += tlb_miss;
1098 entries = howmany(entries, cursor->cacheline_size);
1099 *cursor_wm = entries + cursor->guard_size;
1100 if (*cursor_wm > (int)cursor->max_wm)
1101 *cursor_wm = (int)cursor->max_wm;
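/*
 * Illustrative numbers for the two methods above (not from real hardware):
 * with clock = 148500 kHz, 4 bytes/pixel, htotal = 2200 and 5000 ns latency,
 * the small-buffer plane estimate is
 *   (148500 * 4 / 1000) * 5000 / 1000 = 2970 bytes,
 * while the cursor path first derives line_time_us = 2200 * 1000 / 148500
 * = 14 us, then line_count = (5000 / 14 + 1000) / 1000 = 1 line, and sizes
 * the fetch as 1 * 64 * 4 = 256 bytes before converting to cachelines.
 */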
1107 * Check the wm result.
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
1113 static bool g4x_check_srwm(struct drm_device *dev,
1114 int display_wm, int cursor_wm,
1115 const struct intel_watermark_params *display,
1116 const struct intel_watermark_params *cursor)
1118 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1119 display_wm, cursor_wm);
1121 if (display_wm > display->max_wm) {
1122 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1123 display_wm, display->max_wm);
1127 if (cursor_wm > cursor->max_wm) {
1128 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1129 cursor_wm, cursor->max_wm);
1133 if (!(display_wm || cursor_wm)) {
1134 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1141 static bool g4x_compute_srwm(struct drm_device *dev,
1144 const struct intel_watermark_params *display,
1145 const struct intel_watermark_params *cursor,
1146 int *display_wm, int *cursor_wm)
1148 struct drm_crtc *crtc;
1149 int hdisplay, htotal, pixel_size, clock;
1150 unsigned long line_time_us;
1151 int line_count, line_size;
1156 *display_wm = *cursor_wm = 0;
1160 crtc = intel_get_crtc_for_plane(dev, plane);
1161 hdisplay = crtc->mode.hdisplay;
1162 htotal = crtc->mode.htotal;
1163 clock = crtc->mode.clock;
1164 pixel_size = crtc->fb->bits_per_pixel / 8;
1166 line_time_us = (htotal * 1000) / clock;
1167 line_count = (latency_ns / line_time_us + 1000) / 1000;
1168 line_size = hdisplay * pixel_size;
1170 /* Use the minimum of the small and large buffer method for primary */
1171 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1172 large = line_count * line_size;
1174 entries = howmany(min(small, large), display->cacheline_size);
1175 *display_wm = entries + display->guard_size;
1177 /* calculate the self-refresh watermark for display cursor */
1178 entries = line_count * pixel_size * 64;
1179 entries = howmany(entries, cursor->cacheline_size);
1180 *cursor_wm = entries + cursor->guard_size;
1182 return g4x_check_srwm(dev,
1183 *display_wm, *cursor_wm,
1187 #define single_plane_enabled(mask) ((mask) != 0 && powerof2(mask))
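/* e.g. single_plane_enabled(1 << 0) and (1 << 1) are true (exactly one bit
 * set), while 0 and (1 << 0 | 1 << 1) are not. */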
1189 void g4x_update_wm(struct drm_device *dev)
1191 static const int sr_latency_ns = 12000;
1192 struct drm_i915_private *dev_priv = dev->dev_private;
1193 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1194 int plane_sr, cursor_sr;
1195 unsigned int enabled = 0;
1197 if (g4x_compute_wm0(dev, 0,
1198 &g4x_wm_info, latency_ns,
1199 &g4x_cursor_wm_info, latency_ns,
1200 &planea_wm, &cursora_wm))
1203 if (g4x_compute_wm0(dev, 1,
1204 &g4x_wm_info, latency_ns,
1205 &g4x_cursor_wm_info, latency_ns,
1206 &planeb_wm, &cursorb_wm))
1209 plane_sr = cursor_sr = 0;
1210 if (single_plane_enabled(enabled) &&
1211 g4x_compute_srwm(dev, ffs(enabled) - 1,
1214 &g4x_cursor_wm_info,
1215 &plane_sr, &cursor_sr))
1216 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1218 I915_WRITE(FW_BLC_SELF,
1219 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1221 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1222 planea_wm, cursora_wm,
1223 planeb_wm, cursorb_wm,
1224 plane_sr, cursor_sr);
1227 (plane_sr << DSPFW_SR_SHIFT) |
1228 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1229 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1232 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1233 (cursora_wm << DSPFW_CURSORA_SHIFT));
1234 /* HPLL off in SR has some issues on G4x... disable it */
1236 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1237 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1240 void i965_update_wm(struct drm_device *dev)
1242 struct drm_i915_private *dev_priv = dev->dev_private;
1243 struct drm_crtc *crtc;
1247 /* Calc sr entries for one plane configs */
1248 crtc = single_enabled_crtc(dev);
1250 /* self-refresh has much higher latency */
1251 static const int sr_latency_ns = 12000;
1252 int clock = crtc->mode.clock;
1253 int htotal = crtc->mode.htotal;
1254 int hdisplay = crtc->mode.hdisplay;
1255 int pixel_size = crtc->fb->bits_per_pixel / 8;
1256 unsigned long line_time_us;
1259 line_time_us = ((htotal * 1000) / clock);
1261 /* Use ns/us then divide to preserve precision */
1262 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1263 pixel_size * hdisplay;
1264 entries = howmany(entries, I915_FIFO_LINE_SIZE);
1265 srwm = I965_FIFO_SIZE - entries;
1269 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1272 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1274 entries = howmany(entries, i965_cursor_wm_info.cacheline_size);
1275 cursor_sr = i965_cursor_wm_info.fifo_size -
1276 (entries + i965_cursor_wm_info.guard_size);
1278 if (cursor_sr > i965_cursor_wm_info.max_wm)
1279 cursor_sr = i965_cursor_wm_info.max_wm;
1281 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1282 "cursor %d\n", srwm, cursor_sr);
1284 if (IS_CRESTLINE(dev))
1285 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1287 /* Turn off self refresh if both pipes are enabled */
1288 if (IS_CRESTLINE(dev))
1289 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1293 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1296 /* 965 has limitations... */
1297 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1298 (8 << 16) | (8 << 8) | (8 << 0));
1299 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1300 /* update cursor SR watermark */
1301 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
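/*
 * Worked example for the self-refresh path above (illustrative numbers,
 * assuming a 512-entry FIFO and 64-byte FIFO lines): with a 14 us line time,
 * sr_latency_ns = 12000 gives (12000 / 14 + 1000) / 1000 = 1 line, so for a
 * 1920-pixel-wide, 4 bytes/pixel plane
 *   entries = 1 * 4 * 1920 = 7680 bytes -> howmany(7680, 64) = 120,
 *   srwm = 512 - 120 = 392.
 */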
1304 void i9xx_update_wm(struct drm_device *dev)
1306 struct drm_i915_private *dev_priv = dev->dev_private;
1307 const struct intel_watermark_params *wm_info;
1312 int planea_wm, planeb_wm;
1313 struct drm_crtc *crtc, *enabled = NULL;
1316 wm_info = &i945_wm_info;
1317 else if (!IS_GEN2(dev))
1318 wm_info = &i915_wm_info;
1320 wm_info = &i855_wm_info;
1322 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1323 crtc = intel_get_crtc_for_plane(dev, 0);
1324 if (crtc->enabled && crtc->fb) {
1325 planea_wm = intel_calculate_wm(crtc->mode.clock,
1327 crtc->fb->bits_per_pixel / 8,
1331 planea_wm = fifo_size - wm_info->guard_size;
1333 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1334 crtc = intel_get_crtc_for_plane(dev, 1);
1335 if (crtc->enabled && crtc->fb) {
1336 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1338 crtc->fb->bits_per_pixel / 8,
1340 if (enabled == NULL)
1345 planeb_wm = fifo_size - wm_info->guard_size;
1347 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1350 * Overlay gets an aggressive default since video jitter is bad.
1354 /* Play safe and disable self-refresh before adjusting watermarks. */
1355 if (IS_I945G(dev) || IS_I945GM(dev))
1356 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1357 else if (IS_I915GM(dev))
1358 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1360 /* Calc sr entries for one plane configs */
1361 if (HAS_FW_BLC(dev) && enabled) {
1362 /* self-refresh has much higher latency */
1363 static const int sr_latency_ns = 6000;
1364 int clock = enabled->mode.clock;
1365 int htotal = enabled->mode.htotal;
1366 int hdisplay = enabled->mode.hdisplay;
1367 int pixel_size = enabled->fb->bits_per_pixel / 8;
1368 unsigned long line_time_us;
1371 line_time_us = (htotal * 1000) / clock;
1373 /* Use ns/us then divide to preserve precision */
1374 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1375 pixel_size * hdisplay;
1376 entries = howmany(entries, wm_info->cacheline_size);
1377 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1378 srwm = wm_info->fifo_size - entries;
1382 if (IS_I945G(dev) || IS_I945GM(dev))
1383 I915_WRITE(FW_BLC_SELF,
1384 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1385 else if (IS_I915GM(dev))
1386 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1389 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1390 planea_wm, planeb_wm, cwm, srwm);
1392 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1393 fwater_hi = (cwm & 0x1f);
1395 /* Set request length to 8 cachelines per fetch */
1396 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1397 fwater_hi = fwater_hi | (1 << 8);
1399 I915_WRITE(FW_BLC, fwater_lo);
1400 I915_WRITE(FW_BLC2, fwater_hi);
1402 if (HAS_FW_BLC(dev)) {
1404 if (IS_I945G(dev) || IS_I945GM(dev))
1405 I915_WRITE(FW_BLC_SELF,
1406 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1407 else if (IS_I915GM(dev))
1408 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1409 DRM_DEBUG_KMS("memory self refresh enabled\n");
1411 DRM_DEBUG_KMS("memory self refresh disabled\n");
1415 void i830_update_wm(struct drm_device *dev)
1417 struct drm_i915_private *dev_priv = dev->dev_private;
1418 struct drm_crtc *crtc;
1422 crtc = single_enabled_crtc(dev);
1426 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1427 dev_priv->display.get_fifo_size(dev, 0),
1428 crtc->fb->bits_per_pixel / 8,
1430 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1431 fwater_lo |= (3<<8) | planea_wm;
1433 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1435 I915_WRITE(FW_BLC, fwater_lo);
1438 #define ILK_LP0_PLANE_LATENCY 700
1439 #define ILK_LP0_CURSOR_LATENCY 1300
1442 * Check the wm result.
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
1448 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1449 int fbc_wm, int display_wm, int cursor_wm,
1450 const struct intel_watermark_params *display,
1451 const struct intel_watermark_params *cursor)
1453 struct drm_i915_private *dev_priv = dev->dev_private;
1455 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1456 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1458 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1459 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1460 fbc_wm, SNB_FBC_MAX_SRWM, level);
/* fbc has its own way to disable FBC WM */
1463 I915_WRITE(DISP_ARB_CTL,
1464 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1468 if (display_wm > display->max_wm) {
1469 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1470 display_wm, SNB_DISPLAY_MAX_SRWM, level);
1474 if (cursor_wm > cursor->max_wm) {
1475 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1476 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1480 if (!(fbc_wm || display_wm || cursor_wm)) {
1481 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1489 * Compute watermark values of WM[1-3],
1491 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1493 const struct intel_watermark_params *display,
1494 const struct intel_watermark_params *cursor,
1495 int *fbc_wm, int *display_wm, int *cursor_wm)
1497 struct drm_crtc *crtc;
1498 unsigned long line_time_us;
1499 int hdisplay, htotal, pixel_size, clock;
1500 int line_count, line_size;
1505 *fbc_wm = *display_wm = *cursor_wm = 0;
1509 crtc = intel_get_crtc_for_plane(dev, plane);
1510 hdisplay = crtc->mode.hdisplay;
1511 htotal = crtc->mode.htotal;
1512 clock = crtc->mode.clock;
1513 pixel_size = crtc->fb->bits_per_pixel / 8;
1515 line_time_us = (htotal * 1000) / clock;
1516 line_count = (latency_ns / line_time_us + 1000) / 1000;
1517 line_size = hdisplay * pixel_size;
1519 /* Use the minimum of the small and large buffer method for primary */
1520 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1521 large = line_count * line_size;
1523 entries = howmany(min(small, large), display->cacheline_size);
1524 *display_wm = entries + display->guard_size;
1528 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1530 *fbc_wm = howmany(*display_wm * 64, line_size) + 2;
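/*
 * Example with illustrative numbers: for *display_wm = 38 on a
 * 1920-pixel-wide, 4 bytes/pixel scanout (line_size = 7680),
 *   *fbc_wm = howmany(38 * 64, 7680) + 2 = 1 + 2 = 3 lines.
 */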
1532 /* calculate the self-refresh watermark for display cursor */
1533 entries = line_count * pixel_size * 64;
1534 entries = howmany(entries, cursor->cacheline_size);
1535 *cursor_wm = entries + cursor->guard_size;
1537 return ironlake_check_srwm(dev, level,
1538 *fbc_wm, *display_wm, *cursor_wm,
1542 void ironlake_update_wm(struct drm_device *dev)
1544 struct drm_i915_private *dev_priv = dev->dev_private;
1545 int fbc_wm, plane_wm, cursor_wm;
1546 unsigned int enabled;
1549 if (g4x_compute_wm0(dev, 0,
1550 &ironlake_display_wm_info,
1551 ILK_LP0_PLANE_LATENCY,
1552 &ironlake_cursor_wm_info,
1553 ILK_LP0_CURSOR_LATENCY,
1554 &plane_wm, &cursor_wm)) {
1555 I915_WRITE(WM0_PIPEA_ILK,
1556 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1557 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1558 " plane %d, " "cursor: %d\n",
1559 plane_wm, cursor_wm);
1563 if (g4x_compute_wm0(dev, 1,
1564 &ironlake_display_wm_info,
1565 ILK_LP0_PLANE_LATENCY,
1566 &ironlake_cursor_wm_info,
1567 ILK_LP0_CURSOR_LATENCY,
1568 &plane_wm, &cursor_wm)) {
1569 I915_WRITE(WM0_PIPEB_ILK,
1570 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1571 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1572 " plane %d, cursor: %d\n",
1573 plane_wm, cursor_wm);
1578 * Calculate and update the self-refresh watermark only when one
1579 * display plane is used.
1581 I915_WRITE(WM3_LP_ILK, 0);
1582 I915_WRITE(WM2_LP_ILK, 0);
1583 I915_WRITE(WM1_LP_ILK, 0);
1585 if (!single_plane_enabled(enabled))
1587 enabled = ffs(enabled) - 1;
1590 if (!ironlake_compute_srwm(dev, 1, enabled,
1591 ILK_READ_WM1_LATENCY() * 500,
1592 &ironlake_display_srwm_info,
1593 &ironlake_cursor_srwm_info,
1594 &fbc_wm, &plane_wm, &cursor_wm))
1597 I915_WRITE(WM1_LP_ILK,
1599 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1600 (fbc_wm << WM1_LP_FBC_SHIFT) |
1601 (plane_wm << WM1_LP_SR_SHIFT) |
1605 if (!ironlake_compute_srwm(dev, 2, enabled,
1606 ILK_READ_WM2_LATENCY() * 500,
1607 &ironlake_display_srwm_info,
1608 &ironlake_cursor_srwm_info,
1609 &fbc_wm, &plane_wm, &cursor_wm))
1612 I915_WRITE(WM2_LP_ILK,
1614 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1615 (fbc_wm << WM1_LP_FBC_SHIFT) |
1616 (plane_wm << WM1_LP_SR_SHIFT) |
1620 * WM3 is unsupported on ILK, probably because we don't have latency
1621 * data for that power state
1625 void sandybridge_update_wm(struct drm_device *dev)
1627 struct drm_i915_private *dev_priv = dev->dev_private;
1628 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1630 int fbc_wm, plane_wm, cursor_wm;
1631 unsigned int enabled;
1634 if (g4x_compute_wm0(dev, 0,
1635 &sandybridge_display_wm_info, latency,
1636 &sandybridge_cursor_wm_info, latency,
1637 &plane_wm, &cursor_wm)) {
1638 val = I915_READ(WM0_PIPEA_ILK);
1639 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1640 I915_WRITE(WM0_PIPEA_ILK, val |
1641 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1642 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1643 " plane %d, " "cursor: %d\n",
1644 plane_wm, cursor_wm);
1648 if (g4x_compute_wm0(dev, 1,
1649 &sandybridge_display_wm_info, latency,
1650 &sandybridge_cursor_wm_info, latency,
1651 &plane_wm, &cursor_wm)) {
1652 val = I915_READ(WM0_PIPEB_ILK);
1653 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1654 I915_WRITE(WM0_PIPEB_ILK, val |
1655 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1656 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1657 " plane %d, cursor: %d\n",
1658 plane_wm, cursor_wm);
1662 /* IVB has 3 pipes */
1663 if (IS_IVYBRIDGE(dev) &&
1664 g4x_compute_wm0(dev, 2,
1665 &sandybridge_display_wm_info, latency,
1666 &sandybridge_cursor_wm_info, latency,
1667 &plane_wm, &cursor_wm)) {
1668 val = I915_READ(WM0_PIPEC_IVB);
1669 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1670 I915_WRITE(WM0_PIPEC_IVB, val |
1671 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1672 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1673 " plane %d, cursor: %d\n",
1674 plane_wm, cursor_wm);
1679 * Calculate and update the self-refresh watermark only when one
1680 * display plane is used.
 * SNB supports 3 levels of watermark.
 *
 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
 * and disabled in descending order.
1688 I915_WRITE(WM3_LP_ILK, 0);
1689 I915_WRITE(WM2_LP_ILK, 0);
1690 I915_WRITE(WM1_LP_ILK, 0);
1692 if (!single_plane_enabled(enabled) ||
1693 dev_priv->sprite_scaling_enabled)
1695 enabled = ffs(enabled) - 1;
1698 if (!ironlake_compute_srwm(dev, 1, enabled,
1699 SNB_READ_WM1_LATENCY() * 500,
1700 &sandybridge_display_srwm_info,
1701 &sandybridge_cursor_srwm_info,
1702 &fbc_wm, &plane_wm, &cursor_wm))
1705 I915_WRITE(WM1_LP_ILK,
1707 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1708 (fbc_wm << WM1_LP_FBC_SHIFT) |
1709 (plane_wm << WM1_LP_SR_SHIFT) |
1713 if (!ironlake_compute_srwm(dev, 2, enabled,
1714 SNB_READ_WM2_LATENCY() * 500,
1715 &sandybridge_display_srwm_info,
1716 &sandybridge_cursor_srwm_info,
1717 &fbc_wm, &plane_wm, &cursor_wm))
1720 I915_WRITE(WM2_LP_ILK,
1722 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1723 (fbc_wm << WM1_LP_FBC_SHIFT) |
1724 (plane_wm << WM1_LP_SR_SHIFT) |
1728 if (!ironlake_compute_srwm(dev, 3, enabled,
1729 SNB_READ_WM3_LATENCY() * 500,
1730 &sandybridge_display_srwm_info,
1731 &sandybridge_cursor_srwm_info,
1732 &fbc_wm, &plane_wm, &cursor_wm))
1735 I915_WRITE(WM3_LP_ILK,
1737 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1738 (fbc_wm << WM1_LP_FBC_SHIFT) |
1739 (plane_wm << WM1_LP_SR_SHIFT) |
1744 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1745 uint32_t sprite_width, int pixel_size,
1746 const struct intel_watermark_params *display,
1747 int display_latency_ns, int *sprite_wm)
1749 struct drm_crtc *crtc;
1751 int entries, tlb_miss;
1753 crtc = intel_get_crtc_for_plane(dev, plane);
1754 if (crtc->fb == NULL || !crtc->enabled) {
1755 *sprite_wm = display->guard_size;
1759 clock = crtc->mode.clock;
1761 /* Use the small buffer method to calculate the sprite watermark */
1762 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1763 tlb_miss = display->fifo_size*display->cacheline_size -
1766 entries += tlb_miss;
1767 entries = howmany(entries, display->cacheline_size);
1768 *sprite_wm = entries + display->guard_size;
1769 if (*sprite_wm > (int)display->max_wm)
1770 *sprite_wm = display->max_wm;
1776 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1777 uint32_t sprite_width, int pixel_size,
1778 const struct intel_watermark_params *display,
1779 int latency_ns, int *sprite_wm)
1781 struct drm_crtc *crtc;
1782 unsigned long line_time_us;
1784 int line_count, line_size;
1793 crtc = intel_get_crtc_for_plane(dev, plane);
1794 clock = crtc->mode.clock;
1800 line_time_us = (sprite_width * 1000) / clock;
1801 if (!line_time_us) {
1806 line_count = (latency_ns / line_time_us + 1000) / 1000;
1807 line_size = sprite_width * pixel_size;
1809 /* Use the minimum of the small and large buffer method for primary */
1810 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1811 large = line_count * line_size;
1813 entries = howmany(min(small, large), display->cacheline_size);
1814 *sprite_wm = entries + display->guard_size;
1816 return *sprite_wm > 0x3ff ? false : true;
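/*
 * Note: unlike the display/cursor self-refresh paths above, line_time_us
 * here is derived from sprite_width rather than the crtc htotal, and the
 * computed watermark is rejected (false) when it exceeds 0x3ff.
 */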
1819 void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
1820 uint32_t sprite_width, int pixel_size)
1822 struct drm_i915_private *dev_priv = dev->dev_private;
1823 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1830 reg = WM0_PIPEA_ILK;
1833 reg = WM0_PIPEB_ILK;
1836 reg = WM0_PIPEC_IVB;
1839 return; /* bad pipe */
1842 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
1843 &sandybridge_display_wm_info,
1844 latency, &sprite_wm);
1846 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
1851 val = I915_READ(reg);
1852 val &= ~WM0_PIPE_SPRITE_MASK;
1853 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
1854 DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
1857 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1859 &sandybridge_display_srwm_info,
1860 SNB_READ_WM1_LATENCY() * 500,
1863 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
1867 I915_WRITE(WM1S_LP_ILK, sprite_wm);
1869 /* Only IVB has two more LP watermarks for sprite */
1870 if (!IS_IVYBRIDGE(dev))
1873 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1875 &sandybridge_display_srwm_info,
1876 SNB_READ_WM2_LATENCY() * 500,
1879 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
1883 I915_WRITE(WM2S_LP_IVB, sprite_wm);
1885 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1887 &sandybridge_display_srwm_info,
1888 SNB_READ_WM3_LATENCY() * 500,
1891 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
1895 I915_WRITE(WM3S_LP_IVB, sprite_wm);
1899 * intel_update_watermarks - update FIFO watermark values based on current modes
1901 * Calculate watermark values for the various WM regs based on current mode
1902 * and plane configuration.
1904 * There are several cases to deal with here:
1905 * - normal (i.e. non-self-refresh)
1906 * - self-refresh (SR) mode
1907 * - lines are large relative to FIFO size (buffer can hold up to 2)
1908 * - lines are small relative to FIFO size (buffer can hold more than 2
1909 * lines), so need to account for TLB latency
1911 * The normal calculation is:
1912 * watermark = dotclock * bytes per pixel * latency
 * where latency is platform & configuration dependent (we assume pessimal
 * values here).
 *
 * The SR calculation is:
 *   watermark = (trunc(latency/line time) + 1) * surface width *
 *     bytes per pixel
 * where
 *   line time = htotal / dotclock
1921 * surface width = hdisplay for normal plane and 64 for cursor
1922 * and latency is assumed to be high, as above.
1924 * The final value programmed to the register should always be rounded up,
1925 * and include an extra 2 entries to account for clock crossings.
1927 * We don't use the sprite, so we can ignore that. And on Crestline we have
1928 * to set the non-SR watermarks to 8.
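/*
 * Worked example of the formulas above (illustrative numbers only): for a
 * 108000 kHz dot clock, 4 bytes per pixel and 5 us latency the normal
 * watermark covers 108000000 * 4 * 5e-6 = 2160 bytes of FIFO. For the SR
 * case with htotal = 1344, line time = 1344 / 108000000 s = ~12.4 us, so
 * (trunc(5 / 12.4) + 1) = 1 line of hdisplay * 4 bytes (or 64 * 4 bytes for
 * the cursor), rounded up and padded by the extra 2 entries noted above.
 */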
1930 void intel_update_watermarks(struct drm_device *dev)
1932 struct drm_i915_private *dev_priv = dev->dev_private;
1934 if (dev_priv->display.update_wm)
1935 dev_priv->display.update_wm(dev);
1938 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
1939 uint32_t sprite_width, int pixel_size)
1941 struct drm_i915_private *dev_priv = dev->dev_private;
1943 if (dev_priv->display.update_sprite_wm)
1944 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
1948 static struct drm_i915_gem_object *
1949 intel_alloc_context_page(struct drm_device *dev)
1951 struct drm_i915_gem_object *ctx;
1954 DRM_LOCK_ASSERT(dev);
1956 ctx = i915_gem_alloc_object(dev, 4096);
1958 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
1962 ret = i915_gem_object_pin(ctx, 4096, true);
1964 DRM_ERROR("failed to pin power context: %d\n", ret);
1968 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
1970 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
1977 i915_gem_object_unpin(ctx);
1979 drm_gem_object_unreference(&ctx->base);
1985 * Lock protecting IPS related data structures
1987 struct lock mchdev_lock;
1988 LOCK_SYSINIT(mchdev, &mchdev_lock, "mchdev", LK_CANRECURSE);
1990 /* Global for IPS driver to get at the current i915 device. Protected by
1992 struct drm_i915_private *i915_mch_dev;
1994 bool ironlake_set_drps(struct drm_device *dev, u8 val)
1996 struct drm_i915_private *dev_priv = dev->dev_private;
1999 rgvswctl = I915_READ16(MEMSWCTL);
2000 if (rgvswctl & MEMCTL_CMD_STS) {
2001 DRM_DEBUG("gpu busy, RCS change rejected\n");
2002 return false; /* still busy with another command */
2005 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2006 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2007 I915_WRITE16(MEMSWCTL, rgvswctl);
2008 POSTING_READ16(MEMSWCTL);
2010 rgvswctl |= MEMCTL_CMD_STS;
2011 I915_WRITE16(MEMSWCTL, rgvswctl);
2016 void ironlake_enable_drps(struct drm_device *dev)
2018 struct drm_i915_private *dev_priv = dev->dev_private;
2019 u32 rgvmodectl = I915_READ(MEMMODECTL);
2020 u8 fmax, fmin, fstart, vstart;
2022 /* Enable temp reporting */
2023 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2024 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2026 /* 100ms RC evaluation intervals */
2027 I915_WRITE(RCUPEI, 100000);
2028 I915_WRITE(RCDNEI, 100000);
2030 /* Set max/min thresholds to 90ms and 80ms respectively */
2031 I915_WRITE(RCBMAXAVG, 90000);
2032 I915_WRITE(RCBMINAVG, 80000);
2034 I915_WRITE(MEMIHYST, 1);
2036 /* Set up min, max, and cur for interrupt handling */
2037 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2038 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2039 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2040 MEMMODE_FSTART_SHIFT;
2042 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2045 dev_priv->fmax = fmax; /* IPS callback will increase this */
2046 dev_priv->fstart = fstart;
2048 dev_priv->max_delay = fstart;
2049 dev_priv->min_delay = fmin;
2050 dev_priv->cur_delay = fstart;
2052 DRM_DEBUG("fmax: %d, fmin: %d, fstart: %d\n",
2053 fmax, fmin, fstart);
2055 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2058 * Interrupts will be enabled in ironlake_irq_postinstall
2061 I915_WRITE(VIDSTART, vstart);
2062 POSTING_READ(VIDSTART);
2064 rgvmodectl |= MEMMODE_SWMODE_EN;
2065 I915_WRITE(MEMMODECTL, rgvmodectl);
2067 if (_intel_wait_for(dev,
2068 (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10,
2070 DRM_ERROR("stuck trying to change perf mode\n");
2073 ironlake_set_drps(dev, fstart);
2075 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2077 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2078 dev_priv->last_count2 = I915_READ(0x112f4);
2079 nanotime(&dev_priv->last_time2);
2082 void ironlake_disable_drps(struct drm_device *dev)
2084 struct drm_i915_private *dev_priv = dev->dev_private;
2085 u16 rgvswctl = I915_READ16(MEMSWCTL);
2087 /* Ack interrupts, disable EFC interrupt */
2088 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2089 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2090 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2091 I915_WRITE(DEIIR, DE_PCU_EVENT);
2092 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2094 /* Go back to the starting frequency */
2095 ironlake_set_drps(dev, dev_priv->fstart);
2097 rgvswctl |= MEMCTL_CMD_STS;
2098 I915_WRITE(MEMSWCTL, rgvswctl);
2103 void gen6_set_rps(struct drm_device *dev, u8 val)
2105 struct drm_i915_private *dev_priv = dev->dev_private;
2108 swreq = (val & 0x3ff) << 25;
2109 I915_WRITE(GEN6_RPNSWREQ, swreq);
2112 void gen6_disable_rps(struct drm_device *dev)
2114 struct drm_i915_private *dev_priv = dev->dev_private;
2116 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2117 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2118 I915_WRITE(GEN6_PMIER, 0);
2119 /* Complete PM interrupt masking here doesn't race with the rps work
2120 * item again unmasking PM interrupts because that is using a different
2121 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2122 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
2124 lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
2125 dev_priv->pm_iir = 0;
2126 lockmgr(&dev_priv->rps_lock, LK_RELEASE);
2128 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2131 static unsigned long intel_pxfreq(u32 vidfreq)
2134 int div = (vidfreq & 0x3f0000) >> 16;
2135 int post = (vidfreq & 0x3000) >> 12;
2136 int pre = (vidfreq & 0x7);
2141 freq = ((div * 133333) / ((1<<post) * pre));
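/*
 * Example decode of a made-up register value: vidfreq = 0x002d1003 gives
 * div = 0x2d (45), post = 1, pre = 3, so
 *   freq = (45 * 133333) / ((1 << 1) * 3) = 999997.
 */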
2146 static const struct cparams {
2152 { 1, 1333, 301, 28664 },
2153 { 1, 1066, 294, 24460 },
2154 { 1, 800, 294, 25192 },
2155 { 0, 1333, 276, 27605 },
2156 { 0, 1066, 276, 27605 },
2157 { 0, 800, 231, 23784 },
2160 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2162 u64 total_count, diff, ret;
2163 u32 count1, count2, count3, m = 0, c = 0;
2164 unsigned long now = jiffies_to_msecs(jiffies), diff1;
2167 diff1 = now - dev_priv->last_time1;
 * sysctl(8) reads the value of the sysctl twice in rapid
 * succession. There is a high chance that this happens in the
 * same timer tick. Use the cached value to avoid dividing by
 * zero and to give the hardware a chance to gather more samples.
2175 return (dev_priv->chipset_power);
2177 count1 = I915_READ(DMIEC);
2178 count2 = I915_READ(DDREC);
2179 count3 = I915_READ(CSIEC);
2181 total_count = count1 + count2 + count3;
2183 /* FIXME: handle per-counter overflow */
2184 if (total_count < dev_priv->last_count1) {
2185 diff = ~0UL - dev_priv->last_count1;
2186 diff += total_count;
2188 diff = total_count - dev_priv->last_count1;
2191 for (i = 0; i < DRM_ARRAY_SIZE(cparams); i++) {
2192 if (cparams[i].i == dev_priv->c_m &&
2193 cparams[i].t == dev_priv->r_t) {
2200 diff = diff / diff1;
2201 ret = ((m * diff) + c);
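	/*
	 * i.e. chipset power is modelled as a linear fit m * (counts per ms)
	 * + c, with m and c presumably copied from the matching cparams[] row
	 * in the elided branch body above.  For example, taking the
	 * { 1, 1333, 301, 28664 } row as (i, t, m, c), 100 counts/ms gives
	 * 301 * 100 + 28664 = 58764 in raw units.
	 */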
2204 dev_priv->last_count1 = total_count;
2205 dev_priv->last_time1 = now;
2207 dev_priv->chipset_power = ret;
2211 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2213 unsigned long m, x, b;
2216 tsfs = I915_READ(TSFS);
2218 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2219 x = I915_READ8(I915_TR1);
2221 b = tsfs & TSFS_INTR_MASK;
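	/*
	 * Illustrative reading of the formula below (invented values): with a
	 * TSFS slope m = 80, a TR1 reading x = 100 and an intercept b = 30,
	 * the result is (80 * 100) / 127 - 30 = 32.
	 */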
2223 return ((m * x) / 127) - b;
2226 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2228 static const struct v_table {
2229 u16 vd; /* in .1 mil */
2230 u16 vm; /* in .1 mil */
2361 if (dev_priv->info->is_mobile)
2362 return v_table[pxvid].vm;
2364 return v_table[pxvid].vd;
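	/*
	 * Interpretation (not stated explicitly in the code): v_table[] maps
	 * the 7-bit PXVID code to a voltage in tenths of a millivolt (the
	 * ".1 mil" annotation above), with separate desktop (vd) and mobile
	 * (vm) columns selected by dev_priv->info->is_mobile.
	 */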
2367 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2369 struct timespec now, diff1;
2371 unsigned long diffms;
2374 if (dev_priv->info->gen != 5)
2379 timespecsub(&diff1, &dev_priv->last_time2);
2381 /* Don't divide by 0 */
2382 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2386 count = I915_READ(GFXEC);
2388 if (count < dev_priv->last_count2) {
2389 diff = ~0UL - dev_priv->last_count2;
2392 diff = count - dev_priv->last_count2;
2395 dev_priv->last_count2 = count;
2396 dev_priv->last_time2 = now;
2398 /* More magic constants... */
2400 diff = diff / (diffms * 10);
2401 dev_priv->gfx_power = diff;
2404 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2406 unsigned long t, corr, state1, corr2, state2;
2409 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
2410 pxvid = (pxvid >> 24) & 0x7f;
2411 ext_v = pvid_to_extvid(dev_priv, pxvid);
2415 t = i915_mch_val(dev_priv);
2417 /* Revel in the empirically derived constants */
2419 /* Correction factor in 1/100000 units */
2421 corr = ((t * 2349) + 135940);
2423 corr = ((t * 964) + 29317);
2425 corr = ((t * 301) + 1004);
2427 corr = corr * ((150142 * state1) / 10000 - 78642);
2429 corr2 = (corr * dev_priv->corr);
2431 state2 = (corr2 * state1) / 10000;
2432 state2 /= 100; /* convert to mW */
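	/*
	 * Sketch of the computation above, assuming the elided branches pick
	 * one of the three linear corrections by temperature band: corr is a
	 * factor in 1/100000 units, scaled by an empirical function of state1
	 * (derived from the decoded PXVID voltage), then by the per-part fuse
	 * value dev_priv->corr, and finally applied to state1 and divided
	 * down to milliwatts.
	 */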
2434 i915_update_gfx_val(dev_priv);
2436 return dev_priv->gfx_power + state2;
2440 * i915_read_mch_val - return value for IPS use
2442 * Calculate and return a value for the IPS driver to use when deciding whether
2443 * we have thermal and power headroom to increase CPU or GPU power budget.
2445 unsigned long i915_read_mch_val(void)
2447 struct drm_i915_private *dev_priv;
2448 unsigned long chipset_val, graphics_val, ret = 0;
2450 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2453 dev_priv = i915_mch_dev;
2455 chipset_val = i915_chipset_val(dev_priv);
2456 graphics_val = i915_gfx_val(dev_priv);
2458 ret = chipset_val + graphics_val;
2461 lockmgr(&mchdev_lock, LK_RELEASE);
2467 * i915_gpu_raise - raise GPU frequency limit
2469 * Raise the limit; IPS indicates we have thermal headroom.
2471 bool i915_gpu_raise(void)
2473 struct drm_i915_private *dev_priv;
2476 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2477 if (!i915_mch_dev) {
2481 dev_priv = i915_mch_dev;
2483 if (dev_priv->max_delay > dev_priv->fmax)
2484 dev_priv->max_delay--;
2487 lockmgr(&mchdev_lock, LK_RELEASE);
2493 * i915_gpu_lower - lower GPU frequency limit
2495 * IPS indicates we're close to a thermal limit, so throttle back the GPU
2496 * frequency maximum.
2498 bool i915_gpu_lower(void)
2500 struct drm_i915_private *dev_priv;
2503 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2504 if (!i915_mch_dev) {
2508 dev_priv = i915_mch_dev;
2510 if (dev_priv->max_delay < dev_priv->min_delay)
2511 dev_priv->max_delay++;
2514 lockmgr(&mchdev_lock, LK_RELEASE);
2520  * i915_gpu_busy - indicate GPU busyness to IPS
2522 * Tell the IPS driver whether or not the GPU is busy.
2524 bool i915_gpu_busy(void)
2526 struct drm_i915_private *dev_priv;
2529 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2532 dev_priv = i915_mch_dev;
2534 ret = dev_priv->busy;
2537 lockmgr(&mchdev_lock, LK_RELEASE);
2543 * i915_gpu_turbo_disable - disable graphics turbo
2545 * Disable graphics turbo by resetting the max frequency and setting the
2546 * current frequency to the default.
2548 bool i915_gpu_turbo_disable(void)
2550 struct drm_i915_private *dev_priv;
2553 lockmgr(&mchdev_lock, LK_EXCLUSIVE);
2554 if (!i915_mch_dev) {
2558 dev_priv = i915_mch_dev;
2560 dev_priv->max_delay = dev_priv->fstart;
2562 if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
2566 lockmgr(&mchdev_lock, LK_RELEASE);
2571 void intel_init_emon(struct drm_device *dev)
2573 struct drm_i915_private *dev_priv = dev->dev_private;
2578 /* Disable to program */
2582 /* Program energy weights for various events */
2583 I915_WRITE(SDEW, 0x15040d00);
2584 I915_WRITE(CSIEW0, 0x007f0000);
2585 I915_WRITE(CSIEW1, 0x1e220004);
2586 I915_WRITE(CSIEW2, 0x04000004);
2588 for (i = 0; i < 5; i++)
2589 I915_WRITE(PEW + (i * 4), 0);
2590 for (i = 0; i < 3; i++)
2591 I915_WRITE(DEW + (i * 4), 0);
2593 /* Program P-state weights to account for frequency power adjustment */
2594 for (i = 0; i < 16; i++) {
2595 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
2596 unsigned long freq = intel_pxfreq(pxvidfreq);
2597 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
2602 val *= (freq / 1000);
2604 val /= (127*127*900);
2606 DRM_ERROR("bad pxval: %ld\n", val);
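	/*
	 * The weight computed above normalises each P-state's
	 * voltage^2 * frequency against a 127^2 * 900 reference; this assumes
	 * the elided steps square the VID and scale by 255 so that the result
	 * fits in one byte of the PXW registers packed below.
	 */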
2609 /* Render standby states get 0 weight */
2613 for (i = 0; i < 4; i++) {
2614 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
2615 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
2616 I915_WRITE(PXW + (i * 4), val);
2619 /* Adjust magic regs to magic values (more experimental results) */
2620 I915_WRITE(OGW0, 0);
2621 I915_WRITE(OGW1, 0);
2622 I915_WRITE(EG0, 0x00007f00);
2623 I915_WRITE(EG1, 0x0000000e);
2624 I915_WRITE(EG2, 0x000e0000);
2625 I915_WRITE(EG3, 0x68000300);
2626 I915_WRITE(EG4, 0x42000000);
2627 I915_WRITE(EG5, 0x00140031);
2631 for (i = 0; i < 8; i++)
2632 I915_WRITE(PXWL + (i * 4), 0);
2634 /* Enable PMON + select events */
2635 I915_WRITE(ECR, 0x80000019);
2637 lcfuse = I915_READ(LCFUSE02);
2639 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
2642 static int intel_enable_rc6(struct drm_device *dev)
2645 * Respect the kernel parameter if it is set
2647 if (i915_enable_rc6 >= 0)
2648 return i915_enable_rc6;
2651 * Disable RC6 on Ironlake
2653 if (INTEL_INFO(dev)->gen == 5)
2657 * Enable rc6 on Sandybridge if DMA remapping is disabled
2659 if (INTEL_INFO(dev)->gen == 6) {
2661 "Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
2662 intel_iommu_enabled ? "true" : "false",
2663 !intel_iommu_enabled ? "en" : "dis");
2664 return (intel_iommu_enabled ? 0 : INTEL_RC6_ENABLE);
2666 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2667 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2670 void gen6_enable_rps(struct drm_i915_private *dev_priv)
2672 struct drm_device *dev = dev_priv->dev;
2673 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2674 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2675 u32 pcu_mbox, rc6_mask = 0;
2677 int cur_freq, min_freq, max_freq;
2681 /* Here begins a magic sequence of register writes to enable
2682 * auto-downclocking.
2684 * Perhaps there might be some value in exposing these to
2687 I915_WRITE(GEN6_RC_STATE, 0);
2690	/* Clear the DBG register now so stale errors aren't confused with new ones */
2691 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2692 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2693 I915_WRITE(GTFIFODBG, gtfifodbg);
2696 gen6_gt_force_wake_get(dev_priv);
2698 /* disable the counters and set deterministic thresholds */
2699 I915_WRITE(GEN6_RC_CONTROL, 0);
2701 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2702 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2703 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2704 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2705 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2707 for (i = 0; i < I915_NUM_RINGS; i++)
2708 I915_WRITE(RING_MAX_IDLE(dev_priv->rings[i].mmio_base), 10);
2710 I915_WRITE(GEN6_RC_SLEEP, 0);
2711 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2712 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2713 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2714 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2716 rc6_mode = intel_enable_rc6(dev_priv->dev);
2717 if (rc6_mode & INTEL_RC6_ENABLE)
2718 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2720 if (rc6_mode & INTEL_RC6p_ENABLE)
2721 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2723 if (rc6_mode & INTEL_RC6pp_ENABLE)
2724 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2726 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2727 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
2728 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
2729 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
2731 I915_WRITE(GEN6_RC_CONTROL,
2733 GEN6_RC_CTL_EI_MODE(1) |
2734 GEN6_RC_CTL_HW_ENABLE);
2736 I915_WRITE(GEN6_RPNSWREQ,
2737 GEN6_FREQUENCY(10) |
2739 GEN6_AGGRESSIVE_TURBO);
2740 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2741 GEN6_FREQUENCY(12));
2743 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2744 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2747 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2748 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2749 I915_WRITE(GEN6_RP_UP_EI, 100000);
2750 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2751 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2752 I915_WRITE(GEN6_RP_CONTROL,
2753 GEN6_RP_MEDIA_TURBO |
2754 GEN6_RP_MEDIA_HW_MODE |
2755 GEN6_RP_MEDIA_IS_GFX |
2757 GEN6_RP_UP_BUSY_AVG |
2758 GEN6_RP_DOWN_IDLE_CONT);
2760 if (_intel_wait_for(dev,
2761 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2763 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2765 I915_WRITE(GEN6_PCODE_DATA, 0);
2766 I915_WRITE(GEN6_PCODE_MAILBOX,
2768 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2769 if (_intel_wait_for(dev,
2770 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2772 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2774 min_freq = (rp_state_cap & 0xff0000) >> 16;
2775 max_freq = rp_state_cap & 0xff;
2776 cur_freq = (gt_perf_status & 0xff00) >> 8;
2778 /* Check for overclock support */
2779 if (_intel_wait_for(dev,
2780 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2782 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2783 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2784 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2785 if (_intel_wait_for(dev,
2786 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500,
2788 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2789 if (pcu_mbox & (1<<31)) { /* OC supported */
2790 max_freq = pcu_mbox & 0xff;
2791 DRM_DEBUG("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2794 /* In units of 100MHz */
2795 dev_priv->max_delay = max_freq;
2796 dev_priv->min_delay = min_freq;
2797 dev_priv->cur_delay = cur_freq;
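	/*
	 * These delay fields cache the RP limits decoded above from
	 * GEN6_RP_STATE_CAP / GEN6_GT_PERF_STATUS; gen6_update_ring_freq()
	 * walks max_delay..min_delay below, and gen6_set_rps() writes values
	 * in the same units into GEN6_RPNSWREQ.
	 */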
2799 /* requires MSI enabled */
2800 I915_WRITE(GEN6_PMIER,
2801 GEN6_PM_MBOX_EVENT |
2802 GEN6_PM_THERMAL_EVENT |
2803 GEN6_PM_RP_DOWN_TIMEOUT |
2804 GEN6_PM_RP_UP_THRESHOLD |
2805 GEN6_PM_RP_DOWN_THRESHOLD |
2806 GEN6_PM_RP_UP_EI_EXPIRED |
2807 GEN6_PM_RP_DOWN_EI_EXPIRED);
2808 lockmgr(&dev_priv->rps_lock, LK_EXCLUSIVE);
2809 if (dev_priv->pm_iir != 0)
2810 kprintf("pm_iir %x\n", dev_priv->pm_iir);
2811 I915_WRITE(GEN6_PMIMR, 0);
2812 lockmgr(&dev_priv->rps_lock, LK_RELEASE);
2813 /* enable all PM interrupts */
2814 I915_WRITE(GEN6_PMINTRMSK, 0);
2816 gen6_gt_force_wake_put(dev_priv);
2820 void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2822 struct drm_device *dev;
2824 int gpu_freq, ia_freq, max_ia_freq;
2825 int scaling_factor = 180;
2828 dev = dev_priv->dev;
2830 max_ia_freq = cpufreq_quick_get_max(0);
2832 * Default to measured freq if none found, PCU will ensure we don't go
2836 max_ia_freq = tsc_freq;
2838 /* Convert from Hz to MHz */
2839 max_ia_freq /= 1000;
2841 tsc_freq = atomic_load_acq_64(&tsc_freq);
2842 max_ia_freq = tsc_freq / 1000 / 1000;
2848 * For each potential GPU frequency, load a ring frequency we'd like
2849 * to use for memory access. We do this by specifying the IA frequency
2850 * the PCU should use as a reference to determine the ring frequency.
2852 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
2854 int diff = dev_priv->max_delay - gpu_freq;
2858 * For GPU frequencies less than 750MHz, just use the lowest
2861 if (gpu_freq < min_freq)
2864 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2866 ia_freq = (ia_freq + d / 2) / d;
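		/*
		 * Rounded division: assuming the elided d is 100, this snaps
		 * ia_freq to the nearest 100 MHz ratio step.  For example,
		 * with max_ia_freq = 3400 and diff = 4 at the scaling_factor
		 * of 180, ia_freq = 3400 - (4 * 180) / 2 = 3040, which rounds
		 * to 30 units of 100 MHz.
		 */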
2868 I915_WRITE(GEN6_PCODE_DATA,
2869 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2871 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2872 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2873 if (_intel_wait_for(dev,
2874 (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2876 DRM_ERROR("pcode write of freq table timed out\n");
2884 void ironlake_init_clock_gating(struct drm_device *dev)
2886 struct drm_i915_private *dev_priv = dev->dev_private;
2887 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
2889 /* Required for FBC */
2890 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
2891 DPFCRUNIT_CLOCK_GATE_DISABLE |
2892 DPFDUNIT_CLOCK_GATE_DISABLE;
2893 /* Required for CxSR */
2894 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
2896 I915_WRITE(PCH_3DCGDIS0,
2897 MARIUNIT_CLOCK_GATE_DISABLE |
2898 SVSMUNIT_CLOCK_GATE_DISABLE);
2899 I915_WRITE(PCH_3DCGDIS1,
2900 VFMUNIT_CLOCK_GATE_DISABLE);
2902 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
2905	 * According to the spec, the following bits should be set in
2906	 * order to enable memory self-refresh:
2907	 * bits 22/21 of 0x42004,
2908	 * bit 5 of 0x42020,
2909	 * bit 15 of 0x45000.
2911 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2912 (I915_READ(ILK_DISPLAY_CHICKEN2) |
2913 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
2914 I915_WRITE(ILK_DSPCLK_GATE,
2915 (I915_READ(ILK_DSPCLK_GATE) |
2916 ILK_DPARB_CLK_GATE));
2917 I915_WRITE(DISP_ARB_CTL,
2918 (I915_READ(DISP_ARB_CTL) |
2920 I915_WRITE(WM3_LP_ILK, 0);
2921 I915_WRITE(WM2_LP_ILK, 0);
2922 I915_WRITE(WM1_LP_ILK, 0);
2925	 * Based on documentation from the hardware team, the following bits
2926	 * should be set unconditionally in order to enable FBC:
2927	 * bit 22 of 0x42000,
2928	 * bit 22 of 0x42004,
2929	 * bits 7, 8 and 9 of 0x42020.
2931 if (IS_IRONLAKE_M(dev)) {
2932 I915_WRITE(ILK_DISPLAY_CHICKEN1,
2933 I915_READ(ILK_DISPLAY_CHICKEN1) |
2935 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2936 I915_READ(ILK_DISPLAY_CHICKEN2) |
2938 I915_WRITE(ILK_DSPCLK_GATE,
2939 I915_READ(ILK_DSPCLK_GATE) |
2945 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2946 I915_READ(ILK_DISPLAY_CHICKEN2) |
2947 ILK_ELPIN_409_SELECT);
2948 I915_WRITE(_3D_CHICKEN2,
2949 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
2950 _3D_CHICKEN2_WM_READ_PIPELINED);
2953 void gen6_init_clock_gating(struct drm_device *dev)
2955 struct drm_i915_private *dev_priv = dev->dev_private;
2957 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
2959 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
2961 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2962 I915_READ(ILK_DISPLAY_CHICKEN2) |
2963 ILK_ELPIN_409_SELECT);
2965 I915_WRITE(WM3_LP_ILK, 0);
2966 I915_WRITE(WM2_LP_ILK, 0);
2967 I915_WRITE(WM1_LP_ILK, 0);
2969 I915_WRITE(GEN6_UCGCTL1,
2970 I915_READ(GEN6_UCGCTL1) |
2971 GEN6_BLBUNIT_CLOCK_GATE_DISABLE);
2973 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
2974 * gating disable must be set. Failure to set it results in
2975 * flickering pixels due to Z write ordering failures after
2976 * some amount of runtime in the Mesa "fire" demo, and Unigine
2977 * Sanctuary and Tropics, and apparently anything else with
2978 * alpha test or pixel discard.
2980 * According to the spec, bit 11 (RCCUNIT) must also be set,
2981	 * but we have not debugged actual test cases to confirm it.
2983 I915_WRITE(GEN6_UCGCTL2,
2984 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
2985 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
2988	 * According to the spec, the following bits should be
2989	 * set in order to enable memory self-refresh and FBC:
2990	 * bits 21 and 22 of 0x42000,
2991	 * bits 21 and 22 of 0x42004,
2992	 * bits 5 and 7 of 0x42020,
2993	 * bit 14 of 0x70180,
2994	 * bit 14 of 0x71180.
2996 I915_WRITE(ILK_DISPLAY_CHICKEN1,
2997 I915_READ(ILK_DISPLAY_CHICKEN1) |
2998 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
2999 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3000 I915_READ(ILK_DISPLAY_CHICKEN2) |
3001 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3002 I915_WRITE(ILK_DSPCLK_GATE,
3003 I915_READ(ILK_DSPCLK_GATE) |
3004 ILK_DPARB_CLK_GATE |
3007 for_each_pipe(pipe) {
3008 I915_WRITE(DSPCNTR(pipe),
3009 I915_READ(DSPCNTR(pipe)) |
3010 DISPPLANE_TRICKLE_FEED_DISABLE);
3011 intel_flush_display_plane(dev_priv, pipe);
3015 void ivybridge_init_clock_gating(struct drm_device *dev)
3017 struct drm_i915_private *dev_priv = dev->dev_private;
3019 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3021 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3023 I915_WRITE(WM3_LP_ILK, 0);
3024 I915_WRITE(WM2_LP_ILK, 0);
3025 I915_WRITE(WM1_LP_ILK, 0);
3027 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3028 * This implements the WaDisableRCZUnitClockGating workaround.
3030 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3032 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3034 I915_WRITE(IVB_CHICKEN3,
3035 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3036 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3038 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3039 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3040 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3042 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3043 I915_WRITE(GEN7_L3CNTLREG1,
3044 GEN7_WA_FOR_GEN7_L3_CONTROL);
3045 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3046 GEN7_WA_L3_CHICKEN_MODE);
3048 /* This is required by WaCatErrorRejectionIssue */
3049 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3050 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3051 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3053 for_each_pipe(pipe) {
3054 I915_WRITE(DSPCNTR(pipe),
3055 I915_READ(DSPCNTR(pipe)) |
3056 DISPPLANE_TRICKLE_FEED_DISABLE);
3057 intel_flush_display_plane(dev_priv, pipe);
3061 void g4x_init_clock_gating(struct drm_device *dev)
3063 struct drm_i915_private *dev_priv = dev->dev_private;
3064 uint32_t dspclk_gate;
3066 I915_WRITE(RENCLK_GATE_D1, 0);
3067 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3068 GS_UNIT_CLOCK_GATE_DISABLE |
3069 CL_UNIT_CLOCK_GATE_DISABLE);
3070 I915_WRITE(RAMCLK_GATE_D, 0);
3071 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3072 OVRUNIT_CLOCK_GATE_DISABLE |
3073 OVCUNIT_CLOCK_GATE_DISABLE;
3075 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3076 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3079 void crestline_init_clock_gating(struct drm_device *dev)
3081 struct drm_i915_private *dev_priv = dev->dev_private;
3083 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
3084 I915_WRITE(RENCLK_GATE_D2, 0);
3085 I915_WRITE(DSPCLK_GATE_D, 0);
3086 I915_WRITE(RAMCLK_GATE_D, 0);
3087 I915_WRITE16(DEUC, 0);
3090 void broadwater_init_clock_gating(struct drm_device *dev)
3092 struct drm_i915_private *dev_priv = dev->dev_private;
3094 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
3095 I965_RCC_CLOCK_GATE_DISABLE |
3096 I965_RCPB_CLOCK_GATE_DISABLE |
3097 I965_ISC_CLOCK_GATE_DISABLE |
3098 I965_FBC_CLOCK_GATE_DISABLE);
3099 I915_WRITE(RENCLK_GATE_D2, 0);
3102 void gen3_init_clock_gating(struct drm_device *dev)
3104 struct drm_i915_private *dev_priv = dev->dev_private;
3105 u32 dstate = I915_READ(D_STATE);
3107 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
3108 DSTATE_DOT_CLOCK_GATING;
3109 I915_WRITE(D_STATE, dstate);
3112 void i85x_init_clock_gating(struct drm_device *dev)
3114 struct drm_i915_private *dev_priv = dev->dev_private;
3116 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
3119 void i830_init_clock_gating(struct drm_device *dev)
3121 struct drm_i915_private *dev_priv = dev->dev_private;
3123 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3126 void ibx_init_clock_gating(struct drm_device *dev)
3128 struct drm_i915_private *dev_priv = dev->dev_private;
3131 * On Ibex Peak and Cougar Point, we need to disable clock
3132 * gating for the panel power sequencer or it will fail to
3133 * start up when no ports are active.
3135 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3138 void cpt_init_clock_gating(struct drm_device *dev)
3140 struct drm_i915_private *dev_priv = dev->dev_private;
3144 * On Ibex Peak and Cougar Point, we need to disable clock
3145 * gating for the panel power sequencer or it will fail to
3146 * start up when no ports are active.
3148 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3149 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3150 DPLS_EDP_PPS_FIX_DIS);
3151 /* Without this, mode sets may fail silently on FDI */
3153 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
3156 static void ironlake_teardown_rc6(struct drm_device *dev)
3158 struct drm_i915_private *dev_priv = dev->dev_private;
3160 if (dev_priv->renderctx) {
3161 i915_gem_object_unpin(dev_priv->renderctx);
3162 drm_gem_object_unreference(&dev_priv->renderctx->base);
3163 dev_priv->renderctx = NULL;
3166 if (dev_priv->pwrctx) {
3167 i915_gem_object_unpin(dev_priv->pwrctx);
3168 drm_gem_object_unreference(&dev_priv->pwrctx->base);
3169 dev_priv->pwrctx = NULL;
3173 void ironlake_disable_rc6(struct drm_device *dev)
3175 struct drm_i915_private *dev_priv = dev->dev_private;
3177 if (I915_READ(PWRCTXA)) {
3178 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3179 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3180 (void)_intel_wait_for(dev,
3181 ((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3184 I915_WRITE(PWRCTXA, 0);
3185 POSTING_READ(PWRCTXA);
3187 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3188 POSTING_READ(RSTDBYCTL);
3191 ironlake_teardown_rc6(dev);
3194 static int ironlake_setup_rc6(struct drm_device *dev)
3196 struct drm_i915_private *dev_priv = dev->dev_private;
3198 if (dev_priv->renderctx == NULL)
3199 dev_priv->renderctx = intel_alloc_context_page(dev);
3200 if (!dev_priv->renderctx)
3203 if (dev_priv->pwrctx == NULL)
3204 dev_priv->pwrctx = intel_alloc_context_page(dev);
3205 if (!dev_priv->pwrctx) {
3206 ironlake_teardown_rc6(dev);
3213 void ironlake_enable_rc6(struct drm_device *dev)
3215 struct drm_i915_private *dev_priv = dev->dev_private;
3218 /* rc6 disabled by default due to repeated reports of hanging during
3221 if (!intel_enable_rc6(dev))
3225 ret = ironlake_setup_rc6(dev);
3232 * GPU can automatically power down the render unit if given a page
3235 ret = BEGIN_LP_RING(6);
3237 ironlake_teardown_rc6(dev);
3242 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3243 OUT_RING(MI_SET_CONTEXT);
3244 OUT_RING(dev_priv->renderctx->gtt_offset |
3246 MI_SAVE_EXT_STATE_EN |
3247 MI_RESTORE_EXT_STATE_EN |
3248 MI_RESTORE_INHIBIT);
3249 OUT_RING(MI_SUSPEND_FLUSH);
3255 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3256	 * does an implicit flush; combined with the MI_FLUSH above, it should
3257	 * be safe to assume that renderctx is valid.
3259 ret = intel_wait_ring_idle(LP_RING(dev_priv));
3261 DRM_ERROR("failed to enable ironlake power savings\n");
3262 ironlake_teardown_rc6(dev);
3267 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
3268 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3272 void intel_init_clock_gating(struct drm_device *dev)
3274 struct drm_i915_private *dev_priv = dev->dev_private;
3276 dev_priv->display.init_clock_gating(dev);
3278 if (dev_priv->display.init_pch_clock_gating)
3279 dev_priv->display.init_pch_clock_gating(dev);