2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
28 * $FreeBSD: head/sys/dev/drm2/i915/intel_ringbuffer.c 253709 2013-07-27 16:42:29Z kib $
31 #include <dev/drm/drmP.h>
32 #include <dev/drm/drm.h>
35 #include "intel_drv.h"
36 #include "intel_ringbuffer.h"
37 #include <sys/sched.h>
40 * 965+ support PIPE_CONTROL commands, which provide finer grained control
41 * over cache flushing.
44 struct drm_i915_gem_object *obj;
45 volatile u32 *cpu_page;
50 i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
53 if (ring->trace_irq_seqno == 0) {
54 lockmgr(&ring->irq_lock, LK_EXCLUSIVE);
55 if (ring->irq_get(ring))
56 ring->trace_irq_seqno = seqno;
57 lockmgr(&ring->irq_lock, LK_RELEASE);
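/*
 * ring_space() reports how many bytes can still be written before the tail
 * would catch the head; 8 bytes are kept in reserve so that head == tail
 * never looks like a full ring after a wrap.
 */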
61 static inline int ring_space(struct intel_ring_buffer *ring)
63 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
70 render_ring_flush(struct intel_ring_buffer *ring,
71 uint32_t invalidate_domains,
72 uint32_t flush_domains)
74 struct drm_device *dev = ring->dev;
81 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
82 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
83 * also flushed at 2d versus 3d pipeline switches.
87 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
88 * MI_READ_FLUSH is set, and is always flushed on 965.
90 * I915_GEM_DOMAIN_COMMAND may not exist?
92 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
93 * invalidated when MI_EXE_FLUSH is set.
95 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
96 * invalidated with every MI_FLUSH.
100 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
101 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
102 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
103 * are flushed at any MI_FLUSH.
106 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
107 if ((invalidate_domains|flush_domains) &
108 I915_GEM_DOMAIN_RENDER)
109 cmd &= ~MI_NO_WRITE_FLUSH;
110 if (INTEL_INFO(dev)->gen < 4) {
112 * On the 965, the sampler cache always gets flushed
113 * and this bit is reserved.
115 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
116 cmd |= MI_READ_FLUSH;
118 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
121 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
122 (IS_G4X(dev) || IS_GEN5(dev)))
123 cmd |= MI_INVALIDATE_ISP;
125 ret = intel_ring_begin(ring, 2);
129 intel_ring_emit(ring, cmd);
130 intel_ring_emit(ring, MI_NOOP);
131 intel_ring_advance(ring);
137 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
138 * implementing two workarounds on gen6. From section 1.4.7.1
139 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
141 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
142 * produced by non-pipelined state commands), software needs to first
143 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 0.
146 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
147 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
149 * And the workaround for these two requires this workaround first:
151 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
152 * BEFORE the pipe-control with a post-sync op and no write-cache flushes.
155 * And this last workaround is tricky because of the requirements on
156 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM volume 2 part 1:
159 * "1 of the following must also be set:
160 * - Render Target Cache Flush Enable ([12] of DW1)
161 * - Depth Cache Flush Enable ([0] of DW1)
162 * - Stall at Pixel Scoreboard ([1] of DW1)
163 * - Depth Stall ([13] of DW1)
164 * - Post-Sync Operation ([13] of DW1)
165 * - Notify Enable ([8] of DW1)"
167 * The cache flushes require the workaround flush that triggered this
168 * one, so we can't use it. Depth stall would trigger the same.
169 * Post-sync nonzero is what triggered this second workaround, so we
170 * can't use that one either. Notify enable is IRQs, which aren't
171 * really our business. That leaves only stall at scoreboard.
174 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
176 struct pipe_control *pc = ring->private;
177 u32 scratch_addr = pc->gtt_offset + 128;
181 ret = intel_ring_begin(ring, 6);
185 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
186 intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
187 PIPE_CONTROL_STALL_AT_SCOREBOARD);
188 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
189 intel_ring_emit(ring, 0); /* low dword */
190 intel_ring_emit(ring, 0); /* high dword */
191 intel_ring_emit(ring, MI_NOOP);
192 intel_ring_advance(ring);
194 ret = intel_ring_begin(ring, 6);
198 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
199 intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
200 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
201 intel_ring_emit(ring, 0);
202 intel_ring_emit(ring, 0);
203 intel_ring_emit(ring, MI_NOOP);
204 intel_ring_advance(ring);
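/*
 * gen6_render_ring_flush() emits a single PIPE_CONTROL that flushes and
 * invalidates all of the render caches, preceded by the post-sync-nonzero
 * workaround sequence above.
 */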
210 gen6_render_ring_flush(struct intel_ring_buffer *ring,
211 u32 invalidate_domains, u32 flush_domains)
214 struct pipe_control *pc = ring->private;
215 u32 scratch_addr = pc->gtt_offset + 128;
218 /* Force SNB workarounds for PIPE_CONTROL flushes */
219 intel_emit_post_sync_nonzero_flush(ring);
221 /* Just flush everything. Experiments have shown that reducing the
222 * number of bits based on the write domains has little performance impact. */
225 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
226 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
227 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
228 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
229 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
230 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
231 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
233 ret = intel_ring_begin(ring, 6);
237 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
238 intel_ring_emit(ring, flags);
239 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
240 intel_ring_emit(ring, 0); /* lower dword */
241 intel_ring_emit(ring, 0); /* upper dword */
242 intel_ring_emit(ring, MI_NOOP);
243 intel_ring_advance(ring);
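/* ring_write_tail() simply writes the new tail pointer to the ring's TAIL register. */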
248 static void ring_write_tail(struct intel_ring_buffer *ring,
251 drm_i915_private_t *dev_priv = ring->dev->dev_private;
252 I915_WRITE_TAIL(ring, value);
255 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
257 drm_i915_private_t *dev_priv = ring->dev->dev_private;
258 uint32_t acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
259 RING_ACTHD(ring->mmio_base) : ACTHD;
261 return I915_READ(acthd_reg);
264 static int init_ring_common(struct intel_ring_buffer *ring)
266 drm_i915_private_t *dev_priv = ring->dev->dev_private;
267 struct drm_i915_gem_object *obj = ring->obj;
270 /* Stop the ring if it's running. */
271 I915_WRITE_CTL(ring, 0);
272 I915_WRITE_HEAD(ring, 0);
273 ring->write_tail(ring, 0);
275 /* Initialize the ring. */
276 I915_WRITE_START(ring, obj->gtt_offset);
277 head = I915_READ_HEAD(ring) & HEAD_ADDR;
279 /* G45 ring initialization fails to reset head to zero */
281 DRM_DEBUG("%s head not reset to zero "
282 "ctl %08x head %08x tail %08x start %08x\n",
285 I915_READ_HEAD(ring),
286 I915_READ_TAIL(ring),
287 I915_READ_START(ring));
289 I915_WRITE_HEAD(ring, 0);
291 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
292 DRM_ERROR("failed to set %s head to zero "
293 "ctl %08x head %08x tail %08x start %08x\n",
296 I915_READ_HEAD(ring),
297 I915_READ_TAIL(ring),
298 I915_READ_START(ring));
303 ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
306 /* If the head is still not zero, the ring is dead */
307 if (_intel_wait_for(ring->dev,
308 (I915_READ_CTL(ring) & RING_VALID) != 0 &&
309 I915_READ_START(ring) == obj->gtt_offset &&
310 (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
312 DRM_ERROR("%s initialization failed "
313 "ctl %08x head %08x tail %08x start %08x\n",
316 I915_READ_HEAD(ring),
317 I915_READ_TAIL(ring),
318 I915_READ_START(ring));
322 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
323 i915_kernel_lost_context(ring->dev);
325 ring->head = I915_READ_HEAD(ring);
326 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
327 ring->space = ring_space(ring);
334 init_pipe_control(struct intel_ring_buffer *ring)
336 struct pipe_control *pc;
337 struct drm_i915_gem_object *obj;
343 pc = kmalloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK);
347 obj = i915_gem_alloc_object(ring->dev, 4096);
349 DRM_ERROR("Failed to allocate seqno page\n");
354 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
356 ret = i915_gem_object_pin(obj, 4096, true);
360 pc->gtt_offset = obj->gtt_offset;
361 pc->cpu_page = (uint32_t *)kmem_alloc_nofault(&kernel_map, PAGE_SIZE, PAGE_SIZE);
362 if (pc->cpu_page == NULL)
364 pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
365 pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
366 (vm_offset_t)pc->cpu_page + PAGE_SIZE);
373 i915_gem_object_unpin(obj);
375 drm_gem_object_unreference(&obj->base);
377 drm_free(pc, DRM_I915_GEM);
382 cleanup_pipe_control(struct intel_ring_buffer *ring)
384 struct pipe_control *pc = ring->private;
385 struct drm_i915_gem_object *obj;
391 pmap_qremove((vm_offset_t)pc->cpu_page, 1);
392 kmem_free(&kernel_map, (uintptr_t)pc->cpu_page, PAGE_SIZE);
393 i915_gem_object_unpin(obj);
394 drm_gem_object_unreference(&obj->base);
396 drm_free(pc, DRM_I915_GEM);
397 ring->private = NULL;
400 static int init_render_ring(struct intel_ring_buffer *ring)
402 struct drm_device *dev = ring->dev;
403 struct drm_i915_private *dev_priv = dev->dev_private;
404 int ret = init_ring_common(ring);
406 if (INTEL_INFO(dev)->gen > 3) {
407 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
408 I915_WRITE(MI_MODE, mode);
410 I915_WRITE(GFX_MODE_GEN7,
411 GFX_MODE_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
412 GFX_MODE_ENABLE(GFX_REPLAY_MODE));
415 if (INTEL_INFO(dev)->gen >= 5) {
416 ret = init_pipe_control(ring);
423 /* From the Sandybridge PRM, volume 1 part 3, page 24:
424 * "If this bit is set, STCunit will have LRA as replacement
425 * policy. [...] This bit must be reset. LRA replacement
426 * policy is not supported."
428 I915_WRITE(CACHE_MODE_0,
429 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
432 if (INTEL_INFO(dev)->gen >= 6) {
434 INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
440 static void render_ring_cleanup(struct intel_ring_buffer *ring)
445 cleanup_pipe_control(ring);
449 update_mboxes(struct intel_ring_buffer *ring,
453 intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
454 MI_SEMAPHORE_GLOBAL_GTT |
455 MI_SEMAPHORE_REGISTER |
456 MI_SEMAPHORE_UPDATE);
457 intel_ring_emit(ring, seqno);
458 intel_ring_emit(ring, mmio_offset);
462 * gen6_add_request - Update the semaphore mailbox registers
464 * @ring - ring that is adding a request
465 * @seqno - return seqno stuck into the ring
467 * Update the mailbox registers in the *other* rings with the current seqno.
468 * This acts like a signal in the canonical semaphore.
471 gen6_add_request(struct intel_ring_buffer *ring,
478 ret = intel_ring_begin(ring, 10);
482 mbox1_reg = ring->signal_mbox[0];
483 mbox2_reg = ring->signal_mbox[1];
485 *seqno = i915_gem_next_request_seqno(ring);
487 update_mboxes(ring, *seqno, mbox1_reg);
488 update_mboxes(ring, *seqno, mbox2_reg);
489 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
490 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
491 intel_ring_emit(ring, *seqno);
492 intel_ring_emit(ring, MI_USER_INTERRUPT);
493 intel_ring_advance(ring);
499 * intel_ring_sync - sync the waiter to the signaller on seqno
501 * @waiter - ring that is waiting
502 * @signaller - ring which has, or will signal
503 * @seqno - seqno which the waiter will block on
506 intel_ring_sync(struct intel_ring_buffer *waiter,
507 struct intel_ring_buffer *signaller,
512 u32 dw1 = MI_SEMAPHORE_MBOX |
513 MI_SEMAPHORE_COMPARE |
514 MI_SEMAPHORE_REGISTER;
516 ret = intel_ring_begin(waiter, 4);
520 intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
521 intel_ring_emit(waiter, seqno);
522 intel_ring_emit(waiter, 0);
523 intel_ring_emit(waiter, MI_NOOP);
524 intel_ring_advance(waiter);
529 int render_ring_sync_to(struct intel_ring_buffer *waiter,
530 struct intel_ring_buffer *signaller, u32 seqno);
531 int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
532 struct intel_ring_buffer *signaller, u32 seqno);
533 int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
534 struct intel_ring_buffer *signaller, u32 seqno);
536 /* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
538 render_ring_sync_to(struct intel_ring_buffer *waiter,
539 struct intel_ring_buffer *signaller,
542 KASSERT(signaller->semaphore_register[RCS] != MI_SEMAPHORE_SYNC_INVALID,
543 ("valid RCS semaphore"));
544 return intel_ring_sync(waiter,
550 /* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
552 gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
553 struct intel_ring_buffer *signaller,
556 KASSERT(signaller->semaphore_register[VCS] != MI_SEMAPHORE_SYNC_INVALID,
557 ("Valid VCS semaphore"));
558 return intel_ring_sync(waiter,
564 /* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
566 gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
567 struct intel_ring_buffer *signaller,
570 KASSERT(signaller->semaphore_register[BCS] != MI_SEMAPHORE_SYNC_INVALID,
571 ("Valid BCS semaphore"));
572 return intel_ring_sync(waiter,
578 #define PIPE_CONTROL_FLUSH(ring__, addr__) \
580 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
581 PIPE_CONTROL_DEPTH_STALL); \
582 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
583 intel_ring_emit(ring__, 0); \
584 intel_ring_emit(ring__, 0); \
588 pc_render_add_request(struct intel_ring_buffer *ring,
591 u32 seqno = i915_gem_next_request_seqno(ring);
592 struct pipe_control *pc = ring->private;
593 u32 scratch_addr = pc->gtt_offset + 128;
596 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
597 * incoherent with writes to memory, i.e. completely fubar,
598 * so we need to use PIPE_NOTIFY instead.
600 * However, we also need to work around the qword write
601 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
602 * memory before requesting an interrupt.
604 ret = intel_ring_begin(ring, 32);
608 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
609 PIPE_CONTROL_WRITE_FLUSH |
610 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
611 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
612 intel_ring_emit(ring, seqno);
613 intel_ring_emit(ring, 0);
614 PIPE_CONTROL_FLUSH(ring, scratch_addr);
615 scratch_addr += 128; /* write to separate cachelines */
616 PIPE_CONTROL_FLUSH(ring, scratch_addr);
618 PIPE_CONTROL_FLUSH(ring, scratch_addr);
620 PIPE_CONTROL_FLUSH(ring, scratch_addr);
622 PIPE_CONTROL_FLUSH(ring, scratch_addr);
624 PIPE_CONTROL_FLUSH(ring, scratch_addr);
625 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
626 PIPE_CONTROL_WRITE_FLUSH |
627 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
628 PIPE_CONTROL_NOTIFY);
629 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
630 intel_ring_emit(ring, seqno);
631 intel_ring_emit(ring, 0);
632 intel_ring_advance(ring);
639 render_ring_add_request(struct intel_ring_buffer *ring,
642 u32 seqno = i915_gem_next_request_seqno(ring);
645 ret = intel_ring_begin(ring, 4);
649 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
650 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
651 intel_ring_emit(ring, seqno);
652 intel_ring_emit(ring, MI_USER_INTERRUPT);
653 intel_ring_advance(ring);
660 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
662 struct drm_device *dev = ring->dev;
664 /* Workaround to force correct ordering between irq and seqno writes on
665 * ivb (and maybe also on snb) by reading from a CS register (like
666 * ACTHD) before reading the status page. */
667 if (/* IS_GEN6(dev) || */IS_GEN7(dev))
668 intel_ring_get_active_head(ring);
669 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
673 ring_get_seqno(struct intel_ring_buffer *ring)
675 if (ring->status_page.page_addr == NULL)
677 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
681 pc_render_get_seqno(struct intel_ring_buffer *ring)
683 struct pipe_control *pc = ring->private;
685 return pc->cpu_page[0];
691 ironlake_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
693 dev_priv->gt_irq_mask &= ~mask;
694 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
699 ironlake_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
701 dev_priv->gt_irq_mask |= mask;
702 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
707 i915_enable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
709 dev_priv->irq_mask &= ~mask;
710 I915_WRITE(IMR, dev_priv->irq_mask);
715 i915_disable_irq(drm_i915_private_t *dev_priv, uint32_t mask)
717 dev_priv->irq_mask |= mask;
718 I915_WRITE(IMR, dev_priv->irq_mask);
723 render_ring_get_irq(struct intel_ring_buffer *ring)
725 struct drm_device *dev = ring->dev;
726 drm_i915_private_t *dev_priv = dev->dev_private;
728 if (!dev->irq_enabled)
731 KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0);
732 if (ring->irq_refcount++ == 0) {
733 if (HAS_PCH_SPLIT(dev))
734 ironlake_enable_irq(dev_priv,
735 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
737 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
744 render_ring_put_irq(struct intel_ring_buffer *ring)
746 struct drm_device *dev = ring->dev;
747 drm_i915_private_t *dev_priv = dev->dev_private;
749 KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0);
750 if (--ring->irq_refcount == 0) {
751 if (HAS_PCH_SPLIT(dev))
752 ironlake_disable_irq(dev_priv,
756 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
760 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
762 struct drm_device *dev = ring->dev;
763 drm_i915_private_t *dev_priv = dev->dev_private;
766 /* The ring status page addresses are no longer next to the rest of
767 * the ring registers as of gen7.
772 mmio = RENDER_HWS_PGA_GEN7;
775 mmio = BLT_HWS_PGA_GEN7;
778 mmio = BSD_HWS_PGA_GEN7;
781 } else if (IS_GEN6(dev)) {
782 mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
784 mmio = RING_HWS_PGA(ring->mmio_base);
787 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
792 bsd_ring_flush(struct intel_ring_buffer *ring,
793 uint32_t invalidate_domains,
794 uint32_t flush_domains)
798 ret = intel_ring_begin(ring, 2);
802 intel_ring_emit(ring, MI_FLUSH);
803 intel_ring_emit(ring, MI_NOOP);
804 intel_ring_advance(ring);
809 ring_add_request(struct intel_ring_buffer *ring,
815 ret = intel_ring_begin(ring, 4);
819 seqno = i915_gem_next_request_seqno(ring);
821 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
822 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
823 intel_ring_emit(ring, seqno);
824 intel_ring_emit(ring, MI_USER_INTERRUPT);
825 intel_ring_advance(ring);
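/*
 * gen6_ring_get_irq()/gen6_ring_put_irq() reference-count user interrupts for
 * a ring: the first get unmasks the ring's bit in its IMR and in the global GT
 * interrupt mask, and the last put masks it again.  Forcewake is taken in get
 * and released in put so the GT cannot power down while an interrupt is still
 * expected.
 */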
832 gen6_ring_get_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
834 struct drm_device *dev = ring->dev;
835 drm_i915_private_t *dev_priv = dev->dev_private;
837 if (!dev->irq_enabled)
840 gen6_gt_force_wake_get(dev_priv);
842 KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0);
843 if (ring->irq_refcount++ == 0) {
844 ring->irq_mask &= ~rflag;
845 I915_WRITE_IMR(ring, ring->irq_mask);
846 ironlake_enable_irq(dev_priv, gflag);
853 gen6_ring_put_irq(struct intel_ring_buffer *ring, uint32_t gflag, uint32_t rflag)
855 struct drm_device *dev = ring->dev;
856 drm_i915_private_t *dev_priv = dev->dev_private;
858 KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0);
859 if (--ring->irq_refcount == 0) {
860 ring->irq_mask |= rflag;
861 I915_WRITE_IMR(ring, ring->irq_mask);
862 ironlake_disable_irq(dev_priv, gflag);
865 gen6_gt_force_wake_put(dev_priv);
869 bsd_ring_get_irq(struct intel_ring_buffer *ring)
871 struct drm_device *dev = ring->dev;
872 drm_i915_private_t *dev_priv = dev->dev_private;
874 if (!dev->irq_enabled)
877 KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0);
878 if (ring->irq_refcount++ == 0) {
880 i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
882 ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
888 bsd_ring_put_irq(struct intel_ring_buffer *ring)
890 struct drm_device *dev = ring->dev;
891 drm_i915_private_t *dev_priv = dev->dev_private;
893 KKASSERT(lockstatus(&ring->irq_lock, curthread) != 0);
894 if (--ring->irq_refcount == 0) {
896 i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
898 ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
903 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, uint32_t offset,
908 ret = intel_ring_begin(ring, 2);
912 intel_ring_emit(ring,
913 MI_BATCH_BUFFER_START | (2 << 6) |
914 MI_BATCH_NON_SECURE_I965);
915 intel_ring_emit(ring, offset);
916 intel_ring_advance(ring);
922 render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
923 uint32_t offset, uint32_t len)
925 struct drm_device *dev = ring->dev;
928 if (IS_I830(dev) || IS_845G(dev)) {
929 ret = intel_ring_begin(ring, 4);
933 intel_ring_emit(ring, MI_BATCH_BUFFER);
934 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
935 intel_ring_emit(ring, offset + len - 8);
936 intel_ring_emit(ring, 0);
938 ret = intel_ring_begin(ring, 2);
942 if (INTEL_INFO(dev)->gen >= 4) {
943 intel_ring_emit(ring,
944 MI_BATCH_BUFFER_START | (2 << 6) |
945 MI_BATCH_NON_SECURE_I965);
946 intel_ring_emit(ring, offset);
948 intel_ring_emit(ring,
949 MI_BATCH_BUFFER_START | (2 << 6));
950 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
953 intel_ring_advance(ring);
958 static void cleanup_status_page(struct intel_ring_buffer *ring)
960 drm_i915_private_t *dev_priv = ring->dev->dev_private;
961 struct drm_i915_gem_object *obj;
963 obj = ring->status_page.obj;
967 pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
968 kmem_free(&kernel_map, (vm_offset_t)ring->status_page.page_addr,
970 i915_gem_object_unpin(obj);
971 drm_gem_object_unreference(&obj->base);
972 ring->status_page.obj = NULL;
974 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
977 static int init_status_page(struct intel_ring_buffer *ring)
979 struct drm_device *dev = ring->dev;
980 drm_i915_private_t *dev_priv = dev->dev_private;
981 struct drm_i915_gem_object *obj;
984 obj = i915_gem_alloc_object(dev, 4096);
986 DRM_ERROR("Failed to allocate status page\n");
991 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
993 ret = i915_gem_object_pin(obj, 4096, true);
998 ring->status_page.gfx_addr = obj->gtt_offset;
999 ring->status_page.page_addr = (void *)kmem_alloc_nofault(&kernel_map,
1000 PAGE_SIZE, PAGE_SIZE);
1001 if (ring->status_page.page_addr == NULL) {
1002 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
1005 pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
1007 pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
1008 (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
1009 ring->status_page.obj = obj;
1010 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1012 intel_ring_setup_status_page(ring);
1013 DRM_DEBUG("i915: init_status_page %s hws offset: 0x%08x\n",
1014 ring->name, ring->status_page.gfx_addr);
1019 i915_gem_object_unpin(obj);
1021 drm_gem_object_unreference(&obj->base);
1027 int intel_init_ring_buffer(struct drm_device *dev,
1028 struct intel_ring_buffer *ring)
1030 struct drm_i915_gem_object *obj;
1034 INIT_LIST_HEAD(&ring->active_list);
1035 INIT_LIST_HEAD(&ring->request_list);
1036 INIT_LIST_HEAD(&ring->gpu_write_list);
1038 lockinit(&ring->irq_lock, "ringb", 0, LK_CANRECURSE);
1039 ring->irq_mask = ~0;
1041 if (I915_NEED_GFX_HWS(dev)) {
1042 ret = init_status_page(ring);
1047 obj = i915_gem_alloc_object(dev, ring->size);
1049 DRM_ERROR("Failed to allocate ringbuffer\n");
1056 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1060 ring->map.size = ring->size;
1061 ring->map.offset = dev->agp->base + obj->gtt_offset;
1063 ring->map.flags = 0;
1066 drm_core_ioremap_wc(&ring->map, dev);
1067 if (ring->map.virtual == NULL) {
1068 DRM_ERROR("Failed to map ringbuffer.\n");
1073 ring->virtual_start = ring->map.virtual;
1074 ret = ring->init(ring);
1078 /* Workaround an erratum on the i830 which causes a hang if
1079 * the TAIL pointer points to within the last 2 cachelines of the buffer. */
1082 ring->effective_size = ring->size;
1083 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1084 ring->effective_size -= 128;
1089 drm_core_ioremapfree(&ring->map, dev);
1091 i915_gem_object_unpin(obj);
1093 drm_gem_object_unreference(&obj->base);
1096 cleanup_status_page(ring);
1100 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1102 struct drm_i915_private *dev_priv;
1105 if (ring->obj == NULL)
1108 /* Disable the ring buffer. The ring must be idle at this point */
1109 dev_priv = ring->dev->dev_private;
1110 ret = intel_wait_ring_idle(ring);
1111 I915_WRITE_CTL(ring, 0);
1113 drm_core_ioremapfree(&ring->map, ring->dev);
1115 i915_gem_object_unpin(ring->obj);
1116 drm_gem_object_unreference(&ring->obj->base);
1120 ring->cleanup(ring);
1122 cleanup_status_page(ring);
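/*
 * intel_wrap_ring_buffer() is used when a request would run past the end of
 * the ring: it waits for enough free space, fills the remainder of the buffer
 * with MI_NOOPs, and restarts the tail at the beginning.
 */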
1125 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1128 int rem = ring->size - ring->tail;
1130 if (ring->space < rem) {
1131 int ret = intel_wait_ring_buffer(ring, rem);
1136 virt = (unsigned int *)((char *)ring->virtual_start + ring->tail);
1144 ring->space = ring_space(ring);
1149 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1151 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1152 bool was_interruptible;
1155 /* XXX As we have not yet audited all the paths to check that
1156 * they are ready for ERESTARTSYS from intel_ring_begin, do not
1157 * allow us to be interruptible by a signal.
1159 was_interruptible = dev_priv->mm.interruptible;
1160 dev_priv->mm.interruptible = false;
1162 ret = i915_wait_request(ring, seqno, true);
1164 dev_priv->mm.interruptible = was_interruptible;
1169 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1171 struct drm_i915_gem_request *request;
1175 i915_gem_retire_requests_ring(ring);
1177 if (ring->last_retired_head != -1) {
1178 ring->head = ring->last_retired_head;
1179 ring->last_retired_head = -1;
1180 ring->space = ring_space(ring);
1181 if (ring->space >= n)
1185 list_for_each_entry(request, &ring->request_list, list) {
1188 if (request->tail == -1)
1191 space = request->tail - (ring->tail + 8);
1193 space += ring->size;
1195 seqno = request->seqno;
1199 /* Consume this request in case we need more space than
1200 * is available and so need to prevent a race between
1201 * updating last_retired_head and direct reads of
1202 * I915_RING_HEAD. It also provides a nice sanity check.
1210 ret = intel_ring_wait_seqno(ring, seqno);
1214 if (ring->last_retired_head == -1)
1217 ring->head = ring->last_retired_head;
1218 ring->last_retired_head = -1;
1219 ring->space = ring_space(ring);
1220 if (ring->space < n)
1226 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1228 struct drm_device *dev = ring->dev;
1229 struct drm_i915_private *dev_priv = dev->dev_private;
1233 ret = intel_ring_wait_request(ring, n);
1237 if (drm_core_check_feature(dev, DRIVER_GEM))
1238 /* With GEM the hangcheck timer should kick us out of the loop,
1239 * leaving it early runs the risk of corrupting GEM state (due
1240 * to running on almost untested codepaths). But on resume
1241 * timers don't work yet, so prevent a complete hang in that
1242 * case by choosing an insanely large timeout. */
1243 end = ticks + hz * 60;
1245 end = ticks + hz * 3;
1247 ring->head = I915_READ_HEAD(ring);
1248 ring->space = ring_space(ring);
1249 if (ring->space >= n) {
1254 if (dev->primary->master) {
1255 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1256 if (master_priv->sarea_priv)
1257 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1260 if (dev_priv->sarea_priv)
1261 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1265 if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
1268 } while (!time_after(ticks, end));
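/*
 * intel_ring_begin() reserves space for num_dwords dwords, wrapping the ring
 * and/or waiting for older requests to retire as needed; each successful
 * intel_ring_begin() is paired with emits and a final intel_ring_advance().
 */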
1272 int intel_ring_begin(struct intel_ring_buffer *ring,
1275 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1276 int n = 4*num_dwords;
1279 if (atomic_load_acq_int(&dev_priv->mm.wedged))
1282 if (ring->tail + n > ring->effective_size) {
1283 ret = intel_wrap_ring_buffer(ring);
1288 if (ring->space < n) {
1289 ret = intel_wait_ring_buffer(ring, n);
1298 void intel_ring_advance(struct intel_ring_buffer *ring)
1300 ring->tail &= ring->size - 1;
1301 ring->write_tail(ring, ring->tail);
1304 static const struct intel_ring_buffer render_ring = {
1305 .name = "render ring",
1307 .mmio_base = RENDER_RING_BASE,
1308 .size = 32 * PAGE_SIZE,
1309 .init = init_render_ring,
1310 .write_tail = ring_write_tail,
1311 .flush = render_ring_flush,
1312 .add_request = render_ring_add_request,
1313 .get_seqno = ring_get_seqno,
1314 .irq_get = render_ring_get_irq,
1315 .irq_put = render_ring_put_irq,
1316 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
1317 .cleanup = render_ring_cleanup,
1318 .sync_to = render_ring_sync_to,
1319 .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
1320 MI_SEMAPHORE_SYNC_RV,
1321 MI_SEMAPHORE_SYNC_RB},
1322 .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
1325 /* ring buffer for bit-stream decoder */
1327 static const struct intel_ring_buffer bsd_ring = {
1330 .mmio_base = BSD_RING_BASE,
1331 .size = 32 * PAGE_SIZE,
1332 .init = init_ring_common,
1333 .write_tail = ring_write_tail,
1334 .flush = bsd_ring_flush,
1335 .add_request = ring_add_request,
1336 .get_seqno = ring_get_seqno,
1337 .irq_get = bsd_ring_get_irq,
1338 .irq_put = bsd_ring_put_irq,
1339 .dispatch_execbuffer = ring_dispatch_execbuffer,
1343 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1346 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1348 /* Every tail move must follow the sequence below */
1349 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1350 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1351 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1352 I915_WRITE(GEN6_BSD_RNCID, 0x0);
1354 if (_intel_wait_for(ring->dev,
1355 (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1356 GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
1357 true, "915g6i") != 0)
1358 DRM_ERROR("timed out waiting for IDLE Indicator\n");
1360 I915_WRITE_TAIL(ring, value);
1361 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1362 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1363 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1366 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1367 uint32_t invalidate, uint32_t flush)
1372 ret = intel_ring_begin(ring, 4);
1377 if (invalidate & I915_GEM_GPU_DOMAINS)
1378 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1379 intel_ring_emit(ring, cmd);
1380 intel_ring_emit(ring, 0);
1381 intel_ring_emit(ring, 0);
1382 intel_ring_emit(ring, MI_NOOP);
1383 intel_ring_advance(ring);
1388 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1389 uint32_t offset, uint32_t len)
1393 ret = intel_ring_begin(ring, 2);
1397 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1398 /* bit0-7 is the length on GEN6+ */
1399 intel_ring_emit(ring, offset);
1400 intel_ring_advance(ring);
1406 gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
1408 return gen6_ring_get_irq(ring,
1410 GEN6_RENDER_USER_INTERRUPT);
1414 gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
1416 return gen6_ring_put_irq(ring,
1418 GEN6_RENDER_USER_INTERRUPT);
1422 gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1424 return gen6_ring_get_irq(ring,
1425 GT_GEN6_BSD_USER_INTERRUPT,
1426 GEN6_BSD_USER_INTERRUPT);
1430 gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1432 return gen6_ring_put_irq(ring,
1433 GT_GEN6_BSD_USER_INTERRUPT,
1434 GEN6_BSD_USER_INTERRUPT);
1437 /* ring buffer for Video Codec for Gen6+ */
1438 static const struct intel_ring_buffer gen6_bsd_ring = {
1439 .name = "gen6 bsd ring",
1441 .mmio_base = GEN6_BSD_RING_BASE,
1442 .size = 32 * PAGE_SIZE,
1443 .init = init_ring_common,
1444 .write_tail = gen6_bsd_ring_write_tail,
1445 .flush = gen6_ring_flush,
1446 .add_request = gen6_add_request,
1447 .get_seqno = gen6_ring_get_seqno,
1448 .irq_get = gen6_bsd_ring_get_irq,
1449 .irq_put = gen6_bsd_ring_put_irq,
1450 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1451 .sync_to = gen6_bsd_ring_sync_to,
1452 .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
1453 MI_SEMAPHORE_SYNC_INVALID,
1454 MI_SEMAPHORE_SYNC_VB},
1455 .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
1458 /* Blitter support (SandyBridge+) */
1461 blt_ring_get_irq(struct intel_ring_buffer *ring)
1463 return gen6_ring_get_irq(ring,
1464 GT_BLT_USER_INTERRUPT,
1465 GEN6_BLITTER_USER_INTERRUPT);
1469 blt_ring_put_irq(struct intel_ring_buffer *ring)
1471 gen6_ring_put_irq(ring,
1472 GT_BLT_USER_INTERRUPT,
1473 GEN6_BLITTER_USER_INTERRUPT);
1476 static int blt_ring_flush(struct intel_ring_buffer *ring,
1477 uint32_t invalidate, uint32_t flush)
1482 ret = intel_ring_begin(ring, 4);
1487 if (invalidate & I915_GEM_DOMAIN_RENDER)
1488 cmd |= MI_INVALIDATE_TLB;
1489 intel_ring_emit(ring, cmd);
1490 intel_ring_emit(ring, 0);
1491 intel_ring_emit(ring, 0);
1492 intel_ring_emit(ring, MI_NOOP);
1493 intel_ring_advance(ring);
1497 static const struct intel_ring_buffer gen6_blt_ring = {
1500 .mmio_base = BLT_RING_BASE,
1501 .size = 32 * PAGE_SIZE,
1502 .init = init_ring_common,
1503 .write_tail = ring_write_tail,
1504 .flush = blt_ring_flush,
1505 .add_request = gen6_add_request,
1506 .get_seqno = gen6_ring_get_seqno,
1507 .irq_get = blt_ring_get_irq,
1508 .irq_put = blt_ring_put_irq,
1509 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1510 .sync_to = gen6_blt_ring_sync_to,
1511 .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
1512 MI_SEMAPHORE_SYNC_BV,
1513 MI_SEMAPHORE_SYNC_INVALID},
1514 .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
1517 int intel_init_render_ring_buffer(struct drm_device *dev)
1519 drm_i915_private_t *dev_priv = dev->dev_private;
1520 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1522 *ring = render_ring;
1523 if (INTEL_INFO(dev)->gen >= 6) {
1524 ring->add_request = gen6_add_request;
1525 ring->flush = gen6_render_ring_flush;
1526 ring->irq_get = gen6_render_ring_get_irq;
1527 ring->irq_put = gen6_render_ring_put_irq;
1528 ring->get_seqno = gen6_ring_get_seqno;
1529 } else if (IS_GEN5(dev)) {
1530 ring->add_request = pc_render_add_request;
1531 ring->get_seqno = pc_render_get_seqno;
1534 if (!I915_NEED_GFX_HWS(dev)) {
1535 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1536 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1539 return intel_init_ring_buffer(dev, ring);
1542 int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
1545 drm_i915_private_t *dev_priv = dev->dev_private;
1546 struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1548 *ring = render_ring;
1549 if (INTEL_INFO(dev)->gen >= 6) {
1550 ring->add_request = gen6_add_request;
1551 ring->irq_get = gen6_render_ring_get_irq;
1552 ring->irq_put = gen6_render_ring_put_irq;
1553 } else if (IS_GEN5(dev)) {
1554 ring->add_request = pc_render_add_request;
1555 ring->get_seqno = pc_render_get_seqno;
1559 INIT_LIST_HEAD(&ring->active_list);
1560 INIT_LIST_HEAD(&ring->request_list);
1561 INIT_LIST_HEAD(&ring->gpu_write_list);
1564 ring->effective_size = ring->size;
1565 if (IS_I830(ring->dev))
1566 ring->effective_size -= 128;
1568 ring->map.offset = start;
1569 ring->map.size = size;
1571 ring->map.flags = 0;
1574 drm_core_ioremap_wc(&ring->map, dev);
1575 if (ring->map.virtual == NULL) {
1576 DRM_ERROR("can not ioremap virtual address for"
1581 ring->virtual_start = (void *)ring->map.virtual;
1585 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1587 drm_i915_private_t *dev_priv = dev->dev_private;
1588 struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
1590 if (IS_GEN6(dev) || IS_GEN7(dev))
1591 *ring = gen6_bsd_ring;
1595 return intel_init_ring_buffer(dev, ring);
1598 int intel_init_blt_ring_buffer(struct drm_device *dev)
1600 drm_i915_private_t *dev_priv = dev->dev_private;
1601 struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
1603 *ring = gen6_blt_ring;
1605 return intel_init_ring_buffer(dev, ring);