/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "cikd.h"

/* sdma */
#define CIK_SDMA_UCODE_SIZE 1050
#define CIK_SDMA_UCODE_VERSION 64
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP.  sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
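
/*
 * For reference, the sDMA packets emitted below are built with the
 * SDMA_PACKET() helper.  A sketch of the header layout (the
 * authoritative definition lives in cikd.h):
 *
 *   SDMA_PACKET(op, sub_op, extra) =
 *       ((extra & 0xffff) << 16) | ((sub_op & 0xff) << 8) | (op & 0xff)
 *
 * i.e. opcode in bits [7:0], sub-opcode in bits [15:8], and
 * packet-specific extra bits in bits [31:16].
 */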
/**
 * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (CIK).
 */
void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
                              struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];
        u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;

        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 5;
                while ((next_rptr & 7) != 4)
                        next_rptr++;
                next_rptr += 4;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
                radeon_ring_write(ring, 1); /* number of DWs to follow */
                radeon_ring_write(ring, next_rptr);
        }

        /* IB packet must end on a 8 DW boundary */
        while ((ring->wptr & 7) != 4)
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
        radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
        radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
        radeon_ring_write(ring, ib->length_dw);
}
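
/*
 * Worked example of the alignment rule above (illustrative only): the
 * INDIRECT_BUFFER packet is 4 DWs, so it must start at wptr & 7 == 4
 * to end on an 8 DW boundary.  If ring->wptr == 0x12 (0x12 & 7 == 2),
 * two NOP headers move wptr to 0x14, and the 4 DW IB packet then ends
 * at 0x18, which is 8 DW aligned.
 */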
/**
 * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (CIK).
 */
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
                              struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (fence->ring == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

        /* write the fence */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
        radeon_ring_write(ring, addr & 0xffffffff);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
        /* flush HDP */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
        radeon_ring_write(ring, ref_and_mask); /* MASK */
        radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
}
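
/*
 * The trailing POLL_REG_MEM packet performs an HDP flush from the
 * engine: with EXTRA_OP(1) and EXTRA_FUNC(3) ("==") it appears to
 * poke GPU_HDP_FLUSH_REQ and then poll GPU_HDP_FLUSH_DONE until
 * (value & MASK) == REFERENCE for this engine's bit, retrying up to
 * RETRY_COUNT times at the given POLL_INTERVAL.
 */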
/**
 * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 * @semaphore: radeon semaphore object
 * @emit_wait: wait or signal semaphore
 *
 * Add a DMA semaphore packet to the ring to wait on or signal
 * other rings (CIK).
 */
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
                                  struct radeon_ring *ring,
                                  struct radeon_semaphore *semaphore,
                                  bool emit_wait)
{
        u64 addr = semaphore->gpu_addr;
        u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
        radeon_ring_write(ring, addr & 0xfffffff8);
        radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
}
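
/*
 * Note on the semaphore packet above: the semaphore address must be
 * 8-byte aligned (hence the 0xfffffff8 mask), and the S bit
 * (SDMA_SEMAPHORE_EXTRA_S) in the header distinguishes signal from
 * wait; per the extra_bits computation it is set when signalling and
 * clear when waiting.
 */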
/**
 * cik_sdma_gfx_stop - stop the gfx async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the gfx async dma ring buffers (CIK).
 */
static void cik_sdma_gfx_stop(struct radeon_device *rdev)
{
        u32 rb_cntl, reg_offset;
        int i;

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
                rb_cntl &= ~SDMA_RB_ENABLE;
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
        }
}
/**
 * cik_sdma_rlc_stop - stop the compute async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the compute async dma queues (CIK).
 */
static void cik_sdma_rlc_stop(struct radeon_device *rdev)
{
        /* XXX todo */
}
/**
 * cik_sdma_enable - enable/disable the async dma engines
 *
 * @rdev: radeon_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (CIK).
 */
void cik_sdma_enable(struct radeon_device *rdev, bool enable)
{
        u32 me_cntl, reg_offset;
        int i;

        for (i = 0; i < 2; i++) {
                if (i == 0)
                        reg_offset = SDMA0_REGISTER_OFFSET;
                else
                        reg_offset = SDMA1_REGISTER_OFFSET;
                me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
                if (enable)
                        me_cntl &= ~SDMA_HALT;
                else
                        me_cntl |= SDMA_HALT;
                WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
        }
}
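
/*
 * Both sDMA instances share the SDMA0_* register layout; only the
 * per-instance SDMA0_/SDMA1_REGISTER_OFFSET differs.  Setting
 * SDMA_HALT in SDMA0_ME_CNTL stops the micro engine; clearing it
 * lets the engine fetch and execute packets again.
 */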
/**
 * cik_sdma_gfx_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_gfx_resume(struct radeon_device *rdev)
{
        struct radeon_ring *ring;
        u32 rb_cntl, ib_cntl;
        u32 rb_bufsz;
        u32 reg_offset, wb_offset;
        int i, r;

        for (i = 0; i < 2; i++) {
                if (i == 0) {
                        ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
                        reg_offset = SDMA0_REGISTER_OFFSET;
                        wb_offset = R600_WB_DMA_RPTR_OFFSET;
                } else {
                        ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
                        reg_offset = SDMA1_REGISTER_OFFSET;
                        wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
                }

                WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
                WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);

                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
                rb_cntl = rb_bufsz << 1;
#ifdef __BIG_ENDIAN
                rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
#endif
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);

                /* Initialize the ring buffer's read and write pointers */
                WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);

                /* set the wb address whether it's enabled or not */
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
                       upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
                WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
                       ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));

                if (rdev->wb.enabled)
                        rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;

                WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
                WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);

                ring->wptr = 0;
                WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);

                ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;

                /* enable DMA RB */
                WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);

                ib_cntl = SDMA_IB_ENABLE;
#ifdef __BIG_ENDIAN
                ib_cntl |= SDMA_IB_SWAP_ENABLE;
#endif
                /* enable DMA IBs */
                WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);

                ring->ready = true;

                r = radeon_ring_test(rdev, ring->idx, ring);
                if (r) {
                        ring->ready = false;
                        return r;
                }
        }

        radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);

        return 0;
}
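
/*
 * Worked example for the RB_CNTL setup above (illustrative): a 256KB
 * ring holds 256KB / 4 = 64K dwords, order_base_2(64K) = 16, and
 * rb_cntl = 16 << 1 places the log2 ring size in the field starting
 * at bit 1; bit 0 (SDMA_RB_ENABLE) is only set by the final RB_CNTL
 * write once the ring is fully configured.
 */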
/**
 * cik_sdma_rlc_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the compute DMA queues and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
static int cik_sdma_rlc_resume(struct radeon_device *rdev)
{
        /* XXX todo */
        return 0;
}
/**
 * cik_sdma_load_microcode - load the sDMA ME ucode
 *
 * @rdev: radeon_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int cik_sdma_load_microcode(struct radeon_device *rdev)
{
        const __be32 *fw_data;
        int i;

        if (!rdev->sdma_fw)
                return -EINVAL;

        /* stop the gfx rings and rlc compute queues */
        cik_sdma_gfx_stop(rdev);
        cik_sdma_rlc_stop(rdev);

        /* halt the MEs */
        cik_sdma_enable(rdev, false);

        /* sdma0 */
        fw_data = (const __be32 *)rdev->sdma_fw->data;
        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
        WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

        /* sdma1 */
        fw_data = (const __be32 *)rdev->sdma_fw->data;
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
                WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
        WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);

        WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
        WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
        return 0;
}
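
/*
 * Note: both engines are loaded from the same firmware image.  The
 * ucode address register evidently auto-increments as the loop
 * streams CIK_SDMA_UCODE_SIZE big-endian dwords (byte-swapped with
 * be32_to_cpup()) into the data register; the version dword is
 * written last and the address registers are then reset to 0.
 */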
/**
 * cik_sdma_resume - setup and start the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Set up the DMA engines and enable them (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_resume(struct radeon_device *rdev)
{
        int r;

        /* Reset dma */
        WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
        RREG32(SRBM_SOFT_RESET);
        udelay(50);
        WREG32(SRBM_SOFT_RESET, 0);
        RREG32(SRBM_SOFT_RESET);

        r = cik_sdma_load_microcode(rdev);
        if (r)
                return r;

        /* unhalt the MEs */
        cik_sdma_enable(rdev, true);

        /* start the gfx rings and rlc compute queues */
        r = cik_sdma_gfx_resume(rdev);
        if (r)
                return r;
        r = cik_sdma_rlc_resume(rdev);
        if (r)
                return r;

        return 0;
}
/**
 * cik_sdma_fini - tear down the async dma engines
 *
 * @rdev: radeon_device pointer
 *
 * Stop the async dma engines and free the rings (CIK).
 */
void cik_sdma_fini(struct radeon_device *rdev)
{
        /* stop the gfx rings and rlc compute queues */
        cik_sdma_gfx_stop(rdev);
        cik_sdma_rlc_stop(rdev);

        /* halt the MEs */
        cik_sdma_enable(rdev, false);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
        radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
        /* XXX - compute dma queue tear down */
}
/**
 * cik_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU paging using the DMA engine (CIK).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int cik_copy_dma(struct radeon_device *rdev,
                 uint64_t src_offset, uint64_t dst_offset,
                 unsigned num_gpu_pages,
                 struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_bytes, cur_size_in_bytes;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
        num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

        if (radeon_fence_need_sync(*fence, ring->idx)) {
                radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
                                            ring->idx);
                radeon_fence_note_sync(*fence, ring->idx);
        } else {
                radeon_semaphore_free(rdev, &sem, NULL);
        }

        for (i = 0; i < num_loops; i++) {
                cur_size_in_bytes = size_in_bytes;
                if (cur_size_in_bytes > 0x1fffff)
                        cur_size_in_bytes = 0x1fffff;
                size_in_bytes -= cur_size_in_bytes;
                radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
                radeon_ring_write(ring, cur_size_in_bytes);
                radeon_ring_write(ring, 0); /* src/dst endian swap */
                radeon_ring_write(ring, src_offset & 0xffffffff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}
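
/*
 * Ring-space arithmetic for the radeon_ring_lock() call above: each
 * COPY packet covers at most 0x1fffff bytes and costs 7 DWs, and up
 * to 14 further DWs are reserved for the optional semaphore sync and
 * the fence.  Example: copying 2 MiB (512 GPU pages of 4KB) gives
 * num_loops = DIV_ROUND_UP(0x200000, 0x1fffff) = 2, so the ring is
 * locked for 2 * 7 + 14 = 28 DWs.
 */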
/**
 * cik_sdma_ring_test - simple async dma engine test
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (CIK).
 * Returns 0 for success, error for failure.
 */
int cik_sdma_ring_test(struct radeon_device *rdev,
                       struct radeon_ring *ring)
{
        unsigned i;
        int r;
        volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr;
        u32 tmp;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);

        r = radeon_ring_lock(rdev, ring, 5); /* the WRITE packet below is 5 DWs */
        if (r) {
                DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
                return r;
        }
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
        radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
        radeon_ring_write(ring, 1); /* number of DWs to follow */
        radeon_ring_write(ring, 0xDEADBEEF);
        radeon_ring_unlock_commit(rdev, ring);

        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < rdev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
        } else {
                DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}
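
/*
 * The test above seeds the VRAM scratch dword with 0xCAFEDEAD, asks
 * the engine to overwrite it with 0xDEADBEEF via a 5 DW linear WRITE
 * packet, and then busy-polls the scratch location for up to
 * rdev->usec_timeout microseconds for the new value to appear.
 */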
/**
 * cik_sdma_ib_test - test an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (CIK).
 * Returns 0 on success, error on failure.
 */
int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
        struct radeon_ib ib;
        unsigned i;
        int r;
        volatile void __iomem *ptr = (volatile void *)rdev->vram_scratch.ptr;
        u32 tmp = 0;

        if (!ptr) {
                DRM_ERROR("invalid vram scratch pointer\n");
                return -EINVAL;
        }

        tmp = 0xCAFEDEAD;
        writel(tmp, ptr);

        r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
        if (r) {
                DRM_ERROR("radeon: failed to get ib (%d).\n", r);
                return r;
        }

        ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
        ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
        ib.ptr[3] = 1; /* number of DWs to follow */
        ib.ptr[4] = 0xDEADBEEF;
        ib.length_dw = 5;

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                radeon_ib_free(rdev, &ib);
                DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
                return r;
        }
        r = radeon_fence_wait(ib.fence, false);
        if (r) {
                DRM_ERROR("radeon: fence wait failed (%d).\n", r);
                return r;
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = readl(ptr);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }
        if (i < rdev->usec_timeout) {
                DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
        } else {
                DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
                r = -EINVAL;
        }
        radeon_ib_free(rdev, &ib);
        return r;
}
/**
 * cik_sdma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (CIK).
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = cik_gpu_check_soft_reset(rdev);
        u32 mask;

        if (ring->idx == R600_RING_TYPE_DMA_INDEX)
                mask = RADEON_RESET_DMA;
        else
                mask = RADEON_RESET_DMA1;

        if (!(reset_mask & mask)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activities */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}
/**
 * cik_sdma_vm_set_page - update the page tables using sDMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
void cik_sdma_vm_set_page(struct radeon_device *rdev,
                          struct radeon_ib *ib,
                          uint64_t pe,
                          uint64_t addr, unsigned count,
                          uint32_t incr, uint32_t flags)
{
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
        uint64_t value;
        unsigned ndw;

        if (flags & RADEON_VM_PAGE_SYSTEM) {
                while (count) {
                        ndw = count * 2;
                        if (ndw > 0xFFFFE)
                                ndw = 0xFFFFE;

                        /* for non-physically contiguous pages (system) */
                        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
                        ib->ptr[ib->length_dw++] = pe;
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                        ib->ptr[ib->length_dw++] = ndw;
                        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                                if (flags & RADEON_VM_PAGE_SYSTEM) {
                                        value = radeon_vm_map_gart(rdev, addr);
                                        value &= 0xFFFFFFFFFFFFF000ULL;
                                } else if (flags & RADEON_VM_PAGE_VALID) {
                                        value = addr;
                                } else {
                                        value = 0;
                                }
                                addr += incr;
                                value |= r600_flags;
                                ib->ptr[ib->length_dw++] = value;
                                ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        }
                }
        } else {
                while (count) {
                        ndw = count;
                        if (ndw > 0x7FFFF)
                                ndw = 0x7FFFF;

                        if (flags & RADEON_VM_PAGE_VALID)
                                value = addr;
                        else
                                value = 0;
                        /* for physically contiguous pages (vram) */
                        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
                        ib->ptr[ib->length_dw++] = pe; /* dst addr */
                        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
                        ib->ptr[ib->length_dw++] = r600_flags; /* mask */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = value; /* value */
                        ib->ptr[ib->length_dw++] = upper_32_bits(value);
                        ib->ptr[ib->length_dw++] = incr; /* increment size */
                        ib->ptr[ib->length_dw++] = 0;
                        ib->ptr[ib->length_dw++] = ndw; /* number of entries */
                        pe += ndw * 8;
                        addr += ndw * incr;
                        count -= ndw;
                }
        }
        while (ib->length_dw & 0x7)
                ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
}
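
/*
 * Summary of the two paths above: system pages go through the GART
 * and are written as explicit PTE pairs with a linear WRITE packet
 * (ndw capped at 0xFFFFE), while physically contiguous VRAM ranges
 * use a single GENERATE_PTE_PDE packet (up to 0x7FFFF entries) and
 * let the engine derive successive addresses from @incr.  The NOP
 * padding keeps the IB a multiple of 8 DWs, matching the alignment
 * rule in cik_sdma_ring_ib_execute().
 */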
/**
 * cik_dma_vm_flush - cik vm flush using sDMA
 *
 * @rdev: radeon_device pointer
 * @ridx: ring index to emit the flush on
 * @vm: radeon_vm to flush for
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (CIK).
 */
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
        struct radeon_ring *ring = &rdev->ring[ridx];
        u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
                          SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
        u32 ref_and_mask;

        if (vm == NULL)
                return;

        if (ridx == R600_RING_TYPE_DMA_INDEX)
                ref_and_mask = SDMA0;
        else
                ref_and_mask = SDMA1;

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        if (vm->id < 8) {
                radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
        } else {
                radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
        }
        radeon_ring_write(ring, vm->pd_gpu_addr >> 12);

        /* update SH_MEM_* regs */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(vm->id));

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_BASES >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
        radeon_ring_write(ring, 1);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
        radeon_ring_write(ring, 0);

        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
        radeon_ring_write(ring, VMID(0));

        /* flush HDP */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
        radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
        radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
        radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
        radeon_ring_write(ring, ref_and_mask); /* MASK */
        radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */

        /* flush TLB */
        radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 1 << vm->id);
}
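
/*
 * The final SRBM write requests the TLB flush: VM_INVALIDATE_REQUEST
 * takes a bitmask of VMIDs to invalidate, so e.g. vm->id == 3 writes
 * 1 << 3 = 0x8 and flushes only that VM context.
 */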