drm/radeon: Partial update to Linux 3.12
sys/dev/drm/radeon/evergreen_dma.c (dragonfly.git)
/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * and a DMA trap packet to generate an interrupt if needed
 * (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
                                   struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];
        u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
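        /* The fence value is written to the fence driver's GPU address for
         * this ring; radeon_fence_process() later reads it back on the CPU
         * side to retire the fence.
         */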
        /* write the fence */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
        radeon_ring_write(ring, addr & 0xfffffffc);
        radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
        radeon_ring_write(ring, fence->seq);
        /* generate an interrupt */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
        /* flush HDP */
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
        radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
        radeon_ring_write(ring, 1);
}

/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
                                   struct radeon_ib *ib)
{
        struct radeon_ring *ring = &rdev->ring[ib->ring];

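        /* With writeback enabled, precompute where the ring read pointer
         * will be once the indirect buffer packet below has been fetched
         * (the packet is padded to end on an 8 DW boundary) and write that
         * value to the next_rptr writeback slot so the driver can tell how
         * far the engine has progressed.
         */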
        if (rdev->wb.enabled) {
                u32 next_rptr = ring->wptr + 4;
                while ((next_rptr & 7) != 5)
                        next_rptr++;
                next_rptr += 3;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
                radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
                radeon_ring_write(ring, next_rptr);
        }

        /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
         * Pad as necessary with NOPs.
         */
        while ((ring->wptr & 7) != 5)
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
        radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
        radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}

/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int evergreen_copy_dma(struct radeon_device *rdev,
                       uint64_t src_offset, uint64_t dst_offset,
                       unsigned num_gpu_pages,
                       struct radeon_fence **fence)
{
        struct radeon_semaphore *sem = NULL;
        int ring_index = rdev->asic->copy.dma_ring_index;
        struct radeon_ring *ring = &rdev->ring[ring_index];
        u32 size_in_dw, cur_size_in_dw;
        int i, num_loops;
        int r = 0;

        r = radeon_semaphore_create(rdev, &sem);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }

        size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
        num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
        r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                radeon_semaphore_free(rdev, &sem, NULL);
                return r;
        }

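        /* If the caller passed in a fence from another ring, emit a
         * semaphore wait so the copy starts only after that work completes;
         * otherwise the semaphore is unused and can be freed right away.
         */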
        if (radeon_fence_need_sync(*fence, ring->idx)) {
                radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
                                            ring->idx);
                radeon_fence_note_sync(*fence, ring->idx);
        } else {
                radeon_semaphore_free(rdev, &sem, NULL);
        }

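        /* Each DMA_PACKET_COPY moves at most 0xfffff dwords, so split the
         * transfer into chunks and advance the source and destination
         * addresses between packets.
         */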
        for (i = 0; i < num_loops; i++) {
                cur_size_in_dw = size_in_dw;
                if (cur_size_in_dw > 0xFFFFF)
                        cur_size_in_dw = 0xFFFFF;
                size_in_dw -= cur_size_in_dw;
                radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
                radeon_ring_write(ring, dst_offset & 0xfffffffc);
                radeon_ring_write(ring, src_offset & 0xfffffffc);
                radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
                radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
                src_offset += cur_size_in_dw * 4;
                dst_offset += cur_size_in_dw * 4;
        }

        r = radeon_fence_emit(rdev, fence, ring->idx);
        if (r) {
                radeon_ring_unlock_undo(rdev, ring);
                return r;
        }

        radeon_ring_unlock_commit(rdev, ring);
        radeon_semaphore_free(rdev, &sem, *fence);

        return r;
}

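/* A minimal usage sketch (hypothetical caller, not part of this file):
 * the fence returned through *fence can be waited on before the copied
 * data is consumed, assuming the standard radeon fence helpers:
 *
 *      struct radeon_fence *fence = NULL;
 *      int r = evergreen_copy_dma(rdev, src_gpu_addr, dst_gpu_addr,
 *                                 num_pages, &fence);
 *      if (!r)
 *              r = radeon_fence_wait(fence, false);
 *      radeon_fence_unref(&fence);
 */
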
/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
        u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

        if (!(reset_mask & RADEON_RESET_DMA)) {
                radeon_ring_lockup_update(ring);
                return false;
        }
        /* force ring activities */
        radeon_ring_force_activity(rdev, ring);
        return radeon_ring_test_lockup(rdev, ring);
}