/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#ifdef TRACE_TODO
#include "radeon_trace.h"
#endif
#include "sid.h"

/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;
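	/* SI has two async DMA engines; check the soft-reset status bit
	 * that corresponds to the engine backing this ring.
	 */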
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}

/**
 * si_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr where to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using the DMA (SI).
 */
void si_dma_vm_copy_pages(struct radeon_device *rdev,
			  struct radeon_ib *ib,
			  uint64_t pe, uint64_t src,
			  unsigned count)
{
	while (count) {
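		/* A DMA copy packet carries a 20-bit byte count, so one
		 * packet can move at most 0xFFFFF bytes.  Clamp to 0xFFFF8,
		 * the largest such value that is still a multiple of the
		 * 8-byte PTE size, so no entry is split across packets.
		 */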
		unsigned bytes = count * 8;
		if (bytes > 0xFFFF8)
			bytes = 0xFFFF8;

		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
						      1, 0, 0, bytes);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}

/**
 * si_dma_vm_write_pages - update PTEs by writing them manually
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using the DMA (SI).
 */
void si_dma_vm_write_pages(struct radeon_device *rdev,
			   struct radeon_ib *ib,
			   uint64_t pe,
			   uint64_t addr, unsigned count,
			   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
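		/* Each PTE is written as two dwords.  The write packet's
		 * 20-bit dword count limits one packet to 0xFFFFE dwords,
		 * kept even so a PTE is never split across packets.
		 */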
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			if (flags & R600_PTE_SYSTEM) {
				value = radeon_vm_map_gart(rdev, addr);
			} else if (flags & R600_PTE_VALID) {
				value = addr;
			} else {
				value = 0;
			}
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}

/**
 * si_dma_vm_set_pages - update the page tables using the DMA
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the DMA (SI).
 */
void si_dma_vm_set_pages(struct radeon_device *rdev,
			 struct radeon_ib *ib,
			 uint64_t pe,
			 uint64_t addr, unsigned count,
			 uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
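		/* The PTE_PDE packet writes ndw/2 entries generated from a
		 * base value plus a constant per-entry increment, which is
		 * why it only suits physically contiguous mappings.  The
		 * dword count is again clamped to the packet's 20-bit field
		 * and kept even so entries are never split.
		 */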
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		if (flags & R600_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		pe += ndw * 4;
		addr += (ndw / 2) * incr;
		count -= ndw / 2;
	}
}

void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
		     unsigned vm_id, uint64_t pd_addr)
{
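	/* Registers are programmed here via SRBM_WRITE packets: the header
	 * dword is followed by what appears to be a write (byte-enable) mask
	 * in bits 19:16 plus the dword-aligned register offset in the low
	 * bits, then the value to write.
	 */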
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	if (vm_id < 8) {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
	} else {
		radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
	}
	radeon_ring_write(ring, pd_addr >> 12);

	/* flush hdp cache */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);

	/* bits 0-7 are the VM contexts0-7 */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
	radeon_ring_write(ring, 1 << vm_id);

	/* wait for invalidate to complete */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
	radeon_ring_write(ring, 0xff << 16); /* retry */
	radeon_ring_write(ring, 1 << vm_id); /* mask */
	radeon_ring_write(ring, 0); /* value */
	radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}

/**
 * si_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @fence: radeon fence object
 *
 * Copy GPU pages using the DMA engine (SI).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
int si_copy_dma(struct radeon_device *rdev,
		uint64_t src_offset, uint64_t dst_offset,
		unsigned num_gpu_pages,
		struct radeon_fence **fence)
{
	struct radeon_semaphore *sem = NULL;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_bytes, cur_size_in_bytes;
	int i, num_loops;
	int r = 0;

	r = radeon_semaphore_create(rdev, &sem);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}

	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
	num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
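	/* Each copy packet is 5 dwords and moves at most 0xFFFFF bytes;
	 * the extra 11 dwords of ring space leave headroom for the
	 * semaphore sync and fence emission below.
	 */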
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_semaphore_sync_to(sem, *fence);
	radeon_semaphore_sync_rings(rdev, sem, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_bytes = size_in_bytes;
		if (cur_size_in_bytes > 0xFFFFF)
			cur_size_in_bytes = 0xFFFFF;
		size_in_bytes -= cur_size_in_bytes;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
		radeon_ring_write(ring, lower_32_bits(dst_offset));
		radeon_ring_write(ring, lower_32_bits(src_offset));
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
	}

	r = radeon_fence_emit(rdev, fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_semaphore_free(rdev, &sem, NULL);
		return r;
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_semaphore_free(rdev, &sem, *fence);

	return r;
}