2 * Copyright 2013 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
25 * Authors: Christian König <christian.koenig@amd.com>
28 #include <linux/firmware.h>
31 #include "radeon_asic.h"
/* Size reserved in the VCE BO for the firmware image. */
#define VCE_V1_0_FW_SIZE (256 * 1024)
/* Size reserved for the firmware's stack area. */
#define VCE_V1_0_STACK_SIZE (64 * 1024)
/* Data area; scales with the handle count (one extra slot beyond the max). */
#define VCE_V1_0_DATA_SIZE (7808 * (RADEON_MAX_VCE_HANDLES + 1))
38 struct vce_v1_0_fw_signature
52 * vce_v1_0_get_rptr - get read pointer
54 * @rdev: radeon_device pointer
55 * @ring: radeon_ring pointer
57 * Returns the current hardware read pointer
59 uint32_t vce_v1_0_get_rptr(struct radeon_device *rdev,
60 struct radeon_ring *ring)
62 if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
63 return RREG32(VCE_RB_RPTR);
65 return RREG32(VCE_RB_RPTR2);
69 * vce_v1_0_get_wptr - get write pointer
71 * @rdev: radeon_device pointer
72 * @ring: radeon_ring pointer
74 * Returns the current hardware write pointer
76 uint32_t vce_v1_0_get_wptr(struct radeon_device *rdev,
77 struct radeon_ring *ring)
79 if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
80 return RREG32(VCE_RB_WPTR);
82 return RREG32(VCE_RB_WPTR2);
86 * vce_v1_0_set_wptr - set write pointer
88 * @rdev: radeon_device pointer
89 * @ring: radeon_ring pointer
91 * Commits the write pointer to the hardware
93 void vce_v1_0_set_wptr(struct radeon_device *rdev,
94 struct radeon_ring *ring)
96 if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
97 WREG32(VCE_RB_WPTR, ring->wptr);
99 WREG32(VCE_RB_WPTR2, ring->wptr);
void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable);

/*
 * vce_v1_0_enable_mgcg - toggle VCE medium grain clock gating
 *
 * @rdev: radeon_device pointer
 * @enable: true to enable clock gating, false to disable it
 *
 * Enabling takes effect only when RADEON_CG_SUPPORT_VCE_MGCG is set in
 * rdev->cg_flags; otherwise the disable path runs.  Each register is
 * updated read-modify-write.
 */
void vce_v1_0_enable_mgcg(struct radeon_device *rdev, bool enable)
	if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_VCE_MGCG)) {
		/* Enable path: turn on dynamic clock mode. */
		tmp = RREG32(VCE_CLOCK_GATING_A);
		tmp |= CGC_DYN_CLOCK_MODE;
		WREG32(VCE_CLOCK_GATING_A, tmp);

		tmp = RREG32(VCE_UENC_CLOCK_GATING);
		WREG32(VCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);

		/* Disable path: turn dynamic clock mode back off. */
		tmp = RREG32(VCE_CLOCK_GATING_A);
		tmp &= ~CGC_DYN_CLOCK_MODE;
		WREG32(VCE_CLOCK_GATING_A, tmp);

		tmp = RREG32(VCE_UENC_CLOCK_GATING);
		WREG32(VCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
		WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
/*
 * vce_v1_0_init_cg - program the initial VCE clock gating state
 *
 * @rdev: radeon_device pointer
 *
 * Called at the end of vce_v1_0_resume() once the firmware is up.
 * Every register is updated read-modify-write.
 */
static void vce_v1_0_init_cg(struct radeon_device *rdev)
	/* Start out in dynamic clock mode. */
	tmp = RREG32(VCE_CLOCK_GATING_A);
	tmp |= CGC_DYN_CLOCK_MODE;
	WREG32(VCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(VCE_CLOCK_GATING_B);
	WREG32(VCE_CLOCK_GATING_B, tmp);

	tmp = RREG32(VCE_UENC_CLOCK_GATING);
	WREG32(VCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(VCE_UENC_REG_CLOCK_GATING);
	WREG32(VCE_UENC_REG_CLOCK_GATING, tmp);
/*
 * vce_v1_0_load_fw - lay out the signed VCE firmware image
 *
 * @rdev: radeon_device pointer
 * @data: destination buffer (the firmware region of the VCE BO)
 *
 * Finds the signature entry matching this chip in the firmware blob,
 * then writes nonce, image length, firmware payload and signature
 * values into @data in the layout the VCE boot ROM expects, and
 * records the signing key for vce_v1_0_resume().
 */
int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
	const struct vce_v1_0_fw_signature *sign = (const void*)rdev->vce_fw->data;
	/* Pick the chip id used to match a signature entry to this ASIC. */
	switch (rdev->family) {
		chip_id = 0x01000014;
		chip_id = 0x01000015;
		chip_id = 0x01000016;
		chip_id = 0x01000017;

	/* Scan the signature table for an entry matching chip_id. */
	for (i = 0; i < le32_to_cpu(sign->num); ++i) {
		if (le32_to_cpu(sign->val[i].chip_id) == chip_id)

	/* No matching signature entry found for this chip. */
	if (i == le32_to_cpu(sign->num))

	/* Advance (256 - 64) bytes to where the signed header starts. */
	data += (256 - 64) / 4;
	/* Nonce words are copied verbatim (kept in firmware byte order). */
	data[0] = sign->val[i].nonce[0];
	data[1] = sign->val[i].nonce[1];
	data[2] = sign->val[i].nonce[2];
	data[3] = sign->val[i].nonce[3];
	/* Image length: payload length plus the 64 byte header. */
	data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);

	/* Zero the remainder of the header, then copy the payload. */
	memset(&data[5], 0, 44);
	memcpy(&data[16], &sign[1], rdev->vce_fw->datasize - sizeof(*sign));

	/* The signature values go right after the image. */
	data += le32_to_cpu(data[4]) / 4;
	data[0] = sign->val[i].sigval[0];
	data[1] = sign->val[i].sigval[1];
	data[2] = sign->val[i].sigval[2];
	data[3] = sign->val[i].sigval[3];

	/* Remember the signing key; programmed into VCE_LMI_FW_START_KEYSEL. */
	rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
211 unsigned vce_v1_0_bo_size(struct radeon_device *rdev)
213 WARN_ON(VCE_V1_0_FW_SIZE < rdev->vce_fw->datasize);
214 return VCE_V1_0_FW_SIZE + VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE;
/*
 * vce_v1_0_resume - bring up the VCE memory interface and boot the firmware
 *
 * @rdev: radeon_device pointer
 *
 * Puts clock gating into a known state, programs the LMI and the three
 * VCPU cache regions (firmware, stack, data) from rdev->vce.gpu_addr,
 * selects the firmware signing key and waits for the boot ROM to
 * validate the image, then applies the initial clock gating setup.
 */
int vce_v1_0_resume(struct radeon_device *rdev)
	uint64_t addr = rdev->vce.gpu_addr;

	/* Known clock gating state before starting the firmware. */
	WREG32_P(VCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(VCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(VCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(VCE_CLOCK_GATING_B, 0);

	WREG32_P(VCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);

	/* LMI setup: clear the swap and VM control registers. */
	WREG32(VCE_LMI_CTRL, 0x00398000);
	WREG32_P(VCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(VCE_LMI_SWAP_CNTL, 0);
	WREG32(VCE_LMI_SWAP_CNTL1, 0);
	WREG32(VCE_LMI_VM_CTRL, 0);

	/* Publish the maximum handle count via scratch register 7. */
	WREG32(VCE_VCPU_SCRATCH7, RADEON_MAX_VCE_HANDLES);

	/* Cache region 0: firmware image. */
	size = VCE_V1_0_FW_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE0, size);

	/* Cache region 1: stack. */
	size = VCE_V1_0_STACK_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE1, size);

	/* Cache region 2: data area. */
	size = VCE_V1_0_DATA_SIZE;
	WREG32(VCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
	WREG32(VCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(VCE_LMI_CTRL2, 0x0, ~0x100);

	/* Select the key the firmware was signed with (from load_fw). */
	WREG32(VCE_LMI_FW_START_KEYSEL, rdev->vce.keyselect);

	/* Poll until the boot ROM reports the firmware check is done. */
	for (i = 0; i < 10; ++i) {
		if (RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_DONE)

	/* The signature check must have passed. */
	if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_PASS))

	/* Poll until the firmware engine is no longer busy. */
	for (i = 0; i < 10; ++i) {
		if (!(RREG32(VCE_FW_REG_STATUS) & VCE_FW_REG_STATUS_BUSY))

	vce_v1_0_init_cg(rdev);
/**
 * vce_v1_0_start - start VCE block
 *
 * @rdev: radeon_device pointer
 *
 * Setup and start the VCE block: programs both ring buffers, enables
 * the VCPU clock, pulses the soft resets and polls VCE_STATUS until
 * the firmware responds, retrying with an ECPU reset if needed.
 */
int vce_v1_0_start(struct radeon_device *rdev)
	struct radeon_ring *ring;

	/* Set the BUSY flag (bit 0) while bringing the block up. */
	WREG32_P(VCE_STATUS, 1, ~1);

	/* Program ring buffer 1: pointers, base address and size. */
	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
	WREG32(VCE_RB_RPTR, ring->wptr);
	WREG32(VCE_RB_WPTR, ring->wptr);
	WREG32(VCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(VCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(VCE_RB_SIZE, ring->ring_size / 4);

	/* Program ring buffer 2 the same way. */
	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
	WREG32(VCE_RB_RPTR2, ring->wptr);
	WREG32(VCE_RB_WPTR2, ring->wptr);
	WREG32(VCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(VCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(VCE_RB_SIZE2, ring->ring_size / 4);

	/* Enable the VCPU clock. */
	WREG32_P(VCE_VCPU_CNTL, VCE_CLK_EN, ~VCE_CLK_EN);

	/* Assert, then release, the ECPU and FME soft resets. */
	WREG32_P(VCE_SOFT_RESET,
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET, ~(
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET));

	WREG32_P(VCE_SOFT_RESET, 0, ~(
		 VCE_ECPU_SOFT_RESET |
		 VCE_FME_SOFT_RESET));

	/* Poll VCE_STATUS for a response, retrying up to 10 times. */
	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			status = RREG32(VCE_STATUS);

		/* No response yet: pulse just the ECPU reset and retry. */
		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(VCE_SOFT_RESET, VCE_ECPU_SOFT_RESET, ~VCE_ECPU_SOFT_RESET);

		WREG32_P(VCE_SOFT_RESET, 0, ~VCE_ECPU_SOFT_RESET);

	/* clear BUSY flag */
	WREG32_P(VCE_STATUS, 0, ~1);

	/* All retries exhausted: give up on starting the block. */
	DRM_ERROR("VCE not responding, giving up!!!\n");
357 int vce_v1_0_init(struct radeon_device *rdev)
359 struct radeon_ring *ring;
362 r = vce_v1_0_start(rdev);
366 ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
368 r = radeon_ring_test(rdev, TN_RING_TYPE_VCE1_INDEX, ring);
374 ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
376 r = radeon_ring_test(rdev, TN_RING_TYPE_VCE2_INDEX, ring);
382 DRM_INFO("VCE initialized successfully.\n");