/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/module.h>
#include <drm/drmP.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeonkmsfw_RV710_uvd"
#define FIRMWARE_CYPRESS	"radeonkmsfw_CYPRESS_uvd"
#define FIRMWARE_SUMO		"radeonkmsfw_SUMO_uvd"
#define FIRMWARE_TAHITI		"radeonkmsfw_TAHITI_uvd"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);

static void radeon_uvd_idle_work_handler(struct work_struct *work);

int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	default:
		return -EINVAL;
	}

	rdev->uvd_fw = firmware_get(fw_name);
	if (rdev->uvd_fw == NULL) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return -EINVAL;
	}
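
	/*
	 * Single backing BO for the whole UVD block: the page aligned
	 * firmware image (plus an 8 byte tail) is followed by the VCPU
	 * stack and heap, which the hardware init code points the VCPU
	 * caches at.
	 */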
	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->datasize + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_uvd_resume(rdev);
	if (r)
		return r;

	memset(rdev->uvd.cpu_addr, 0, bo_size);
	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->datasize);

	r = radeon_uvd_suspend(rdev);
	if (r)
		return r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
	}

	return 0;
}

void radeon_uvd_fini(struct radeon_device *rdev)
{
	radeon_uvd_suspend(rdev);
	radeon_bo_unref(&rdev->uvd.vcpu_bo);
}

int radeon_uvd_suspend(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
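		/*
		 * Park the image in the CPU domain while the device sleeps;
		 * the fresh kmap keeps rdev->uvd.cpu_addr valid so the fence
		 * driver can still reach its scratch area inside the BO.
		 */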
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		rdev->uvd.cpu_addr = NULL;
		if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
			radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
		}
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);

		if (rdev->uvd.cpu_addr) {
			radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
		} else {
			rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
		}
	}
	return r;
}

int radeon_uvd_resume(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	/* the BO was pinned into the CPU domain on suspend: unmap and unpin
	 * it before moving it back into VRAM */
	radeon_bo_kunmap(rdev->uvd.vcpu_bo);
	radeon_bo_unpin(rdev->uvd.vcpu_bo);

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	return 0;
}
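
/*
 * UVD can only address buffers inside a single 256MB segment, so clamp the
 * allowed page range and let TTM place the BO in the first 256MB of VRAM;
 * the command stream checks in radeon_uvd_cs_reloc() enforce the same
 * window.
 */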
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
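
/*
 * Called on DRM file close: destroy any decoder sessions the process left
 * open so the VCPU does not keep stale state around.
 */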
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (rdev->uvd.filp[i] == filp) {
			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
			struct radeon_fence *fence;

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}

static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);
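
	/*
	 * Worked example: a 1920x1080 H264 stream gives width_in_mb = 120,
	 * height_in_mb = ALIGN(67, 2) = 68 and image_size =
	 * ALIGN(1920 * 1080 * 3 / 2, 1024) = 3110912 bytes, so the H264 case
	 * below demands min_dpb_size = 3110912 * 17 + 120 * 68 * 17 * 192 +
	 * 120 * 68 * 32, just over 76MB of decode picture buffer.
	 */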

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
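
/*
 * First dwords of every UVD message: msg[1] is the message type (1 =
 * decode, 2 = destroy, everything else is treated as create) and msg[2]
 * the session handle the message belongs to.
 */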

static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	uint32_t *msg;
	int32_t msg_type, handle;
	void *ptr;
	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r)
		return r;

	msg = (uint32_t *)((uint8_t *)ptr + offset);

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		radeon_bo_kunmap(bo);
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpset(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg, no special handling needed */
		radeon_bo_kunmap(bo);
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_cmpset(&p->rdev->uvd.handles[i], 0, handle) == 1) {
			p->rdev->uvd.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[])
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->lobj.gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;
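
	/*
	 * cmd selects the minimum size in buf_sizes[]: 0 message buffer,
	 * 1 DPB, 2 decoding target, 3 feedback buffer; 0x100 is the only
	 * other value accepted and gets no size check.
	 */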
	if (cmd < 0x4) {
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer too small (%d / %d)!\n",
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != (end >> 28)) {
		DRM_ERROR("reloc %lX-%lX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %lX-%lX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	}

	return 0;
}

static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[])
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}

	return 0;
}

int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 32 * 1024 * 1024,
		[0x00000002] = 2048 * 1152 * 3,
		[0x00000003] = 2048,
	};

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}

	p->idx = 0;

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0,
					      &data1, buf_sizes);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	return 0;
}

static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, struct radeon_bo *bo,
			       struct radeon_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct list_head head;
	struct radeon_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&head);
	if (r)
		return r;

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
	radeon_uvd_force_into_uvd_segment(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}

	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}
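
	/*
	 * Hand the message to the VCPU: write the BO's GPU address into
	 * UVD_GPCOM_VCPU_DATA0/DATA1, issue GPCOM command 0 and pad the
	 * rest of the 16 dword IB with type-2 NOPs.
	 */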
	addr = radeon_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}
	ttm_eu_fence_buffer_objects(&head, ib.fence);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	radeon_bo_unref(&bo);

	return 0;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
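	/*
	 * Best-effort field notes: msg[0] is the message size (0xde4 bytes),
	 * msg[1] the type (0 = create) and msg[2] the handle; the remaining
	 * dwords describe a canned session, apparently 1920x1088 (0x780 x
	 * 0x440) with a buffer size in msg[10].
	 */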
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
		radeon_set_uvd_clocks(rdev, 0, 0);
	else
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
}
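
/*
 * Called before every UVD submission: raise the UVD clocks again if the
 * idle handler above had dropped them, and re-arm the idle timeout.
 */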
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks)
		radeon_set_uvd_clocks(rdev, 53300, 40000);
}

static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}
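
/*
 * Example (units as the callers pass them, 10 kHz steps): vco_freq =
 * 160000 with target_freq = 53300 starts at post_div = 3, but 160000 / 3
 * still overshoots the target, so the divider is bumped to 4, giving an
 * even divider and a final 40000 (400 MHz).
 */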

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);
790 if (score < optimal_score) {
791 *optimal_fb_div = fb_div;
792 *optimal_vclk_div = vclk_div;
793 *optimal_dclk_div = dclk_div;
794 optimal_score = score;
795 if (optimal_score == 0)
796 break; /* it can't get better than this */

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}
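
/*
 * The chip specific radeon_set_uvd_clocks() implementations program the
 * dividers found above and then use radeon_uvd_send_upll_ctlreq() to make
 * the UPLL latch the new settings.
 */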
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}