/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 *
 * $FreeBSD: head/sys/dev/drm2/radeon/evergreen_blit_kms.c 254885 2013-08-25 19:37:15Z dumbbell $
 */
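/*
 * CP blit support for Evergreen/Northern Islands: the helpers below emit
 * PM4 packets on the GFX ring that set up a minimal render pipeline
 * (render target, shaders, vertex/texture resources, scissors) and issue
 * a rectlist draw, which is how the kernel copies buffers with the GPU.
 */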
#include <uapi_drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"

#include "evergreend.h"
#include "evergreen_blit_shaders.h"
#include "cayman_blit_shaders.h"
#include "radeon_blit_common.h"
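/* program CB_COLOR0 so the destination buffer becomes render target 0 */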
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 cb_color_info;
	int pitch, slice;

	h = roundup2(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = CB_FORMAT(format) |
		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, pitch);
	radeon_ring_write(ring, slice);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, cb_color_info);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
}
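/* flush the requested caches (sync_type) for [mc_addr, mc_addr + size) with a SURFACE_SYNC packet */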
static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 cp_coher_size;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	if (rdev->family >= CHIP_CAYMAN) {
		/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
		 * to the RB directly. For IBs, the CP programs this as part of the
		 * surface_sync packet.
		 */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
	}
	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, sync_type);
	radeon_ring_write(ring, cp_coher_size);
	radeon_ring_write(ring, mc_addr >> 8);
	radeon_ring_write(ring, 10); /* poll interval */
}
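/* point SQ_PGM_START_VS/PS at the blit shaders uploaded by evergreen_blit_init() */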
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u64 gpu_addr;

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, 2);
	radeon_ring_write(ring, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, 1);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 2);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}
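/* bind the 48-byte blit vertex buffer at fetch resource slot 0x580 */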
/* emits 10 + 1 sync (5) = 15 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_vtx_constant_word2, sq_vtx_constant_word3;

	/* high addr, stride */
	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
		SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
	sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif

	sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
		SQ_VTCX_SEL_Y(SQ_SEL_Y) |
		SQ_VTCX_SEL_Z(SQ_SEL_Z) |
		SQ_VTCX_SEL_W(SQ_SEL_W);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(ring, 0x580);
	radeon_ring_write(ring, gpu_addr & 0xffffffff);
	radeon_ring_write(ring, 48 - 1); /* size */
	radeon_ring_write(ring, sq_vtx_constant_word2);
	radeon_ring_write(ring, sq_vtx_constant_word3);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));

	if ((rdev->family == CHIP_CEDAR) ||
	    (rdev->family == CHIP_PALM) ||
	    (rdev->family == CHIP_SUMO) ||
	    (rdev->family == CHIP_SUMO2) ||
	    (rdev->family == CHIP_CAICOS))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}
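/* describe the source surface as a 1D-tiled 2D texture so the pixel shader can fetch it */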
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr, u32 size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_tex_resource_word0, sq_tex_resource_word1;
	u32 sq_tex_resource_word4, sq_tex_resource_word7;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
				  ((w - 1) << 18));
	sq_tex_resource_word1 = ((h - 1) << 0) |
				TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);

	sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
		TEX_DST_SEL_Y(SQ_SEL_Y) |
		TEX_DST_SEL_Z(SQ_SEL_Z) |
		TEX_DST_SEL_W(SQ_SEL_W);

	sq_tex_resource_word7 = format |
		S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);

	cp_set_surface_sync(rdev,
			    PACKET3_TC_ACTION_ENA, size, gpu_addr);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, sq_tex_resource_word0);
	radeon_ring_write(ring, sq_tex_resource_word1);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, sq_tex_resource_word4);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, sq_tex_resource_word7);
}
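/* clamp rendering to the copy rectangle via the screen, generic and window scissors */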
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	/* workaround some hw bugs */
	if (x2 == 0)
		x1 = 1;
	if (y2 == 0)
		y1 = 1;
	if (rdev->family >= CHIP_CAYMAN) {
		if ((x2 == 1) && (y2 == 1))
			x2 = 2;
	}

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
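/* kick off the copy: one RECTLIST primitive drawn with three auto-generated indices */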
static void
draw_auto(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
	radeon_ring_write(ring, DI_PT_RECTLIST);

	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 2) |
#endif
			  DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(ring, 3);
	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
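/* emit the default state needed before the blit draw: SQ sizing (pre-Cayman only),
 * a few context defaults, and an IB that replays the full golden state.
 */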
static void
set_default_state(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
	u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
	u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs;
	int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_hs_threads, num_ls_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	int num_hs_stack_entries, num_ls_stack_entries;
	u64 gpu_addr;
	int dwords;

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);
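	/* per-family GPR/thread/stack sizing for the SQ; only chips before Cayman need this */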
	if (rdev->family < CHIP_CAYMAN) {
		switch (rdev->family) {
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			num_ps_threads = 128;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			num_ps_threads = 128;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			num_ps_threads = 128;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			num_ps_threads = 128;
			num_ps_stack_entries = 85;
			num_vs_stack_entries = 85;
			num_gs_stack_entries = 85;
			num_es_stack_entries = 85;
			num_hs_stack_entries = 85;
			num_ls_stack_entries = 85;
			num_ps_threads = 128;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
			num_ps_threads = 128;
			num_ps_stack_entries = 42;
			num_vs_stack_entries = 42;
			num_gs_stack_entries = 42;
			num_es_stack_entries = 42;
			num_hs_stack_entries = 42;
			num_ls_stack_entries = 42;
		if ((rdev->family == CHIP_CEDAR) ||
		    (rdev->family == CHIP_PALM) ||
		    (rdev->family == CHIP_SUMO) ||
		    (rdev->family == CHIP_SUMO2) ||
		    (rdev->family == CHIP_CAICOS))
			sq_config = 0;
		else
			sq_config = VC_ENABLE;

		sq_config |= (EXPORT_SRC_C |
			      CS_PRIO(0) |
			      LS_PRIO(0) |
			      HS_PRIO(0) |
			      PS_PRIO(0) |
			      VS_PRIO(1) |
			      GS_PRIO(2) |
			      ES_PRIO(3));

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
					  NUM_VS_GPRS(num_vs_gprs) |
					  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
					  NUM_ES_GPRS(num_es_gprs));
		sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
					  NUM_LS_GPRS(num_ls_gprs));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
					   NUM_VS_THREADS(num_vs_threads) |
					   NUM_GS_THREADS(num_gs_threads) |
					   NUM_ES_THREADS(num_es_threads));
		sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
					     NUM_LS_THREADS(num_ls_threads));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
					    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
					    NUM_ES_STACK_ENTRIES(num_es_stack_entries));
		sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
					    NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
		/* disable dyn gprs */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0);

		/* setup LDS */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, 0x10001000);

		/* SQ config */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
		radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
		radeon_ring_write(ring, sq_config);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
		radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, 0);
		radeon_ring_write(ring, sq_thread_resource_mgmt);
		radeon_ring_write(ring, sq_thread_resource_mgmt_2);
		radeon_ring_write(ring, sq_stack_resource_mgmt_1);
		radeon_ring_write(ring, sq_stack_resource_mgmt_2);
		radeon_ring_write(ring, sq_stack_resource_mgmt_3);
	}
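	/* the remaining defaults are emitted as hand-encoded type-3 PM4 packets */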
	/* CONTEXT_CONTROL */
	radeon_ring_write(ring, 0xc0012800);
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);

	/* SQ_VTX_BASE_VTX_LOC */
	radeon_ring_write(ring, 0xc0026f00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* SET_SAMPLER */
	radeon_ring_write(ring, 0xc0036e00);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000012);
	radeon_ring_write(ring, 0x00000000);
	radeon_ring_write(ring, 0x00000000);

	/* set to DX10/11 mode */
	radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
	radeon_ring_write(ring, 1);

	/* emit an IB pointing at default state */
	dwords = roundup2(rdev->r600_blit.state_len, 0x10);
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(ring, dwords);
}
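/* allocate (or reuse) the shader BO in VRAM and copy the golden state plus the
 * VS/PS microcode into it; offsets are remembered in rdev->r600_blit.
 */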
int evergreen_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int i, r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.primitives.set_render_target = set_render_target;
	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
	rdev->r600_blit.primitives.set_shaders = set_shaders;
	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
	rdev->r600_blit.primitives.set_scissors = set_scissors;
	rdev->r600_blit.primitives.draw_auto = draw_auto;
	rdev->r600_blit.primitives.set_default_state = set_default_state;

	rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
	rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
	rdev->r600_blit.ring_size_common += 5; /* done copy */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */

	rdev->r600_blit.ring_size_per_loop = 74;
	if (rdev->family >= CHIP_CAYMAN)
		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */

	rdev->r600_blit.max_dim = 16384;

	rdev->r600_blit.state_offset = 0;

	if (rdev->family < CHIP_CAYMAN)
		rdev->r600_blit.state_len = evergreen_default_size;
	else
		rdev->r600_blit.state_len = cayman_default_size;

	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = roundup2(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	if (rdev->family < CHIP_CAYMAN)
		obj_size += evergreen_vs_size * 4;
	else
		obj_size += cayman_vs_size * 4;
	obj_size = roundup2(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	if (rdev->family < CHIP_CAYMAN)
		obj_size += evergreen_ps_size * 4;
	else
		obj_size += cayman_ps_size * 4;
	obj_size = roundup2(obj_size, 256);

	/* pin copy shader into vram if not already initialized */
	if (!rdev->r600_blit.shader_obj) {
		r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->r600_blit.shader_obj);
		if (r) {
			DRM_ERROR("evergreen failed to allocate shader\n");
			return r;
		}

		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
			return r;
		}
	}

	DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}

	if (rdev->family < CHIP_CAYMAN) {
		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
			    evergreen_default_state, rdev->r600_blit.state_len * 4);

		if (num_packet2s)
			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
				    packet2s, num_packet2s * 4);
		for (i = 0; i < evergreen_vs_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
		for (i = 0; i < evergreen_ps_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
	} else {
		memcpy_toio((char *)ptr + rdev->r600_blit.state_offset,
			    cayman_default_state, rdev->r600_blit.state_len * 4);

		if (num_packet2s)
			memcpy_toio((char *)ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
				    packet2s, num_packet2s * 4);
		for (i = 0; i < cayman_vs_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
		for (i = 0; i < cayman_ps_size; i++)
			*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
	}
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
	return 0;
}